From 7285ea7eaf614c70f6872be0e7ccb74f4a6b6c44 Mon Sep 17 00:00:00 2001
From: Owl Bot
Date: Wed, 14 Jul 2021 18:47:14 +0000
Subject: [PATCH 1/2] feat: Adds additional_experiments field to
 AutoMlTablesInputs

feat: Adds additional_experiments field to AutoMlForecastingInputs
feat: Adds two new ModelType constants for Video Action Recognition training
jobs

PiperOrigin-RevId: 384715948
Source-Link: https://github.com/googleapis/googleapis/commit/83c372893a7ffd0a8b3ca8e3b4b49cfd2acd3145
Source-Link: https://github.com/googleapis/googleapis-gen/commit/4c0c40d19e43f32f43ad1e677a1354b2f9d36abc
---
 owl-bot-staging/v1/.coveragerc | 17 +
 owl-bot-staging/v1/MANIFEST.in |  2 +
 owl-bot-staging/v1/README.rst  | 49 +
 [diffstat truncated: 725 further generated files under owl-bot-staging/v1/
 and owl-bot-staging/v1beta1/, covering the GAPIC service clients,
 transports, pagers, proto-plus types, docs, fixup scripts, and unit tests
 for the aiplatform, schema.predict, and schema.trainingjob packages]
 728 files changed, 242366 insertions(+)
 create mode 100644 owl-bot-staging/v1/.coveragerc
 [create-mode list truncated: one `create mode 100644` entry per generated
 file listed in the diffstat above]
create mode 100644 owl-bot-staging/v1/tests/unit/gapic/instance_v1/__init__.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/params_v1/__init__.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/prediction_v1/__init__.py create mode 100644 owl-bot-staging/v1beta1/.coveragerc create mode 100644 owl-bot-staging/v1beta1/MANIFEST.in create mode 100644 owl-bot-staging/v1beta1/README.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/dataset_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/endpoint_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_online_serving_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_endpoint_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/job_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/metadata_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/migration_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/model_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/pipeline_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/prediction_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/services.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/specialist_pool_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/tensorboard_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/types.rst create mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/vizier_service.rst create mode 100644 owl-bot-staging/v1beta1/docs/conf.py create mode 100644 owl-bot-staging/v1beta1/docs/definition_v1beta1/services.rst create mode 100644 owl-bot-staging/v1beta1/docs/definition_v1beta1/types.rst create mode 100644 owl-bot-staging/v1beta1/docs/index.rst create mode 100644 owl-bot-staging/v1beta1/docs/instance_v1beta1/services.rst create mode 100644 owl-bot-staging/v1beta1/docs/instance_v1beta1/types.rst create mode 100644 owl-bot-staging/v1beta1/docs/params_v1beta1/services.rst create mode 100644 owl-bot-staging/v1beta1/docs/params_v1beta1/types.rst create mode 100644 owl-bot-staging/v1beta1/docs/prediction_v1beta1/services.rst create mode 100644 owl-bot-staging/v1beta1/docs/prediction_v1beta1/types.rst create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/py.typed create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_metadata.json create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/services/__init__.py create mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_metadata.json create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/services/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_metadata.json create mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/services/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/py.typed create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_metadata.json create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/py.typed create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/services/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py create mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/gapic_metadata.json create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/py.typed create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/pagers.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/pagers.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py create 
mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/pagers.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/pagers.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/pagers.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py create mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/pagers.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/pagers.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/pagers.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/__init__.py create mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/pagers.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/pagers.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py create mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/pagers.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/pagers.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/accelerator_type.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation_spec.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/artifact.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/completion_stats.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/context.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/custom_job.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_item.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_index_ref.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_model_ref.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/encryption_spec.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/entity_type.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/env_var.py create mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/event.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/execution.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_monitoring_stats.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_selector.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_monitoring.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/io.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_state.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/lineage_subgraph.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/machine_resources.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/manual_batch_tuning_parameters.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_schema.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_store.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migratable_resource.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migration_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_monitoring.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/operation.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_job.py create mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_state.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/prediction_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/study.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_data.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_experiment.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_run.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_time_series.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/training_pipeline.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/types.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/user_action_reference.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/value.py create mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/vizier_service.py create mode 100644 owl-bot-staging/v1beta1/mypy.ini create mode 100644 owl-bot-staging/v1beta1/noxfile.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/.coveragerc create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/MANIFEST.in create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/README.rst create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/docs/conf.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/docs/index.rst create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/docs/instance_v1beta1/services.rst create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/docs/instance_v1beta1/types.rst create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_metadata.json create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/services/__init__.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py 
create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/mypy.ini create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/noxfile.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/scripts/fixup_instance_v1beta1_keywords.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/setup.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/tests/__init__.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/tests/unit/__init__.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/tests/unit/gapic/__init__.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/tests/unit/gapic/instance_v1beta1/__init__.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/params/.coveragerc create mode 100644 owl-bot-staging/v1beta1/schema/predict/params/MANIFEST.in create mode 100644 owl-bot-staging/v1beta1/schema/predict/params/README.rst create mode 100644 owl-bot-staging/v1beta1/schema/predict/params/docs/conf.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/params/docs/index.rst create mode 100644 owl-bot-staging/v1beta1/schema/predict/params/docs/params_v1beta1/services.rst create mode 100644 owl-bot-staging/v1beta1/schema/predict/params/docs/params_v1beta1/types.rst create mode 100644 owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed create mode 100644 owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_metadata.json create mode 100644 owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed create mode 100644 owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/services/__init__.py create mode 100644 
owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/params/mypy.ini create mode 100644 owl-bot-staging/v1beta1/schema/predict/params/noxfile.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/params/scripts/fixup_params_v1beta1_keywords.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/params/setup.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/params/tests/__init__.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/params/tests/unit/__init__.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/params/tests/unit/gapic/__init__.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/params/tests/unit/gapic/params_v1beta1/__init__.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/.coveragerc create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/MANIFEST.in create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/README.rst create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/docs/conf.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/docs/index.rst create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/docs/prediction_v1beta1/services.rst create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/docs/prediction_v1beta1/types.rst create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_metadata.json create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/services/__init__.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py create mode 100644 
owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/mypy.ini create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/noxfile.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/scripts/fixup_prediction_v1beta1_keywords.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/setup.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/tests/__init__.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/tests/unit/__init__.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/tests/unit/gapic/__init__.py create mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/tests/unit/gapic/prediction_v1beta1/__init__.py create mode 100644 owl-bot-staging/v1beta1/scripts/fixup_aiplatform_v1beta1_keywords.py create mode 100644 owl-bot-staging/v1beta1/scripts/fixup_definition_v1beta1_keywords.py create mode 100644 owl-bot-staging/v1beta1/scripts/fixup_instance_v1beta1_keywords.py create mode 100644 owl-bot-staging/v1beta1/scripts/fixup_params_v1beta1_keywords.py create mode 100644 owl-bot-staging/v1beta1/scripts/fixup_prediction_v1beta1_keywords.py create mode 100644 owl-bot-staging/v1beta1/setup.py create mode 100644 owl-bot-staging/v1beta1/tests/__init__.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/__init__.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/__init__.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py create mode 100644 
owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/definition_v1beta1/__init__.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/instance_v1beta1/__init__.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/params_v1beta1/__init__.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/prediction_v1beta1/__init__.py
diff --git a/owl-bot-staging/v1/.coveragerc b/owl-bot-staging/v1/.coveragerc
new file mode 100644
index 0000000000..01d28d4b2c
--- /dev/null
+++ b/owl-bot-staging/v1/.coveragerc
@@ -0,0 +1,17 @@
+[run]
+branch = True
+
+[report]
+show_missing = True
+omit =
+  google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py
+exclude_lines =
+  # Re-enable the standard pragma
+  pragma: NO COVER
+  # Ignore debug-only repr
+  def __repr__
+  # Ignore pkg_resources exceptions.
+  # This is added at the module level as a safeguard for if someone
+  # generates the code and tries to run it without pip installing. This
+  # makes it virtually impossible to test properly.
+  except pkg_resources.DistributionNotFound
diff --git a/owl-bot-staging/v1/MANIFEST.in b/owl-bot-staging/v1/MANIFEST.in
new file mode 100644
index 0000000000..d55f1f202e
--- /dev/null
+++ b/owl-bot-staging/v1/MANIFEST.in
@@ -0,0 +1,2 @@
+recursive-include google/cloud/aiplatform/v1/schema/trainingjob/definition *.py
+recursive-include google/cloud/aiplatform/v1/schema/trainingjob/definition_v1 *.py
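For context on how the exclusion rules in the .coveragerc above behave, here is an
illustrative module (hypothetical, not part of this patch). A line matching one of
the exclude_lines regexes is treated by coverage.py as intentionally uncovered, and
when the matching line opens a block, the whole block is excluded:

    # sample.py -- hypothetical module measured under the .coveragerc above.
    import pkg_resources

    class Sample:
        def __repr__(self):  # whole body excluded by the "def __repr__" rule
            return "Sample()"

        def debug_dump(self):  # pragma: NO COVER
            # Excluded by the re-enabled standard pragma.
            print(self)

    try:
        pkg_resources.get_distribution("google-cloud-aiplatform")
    except pkg_resources.DistributionNotFound:
        # Clause excluded by the "except pkg_resources.DistributionNotFound" rule.
        pass

Running `coverage run -m pytest` followed by `coverage report` with this file in
place would report the excluded lines as neither covered nor missing.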
diff --git a/owl-bot-staging/v1/README.rst b/owl-bot-staging/v1/README.rst
new file mode 100644
index 0000000000..ad49c55e02
--- /dev/null
+++ b/owl-bot-staging/v1/README.rst
@@ -0,0 +1,49 @@
+Python Client for Google Cloud Aiplatform V1 Schema Trainingjob Definition API
+===============================================================================
+
+Quick Start
+-----------
+
+In order to use this library, you first need to go through the following steps:
+
+1. `Select or create a Cloud Platform project.`_
+2. `Enable billing for your project.`_
+3. Enable the Google Cloud Aiplatform V1 Schema Trainingjob Definition API.
+4. `Setup Authentication.`_
+
+.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project
+.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project
+.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html
+
+Installation
+~~~~~~~~~~~~
+
+Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to
+create isolated Python environments. The basic problem it addresses is one of
+dependencies and versions, and indirectly permissions.
+
+With `virtualenv`_, it's possible to install this library without needing system
+install permissions, and without clashing with the installed system
+dependencies.
+
+.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/
+
+
+Mac/Linux
+^^^^^^^^^
+
+.. code-block:: console
+
+    python3 -m venv <your-env>
+    source <your-env>/bin/activate
+    <your-env>/bin/pip install /path/to/library
+
+
+Windows
+^^^^^^^
+
+.. code-block:: console
+
+    python3 -m venv <your-env>
+    <your-env>\Scripts\activate
+    <your-env>\Scripts\pip.exe install \path\to\library
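As a quick post-install check (illustrative only, not part of this patch, and
assuming the staged package installs under its generated import path), the
library should be importable from the virtualenv created above, and the message
classes it ships can be constructed directly:

    # Hypothetical smoke test for the installed library; field values are
    # placeholders.
    from google.cloud.aiplatform.v1.schema.trainingjob import definition

    # additional_experiments is one of the fields this change adds to the
    # AutoML Tables training-job inputs.
    inputs = definition.AutoMlTablesInputs(
        target_column="label",
        additional_experiments=["hypothetical_experiment_flag"],
    )
    print(inputs)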
automodule:: google.cloud.aiplatform_v1.services.model_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/pipeline_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/pipeline_service.rst new file mode 100644 index 0000000000..bbf6b32092 --- /dev/null +++ b/owl-bot-staging/v1/docs/aiplatform_v1/pipeline_service.rst @@ -0,0 +1,10 @@ +PipelineService +--------------------------------- + +.. automodule:: google.cloud.aiplatform_v1.services.pipeline_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1.services.pipeline_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/prediction_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/prediction_service.rst new file mode 100644 index 0000000000..fdda504879 --- /dev/null +++ b/owl-bot-staging/v1/docs/aiplatform_v1/prediction_service.rst @@ -0,0 +1,6 @@ +PredictionService +----------------------------------- + +.. automodule:: google.cloud.aiplatform_v1.services.prediction_service + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/services.rst b/owl-bot-staging/v1/docs/aiplatform_v1/services.rst new file mode 100644 index 0000000000..fd5a8c9aa7 --- /dev/null +++ b/owl-bot-staging/v1/docs/aiplatform_v1/services.rst @@ -0,0 +1,13 @@ +Services for Google Cloud Aiplatform v1 API +=========================================== +.. toctree:: + :maxdepth: 2 + + dataset_service + endpoint_service + job_service + migration_service + model_service + pipeline_service + prediction_service + specialist_pool_service diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/specialist_pool_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/specialist_pool_service.rst new file mode 100644 index 0000000000..4a6f288894 --- /dev/null +++ b/owl-bot-staging/v1/docs/aiplatform_v1/specialist_pool_service.rst @@ -0,0 +1,10 @@ +SpecialistPoolService +--------------------------------------- + +.. automodule:: google.cloud.aiplatform_v1.services.specialist_pool_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1.services.specialist_pool_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/types.rst b/owl-bot-staging/v1/docs/aiplatform_v1/types.rst new file mode 100644 index 0000000000..ad4454843f --- /dev/null +++ b/owl-bot-staging/v1/docs/aiplatform_v1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Aiplatform v1 API +======================================== + +.. automodule:: google.cloud.aiplatform_v1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1/docs/conf.py b/owl-bot-staging/v1/docs/conf.py new file mode 100644 index 0000000000..4e03a7096d --- /dev/null +++ b/owl-bot-staging/v1/docs/conf.py @@ -0,0 +1,376 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
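+#
+# With Sphinx installed, a typical local build of these docs would be invoked
+# from the package root (the output directory below is only an example):
+#
+#     sphinx-build -b html docs docs/_build/html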
+#
+#
+# google-cloud-aiplatform-v1-schema-trainingjob-definition documentation build configuration file
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+import shlex
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath(".."))
+
+__version__ = "0.1.0"
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+needs_sphinx = "1.6.3"
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    "sphinx.ext.autodoc",
+    "sphinx.ext.autosummary",
+    "sphinx.ext.intersphinx",
+    "sphinx.ext.coverage",
+    "sphinx.ext.napoleon",
+    "sphinx.ext.todo",
+    "sphinx.ext.viewcode",
+]
+
+# autodoc/autosummary flags
+autoclass_content = "both"
+autodoc_default_flags = ["members"]
+autosummary_generate = True
+
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ["_templates"]
+
+# Allow markdown includes (so releases.md can include CHANGELOG.md)
+# http://www.sphinx-doc.org/en/master/markdown.html
+source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffixes as a list of strings:
+source_suffix = [".rst", ".md"]
+
+# The encoding of source files.
+# source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = "index"
+
+# General information about the project.
+project = u"google-cloud-aiplatform-v1-schema-trainingjob-definition"
+copyright = u"2020, Google, LLC"
+author = u"Google APIs"  # TODO: autogenerate this bit
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The full version, including alpha/beta/rc tags.
+release = __version__
+# The short X.Y version.
+version = ".".join(release.split(".")[0:2])
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+# today = ''
+# Else, today_fmt is used as the format for a strftime call.
+# today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ["_build"]
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+# default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+# add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+# add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+# show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = "sphinx"
+
+# A list of ignored prefixes for module index sorting.
+# modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+# keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = True
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = "alabaster"
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+html_theme_options = {
+    "description": "Google Cloud Aiplatform V1 Schema Trainingjob Client Libraries for Python",
+    "github_user": "googleapis",
+    "github_repo": "google-cloud-python",
+    "github_banner": True,
+    "font_family": "'Roboto', Georgia, sans",
+    "head_font_family": "'Roboto', Georgia, serif",
+    "code_font_family": "'Roboto Mono', 'Consolas', monospace",
+}
+
+# Add any paths that contain custom themes here, relative to this directory.
+# html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+# html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+# html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+# html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+# html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ["_static"]
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+# html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+# html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+# html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+# html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+# html_additional_pages = {}
+
+# If false, no module index is generated.
+# html_domain_indices = True
+
+# If false, no index is generated.
+# html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+# html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+# html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+# html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+# html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+# html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+# html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
+# html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# Now only 'ja' uses this config value
+# html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+# html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = "google-cloud-aiplatform-v1-schema-trainingjob-definition-doc"
+
+# -- Options for warnings ------------------------------------------------------
+
+
+suppress_warnings = [
+    # Temporarily suppress this to avoid "more than one target found for
+    # cross-reference" warnings, which are intractable for us to avoid while in
+    # a mono-repo.
+    # See https://github.com/sphinx-doc/sphinx/blob
+    # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
+    "ref.python"
+]
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+    # The paper size ('letterpaper' or 'a4paper').
+    # 'papersize': 'letterpaper',
+    # The font size ('10pt', '11pt' or '12pt').
+    # 'pointsize': '10pt',
+    # Additional stuff for the LaTeX preamble.
+    # 'preamble': '',
+    # Latex figure (float) alignment
+    # 'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+    (
+        master_doc,
+        "google-cloud-aiplatform-v1-schema-trainingjob-definition.tex",
+        u"google-cloud-aiplatform-v1-schema-trainingjob-definition Documentation",
+        author,
+        "manual",
+    )
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+# latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+# latex_use_parts = False
+
+# If true, show page references after internal links.
+# latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+# latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+# latex_appendices = []
+
+# If false, no module index is generated.
+# latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    (
+        master_doc,
+        "google-cloud-aiplatform-v1-schema-trainingjob-definition",
+        u"Google Cloud Aiplatform V1 Schema Trainingjob Definition Documentation",
+        [author],
+        1,
+    )
+]
+
+# If true, show URL addresses after external links.
+# man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files.
List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + master_doc, + "google-cloud-aiplatform-v1-schema-trainingjob-definition", + u"google-cloud-aiplatform-v1-schema-trainingjob-definition Documentation", + author, + "google-cloud-aiplatform-v1-schema-trainingjob-definition", + "GAPIC library for Google Cloud Aiplatform V1 Schema Trainingjob Definition API", + "APIs", + ) +] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# texinfo_no_detailmenu = False + + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = { + "python": ("http://python.readthedocs.org/en/latest/", None), + "gax": ("https://gax-python.readthedocs.org/en/latest/", None), + "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), + "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), + "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), + "grpc": ("https://grpc.io/grpc/python/", None), + "requests": ("http://requests.kennethreitz.org/en/stable/", None), + "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), + "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), +} + + +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = True +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = True diff --git a/owl-bot-staging/v1/docs/definition_v1/services.rst b/owl-bot-staging/v1/docs/definition_v1/services.rst new file mode 100644 index 0000000000..ba6b1940e8 --- /dev/null +++ b/owl-bot-staging/v1/docs/definition_v1/services.rst @@ -0,0 +1,4 @@ +Services for Google Cloud Aiplatform V1 Schema Trainingjob Definition v1 API +============================================================================ +.. toctree:: + :maxdepth: 2 diff --git a/owl-bot-staging/v1/docs/definition_v1/types.rst b/owl-bot-staging/v1/docs/definition_v1/types.rst new file mode 100644 index 0000000000..a1df2bce25 --- /dev/null +++ b/owl-bot-staging/v1/docs/definition_v1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Aiplatform V1 Schema Trainingjob Definition v1 API +========================================================================= + +.. automodule:: google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1/docs/index.rst b/owl-bot-staging/v1/docs/index.rst new file mode 100644 index 0000000000..ad6ae57609 --- /dev/null +++ b/owl-bot-staging/v1/docs/index.rst @@ -0,0 +1,7 @@ +API Reference +------------- +.. 
toctree:: + :maxdepth: 2 + + definition_v1/services + definition_v1/types diff --git a/owl-bot-staging/v1/docs/instance_v1/services.rst b/owl-bot-staging/v1/docs/instance_v1/services.rst new file mode 100644 index 0000000000..50c011c69a --- /dev/null +++ b/owl-bot-staging/v1/docs/instance_v1/services.rst @@ -0,0 +1,4 @@ +Services for Google Cloud Aiplatform V1 Schema Predict Instance v1 API +====================================================================== +.. toctree:: + :maxdepth: 2 diff --git a/owl-bot-staging/v1/docs/instance_v1/types.rst b/owl-bot-staging/v1/docs/instance_v1/types.rst new file mode 100644 index 0000000000..564ab013ee --- /dev/null +++ b/owl-bot-staging/v1/docs/instance_v1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Aiplatform V1 Schema Predict Instance v1 API +=================================================================== + +.. automodule:: google.cloud.aiplatform.v1.schema.predict.instance_v1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1/docs/params_v1/services.rst b/owl-bot-staging/v1/docs/params_v1/services.rst new file mode 100644 index 0000000000..bf08ea6e98 --- /dev/null +++ b/owl-bot-staging/v1/docs/params_v1/services.rst @@ -0,0 +1,4 @@ +Services for Google Cloud Aiplatform V1 Schema Predict Params v1 API +==================================================================== +.. toctree:: + :maxdepth: 2 diff --git a/owl-bot-staging/v1/docs/params_v1/types.rst b/owl-bot-staging/v1/docs/params_v1/types.rst new file mode 100644 index 0000000000..956ef5224d --- /dev/null +++ b/owl-bot-staging/v1/docs/params_v1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Aiplatform V1 Schema Predict Params v1 API +================================================================= + +.. automodule:: google.cloud.aiplatform.v1.schema.predict.params_v1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1/docs/prediction_v1/services.rst b/owl-bot-staging/v1/docs/prediction_v1/services.rst new file mode 100644 index 0000000000..ad6f034387 --- /dev/null +++ b/owl-bot-staging/v1/docs/prediction_v1/services.rst @@ -0,0 +1,4 @@ +Services for Google Cloud Aiplatform V1 Schema Predict Prediction v1 API +======================================================================== +.. toctree:: + :maxdepth: 2 diff --git a/owl-bot-staging/v1/docs/prediction_v1/types.rst b/owl-bot-staging/v1/docs/prediction_v1/types.rst new file mode 100644 index 0000000000..a97faf34de --- /dev/null +++ b/owl-bot-staging/v1/docs/prediction_v1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Aiplatform V1 Schema Predict Prediction v1 API +===================================================================== + +.. automodule:: google.cloud.aiplatform.v1.schema.predict.prediction_v1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/__init__.py new file mode 100644 index 0000000000..8099ef5dfc --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/__init__.py @@ -0,0 +1,389 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from google.cloud.aiplatform_v1.services.dataset_service.client import DatasetServiceClient +from google.cloud.aiplatform_v1.services.dataset_service.async_client import DatasetServiceAsyncClient +from google.cloud.aiplatform_v1.services.endpoint_service.client import EndpointServiceClient +from google.cloud.aiplatform_v1.services.endpoint_service.async_client import EndpointServiceAsyncClient +from google.cloud.aiplatform_v1.services.job_service.client import JobServiceClient +from google.cloud.aiplatform_v1.services.job_service.async_client import JobServiceAsyncClient +from google.cloud.aiplatform_v1.services.migration_service.client import MigrationServiceClient +from google.cloud.aiplatform_v1.services.migration_service.async_client import MigrationServiceAsyncClient +from google.cloud.aiplatform_v1.services.model_service.client import ModelServiceClient +from google.cloud.aiplatform_v1.services.model_service.async_client import ModelServiceAsyncClient +from google.cloud.aiplatform_v1.services.pipeline_service.client import PipelineServiceClient +from google.cloud.aiplatform_v1.services.pipeline_service.async_client import PipelineServiceAsyncClient +from google.cloud.aiplatform_v1.services.prediction_service.client import PredictionServiceClient +from google.cloud.aiplatform_v1.services.prediction_service.async_client import PredictionServiceAsyncClient +from google.cloud.aiplatform_v1.services.specialist_pool_service.client import SpecialistPoolServiceClient +from google.cloud.aiplatform_v1.services.specialist_pool_service.async_client import SpecialistPoolServiceAsyncClient + +from google.cloud.aiplatform_v1.types.accelerator_type import AcceleratorType +from google.cloud.aiplatform_v1.types.annotation import Annotation +from google.cloud.aiplatform_v1.types.annotation_spec import AnnotationSpec +from google.cloud.aiplatform_v1.types.artifact import Artifact +from google.cloud.aiplatform_v1.types.batch_prediction_job import BatchPredictionJob +from google.cloud.aiplatform_v1.types.completion_stats import CompletionStats +from google.cloud.aiplatform_v1.types.context import Context +from google.cloud.aiplatform_v1.types.custom_job import ContainerSpec +from google.cloud.aiplatform_v1.types.custom_job import CustomJob +from google.cloud.aiplatform_v1.types.custom_job import CustomJobSpec +from google.cloud.aiplatform_v1.types.custom_job import PythonPackageSpec +from google.cloud.aiplatform_v1.types.custom_job import Scheduling +from google.cloud.aiplatform_v1.types.custom_job import WorkerPoolSpec +from google.cloud.aiplatform_v1.types.data_item import DataItem +from google.cloud.aiplatform_v1.types.data_labeling_job import ActiveLearningConfig +from google.cloud.aiplatform_v1.types.data_labeling_job import DataLabelingJob +from google.cloud.aiplatform_v1.types.data_labeling_job import SampleConfig +from google.cloud.aiplatform_v1.types.data_labeling_job import TrainingConfig +from google.cloud.aiplatform_v1.types.dataset import Dataset +from google.cloud.aiplatform_v1.types.dataset import ExportDataConfig +from google.cloud.aiplatform_v1.types.dataset import 
ImportDataConfig +from google.cloud.aiplatform_v1.types.dataset_service import CreateDatasetOperationMetadata +from google.cloud.aiplatform_v1.types.dataset_service import CreateDatasetRequest +from google.cloud.aiplatform_v1.types.dataset_service import DeleteDatasetRequest +from google.cloud.aiplatform_v1.types.dataset_service import ExportDataOperationMetadata +from google.cloud.aiplatform_v1.types.dataset_service import ExportDataRequest +from google.cloud.aiplatform_v1.types.dataset_service import ExportDataResponse +from google.cloud.aiplatform_v1.types.dataset_service import GetAnnotationSpecRequest +from google.cloud.aiplatform_v1.types.dataset_service import GetDatasetRequest +from google.cloud.aiplatform_v1.types.dataset_service import ImportDataOperationMetadata +from google.cloud.aiplatform_v1.types.dataset_service import ImportDataRequest +from google.cloud.aiplatform_v1.types.dataset_service import ImportDataResponse +from google.cloud.aiplatform_v1.types.dataset_service import ListAnnotationsRequest +from google.cloud.aiplatform_v1.types.dataset_service import ListAnnotationsResponse +from google.cloud.aiplatform_v1.types.dataset_service import ListDataItemsRequest +from google.cloud.aiplatform_v1.types.dataset_service import ListDataItemsResponse +from google.cloud.aiplatform_v1.types.dataset_service import ListDatasetsRequest +from google.cloud.aiplatform_v1.types.dataset_service import ListDatasetsResponse +from google.cloud.aiplatform_v1.types.dataset_service import UpdateDatasetRequest +from google.cloud.aiplatform_v1.types.deployed_model_ref import DeployedModelRef +from google.cloud.aiplatform_v1.types.encryption_spec import EncryptionSpec +from google.cloud.aiplatform_v1.types.endpoint import DeployedModel +from google.cloud.aiplatform_v1.types.endpoint import Endpoint +from google.cloud.aiplatform_v1.types.endpoint_service import CreateEndpointOperationMetadata +from google.cloud.aiplatform_v1.types.endpoint_service import CreateEndpointRequest +from google.cloud.aiplatform_v1.types.endpoint_service import DeleteEndpointRequest +from google.cloud.aiplatform_v1.types.endpoint_service import DeployModelOperationMetadata +from google.cloud.aiplatform_v1.types.endpoint_service import DeployModelRequest +from google.cloud.aiplatform_v1.types.endpoint_service import DeployModelResponse +from google.cloud.aiplatform_v1.types.endpoint_service import GetEndpointRequest +from google.cloud.aiplatform_v1.types.endpoint_service import ListEndpointsRequest +from google.cloud.aiplatform_v1.types.endpoint_service import ListEndpointsResponse +from google.cloud.aiplatform_v1.types.endpoint_service import UndeployModelOperationMetadata +from google.cloud.aiplatform_v1.types.endpoint_service import UndeployModelRequest +from google.cloud.aiplatform_v1.types.endpoint_service import UndeployModelResponse +from google.cloud.aiplatform_v1.types.endpoint_service import UpdateEndpointRequest +from google.cloud.aiplatform_v1.types.env_var import EnvVar +from google.cloud.aiplatform_v1.types.execution import Execution +from google.cloud.aiplatform_v1.types.hyperparameter_tuning_job import HyperparameterTuningJob +from google.cloud.aiplatform_v1.types.io import BigQueryDestination +from google.cloud.aiplatform_v1.types.io import BigQuerySource +from google.cloud.aiplatform_v1.types.io import ContainerRegistryDestination +from google.cloud.aiplatform_v1.types.io import GcsDestination +from google.cloud.aiplatform_v1.types.io import GcsSource +from google.cloud.aiplatform_v1.types.job_service 
import CancelBatchPredictionJobRequest +from google.cloud.aiplatform_v1.types.job_service import CancelCustomJobRequest +from google.cloud.aiplatform_v1.types.job_service import CancelDataLabelingJobRequest +from google.cloud.aiplatform_v1.types.job_service import CancelHyperparameterTuningJobRequest +from google.cloud.aiplatform_v1.types.job_service import CreateBatchPredictionJobRequest +from google.cloud.aiplatform_v1.types.job_service import CreateCustomJobRequest +from google.cloud.aiplatform_v1.types.job_service import CreateDataLabelingJobRequest +from google.cloud.aiplatform_v1.types.job_service import CreateHyperparameterTuningJobRequest +from google.cloud.aiplatform_v1.types.job_service import DeleteBatchPredictionJobRequest +from google.cloud.aiplatform_v1.types.job_service import DeleteCustomJobRequest +from google.cloud.aiplatform_v1.types.job_service import DeleteDataLabelingJobRequest +from google.cloud.aiplatform_v1.types.job_service import DeleteHyperparameterTuningJobRequest +from google.cloud.aiplatform_v1.types.job_service import GetBatchPredictionJobRequest +from google.cloud.aiplatform_v1.types.job_service import GetCustomJobRequest +from google.cloud.aiplatform_v1.types.job_service import GetDataLabelingJobRequest +from google.cloud.aiplatform_v1.types.job_service import GetHyperparameterTuningJobRequest +from google.cloud.aiplatform_v1.types.job_service import ListBatchPredictionJobsRequest +from google.cloud.aiplatform_v1.types.job_service import ListBatchPredictionJobsResponse +from google.cloud.aiplatform_v1.types.job_service import ListCustomJobsRequest +from google.cloud.aiplatform_v1.types.job_service import ListCustomJobsResponse +from google.cloud.aiplatform_v1.types.job_service import ListDataLabelingJobsRequest +from google.cloud.aiplatform_v1.types.job_service import ListDataLabelingJobsResponse +from google.cloud.aiplatform_v1.types.job_service import ListHyperparameterTuningJobsRequest +from google.cloud.aiplatform_v1.types.job_service import ListHyperparameterTuningJobsResponse +from google.cloud.aiplatform_v1.types.job_state import JobState +from google.cloud.aiplatform_v1.types.machine_resources import AutomaticResources +from google.cloud.aiplatform_v1.types.machine_resources import AutoscalingMetricSpec +from google.cloud.aiplatform_v1.types.machine_resources import BatchDedicatedResources +from google.cloud.aiplatform_v1.types.machine_resources import DedicatedResources +from google.cloud.aiplatform_v1.types.machine_resources import DiskSpec +from google.cloud.aiplatform_v1.types.machine_resources import MachineSpec +from google.cloud.aiplatform_v1.types.machine_resources import ResourcesConsumed +from google.cloud.aiplatform_v1.types.manual_batch_tuning_parameters import ManualBatchTuningParameters +from google.cloud.aiplatform_v1.types.migratable_resource import MigratableResource +from google.cloud.aiplatform_v1.types.migration_service import BatchMigrateResourcesOperationMetadata +from google.cloud.aiplatform_v1.types.migration_service import BatchMigrateResourcesRequest +from google.cloud.aiplatform_v1.types.migration_service import BatchMigrateResourcesResponse +from google.cloud.aiplatform_v1.types.migration_service import MigrateResourceRequest +from google.cloud.aiplatform_v1.types.migration_service import MigrateResourceResponse +from google.cloud.aiplatform_v1.types.migration_service import SearchMigratableResourcesRequest +from google.cloud.aiplatform_v1.types.migration_service import SearchMigratableResourcesResponse +from 
google.cloud.aiplatform_v1.types.model import Model +from google.cloud.aiplatform_v1.types.model import ModelContainerSpec +from google.cloud.aiplatform_v1.types.model import Port +from google.cloud.aiplatform_v1.types.model import PredictSchemata +from google.cloud.aiplatform_v1.types.model_evaluation import ModelEvaluation +from google.cloud.aiplatform_v1.types.model_evaluation_slice import ModelEvaluationSlice +from google.cloud.aiplatform_v1.types.model_service import DeleteModelRequest +from google.cloud.aiplatform_v1.types.model_service import ExportModelOperationMetadata +from google.cloud.aiplatform_v1.types.model_service import ExportModelRequest +from google.cloud.aiplatform_v1.types.model_service import ExportModelResponse +from google.cloud.aiplatform_v1.types.model_service import GetModelEvaluationRequest +from google.cloud.aiplatform_v1.types.model_service import GetModelEvaluationSliceRequest +from google.cloud.aiplatform_v1.types.model_service import GetModelRequest +from google.cloud.aiplatform_v1.types.model_service import ListModelEvaluationSlicesRequest +from google.cloud.aiplatform_v1.types.model_service import ListModelEvaluationSlicesResponse +from google.cloud.aiplatform_v1.types.model_service import ListModelEvaluationsRequest +from google.cloud.aiplatform_v1.types.model_service import ListModelEvaluationsResponse +from google.cloud.aiplatform_v1.types.model_service import ListModelsRequest +from google.cloud.aiplatform_v1.types.model_service import ListModelsResponse +from google.cloud.aiplatform_v1.types.model_service import UpdateModelRequest +from google.cloud.aiplatform_v1.types.model_service import UploadModelOperationMetadata +from google.cloud.aiplatform_v1.types.model_service import UploadModelRequest +from google.cloud.aiplatform_v1.types.model_service import UploadModelResponse +from google.cloud.aiplatform_v1.types.operation import DeleteOperationMetadata +from google.cloud.aiplatform_v1.types.operation import GenericOperationMetadata +from google.cloud.aiplatform_v1.types.pipeline_job import PipelineJob +from google.cloud.aiplatform_v1.types.pipeline_job import PipelineJobDetail +from google.cloud.aiplatform_v1.types.pipeline_job import PipelineTaskDetail +from google.cloud.aiplatform_v1.types.pipeline_job import PipelineTaskExecutorDetail +from google.cloud.aiplatform_v1.types.pipeline_service import CancelPipelineJobRequest +from google.cloud.aiplatform_v1.types.pipeline_service import CancelTrainingPipelineRequest +from google.cloud.aiplatform_v1.types.pipeline_service import CreatePipelineJobRequest +from google.cloud.aiplatform_v1.types.pipeline_service import CreateTrainingPipelineRequest +from google.cloud.aiplatform_v1.types.pipeline_service import DeletePipelineJobRequest +from google.cloud.aiplatform_v1.types.pipeline_service import DeleteTrainingPipelineRequest +from google.cloud.aiplatform_v1.types.pipeline_service import GetPipelineJobRequest +from google.cloud.aiplatform_v1.types.pipeline_service import GetTrainingPipelineRequest +from google.cloud.aiplatform_v1.types.pipeline_service import ListPipelineJobsRequest +from google.cloud.aiplatform_v1.types.pipeline_service import ListPipelineJobsResponse +from google.cloud.aiplatform_v1.types.pipeline_service import ListTrainingPipelinesRequest +from google.cloud.aiplatform_v1.types.pipeline_service import ListTrainingPipelinesResponse +from google.cloud.aiplatform_v1.types.pipeline_state import PipelineState +from google.cloud.aiplatform_v1.types.prediction_service import PredictRequest 
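+# The flat re-exports in this module let callers work entirely from the
+# top-level package. A minimal, hypothetical usage sketch (placeholder
+# resource name; assumes Application Default Credentials are configured,
+# and a real call would also populate `instances`):
+#
+#     from google.cloud.aiplatform import PredictionServiceClient, PredictRequest
+#
+#     client = PredictionServiceClient()
+#     request = PredictRequest(endpoint="projects/p/locations/l/endpoints/e")
+#     response = client.predict(request=request)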
+from google.cloud.aiplatform_v1.types.prediction_service import PredictResponse +from google.cloud.aiplatform_v1.types.specialist_pool import SpecialistPool +from google.cloud.aiplatform_v1.types.specialist_pool_service import CreateSpecialistPoolOperationMetadata +from google.cloud.aiplatform_v1.types.specialist_pool_service import CreateSpecialistPoolRequest +from google.cloud.aiplatform_v1.types.specialist_pool_service import DeleteSpecialistPoolRequest +from google.cloud.aiplatform_v1.types.specialist_pool_service import GetSpecialistPoolRequest +from google.cloud.aiplatform_v1.types.specialist_pool_service import ListSpecialistPoolsRequest +from google.cloud.aiplatform_v1.types.specialist_pool_service import ListSpecialistPoolsResponse +from google.cloud.aiplatform_v1.types.specialist_pool_service import UpdateSpecialistPoolOperationMetadata +from google.cloud.aiplatform_v1.types.specialist_pool_service import UpdateSpecialistPoolRequest +from google.cloud.aiplatform_v1.types.study import Measurement +from google.cloud.aiplatform_v1.types.study import StudySpec +from google.cloud.aiplatform_v1.types.study import Trial +from google.cloud.aiplatform_v1.types.training_pipeline import FilterSplit +from google.cloud.aiplatform_v1.types.training_pipeline import FractionSplit +from google.cloud.aiplatform_v1.types.training_pipeline import InputDataConfig +from google.cloud.aiplatform_v1.types.training_pipeline import PredefinedSplit +from google.cloud.aiplatform_v1.types.training_pipeline import TimestampSplit +from google.cloud.aiplatform_v1.types.training_pipeline import TrainingPipeline +from google.cloud.aiplatform_v1.types.user_action_reference import UserActionReference +from google.cloud.aiplatform_v1.types.value import Value + +__all__ = ('DatasetServiceClient', + 'DatasetServiceAsyncClient', + 'EndpointServiceClient', + 'EndpointServiceAsyncClient', + 'JobServiceClient', + 'JobServiceAsyncClient', + 'MigrationServiceClient', + 'MigrationServiceAsyncClient', + 'ModelServiceClient', + 'ModelServiceAsyncClient', + 'PipelineServiceClient', + 'PipelineServiceAsyncClient', + 'PredictionServiceClient', + 'PredictionServiceAsyncClient', + 'SpecialistPoolServiceClient', + 'SpecialistPoolServiceAsyncClient', + 'AcceleratorType', + 'Annotation', + 'AnnotationSpec', + 'Artifact', + 'BatchPredictionJob', + 'CompletionStats', + 'Context', + 'ContainerSpec', + 'CustomJob', + 'CustomJobSpec', + 'PythonPackageSpec', + 'Scheduling', + 'WorkerPoolSpec', + 'DataItem', + 'ActiveLearningConfig', + 'DataLabelingJob', + 'SampleConfig', + 'TrainingConfig', + 'Dataset', + 'ExportDataConfig', + 'ImportDataConfig', + 'CreateDatasetOperationMetadata', + 'CreateDatasetRequest', + 'DeleteDatasetRequest', + 'ExportDataOperationMetadata', + 'ExportDataRequest', + 'ExportDataResponse', + 'GetAnnotationSpecRequest', + 'GetDatasetRequest', + 'ImportDataOperationMetadata', + 'ImportDataRequest', + 'ImportDataResponse', + 'ListAnnotationsRequest', + 'ListAnnotationsResponse', + 'ListDataItemsRequest', + 'ListDataItemsResponse', + 'ListDatasetsRequest', + 'ListDatasetsResponse', + 'UpdateDatasetRequest', + 'DeployedModelRef', + 'EncryptionSpec', + 'DeployedModel', + 'Endpoint', + 'CreateEndpointOperationMetadata', + 'CreateEndpointRequest', + 'DeleteEndpointRequest', + 'DeployModelOperationMetadata', + 'DeployModelRequest', + 'DeployModelResponse', + 'GetEndpointRequest', + 'ListEndpointsRequest', + 'ListEndpointsResponse', + 'UndeployModelOperationMetadata', + 'UndeployModelRequest', + 'UndeployModelResponse', + 
'UpdateEndpointRequest', + 'EnvVar', + 'Execution', + 'HyperparameterTuningJob', + 'BigQueryDestination', + 'BigQuerySource', + 'ContainerRegistryDestination', + 'GcsDestination', + 'GcsSource', + 'CancelBatchPredictionJobRequest', + 'CancelCustomJobRequest', + 'CancelDataLabelingJobRequest', + 'CancelHyperparameterTuningJobRequest', + 'CreateBatchPredictionJobRequest', + 'CreateCustomJobRequest', + 'CreateDataLabelingJobRequest', + 'CreateHyperparameterTuningJobRequest', + 'DeleteBatchPredictionJobRequest', + 'DeleteCustomJobRequest', + 'DeleteDataLabelingJobRequest', + 'DeleteHyperparameterTuningJobRequest', + 'GetBatchPredictionJobRequest', + 'GetCustomJobRequest', + 'GetDataLabelingJobRequest', + 'GetHyperparameterTuningJobRequest', + 'ListBatchPredictionJobsRequest', + 'ListBatchPredictionJobsResponse', + 'ListCustomJobsRequest', + 'ListCustomJobsResponse', + 'ListDataLabelingJobsRequest', + 'ListDataLabelingJobsResponse', + 'ListHyperparameterTuningJobsRequest', + 'ListHyperparameterTuningJobsResponse', + 'JobState', + 'AutomaticResources', + 'AutoscalingMetricSpec', + 'BatchDedicatedResources', + 'DedicatedResources', + 'DiskSpec', + 'MachineSpec', + 'ResourcesConsumed', + 'ManualBatchTuningParameters', + 'MigratableResource', + 'BatchMigrateResourcesOperationMetadata', + 'BatchMigrateResourcesRequest', + 'BatchMigrateResourcesResponse', + 'MigrateResourceRequest', + 'MigrateResourceResponse', + 'SearchMigratableResourcesRequest', + 'SearchMigratableResourcesResponse', + 'Model', + 'ModelContainerSpec', + 'Port', + 'PredictSchemata', + 'ModelEvaluation', + 'ModelEvaluationSlice', + 'DeleteModelRequest', + 'ExportModelOperationMetadata', + 'ExportModelRequest', + 'ExportModelResponse', + 'GetModelEvaluationRequest', + 'GetModelEvaluationSliceRequest', + 'GetModelRequest', + 'ListModelEvaluationSlicesRequest', + 'ListModelEvaluationSlicesResponse', + 'ListModelEvaluationsRequest', + 'ListModelEvaluationsResponse', + 'ListModelsRequest', + 'ListModelsResponse', + 'UpdateModelRequest', + 'UploadModelOperationMetadata', + 'UploadModelRequest', + 'UploadModelResponse', + 'DeleteOperationMetadata', + 'GenericOperationMetadata', + 'PipelineJob', + 'PipelineJobDetail', + 'PipelineTaskDetail', + 'PipelineTaskExecutorDetail', + 'CancelPipelineJobRequest', + 'CancelTrainingPipelineRequest', + 'CreatePipelineJobRequest', + 'CreateTrainingPipelineRequest', + 'DeletePipelineJobRequest', + 'DeleteTrainingPipelineRequest', + 'GetPipelineJobRequest', + 'GetTrainingPipelineRequest', + 'ListPipelineJobsRequest', + 'ListPipelineJobsResponse', + 'ListTrainingPipelinesRequest', + 'ListTrainingPipelinesResponse', + 'PipelineState', + 'PredictRequest', + 'PredictResponse', + 'SpecialistPool', + 'CreateSpecialistPoolOperationMetadata', + 'CreateSpecialistPoolRequest', + 'DeleteSpecialistPoolRequest', + 'GetSpecialistPoolRequest', + 'ListSpecialistPoolsRequest', + 'ListSpecialistPoolsResponse', + 'UpdateSpecialistPoolOperationMetadata', + 'UpdateSpecialistPoolRequest', + 'Measurement', + 'StudySpec', + 'Trial', + 'FilterSplit', + 'FractionSplit', + 'InputDataConfig', + 'PredefinedSplit', + 'TimestampSplit', + 'TrainingPipeline', + 'UserActionReference', + 'Value', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/py.typed new file mode 100644 index 0000000000..228f1c51c6 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform package uses inline types. 
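The PEP 561 marker above makes the re-exported surface type-checkable. A minimal sketch of calling one of the clients exported by this package; the project and location values are placeholders, and credentials are assumed to come from Application Default Credentials:

.. code-block:: python

    from google.cloud.aiplatform import DatasetServiceClient

    client = DatasetServiceClient()
    parent = "projects/my-project/locations/us-central1"  # placeholder values
    for dataset in client.list_datasets(parent=parent):  # returns a pager
        print(dataset.display_name)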
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py new file mode 100644 index 0000000000..41d6704c1f --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.image_classification import ImageClassificationPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.image_object_detection import ImageObjectDetectionPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.image_segmentation import ImageSegmentationPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.text_classification import TextClassificationPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.text_extraction import TextExtractionPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.text_sentiment import TextSentimentPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.video_action_recognition import VideoActionRecognitionPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.video_classification import VideoClassificationPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.video_object_tracking import VideoObjectTrackingPredictionInstance + +__all__ = ('ImageClassificationPredictionInstance', + 'ImageObjectDetectionPredictionInstance', + 'ImageSegmentationPredictionInstance', + 'TextClassificationPredictionInstance', + 'TextExtractionPredictionInstance', + 'TextSentimentPredictionInstance', + 'VideoActionRecognitionPredictionInstance', + 'VideoClassificationPredictionInstance', + 'VideoObjectTrackingPredictionInstance', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/py.typed new file mode 100644 index 0000000000..f70e7f605a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1-schema-predict-instance package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py new file mode 100644 index 0000000000..41ab5407a7 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from .types.image_classification import ImageClassificationPredictionInstance +from .types.image_object_detection import ImageObjectDetectionPredictionInstance +from .types.image_segmentation import ImageSegmentationPredictionInstance +from .types.text_classification import TextClassificationPredictionInstance +from .types.text_extraction import TextExtractionPredictionInstance +from .types.text_sentiment import TextSentimentPredictionInstance +from .types.video_action_recognition import VideoActionRecognitionPredictionInstance +from .types.video_classification import VideoClassificationPredictionInstance +from .types.video_object_tracking import VideoObjectTrackingPredictionInstance + +__all__ = ( +'ImageClassificationPredictionInstance', +'ImageObjectDetectionPredictionInstance', +'ImageSegmentationPredictionInstance', +'TextClassificationPredictionInstance', +'TextExtractionPredictionInstance', +'TextSentimentPredictionInstance', +'VideoActionRecognitionPredictionInstance', +'VideoClassificationPredictionInstance', +'VideoObjectTrackingPredictionInstance', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_metadata.json b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_metadata.json new file mode 100644 index 0000000000..0ae909d6ea --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_metadata.json @@ -0,0 +1,7 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform.v1.schema.predict.instance_v1", + "protoPackage": "google.cloud.aiplatform.v1.schema.predict.instance", + "schema": "1.0" +} diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/py.typed new file mode 100644 index 0000000000..f70e7f605a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1-schema-predict-instance package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/services/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/services/__init__.py new file mode 100644 index 0000000000..4de65971c2 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py new file mode 100644 index 0000000000..80a5332604 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .image_classification import ( + ImageClassificationPredictionInstance, +) +from .image_object_detection import ( + ImageObjectDetectionPredictionInstance, +) +from .image_segmentation import ( + ImageSegmentationPredictionInstance, +) +from .text_classification import ( + TextClassificationPredictionInstance, +) +from .text_extraction import ( + TextExtractionPredictionInstance, +) +from .text_sentiment import ( + TextSentimentPredictionInstance, +) +from .video_action_recognition import ( + VideoActionRecognitionPredictionInstance, +) +from .video_classification import ( + VideoClassificationPredictionInstance, +) +from .video_object_tracking import ( + VideoObjectTrackingPredictionInstance, +) + +__all__ = ( + 'ImageClassificationPredictionInstance', + 'ImageObjectDetectionPredictionInstance', + 'ImageSegmentationPredictionInstance', + 'TextClassificationPredictionInstance', + 'TextExtractionPredictionInstance', + 'TextSentimentPredictionInstance', + 'VideoActionRecognitionPredictionInstance', + 'VideoClassificationPredictionInstance', + 'VideoObjectTrackingPredictionInstance', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py new file mode 100644 index 0000000000..02e95ba9c7 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'ImageClassificationPredictionInstance', + }, +) + + +class ImageClassificationPredictionInstance(proto.Message): + r"""Prediction input format for Image Classification. 
+    Attributes:
+        content (str):
+            The image bytes or Cloud Storage URI to make
+            the prediction on.
+        mime_type (str):
+            The MIME type of the content of the image.
+            Only images in the MIME types listed below are
+            supported:
+            - image/jpeg
+            - image/gif
+            - image/png
+            - image/webp
+            - image/bmp
+            - image/tiff
+            - image/vnd.microsoft.icon
+    """
+
+    content = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    mime_type = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py
new file mode 100644
index 0000000000..0b9aadc101
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1.schema.predict.instance',
+    manifest={
+        'ImageObjectDetectionPredictionInstance',
+    },
+)
+
+
+class ImageObjectDetectionPredictionInstance(proto.Message):
+    r"""Prediction input format for Image Object Detection.
+    Attributes:
+        content (str):
+            The image bytes or Cloud Storage URI to make
+            the prediction on.
+        mime_type (str):
+            The MIME type of the content of the image.
+            Only images in the MIME types listed below are
+            supported:
+            - image/jpeg
+            - image/gif
+            - image/png
+            - image/webp
+            - image/bmp
+            - image/tiff
+            - image/vnd.microsoft.icon
+    """
+
+    content = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    mime_type = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py
new file mode 100644
index 0000000000..f967807e6c
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
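+#
+# A usage sketch for the image instance schemas above (hypothetical values;
+# these proto-plus messages accept keyword construction):
+#
+#     from google.cloud.aiplatform.v1.schema.predict import instance_v1
+#
+#     instance = instance_v1.ImageClassificationPredictionInstance(
+#         content="<base64-encoded image bytes or Cloud Storage URI>",
+#         mime_type="image/jpeg",
+#     )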
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1.schema.predict.instance',
+    manifest={
+        'ImageSegmentationPredictionInstance',
+    },
+)
+
+
+class ImageSegmentationPredictionInstance(proto.Message):
+    r"""Prediction input format for Image Segmentation.
+    Attributes:
+        content (str):
+            The image bytes to make the predictions on.
+        mime_type (str):
+            The MIME type of the content of the image.
+            Only images in the MIME types listed below are
+            supported:
+            - image/jpeg
+            - image/png
+    """
+
+    content = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    mime_type = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py
new file mode 100644
index 0000000000..4eec13516c
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1.schema.predict.instance',
+    manifest={
+        'TextClassificationPredictionInstance',
+    },
+)
+
+
+class TextClassificationPredictionInstance(proto.Message):
+    r"""Prediction input format for Text Classification.
+    Attributes:
+        content (str):
+            The text snippet to make the predictions on.
+        mime_type (str):
+            The MIME type of the text snippet. The
+            supported MIME types are listed below.
+            - text/plain
+    """
+
+    content = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    mime_type = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py
new file mode 100644
index 0000000000..ffecb3de51
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
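+#
+# A usage sketch for the text instance schemas above (hypothetical snippet;
+# text/plain is the only MIME type the docstrings list as supported):
+#
+#     from google.cloud.aiplatform.v1.schema.predict import instance_v1
+#
+#     instance = instance_v1.TextClassificationPredictionInstance(
+#         content="A short review to classify.",
+#         mime_type="text/plain",
+#     )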
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1.schema.predict.instance',
+    manifest={
+        'TextExtractionPredictionInstance',
+    },
+)
+
+
+class TextExtractionPredictionInstance(proto.Message):
+    r"""Prediction input format for Text Extraction.
+    Attributes:
+        content (str):
+            The text snippet to make the predictions on.
+        mime_type (str):
+            The MIME type of the text snippet. The
+            supported MIME types are listed below.
+            - text/plain
+        key (str):
+            This field is only used for batch prediction.
+            If a key is provided, the batch prediction
+            result will be mapped to this key. If omitted,
+            then the batch prediction result will contain
+            the entire input instance. Vertex AI will not
+            check if keys in the request are duplicates, so
+            it is up to the caller to ensure the keys are
+            unique.
+    """
+
+    content = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    mime_type = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    key = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py
new file mode 100644
index 0000000000..5bdfe5d5ba
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1.schema.predict.instance',
+    manifest={
+        'TextSentimentPredictionInstance',
+    },
+)
+
+
+class TextSentimentPredictionInstance(proto.Message):
+    r"""Prediction input format for Text Sentiment.
+    Attributes:
+        content (str):
+            The text snippet to make the predictions on.
+        mime_type (str):
+            The MIME type of the text snippet. The
+            supported MIME types are listed below.
+            - text/plain
+    """
+
+    content = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    mime_type = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py
new file mode 100644
index 0000000000..d53782868f
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'VideoActionRecognitionPredictionInstance', + }, +) + + +class VideoActionRecognitionPredictionInstance(proto.Message): + r"""Prediction input format for Video Action Recognition. + Attributes: + content (str): + The Google Cloud Storage location of the + video on which to perform the prediction. + mime_type (str): + The MIME type of the content of the video. + Only the following are supported: video/mp4 + video/avi video/quicktime + time_segment_start (str): + The beginning, inclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision. + time_segment_end (str): + The end, exclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision, and "inf" or "Infinity" + is allowed, which means the end of the video. + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.STRING, + number=3, + ) + time_segment_end = proto.Field( + proto.STRING, + number=4, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py new file mode 100644 index 0000000000..b51ab464a4 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'VideoClassificationPredictionInstance', + }, +) + + +class VideoClassificationPredictionInstance(proto.Message): + r"""Prediction input format for Video Classification. + Attributes: + content (str): + The Google Cloud Storage location of the + video on which to perform the prediction. + mime_type (str): + The MIME type of the content of the video. 
+ Only the following are supported: video/mp4 + video/avi video/quicktime + time_segment_start (str): + The beginning, inclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision. + time_segment_end (str): + The end, exclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision, and "inf" or "Infinity" + is allowed, which means the end of the video. + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.STRING, + number=3, + ) + time_segment_end = proto.Field( + proto.STRING, + number=4, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py new file mode 100644 index 0000000000..8b96f75fd2 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'VideoObjectTrackingPredictionInstance', + }, +) + + +class VideoObjectTrackingPredictionInstance(proto.Message): + r"""Prediction input format for Video Object Tracking. + Attributes: + content (str): + The Google Cloud Storage location of the + video on which to perform the prediction. + mime_type (str): + The MIME type of the content of the video. + Only the following are supported: video/mp4 + video/avi video/quicktime + time_segment_start (str): + The beginning, inclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision. + time_segment_end (str): + The end, exclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision, and "inf" or "Infinity" + is allowed, which means the end of the video. 
+ """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.STRING, + number=3, + ) + time_segment_end = proto.Field( + proto.STRING, + number=4, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/__init__.py new file mode 100644 index 0000000000..91ae7f0d5c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/__init__.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from google.cloud.aiplatform.v1.schema.predict.params_v1.types.image_classification import ImageClassificationPredictionParams +from google.cloud.aiplatform.v1.schema.predict.params_v1.types.image_object_detection import ImageObjectDetectionPredictionParams +from google.cloud.aiplatform.v1.schema.predict.params_v1.types.image_segmentation import ImageSegmentationPredictionParams +from google.cloud.aiplatform.v1.schema.predict.params_v1.types.video_action_recognition import VideoActionRecognitionPredictionParams +from google.cloud.aiplatform.v1.schema.predict.params_v1.types.video_classification import VideoClassificationPredictionParams +from google.cloud.aiplatform.v1.schema.predict.params_v1.types.video_object_tracking import VideoObjectTrackingPredictionParams + +__all__ = ('ImageClassificationPredictionParams', + 'ImageObjectDetectionPredictionParams', + 'ImageSegmentationPredictionParams', + 'VideoActionRecognitionPredictionParams', + 'VideoClassificationPredictionParams', + 'VideoObjectTrackingPredictionParams', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/py.typed new file mode 100644 index 0000000000..df96e61590 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1-schema-predict-params package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py new file mode 100644 index 0000000000..91b718b437 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from .types.image_classification import ImageClassificationPredictionParams +from .types.image_object_detection import ImageObjectDetectionPredictionParams +from .types.image_segmentation import ImageSegmentationPredictionParams +from .types.video_action_recognition import VideoActionRecognitionPredictionParams +from .types.video_classification import VideoClassificationPredictionParams +from .types.video_object_tracking import VideoObjectTrackingPredictionParams + +__all__ = ( +'ImageClassificationPredictionParams', +'ImageObjectDetectionPredictionParams', +'ImageSegmentationPredictionParams', +'VideoActionRecognitionPredictionParams', +'VideoClassificationPredictionParams', +'VideoObjectTrackingPredictionParams', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_metadata.json b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_metadata.json new file mode 100644 index 0000000000..edfffb441b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_metadata.json @@ -0,0 +1,7 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform.v1.schema.predict.params_v1", + "protoPackage": "google.cloud.aiplatform.v1.schema.predict.params", + "schema": "1.0" +} diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/py.typed new file mode 100644 index 0000000000..df96e61590 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1-schema-predict-params package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/services/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/services/__init__.py new file mode 100644 index 0000000000..4de65971c2 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py new file mode 100644 index 0000000000..70a92bb59c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .image_classification import ( + ImageClassificationPredictionParams, +) +from .image_object_detection import ( + ImageObjectDetectionPredictionParams, +) +from .image_segmentation import ( + ImageSegmentationPredictionParams, +) +from .video_action_recognition import ( + VideoActionRecognitionPredictionParams, +) +from .video_classification import ( + VideoClassificationPredictionParams, +) +from .video_object_tracking import ( + VideoObjectTrackingPredictionParams, +) + +__all__ = ( + 'ImageClassificationPredictionParams', + 'ImageObjectDetectionPredictionParams', + 'ImageSegmentationPredictionParams', + 'VideoActionRecognitionPredictionParams', + 'VideoClassificationPredictionParams', + 'VideoObjectTrackingPredictionParams', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py new file mode 100644 index 0000000000..1668600544 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.params', + manifest={ + 'ImageClassificationPredictionParams', + }, +) + + +class ImageClassificationPredictionParams(proto.Message): + r"""Prediction model parameters for Image Classification. + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The Model only returns up to that many top, + by confidence score, predictions per instance. + If this number is very high, the Model may + return fewer predictions. Default value is 10. 
+ """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py new file mode 100644 index 0000000000..43c7814607 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.params', + manifest={ + 'ImageObjectDetectionPredictionParams', + }, +) + + +class ImageObjectDetectionPredictionParams(proto.Message): + r"""Prediction model parameters for Image Object Detection. + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The Model only returns up to that many top, + by confidence score, predictions per instance. + Note that number of returned predictions is also + limited by metadata's predictionsLimit. Default + value is 10. + """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py new file mode 100644 index 0000000000..695a3a7745 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.params', + manifest={ + 'ImageSegmentationPredictionParams', + }, +) + + +class ImageSegmentationPredictionParams(proto.Message): + r"""Prediction model parameters for Image Segmentation. 
+ Attributes: + confidence_threshold (float): + When the model predicts category of pixels of + the image, it will only provide predictions for + pixels that it is at least this much confident + about. All other pixels will be classified as + background. Default value is 0.5. + """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py new file mode 100644 index 0000000000..88e714e9cf --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.params', + manifest={ + 'VideoActionRecognitionPredictionParams', + }, +) + + +class VideoActionRecognitionPredictionParams(proto.Message): + r"""Prediction model parameters for Video Action Recognition. + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The model only returns up to that many top, + by confidence score, predictions per frame of + the video. If this number is very high, the + Model may return fewer predictions per frame. + Default value is 50. + """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py new file mode 100644 index 0000000000..06b2347eb6 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py @@ -0,0 +1,94 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
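# --- Illustrative usage sketch (not part of the generated sources). Raising
# the segmentation threshold documented above trades recall for precision:
# pixels the model is less sure about fall back to background. The 0.7 value
# is arbitrary; the docstring's default is 0.5. Import path assumes this
# staging layout is installed.
from google.cloud.aiplatform.v1.schema.predict import params_v1

seg_params = params_v1.ImageSegmentationPredictionParams(
    confidence_threshold=0.7,
)
print(seg_params.confidence_threshold)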
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1.schema.predict.params',
+    manifest={
+        'VideoClassificationPredictionParams',
+    },
+)
+
+
+class VideoClassificationPredictionParams(proto.Message):
+    r"""Prediction model parameters for Video Classification.
+    Attributes:
+        confidence_threshold (float):
+            The Model only returns predictions with at
+            least this confidence score. Default value is
+            0.0.
+        max_predictions (int):
+            The Model only returns up to that many top,
+            by confidence score, predictions per instance.
+            If this number is very high, the Model may
+            return fewer predictions. Default value is
+            10,000.
+        segment_classification (bool):
+            Set to true to request segment-level
+            classification. Vertex AI returns labels and
+            their confidence scores for the entire time
+            segment of the video that the user specified in
+            the input instance. Default value is true.
+        shot_classification (bool):
+            Set to true to request shot-level
+            classification. Vertex AI determines the
+            boundaries for each camera shot in the entire
+            time segment of the video that the user
+            specified in the input instance. Vertex AI then
+            returns labels and their confidence scores for
+            each detected shot, along with the start and end
+            time of the shot.
+            WARNING: Model evaluation is not done for this
+            classification type; its quality depends on the
+            training data, but there are no metrics provided
+            to describe that quality.
+            Default value is false.
+        one_sec_interval_classification (bool):
+            Set to true to request classification for a
+            video at one-second intervals. Vertex AI returns
+            labels and their confidence scores for each
+            second of the entire time segment of the video
+            that the user specified in the input instance.
+            WARNING: Model evaluation is not done for this
+            classification type; its quality depends on the
+            training data, but there are no metrics provided
+            to describe that quality. Default value is
+            false.
+    """
+
+    confidence_threshold = proto.Field(
+        proto.FLOAT,
+        number=1,
+    )
+    max_predictions = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+    segment_classification = proto.Field(
+        proto.BOOL,
+        number=3,
+    )
+    shot_classification = proto.Field(
+        proto.BOOL,
+        number=4,
+    )
+    one_sec_interval_classification = proto.Field(
+        proto.BOOL,
+        number=5,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py
new file mode 100644
index 0000000000..820a73e3c6
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.params', + manifest={ + 'VideoObjectTrackingPredictionParams', + }, +) + + +class VideoObjectTrackingPredictionParams(proto.Message): + r"""Prediction model parameters for Video Object Tracking. + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The model only returns up to that many top, + by confidence score, predictions per frame of + the video. If this number is very high, the + Model may return fewer predictions per frame. + Default value is 50. + min_bounding_box_size (float): + Only bounding boxes with shortest edge at + least that long as a relative value of video + frame size are returned. Default value is 0.0. + """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + min_bounding_box_size = proto.Field( + proto.FLOAT, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py new file mode 100644 index 0000000000..27d9f97862 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
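# --- Illustrative sketch (not part of the generated sources). It wires a
# schema instance and its params into PredictionServiceClient.predict(),
# which takes google.protobuf Value payloads; json_format packs plain dicts
# (JSON/camelCase field names) into Values. The endpoint path and base64
# payload are hypothetical placeholders, and running this needs application
# default credentials.
from google.cloud import aiplatform_v1
from google.protobuf import json_format
from google.protobuf.struct_pb2 import Value


def as_value(payload: dict) -> Value:
    # Pack a plain dict into the Value proto that predict() accepts.
    return json_format.ParseDict(payload, Value())


client = aiplatform_v1.PredictionServiceClient()
response = client.predict(
    endpoint="projects/my-project/locations/us-central1/endpoints/123",
    instances=[as_value({"content": "<base64 image bytes>",
                         "mimeType": "image/jpeg"})],
    parameters=as_value({"confidenceThreshold": 0.5, "maxPredictions": 5}),
)
for prediction in response.predictions:
    print(prediction)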
+# + + +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.classification import ClassificationPredictionResult +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.image_object_detection import ImageObjectDetectionPredictionResult +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.image_segmentation import ImageSegmentationPredictionResult +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.tabular_classification import TabularClassificationPredictionResult +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.tabular_regression import TabularRegressionPredictionResult +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.text_extraction import TextExtractionPredictionResult +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.text_sentiment import TextSentimentPredictionResult +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.video_action_recognition import VideoActionRecognitionPredictionResult +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.video_classification import VideoClassificationPredictionResult +from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.video_object_tracking import VideoObjectTrackingPredictionResult + +__all__ = ('ClassificationPredictionResult', + 'ImageObjectDetectionPredictionResult', + 'ImageSegmentationPredictionResult', + 'TabularClassificationPredictionResult', + 'TabularRegressionPredictionResult', + 'TextExtractionPredictionResult', + 'TextSentimentPredictionResult', + 'VideoActionRecognitionPredictionResult', + 'VideoClassificationPredictionResult', + 'VideoObjectTrackingPredictionResult', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/py.typed new file mode 100644 index 0000000000..472fa4d8cc --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1-schema-predict-prediction package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py new file mode 100644 index 0000000000..3cf9304526 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + +from .types.classification import ClassificationPredictionResult +from .types.image_object_detection import ImageObjectDetectionPredictionResult +from .types.image_segmentation import ImageSegmentationPredictionResult +from .types.tabular_classification import TabularClassificationPredictionResult +from .types.tabular_regression import TabularRegressionPredictionResult +from .types.text_extraction import TextExtractionPredictionResult +from .types.text_sentiment import TextSentimentPredictionResult +from .types.video_action_recognition import VideoActionRecognitionPredictionResult +from .types.video_classification import VideoClassificationPredictionResult +from .types.video_object_tracking import VideoObjectTrackingPredictionResult + +__all__ = ( +'ClassificationPredictionResult', +'ImageObjectDetectionPredictionResult', +'ImageSegmentationPredictionResult', +'TabularClassificationPredictionResult', +'TabularRegressionPredictionResult', +'TextExtractionPredictionResult', +'TextSentimentPredictionResult', +'VideoActionRecognitionPredictionResult', +'VideoClassificationPredictionResult', +'VideoObjectTrackingPredictionResult', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_metadata.json b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_metadata.json new file mode 100644 index 0000000000..ba1d67a00c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_metadata.json @@ -0,0 +1,7 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform.v1.schema.predict.prediction_v1", + "protoPackage": "google.cloud.aiplatform.v1.schema.predict.prediction", + "schema": "1.0" +} diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/py.typed new file mode 100644 index 0000000000..472fa4d8cc --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1-schema-predict-prediction package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/services/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/services/__init__.py new file mode 100644 index 0000000000..4de65971c2 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
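# --- Illustrative sketch (not part of the generated sources). The
# prediction_v1 types exported above describe the JSON shape of predict()
# outputs; one is constructed directly here (with made-up values) to show
# how the parallel ids/display_names/confidences lists line up.
from google.cloud.aiplatform.v1.schema.predict import prediction_v1

result = prediction_v1.ClassificationPredictionResult(
    ids=[101, 102],
    display_names=["cat", "dog"],
    confidences=[0.92, 0.07],
)
best = max(range(len(result.ids)), key=lambda i: result.confidences[i])
print(result.display_names[best], result.confidences[best])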
+# diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py new file mode 100644 index 0000000000..b7b7c056aa --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .classification import ( + ClassificationPredictionResult, +) +from .image_object_detection import ( + ImageObjectDetectionPredictionResult, +) +from .image_segmentation import ( + ImageSegmentationPredictionResult, +) +from .tabular_classification import ( + TabularClassificationPredictionResult, +) +from .tabular_regression import ( + TabularRegressionPredictionResult, +) +from .text_extraction import ( + TextExtractionPredictionResult, +) +from .text_sentiment import ( + TextSentimentPredictionResult, +) +from .video_action_recognition import ( + VideoActionRecognitionPredictionResult, +) +from .video_classification import ( + VideoClassificationPredictionResult, +) +from .video_object_tracking import ( + VideoObjectTrackingPredictionResult, +) + +__all__ = ( + 'ClassificationPredictionResult', + 'ImageObjectDetectionPredictionResult', + 'ImageSegmentationPredictionResult', + 'TabularClassificationPredictionResult', + 'TabularRegressionPredictionResult', + 'TextExtractionPredictionResult', + 'TextSentimentPredictionResult', + 'VideoActionRecognitionPredictionResult', + 'VideoClassificationPredictionResult', + 'VideoObjectTrackingPredictionResult', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py new file mode 100644 index 0000000000..f14b084951 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.prediction', + manifest={ + 'ClassificationPredictionResult', + }, +) + + +class ClassificationPredictionResult(proto.Message): + r"""Prediction output format for Image and Text Classification. 
+ Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. + confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the Ids. + """ + + ids = proto.RepeatedField( + proto.INT64, + number=1, + ) + display_names = proto.RepeatedField( + proto.STRING, + number=2, + ) + confidences = proto.RepeatedField( + proto.FLOAT, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py new file mode 100644 index 0000000000..74178c5502 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.prediction', + manifest={ + 'ImageObjectDetectionPredictionResult', + }, +) + + +class ImageObjectDetectionPredictionResult(proto.Message): + r"""Prediction output format for Image Object Detection. + Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified, ordered by the confidence + score descendingly. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. + confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the Ids. + bboxes (Sequence[google.protobuf.struct_pb2.ListValue]): + Bounding boxes, i.e. the rectangles over the image, that + pinpoint the found AnnotationSpecs. Given in order that + matches the IDs. Each bounding box is an array of 4 numbers + ``xMin``, ``xMax``, ``yMin``, and ``yMax``, which represent + the extremal coordinates of the box. They are relative to + the image size, and the point 0,0 is in the top left of the + image. 
+ """ + + ids = proto.RepeatedField( + proto.INT64, + number=1, + ) + display_names = proto.RepeatedField( + proto.STRING, + number=2, + ) + confidences = proto.RepeatedField( + proto.FLOAT, + number=3, + ) + bboxes = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=struct_pb2.ListValue, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py new file mode 100644 index 0000000000..e93991222a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.prediction', + manifest={ + 'ImageSegmentationPredictionResult', + }, +) + + +class ImageSegmentationPredictionResult(proto.Message): + r"""Prediction output format for Image Segmentation. + Attributes: + category_mask (str): + A PNG image where each pixel in the mask + represents the category in which the pixel in + the original image was predicted to belong to. + The size of this image will be the same as the + original image. The mapping between the + AnntoationSpec and the color can be found in + model's metadata. The model will choose the most + likely category and if none of the categories + reach the confidence threshold, the pixel will + be marked as background. + confidence_mask (str): + A one channel image which is encoded as an + 8bit lossless PNG. The size of the image will be + the same as the original image. For a specific + pixel, darker color means less confidence in + correctness of the cateogry in the categoryMask + for the corresponding pixel. Black means no + confidence and white means complete confidence. + """ + + category_mask = proto.Field( + proto.STRING, + number=1, + ) + confidence_mask = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py new file mode 100644 index 0000000000..a36bf8f991 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1.schema.predict.prediction',
+    manifest={
+        'TabularClassificationPredictionResult',
+    },
+)
+
+
+class TabularClassificationPredictionResult(proto.Message):
+    r"""Prediction output format for Tabular Classification.
+    Attributes:
+        classes (Sequence[str]):
+            The names of the classes being classified;
+            contains all possible values of the target
+            column.
+        scores (Sequence[float]):
+            The model's confidence in each class being
+            correct; a higher value means higher confidence.
+            The N-th score corresponds to the N-th class in
+            classes.
+    """
+
+    classes = proto.RepeatedField(
+        proto.STRING,
+        number=1,
+    )
+    scores = proto.RepeatedField(
+        proto.FLOAT,
+        number=2,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py
new file mode 100644
index 0000000000..56af2af196
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1.schema.predict.prediction',
+    manifest={
+        'TabularRegressionPredictionResult',
+    },
+)
+
+
+class TabularRegressionPredictionResult(proto.Message):
+    r"""Prediction output format for Tabular Regression.
+    Attributes:
+        value (float):
+            The regression value.
+        lower_bound (float):
+            The lower bound of the prediction interval.
+        upper_bound (float):
+            The upper bound of the prediction interval.
+ """ + + value = proto.Field( + proto.FLOAT, + number=1, + ) + lower_bound = proto.Field( + proto.FLOAT, + number=2, + ) + upper_bound = proto.Field( + proto.FLOAT, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py new file mode 100644 index 0000000000..3e7398f165 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.prediction', + manifest={ + 'TextExtractionPredictionResult', + }, +) + + +class TextExtractionPredictionResult(proto.Message): + r"""Prediction output format for Text Extraction. + Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified, ordered by the confidence + score descendingly. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. + text_segment_start_offsets (Sequence[int]): + The start offsets, inclusive, of the text + segment in which the AnnotationSpec has been + identified. Expressed as a zero-based number of + characters as measured from the start of the + text snippet. + text_segment_end_offsets (Sequence[int]): + The end offsets, inclusive, of the text + segment in which the AnnotationSpec has been + identified. Expressed as a zero-based number of + characters as measured from the start of the + text snippet. + confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the Ids. + """ + + ids = proto.RepeatedField( + proto.INT64, + number=1, + ) + display_names = proto.RepeatedField( + proto.STRING, + number=2, + ) + text_segment_start_offsets = proto.RepeatedField( + proto.INT64, + number=3, + ) + text_segment_end_offsets = proto.RepeatedField( + proto.INT64, + number=4, + ) + confidences = proto.RepeatedField( + proto.FLOAT, + number=5, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py new file mode 100644 index 0000000000..135db45729 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1.schema.predict.prediction',
+    manifest={
+        'TextSentimentPredictionResult',
+    },
+)
+
+
+class TextSentimentPredictionResult(proto.Message):
+    r"""Prediction output format for Text Sentiment.
+    Attributes:
+        sentiment (int):
+            The integer sentiment label, between 0
+            (inclusive) and the sentimentMax label
+            (inclusive), where 0 maps to the least positive
+            sentiment and sentimentMax maps to the most
+            positive one. The higher the score is, the more
+            positive the sentiment in the text snippet is.
+            Note: sentimentMax is an integer value between 1
+            (inclusive) and 10 (inclusive).
+    """
+
+    sentiment = proto.Field(
+        proto.INT32,
+        number=1,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py
new file mode 100644
index 0000000000..5a853655ae
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.protobuf import duration_pb2  # type: ignore
+from google.protobuf import wrappers_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1.schema.predict.prediction',
+    manifest={
+        'VideoActionRecognitionPredictionResult',
+    },
+)
+
+
+class VideoActionRecognitionPredictionResult(proto.Message):
+    r"""Prediction output format for Video Action Recognition.
+    Attributes:
+        id (str):
+            The resource ID of the AnnotationSpec that
+            had been identified.
+        display_name (str):
+            The display name of the AnnotationSpec that
+            had been identified.
+        time_segment_start (google.protobuf.duration_pb2.Duration):
+            The beginning, inclusive, of the video's time
+            segment in which the AnnotationSpec has been
+            identified. Expressed as a number of seconds as
+            measured from the start of the video, with
+            fractions up to a microsecond precision, and
+            with "s" appended at the end.
+        time_segment_end (google.protobuf.duration_pb2.Duration):
+            The end, exclusive, of the video's time
+            segment in which the AnnotationSpec has been
+            identified. Expressed as a number of seconds as
+            measured from the start of the video, with
+            fractions up to a microsecond precision, and
+            with "s" appended at the end.
+        confidence (google.protobuf.wrappers_pb2.FloatValue):
+            The Model's confidence in the correctness of
+            this prediction; a higher value means higher
+            confidence.
+    """
+
+    id = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    display_name = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    time_segment_start = proto.Field(
+        proto.MESSAGE,
+        number=4,
+        message=duration_pb2.Duration,
+    )
+    time_segment_end = proto.Field(
+        proto.MESSAGE,
+        number=5,
+        message=duration_pb2.Duration,
+    )
+    confidence = proto.Field(
+        proto.MESSAGE,
+        number=6,
+        message=wrappers_pb2.FloatValue,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py
new file mode 100644
index 0000000000..da14b3253e
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py
@@ -0,0 +1,102 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.protobuf import duration_pb2  # type: ignore
+from google.protobuf import wrappers_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1.schema.predict.prediction',
+    manifest={
+        'VideoClassificationPredictionResult',
+    },
+)
+
+
+class VideoClassificationPredictionResult(proto.Message):
+    r"""Prediction output format for Video Classification.
+    Attributes:
+        id (str):
+            The resource ID of the AnnotationSpec that
+            had been identified.
+        display_name (str):
+            The display name of the AnnotationSpec that
+            had been identified.
+        type_ (str):
+            The type of the prediction. The requested
+            types can be configured via parameters. This
+            will be one of:
+            - segment-classification
+            - shot-classification
+            - one-sec-interval-classification
+        time_segment_start (google.protobuf.duration_pb2.Duration):
+            The beginning, inclusive, of the video's time
+            segment in which the AnnotationSpec has been
+            identified. Expressed as a number of seconds as
+            measured from the start of the video, with
+            fractions up to a microsecond precision, and
+            with "s" appended at the end. Note that for the
+            'segment-classification' prediction type, this
+            equals the original 'timeSegmentStart' from the
+            input instance; for other types it is the start
+            of a shot or a 1 second interval, respectively.
+        time_segment_end (google.protobuf.duration_pb2.Duration):
+            The end, exclusive, of the video's time
+            segment in which the AnnotationSpec has been
+            identified. Expressed as a number of seconds as
+            measured from the start of the video, with
+            fractions up to a microsecond precision, and
+            with "s" appended at the end. Note that for the
+            'segment-classification' prediction type, this
+            equals the original 'timeSegmentEnd' from the
+            input instance; for other types it is the end of
+            a shot or a 1 second interval, respectively.
+        confidence (google.protobuf.wrappers_pb2.FloatValue):
+            The Model's confidence in the correctness of
+            this prediction; a higher value means higher
+            confidence.
+    """
+
+    id = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    display_name = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    type_ = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    time_segment_start = proto.Field(
+        proto.MESSAGE,
+        number=4,
+        message=duration_pb2.Duration,
+    )
+    time_segment_end = proto.Field(
+        proto.MESSAGE,
+        number=5,
+        message=duration_pb2.Duration,
+    )
+    confidence = proto.Field(
+        proto.MESSAGE,
+        number=6,
+        message=wrappers_pb2.FloatValue,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py
new file mode 100644
index 0000000000..9b70e913cd
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py
@@ -0,0 +1,144 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.protobuf import duration_pb2  # type: ignore
+from google.protobuf import wrappers_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1.schema.predict.prediction',
+    manifest={
+        'VideoObjectTrackingPredictionResult',
+    },
+)
+
+
+class VideoObjectTrackingPredictionResult(proto.Message):
+    r"""Prediction output format for Video Object Tracking.
+    Attributes:
+        id (str):
+            The resource ID of the AnnotationSpec that
+            had been identified.
+        display_name (str):
+            The display name of the AnnotationSpec that
+            had been identified.
+        time_segment_start (google.protobuf.duration_pb2.Duration):
+            The beginning, inclusive, of the video's time
+            segment in which the object instance has been
+            detected. Expressed as a number of seconds as
+            measured from the start of the video, with
+            fractions up to a microsecond precision, and
+            with "s" appended at the end.
+        time_segment_end (google.protobuf.duration_pb2.Duration):
+            The end, inclusive, of the video's time
+            segment in which the object instance has been
+            detected. Expressed as a number of seconds as
+            measured from the start of the video, with
+            fractions up to a microsecond precision, and
+            with "s" appended at the end.
+        confidence (google.protobuf.wrappers_pb2.FloatValue):
+            The Model's confidence in the correctness of
+            this prediction; a higher value means higher
+            confidence.
+        frames (Sequence[google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.VideoObjectTrackingPredictionResult.Frame]):
+            All of the frames of the video in which a
+            single object instance has been detected. The
+            bounding boxes in the frames identify the same
+            object.
+    """
+
+    class Frame(proto.Message):
+        r"""The fields ``xMin``, ``xMax``, ``yMin``, and ``yMax`` refer to a
+        bounding box, i.e.
the rectangle over the video frame pinpointing + the found AnnotationSpec. The coordinates are relative to the frame + size, and the point 0,0 is in the top left of the frame. + + Attributes: + time_offset (google.protobuf.duration_pb2.Duration): + A time (frame) of a video in which the object + has been detected. Expressed as a number of + seconds as measured from the start of the video, + with fractions up to a microsecond precision, + and with "s" appended at the end. + x_min (google.protobuf.wrappers_pb2.FloatValue): + The leftmost coordinate of the bounding box. + x_max (google.protobuf.wrappers_pb2.FloatValue): + The rightmost coordinate of the bounding box. + y_min (google.protobuf.wrappers_pb2.FloatValue): + The topmost coordinate of the bounding box. + y_max (google.protobuf.wrappers_pb2.FloatValue): + The bottommost coordinate of the bounding + box. + """ + + time_offset = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + x_min = proto.Field( + proto.MESSAGE, + number=2, + message=wrappers_pb2.FloatValue, + ) + x_max = proto.Field( + proto.MESSAGE, + number=3, + message=wrappers_pb2.FloatValue, + ) + y_min = proto.Field( + proto.MESSAGE, + number=4, + message=wrappers_pb2.FloatValue, + ) + y_max = proto.Field( + proto.MESSAGE, + number=5, + message=wrappers_pb2.FloatValue, + ) + + id = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.MESSAGE, + number=3, + message=duration_pb2.Duration, + ) + time_segment_end = proto.Field( + proto.MESSAGE, + number=4, + message=duration_pb2.Duration, + ) + confidence = proto.Field( + proto.MESSAGE, + number=5, + message=wrappers_pb2.FloatValue, + ) + frames = proto.RepeatedField( + proto.MESSAGE, + number=6, + message=Frame, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py new file mode 100644 index 0000000000..0e86266695 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_classification import AutoMlImageClassification +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_classification import AutoMlImageClassificationInputs +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_classification import AutoMlImageClassificationMetadata +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_object_detection import AutoMlImageObjectDetection +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_object_detection import AutoMlImageObjectDetectionInputs +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_object_detection import AutoMlImageObjectDetectionMetadata +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_segmentation import AutoMlImageSegmentation +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_segmentation import AutoMlImageSegmentationInputs +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_segmentation import AutoMlImageSegmentationMetadata +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_tables import AutoMlTables +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_tables import AutoMlTablesInputs +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_tables import AutoMlTablesMetadata +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_classification import AutoMlTextClassification +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_classification import AutoMlTextClassificationInputs +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_extraction import AutoMlTextExtraction +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_extraction import AutoMlTextExtractionInputs +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_sentiment import AutoMlTextSentiment +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_sentiment import AutoMlTextSentimentInputs +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_action_recognition import AutoMlVideoActionRecognition +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_action_recognition import AutoMlVideoActionRecognitionInputs +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_classification import AutoMlVideoClassification +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_classification import AutoMlVideoClassificationInputs +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_object_tracking import AutoMlVideoObjectTracking +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_object_tracking import AutoMlVideoObjectTrackingInputs +from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.export_evaluated_data_items_config import ExportEvaluatedDataItemsConfig + +__all__ = ('AutoMlImageClassification', + 'AutoMlImageClassificationInputs', + 'AutoMlImageClassificationMetadata', + 'AutoMlImageObjectDetection', + 'AutoMlImageObjectDetectionInputs', + 'AutoMlImageObjectDetectionMetadata', + 
'AutoMlImageSegmentation', + 'AutoMlImageSegmentationInputs', + 'AutoMlImageSegmentationMetadata', + 'AutoMlTables', + 'AutoMlTablesInputs', + 'AutoMlTablesMetadata', + 'AutoMlTextClassification', + 'AutoMlTextClassificationInputs', + 'AutoMlTextExtraction', + 'AutoMlTextExtractionInputs', + 'AutoMlTextSentiment', + 'AutoMlTextSentimentInputs', + 'AutoMlVideoActionRecognition', + 'AutoMlVideoActionRecognitionInputs', + 'AutoMlVideoClassification', + 'AutoMlVideoClassificationInputs', + 'AutoMlVideoObjectTracking', + 'AutoMlVideoObjectTrackingInputs', + 'ExportEvaluatedDataItemsConfig', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/py.typed new file mode 100644 index 0000000000..1a9d2972a0 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1-schema-trainingjob-definition package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/__init__.py new file mode 100644 index 0000000000..f4e2447d46 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/__init__.py @@ -0,0 +1,70 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + +from .types.automl_image_classification import AutoMlImageClassification +from .types.automl_image_classification import AutoMlImageClassificationInputs +from .types.automl_image_classification import AutoMlImageClassificationMetadata +from .types.automl_image_object_detection import AutoMlImageObjectDetection +from .types.automl_image_object_detection import AutoMlImageObjectDetectionInputs +from .types.automl_image_object_detection import AutoMlImageObjectDetectionMetadata +from .types.automl_image_segmentation import AutoMlImageSegmentation +from .types.automl_image_segmentation import AutoMlImageSegmentationInputs +from .types.automl_image_segmentation import AutoMlImageSegmentationMetadata +from .types.automl_tables import AutoMlTables +from .types.automl_tables import AutoMlTablesInputs +from .types.automl_tables import AutoMlTablesMetadata +from .types.automl_text_classification import AutoMlTextClassification +from .types.automl_text_classification import AutoMlTextClassificationInputs +from .types.automl_text_extraction import AutoMlTextExtraction +from .types.automl_text_extraction import AutoMlTextExtractionInputs +from .types.automl_text_sentiment import AutoMlTextSentiment +from .types.automl_text_sentiment import AutoMlTextSentimentInputs +from .types.automl_video_action_recognition import AutoMlVideoActionRecognition +from .types.automl_video_action_recognition import AutoMlVideoActionRecognitionInputs +from .types.automl_video_classification import AutoMlVideoClassification +from .types.automl_video_classification import AutoMlVideoClassificationInputs +from .types.automl_video_object_tracking import AutoMlVideoObjectTracking +from .types.automl_video_object_tracking import AutoMlVideoObjectTrackingInputs +from .types.export_evaluated_data_items_config import ExportEvaluatedDataItemsConfig + +__all__ = ( +'AutoMlImageClassification', +'AutoMlImageClassificationInputs', +'AutoMlImageClassificationMetadata', +'AutoMlImageObjectDetection', +'AutoMlImageObjectDetectionInputs', +'AutoMlImageObjectDetectionMetadata', +'AutoMlImageSegmentation', +'AutoMlImageSegmentationInputs', +'AutoMlImageSegmentationMetadata', +'AutoMlTables', +'AutoMlTablesInputs', +'AutoMlTablesMetadata', +'AutoMlTextClassification', +'AutoMlTextClassificationInputs', +'AutoMlTextExtraction', +'AutoMlTextExtractionInputs', +'AutoMlTextSentiment', +'AutoMlTextSentimentInputs', +'AutoMlVideoActionRecognition', +'AutoMlVideoActionRecognitionInputs', +'AutoMlVideoClassification', +'AutoMlVideoClassificationInputs', +'AutoMlVideoObjectTracking', +'AutoMlVideoObjectTrackingInputs', +'ExportEvaluatedDataItemsConfig', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_metadata.json b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_metadata.json new file mode 100644 index 0000000000..620ff75f05 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_metadata.json @@ -0,0 +1,7 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform.v1.schema.trainingjob.definition_v1", + "protoPackage": "google.cloud.aiplatform.v1.schema.trainingjob.definition", + "schema": "1.0" +} diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/py.typed new file 
mode 100644 index 0000000000..1a9d2972a0 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1-schema-trainingjob-definition package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/services/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/services/__init__.py new file mode 100644 index 0000000000..4de65971c2 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/__init__.py new file mode 100644 index 0000000000..4b8bb9425b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/__init__.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .automl_image_classification import ( + AutoMlImageClassification, + AutoMlImageClassificationInputs, + AutoMlImageClassificationMetadata, +) +from .automl_image_object_detection import ( + AutoMlImageObjectDetection, + AutoMlImageObjectDetectionInputs, + AutoMlImageObjectDetectionMetadata, +) +from .automl_image_segmentation import ( + AutoMlImageSegmentation, + AutoMlImageSegmentationInputs, + AutoMlImageSegmentationMetadata, +) +from .automl_tables import ( + AutoMlTables, + AutoMlTablesInputs, + AutoMlTablesMetadata, +) +from .automl_text_classification import ( + AutoMlTextClassification, + AutoMlTextClassificationInputs, +) +from .automl_text_extraction import ( + AutoMlTextExtraction, + AutoMlTextExtractionInputs, +) +from .automl_text_sentiment import ( + AutoMlTextSentiment, + AutoMlTextSentimentInputs, +) +from .automl_video_action_recognition import ( + AutoMlVideoActionRecognition, + AutoMlVideoActionRecognitionInputs, +) +from .automl_video_classification import ( + AutoMlVideoClassification, + AutoMlVideoClassificationInputs, +) +from .automl_video_object_tracking import ( + AutoMlVideoObjectTracking, + AutoMlVideoObjectTrackingInputs, +) +from .export_evaluated_data_items_config import ( + ExportEvaluatedDataItemsConfig, +) + +__all__ = ( + 'AutoMlImageClassification', + 'AutoMlImageClassificationInputs', + 'AutoMlImageClassificationMetadata', + 'AutoMlImageObjectDetection', + 'AutoMlImageObjectDetectionInputs', + 'AutoMlImageObjectDetectionMetadata', + 'AutoMlImageSegmentation', + 'AutoMlImageSegmentationInputs', + 'AutoMlImageSegmentationMetadata', + 'AutoMlTables', + 'AutoMlTablesInputs', + 'AutoMlTablesMetadata', + 'AutoMlTextClassification', + 'AutoMlTextClassificationInputs', + 'AutoMlTextExtraction', + 'AutoMlTextExtractionInputs', + 'AutoMlTextSentiment', + 'AutoMlTextSentimentInputs', + 'AutoMlVideoActionRecognition', + 'AutoMlVideoActionRecognitionInputs', + 'AutoMlVideoClassification', + 'AutoMlVideoClassificationInputs', + 'AutoMlVideoObjectTracking', + 'AutoMlVideoObjectTrackingInputs', + 'ExportEvaluatedDataItemsConfig', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_classification.py new file mode 100644 index 0000000000..8046ad8725 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_classification.py @@ -0,0 +1,156 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1.schema.trainingjob.definition',
+    manifest={
+        'AutoMlImageClassification',
+        'AutoMlImageClassificationInputs',
+        'AutoMlImageClassificationMetadata',
+    },
+)
+
+
+class AutoMlImageClassification(proto.Message):
+    r"""A TrainingJob that trains and uploads an AutoML Image
+    Classification Model.
+
+    Attributes:
+        inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageClassificationInputs):
+            The input parameters of this TrainingJob.
+        metadata (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageClassificationMetadata):
+            The metadata information.
+    """
+
+    inputs = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message='AutoMlImageClassificationInputs',
+    )
+    metadata = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message='AutoMlImageClassificationMetadata',
+    )
+
+
+class AutoMlImageClassificationInputs(proto.Message):
+    r"""
+    Attributes:
+        model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageClassificationInputs.ModelType):
+
+        base_model_id (str):
+            The ID of the ``base`` model. If it is specified, the new
+            model will be trained based on the ``base`` model.
+            Otherwise, the new model will be trained from scratch. The
+            ``base`` model must be in the same Project and Location as
+            the new Model to train, and have the same modelType.
+        budget_milli_node_hours (int):
+            The training budget of creating this model, expressed in
+            milli node hours, i.e. a value of 1,000 in this field means
+            1 node hour. The actual metadata.costMilliNodeHours will be
+            equal to or less than this value. If further model training
+            ceases to provide any improvements, it will stop without
+            using the full budget and the
+            metadata.successfulStopReason will be ``model-converged``.
+            Note, node_hour = actual_hour \* number_of_nodes_involved.
+            For modelType ``cloud``\ (default), the budget must be
+            between 8,000 and 800,000 milli node hours, inclusive. The
+            default value is 192,000, which represents one day in wall
+            time, assuming 8 nodes are used. For model types
+            ``mobile-tf-low-latency-1``, ``mobile-tf-versatile-1``,
+            ``mobile-tf-high-accuracy-1``, the training budget must be
+            between 1,000 and 100,000 milli node hours, inclusive. The
+            default value is 24,000, which represents one day in wall
+            time on a single node.
+        disable_early_stopping (bool):
+            Use the entire training budget. This disables
+            the early stopping feature. When false, the
+            early stopping feature is enabled, which means
+            that AutoML Image Classification might stop
+            training before the entire training budget has
+            been used.
+        multi_label (bool):
+            If false, a single-label (multi-class) Model
+            will be trained (i.e. assuming that for each
+            image at most one annotation may be applicable).
+            If true, a multi-label Model will be trained
+            (i.e. assuming that for each image multiple
+            annotations may be applicable).
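+
+    Example:
+        A minimal construction sketch, added here for illustration and
+        not part of the generated file; the field values below are
+        assumptions, and the import path follows this package's
+        layout::
+
+            from google.cloud.aiplatform.v1.schema.trainingjob import definition_v1
+
+            inputs = definition_v1.AutoMlImageClassificationInputs(
+                model_type=definition_v1.AutoMlImageClassificationInputs.ModelType.CLOUD,
+                budget_milli_node_hours=8000,  # minimum budget for the cloud model type
+                multi_label=False,  # single-label (multi-class) training
+            )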
+ """ + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD = 1 + MOBILE_TF_LOW_LATENCY_1 = 2 + MOBILE_TF_VERSATILE_1 = 3 + MOBILE_TF_HIGH_ACCURACY_1 = 4 + + model_type = proto.Field( + proto.ENUM, + number=1, + enum=ModelType, + ) + base_model_id = proto.Field( + proto.STRING, + number=2, + ) + budget_milli_node_hours = proto.Field( + proto.INT64, + number=3, + ) + disable_early_stopping = proto.Field( + proto.BOOL, + number=4, + ) + multi_label = proto.Field( + proto.BOOL, + number=5, + ) + + +class AutoMlImageClassificationMetadata(proto.Message): + r""" + Attributes: + cost_milli_node_hours (int): + The actual training cost of creating this + model, expressed in milli node hours, i.e. 1,000 + value in this field means 1 node hour. + Guaranteed to not exceed + inputs.budgetMilliNodeHours. + successful_stop_reason (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageClassificationMetadata.SuccessfulStopReason): + For successful job completions, this is the + reason why the job has finished. + """ + class SuccessfulStopReason(proto.Enum): + r"""""" + SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 + BUDGET_REACHED = 1 + MODEL_CONVERGED = 2 + + cost_milli_node_hours = proto.Field( + proto.INT64, + number=1, + ) + successful_stop_reason = proto.Field( + proto.ENUM, + number=2, + enum=SuccessfulStopReason, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py new file mode 100644 index 0000000000..52b7bbee80 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py @@ -0,0 +1,137 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.trainingjob.definition', + manifest={ + 'AutoMlImageObjectDetection', + 'AutoMlImageObjectDetectionInputs', + 'AutoMlImageObjectDetectionMetadata', + }, +) + + +class AutoMlImageObjectDetection(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Image Object + Detection Model. + + Attributes: + inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageObjectDetectionInputs): + The input parameters of this TrainingJob. 
+        metadata (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageObjectDetectionMetadata):
+            The metadata information.
+    """
+
+    inputs = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message='AutoMlImageObjectDetectionInputs',
+    )
+    metadata = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message='AutoMlImageObjectDetectionMetadata',
+    )
+
+
+class AutoMlImageObjectDetectionInputs(proto.Message):
+    r"""
+    Attributes:
+        model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageObjectDetectionInputs.ModelType):
+
+        budget_milli_node_hours (int):
+            The training budget of creating this model, expressed in
+            milli node hours, i.e. a value of 1,000 in this field means
+            1 node hour. The actual metadata.costMilliNodeHours will be
+            equal to or less than this value. If further model training
+            ceases to provide any improvements, it will stop without
+            using the full budget and the
+            metadata.successfulStopReason will be ``model-converged``.
+            Note, node_hour = actual_hour \* number_of_nodes_involved.
+            For modelType ``cloud``\ (default), the budget must be
+            between 20,000 and 900,000 milli node hours, inclusive. The
+            default value is 216,000, which represents one day in wall
+            time, assuming 9 nodes are used. For model types
+            ``mobile-tf-low-latency-1``, ``mobile-tf-versatile-1``,
+            ``mobile-tf-high-accuracy-1``, the training budget must be
+            between 1,000 and 100,000 milli node hours, inclusive. The
+            default value is 24,000, which represents one day in wall
+            time on a single node.
+        disable_early_stopping (bool):
+            Use the entire training budget. This disables
+            the early stopping feature. When false, the
+            early stopping feature is enabled, which means
+            that AutoML Image Object Detection might stop
+            training before the entire training budget has
+            been used.
+    """
+    class ModelType(proto.Enum):
+        r""""""
+        MODEL_TYPE_UNSPECIFIED = 0
+        CLOUD_HIGH_ACCURACY_1 = 1
+        CLOUD_LOW_LATENCY_1 = 2
+        MOBILE_TF_LOW_LATENCY_1 = 3
+        MOBILE_TF_VERSATILE_1 = 4
+        MOBILE_TF_HIGH_ACCURACY_1 = 5
+
+    model_type = proto.Field(
+        proto.ENUM,
+        number=1,
+        enum=ModelType,
+    )
+    budget_milli_node_hours = proto.Field(
+        proto.INT64,
+        number=2,
+    )
+    disable_early_stopping = proto.Field(
+        proto.BOOL,
+        number=3,
+    )
+
+
+class AutoMlImageObjectDetectionMetadata(proto.Message):
+    r"""
+    Attributes:
+        cost_milli_node_hours (int):
+            The actual training cost of creating this
+            model, expressed in milli node hours, i.e. a
+            value of 1,000 in this field means 1 node hour.
+            Guaranteed to not exceed
+            inputs.budgetMilliNodeHours.
+        successful_stop_reason (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageObjectDetectionMetadata.SuccessfulStopReason):
+            For successful job completions, this is the
+            reason why the job has finished.
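+
+    Example:
+        A minimal inspection sketch, added here for illustration and
+        not part of the generated file; in practice this message is
+        produced by the training service, so the hand-built instance
+        and its values are assumptions::
+
+            from google.cloud.aiplatform.v1.schema.trainingjob import definition_v1
+
+            Metadata = definition_v1.AutoMlImageObjectDetectionMetadata
+            metadata = Metadata(
+                cost_milli_node_hours=24000,
+                successful_stop_reason=Metadata.SuccessfulStopReason.MODEL_CONVERGED,
+            )
+            # The cost is guaranteed not to exceed inputs.budgetMilliNodeHours.
+            print(metadata.cost_milli_node_hours, metadata.successful_stop_reason)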
+ """ + class SuccessfulStopReason(proto.Enum): + r"""""" + SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 + BUDGET_REACHED = 1 + MODEL_CONVERGED = 2 + + cost_milli_node_hours = proto.Field( + proto.INT64, + number=1, + ) + successful_stop_reason = proto.Field( + proto.ENUM, + number=2, + enum=SuccessfulStopReason, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_segmentation.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_segmentation.py new file mode 100644 index 0000000000..8e3728f200 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_segmentation.py @@ -0,0 +1,131 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.trainingjob.definition', + manifest={ + 'AutoMlImageSegmentation', + 'AutoMlImageSegmentationInputs', + 'AutoMlImageSegmentationMetadata', + }, +) + + +class AutoMlImageSegmentation(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Image + Segmentation Model. + + Attributes: + inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageSegmentationInputs): + The input parameters of this TrainingJob. + metadata (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageSegmentationMetadata): + The metadata information. + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlImageSegmentationInputs', + ) + metadata = proto.Field( + proto.MESSAGE, + number=2, + message='AutoMlImageSegmentationMetadata', + ) + + +class AutoMlImageSegmentationInputs(proto.Message): + r""" + Attributes: + model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageSegmentationInputs.ModelType): + + budget_milli_node_hours (int): + The training budget of creating this model, expressed in + milli node hours i.e. 1,000 value in this field means 1 node + hour. The actual metadata.costMilliNodeHours will be equal + or less than this value. If further model training ceases to + provide any improvements, it will stop without using the + full budget and the metadata.successfulStopReason will be + ``model-converged``. Note, node_hour = actual_hour \* + number_of_nodes_involved. Or actaul_wall_clock_hours = + train_budget_milli_node_hours / (number_of_nodes_involved \* + 1000) For modelType ``cloud-high-accuracy-1``\ (default), + the budget must be between 20,000 and 2,000,000 milli node + hours, inclusive. The default value is 192,000 which + represents one day in wall time (1000 milli \* 24 hours \* 8 + nodes). + base_model_id (str): + The ID of the ``base`` model. If it is specified, the new + model will be trained based on the ``base`` model. + Otherwise, the new model will be trained from scratch. 
+            ``base`` model must be in the same Project and Location as
+            the new Model to train, and have the same modelType.
+    """
+    class ModelType(proto.Enum):
+        r""""""
+        MODEL_TYPE_UNSPECIFIED = 0
+        CLOUD_HIGH_ACCURACY_1 = 1
+        CLOUD_LOW_ACCURACY_1 = 2
+        MOBILE_TF_LOW_LATENCY_1 = 3
+
+    model_type = proto.Field(
+        proto.ENUM,
+        number=1,
+        enum=ModelType,
+    )
+    budget_milli_node_hours = proto.Field(
+        proto.INT64,
+        number=2,
+    )
+    base_model_id = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+
+
+class AutoMlImageSegmentationMetadata(proto.Message):
+    r"""
+    Attributes:
+        cost_milli_node_hours (int):
+            The actual training cost of creating this
+            model, expressed in milli node hours, i.e. a
+            value of 1,000 in this field means 1 node hour.
+            Guaranteed to not exceed
+            inputs.budgetMilliNodeHours.
+        successful_stop_reason (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageSegmentationMetadata.SuccessfulStopReason):
+            For successful job completions, this is the
+            reason why the job has finished.
+    """
+    class SuccessfulStopReason(proto.Enum):
+        r""""""
+        SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0
+        BUDGET_REACHED = 1
+        MODEL_CONVERGED = 2
+
+    cost_milli_node_hours = proto.Field(
+        proto.INT64,
+        number=1,
+    )
+    successful_stop_reason = proto.Field(
+        proto.ENUM,
+        number=2,
+        enum=SuccessfulStopReason,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py
new file mode 100644
index 0000000000..26823412a8
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py
@@ -0,0 +1,499 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types import export_evaluated_data_items_config as gcastd_export_evaluated_data_items_config
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1.schema.trainingjob.definition',
+    manifest={
+        'AutoMlTables',
+        'AutoMlTablesInputs',
+        'AutoMlTablesMetadata',
+    },
+)
+
+
+class AutoMlTables(proto.Message):
+    r"""A TrainingJob that trains and uploads an AutoML Tables Model.
+    Attributes:
+        inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs):
+            The input parameters of this TrainingJob.
+        metadata (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesMetadata):
+            The metadata information.
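+
+    Example:
+        A minimal construction sketch, added here for illustration and
+        not part of the generated file; the column names, objective,
+        and budget below are assumptions::
+
+            from google.cloud.aiplatform.v1.schema.trainingjob import definition_v1
+
+            job = definition_v1.AutoMlTables(
+                inputs=definition_v1.AutoMlTablesInputs(
+                    prediction_type='classification',
+                    target_column='label',
+                    optimization_objective='minimize-log-loss',
+                    train_budget_milli_node_hours=1000,  # minimum allowed budget
+                ),
+            )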
+ """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlTablesInputs', + ) + metadata = proto.Field( + proto.MESSAGE, + number=2, + message='AutoMlTablesMetadata', + ) + + +class AutoMlTablesInputs(proto.Message): + r""" + Attributes: + optimization_objective_recall_value (float): + Required when optimization_objective is + "maximize-precision-at-recall". Must be between 0 and 1, + inclusive. + optimization_objective_precision_value (float): + Required when optimization_objective is + "maximize-recall-at-precision". Must be between 0 and 1, + inclusive. + prediction_type (str): + The type of prediction the Model is to + produce. "classification" - Predict one out of + multiple target values is + picked for each row. + "regression" - Predict a value based on its + relation to other values. This + type is available only to columns that contain + semantically numeric values, i.e. integers or + floating point number, even if + stored as e.g. strings. + target_column (str): + The column name of the target column that the + model is to predict. + transformations (Sequence[google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation]): + Each transformation will apply transform + function to given input column. And the result + will be used for training. When creating + transformation for BigQuery Struct column, the + column should be flattened using "." as the + delimiter. + optimization_objective (str): + Objective function the model is optimizing + towards. The training process creates a model + that maximizes/minimizes the value of the + objective function over the validation set. + + The supported optimization objectives depend on + the prediction type. If the field is not set, a + default objective function is used. + classification (binary): + "maximize-au-roc" (default) - Maximize the + area under the receiver + operating characteristic (ROC) curve. + "minimize-log-loss" - Minimize log loss. + "maximize-au-prc" - Maximize the area under + the precision-recall curve. "maximize- + precision-at-recall" - Maximize precision for a + specified + recall value. "maximize-recall-at-precision" - + Maximize recall for a specified + precision value. + classification (multi-class): + "minimize-log-loss" (default) - Minimize log + loss. + regression: + "minimize-rmse" (default) - Minimize root- + mean-squared error (RMSE). "minimize-mae" - + Minimize mean-absolute error (MAE). "minimize- + rmsle" - Minimize root-mean-squared log error + (RMSLE). + train_budget_milli_node_hours (int): + Required. The train budget of creating this + model, expressed in milli node hours i.e. 1,000 + value in this field means 1 node hour. + The training cost of the model will not exceed + this budget. The final cost will be attempted to + be close to the budget, though may end up being + (even) noticeably smaller - at the backend's + discretion. This especially may happen when + further model training ceases to provide any + improvements. + If the budget is set to a value known to be + insufficient to train a model for the given + dataset, the training won't be attempted and + will error. + + The train budget must be between 1,000 and + 72,000 milli node hours, inclusive. + disable_early_stopping (bool): + Use the entire training budget. This disables + the early stopping feature. By default, the + early stopping feature is enabled, which means + that AutoML Tables might stop training before + the entire training budget has been used. 
+        weight_column_name (str):
+            Column name that should be used as the weight
+            column. Higher values in this column give more
+            importance to the row during model training. The
+            column must have numeric values between 0 and
+            10,000, inclusive; 0 means the row is ignored
+            for training. If the weight column field is not
+            set, then all rows are assumed to have an equal
+            weight of 1.
+        export_evaluated_data_items_config (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.ExportEvaluatedDataItemsConfig):
+            Configuration for exporting test set
+            predictions to a BigQuery table. If this
+            configuration is absent, then the export is not
+            performed.
+        additional_experiments (Sequence[str]):
+            Additional experiment flags for the Tables
+            training pipeline.
+    """
+
+    class Transformation(proto.Message):
+        r"""
+        Attributes:
+            auto (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.AutoTransformation):
+
+            numeric (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.NumericTransformation):
+
+            categorical (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.CategoricalTransformation):
+
+            timestamp (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.TimestampTransformation):
+
+            text (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.TextTransformation):
+
+            repeated_numeric (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.NumericArrayTransformation):
+
+            repeated_categorical (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.CategoricalArrayTransformation):
+
+            repeated_text (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.TextArrayTransformation):
+
+        """
+
+        class AutoTransformation(proto.Message):
+            r"""Training pipeline will infer the proper transformation based
+            on the statistics of the dataset.
+
+            Attributes:
+                column_name (str):
+
+            """
+
+            column_name = proto.Field(
+                proto.STRING,
+                number=1,
+            )
+
+        class NumericTransformation(proto.Message):
+            r"""Training pipeline will perform the following transformation
+            functions.
+
+            -  The value converted to float32.
+            -  The z_score of the value.
+            -  log(value+1) when the value is greater than or equal to 0.
+               Otherwise, this transformation is not applied and the value is
+               considered a missing value.
+            -  z_score of log(value+1) when the value is greater than or equal
+               to 0. Otherwise, this transformation is not applied and the value
+               is considered a missing value.
+            -  A boolean value that indicates whether the value is valid.
+
+            Attributes:
+                column_name (str):
+
+                invalid_values_allowed (bool):
+                    If invalid values are allowed, the training
+                    pipeline will create a boolean feature that
+                    indicates whether the value is valid. Otherwise,
+                    the training pipeline will discard the input row
+                    from the training data.
+            """
+
+            column_name = proto.Field(
+                proto.STRING,
+                number=1,
+            )
+            invalid_values_allowed = proto.Field(
+                proto.BOOL,
+                number=2,
+            )
+
+        class CategoricalTransformation(proto.Message):
+            r"""Training pipeline will perform the following transformation
+            functions.
+
+            -  The categorical string as is--no change to case, punctuation,
+               spelling, tense, and so on.
+            -  Convert the category name to a dictionary lookup index and
+               generate an embedding for each index.
+            -  Categories that appear less than 5 times in the training dataset
+               are treated as the "unknown" category. The "unknown" category
+               gets its own special lookup index and resulting embedding.
+
+            Attributes:
+                column_name (str):
+
+            """
+
+            column_name = proto.Field(
+                proto.STRING,
+                number=1,
+            )
+
+        class TimestampTransformation(proto.Message):
+            r"""Training pipeline will perform the following transformation
+            functions.
+
+            -  Apply the transformation functions for Numerical columns.
+            -  Determine the year, month, day, and weekday. Treat each value
+               from the timestamp as a Categorical column.
+            -  Invalid numerical values (for example, values that fall outside
+               of a typical timestamp range, or are extreme values) receive no
+               special treatment and are not removed.
+
+            Attributes:
+                column_name (str):
+
+                time_format (str):
+                    The format in which that time field is expressed. The
+                    time_format must either be one of:
+
+                    -  ``unix-seconds``
+                    -  ``unix-milliseconds``
+                    -  ``unix-microseconds``
+                    -  ``unix-nanoseconds`` (for respectively number of seconds,
+                       milliseconds, microseconds and nanoseconds since start of
+                       the Unix epoch); or be written in ``strftime`` syntax. If
+                       time_format is not set, then the default format is RFC
+                       3339 ``date-time`` format, where ``time-offset`` =
+                       ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z)
+                invalid_values_allowed (bool):
+                    If invalid values are allowed, the training
+                    pipeline will create a boolean feature that
+                    indicates whether the value is valid. Otherwise,
+                    the training pipeline will discard the input row
+                    from the training data.
+            """
+
+            column_name = proto.Field(
+                proto.STRING,
+                number=1,
+            )
+            time_format = proto.Field(
+                proto.STRING,
+                number=2,
+            )
+            invalid_values_allowed = proto.Field(
+                proto.BOOL,
+                number=3,
+            )
+
+        class TextTransformation(proto.Message):
+            r"""Training pipeline will perform the following transformation
+            functions.
+
+            -  The text as is--no change to case, punctuation, spelling, tense,
+               and so on.
+            -  Tokenize text to words. Convert each word to a dictionary lookup
+               index and generate an embedding for each index. Combine the
+               embedding of all elements into a single embedding using the mean.
+            -  Tokenization is based on unicode script boundaries.
+            -  Missing values get their own lookup index and resulting
+               embedding.
+            -  Stop-words receive no special treatment and are not removed.
+
+            Attributes:
+                column_name (str):
+
+            """
+
+            column_name = proto.Field(
+                proto.STRING,
+                number=1,
+            )
+
+        class NumericArrayTransformation(proto.Message):
+            r"""Treats the column as a numerical array and performs the
+            following transformation functions.
+
+            -  All transformations for Numerical types are applied to the
+               average of all the elements.
+            -  The average of empty arrays is treated as zero.
+
+            Attributes:
+                column_name (str):
+
+                invalid_values_allowed (bool):
+                    If invalid values are allowed, the training
+                    pipeline will create a boolean feature that
+                    indicates whether the value is valid. Otherwise,
+                    the training pipeline will discard the input row
+                    from the training data.
+            """
+
+            column_name = proto.Field(
+                proto.STRING,
+                number=1,
+            )
+            invalid_values_allowed = proto.Field(
+                proto.BOOL,
+                number=2,
+            )
+
+        class CategoricalArrayTransformation(proto.Message):
+            r"""Treats the column as a categorical array and performs the
+            following transformation functions.
+
+            -  For each element in the array, convert the category name to a
+               dictionary lookup index and generate an embedding for each index.
+               Combine the embedding of all elements into a single embedding
+               using the mean.
+            -  Empty arrays are treated as an embedding of zeroes.
+
+            Attributes:
+                column_name (str):
+
+            """
+
+            column_name = proto.Field(
+                proto.STRING,
+                number=1,
+            )
+
+        class TextArrayTransformation(proto.Message):
+            r"""Treats the column as a text array and performs the following
+            transformation functions.
+
+            -  Concatenate all text values in the array into a single text value
+               using a space (" ") as a delimiter, and then treat the result as
+               a single text value. Apply the transformations for Text columns.
+            -  Empty arrays are treated as empty text.
+
+            Attributes:
+                column_name (str):
+
+            """
+
+            column_name = proto.Field(
+                proto.STRING,
+                number=1,
+            )
+
+        auto = proto.Field(
+            proto.MESSAGE,
+            number=1,
+            oneof='transformation_detail',
+            message='AutoMlTablesInputs.Transformation.AutoTransformation',
+        )
+        numeric = proto.Field(
+            proto.MESSAGE,
+            number=2,
+            oneof='transformation_detail',
+            message='AutoMlTablesInputs.Transformation.NumericTransformation',
+        )
+        categorical = proto.Field(
+            proto.MESSAGE,
+            number=3,
+            oneof='transformation_detail',
+            message='AutoMlTablesInputs.Transformation.CategoricalTransformation',
+        )
+        timestamp = proto.Field(
+            proto.MESSAGE,
+            number=4,
+            oneof='transformation_detail',
+            message='AutoMlTablesInputs.Transformation.TimestampTransformation',
+        )
+        text = proto.Field(
+            proto.MESSAGE,
+            number=5,
+            oneof='transformation_detail',
+            message='AutoMlTablesInputs.Transformation.TextTransformation',
+        )
+        repeated_numeric = proto.Field(
+            proto.MESSAGE,
+            number=6,
+            oneof='transformation_detail',
+            message='AutoMlTablesInputs.Transformation.NumericArrayTransformation',
+        )
+        repeated_categorical = proto.Field(
+            proto.MESSAGE,
+            number=7,
+            oneof='transformation_detail',
+            message='AutoMlTablesInputs.Transformation.CategoricalArrayTransformation',
+        )
+        repeated_text = proto.Field(
+            proto.MESSAGE,
+            number=8,
+            oneof='transformation_detail',
+            message='AutoMlTablesInputs.Transformation.TextArrayTransformation',
+        )
+
+    optimization_objective_recall_value = proto.Field(
+        proto.FLOAT,
+        number=5,
+        oneof='additional_optimization_objective_config',
+    )
+    optimization_objective_precision_value = proto.Field(
+        proto.FLOAT,
+        number=6,
+        oneof='additional_optimization_objective_config',
+    )
+    prediction_type = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    target_column = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    transformations = proto.RepeatedField(
+        proto.MESSAGE,
+        number=3,
+        message=Transformation,
+    )
+    optimization_objective = proto.Field(
+        proto.STRING,
+        number=4,
+    )
+    train_budget_milli_node_hours = proto.Field(
+        proto.INT64,
+        number=7,
+    )
+    disable_early_stopping = proto.Field(
+        proto.BOOL,
+        number=8,
+    )
+    weight_column_name = proto.Field(
+        proto.STRING,
+        number=9,
+    )
+    export_evaluated_data_items_config = proto.Field(
+        proto.MESSAGE,
+        number=10,
+        message=gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig,
+    )
+    additional_experiments = proto.RepeatedField(
+        proto.STRING,
+        number=11,
+    )
+
+
+class AutoMlTablesMetadata(proto.Message):
+    r"""Model metadata specific to AutoML Tables.
+    Attributes:
+        train_cost_milli_node_hours (int):
+            Output only. The actual training cost of the
+            model, expressed in milli node hours, i.e. a
+            value of 1,000 in this field means 1 node hour.
+ Guaranteed to not exceed the train budget. + """ + + train_cost_milli_node_hours = proto.Field( + proto.INT64, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_classification.py new file mode 100644 index 0000000000..c1fb171c48 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_classification.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.trainingjob.definition', + manifest={ + 'AutoMlTextClassification', + 'AutoMlTextClassificationInputs', + }, +) + + +class AutoMlTextClassification(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Text + Classification Model. + + Attributes: + inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTextClassificationInputs): + The input parameters of this TrainingJob. + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlTextClassificationInputs', + ) + + +class AutoMlTextClassificationInputs(proto.Message): + r""" + Attributes: + multi_label (bool): + + """ + + multi_label = proto.Field( + proto.BOOL, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_extraction.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_extraction.py new file mode 100644 index 0000000000..50963784c9 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_extraction.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.trainingjob.definition', + manifest={ + 'AutoMlTextExtraction', + 'AutoMlTextExtractionInputs', + }, +) + + +class AutoMlTextExtraction(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Text + Extraction Model. 
+ + Attributes: + inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTextExtractionInputs): + The input parameters of this TrainingJob. + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlTextExtractionInputs', + ) + + +class AutoMlTextExtractionInputs(proto.Message): + r""" """ + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_sentiment.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_sentiment.py new file mode 100644 index 0000000000..9f571275b7 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_sentiment.py @@ -0,0 +1,66 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.trainingjob.definition', + manifest={ + 'AutoMlTextSentiment', + 'AutoMlTextSentimentInputs', + }, +) + + +class AutoMlTextSentiment(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Text + Sentiment Model. + + Attributes: + inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTextSentimentInputs): + The input parameters of this TrainingJob. + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlTextSentimentInputs', + ) + + +class AutoMlTextSentimentInputs(proto.Message): + r""" + Attributes: + sentiment_max (int): + A sentiment is expressed as an integer + ordinal, where higher value means a more + positive sentiment. The range of sentiments that + will be used is between 0 and sentimentMax + (inclusive on both ends), and all the values in + the range must be represented in the dataset + before a model can be created. + Only the Annotations with this sentimentMax will + be used for training. sentimentMax value must be + between 1 and 10 (inclusive). + """ + + sentiment_max = proto.Field( + proto.INT32, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_action_recognition.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_action_recognition.py new file mode 100644 index 0000000000..437445c667 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_action_recognition.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
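A short sketch of the sentiment inputs above, again assuming the staged definition_v1 package is importable. For a five-point ordinal scale the scores run 0 through 4, so sentiment_max is 4; per the docstring, the value must lie between 1 and 10 and every ordinal in the range must appear in the dataset.

    from google.cloud.aiplatform.v1.schema.trainingjob import definition_v1

    # Five ordinal classes: 0, 1, 2, 3, 4.
    job = definition_v1.AutoMlTextSentiment(
        inputs=definition_v1.AutoMlTextSentimentInputs(sentiment_max=4),
    )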
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.trainingjob.definition', + manifest={ + 'AutoMlVideoActionRecognition', + 'AutoMlVideoActionRecognitionInputs', + }, +) + + +class AutoMlVideoActionRecognition(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Video Action + Recognition Model. + + Attributes: + inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoActionRecognitionInputs): + The input parameters of this TrainingJob. + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlVideoActionRecognitionInputs', + ) + + +class AutoMlVideoActionRecognitionInputs(proto.Message): + r""" + Attributes: + model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoActionRecognitionInputs.ModelType): + + """ + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD = 1 + MOBILE_VERSATILE_1 = 2 + MOBILE_JETSON_VERSATILE_1 = 3 + MOBILE_CORAL_VERSATILE_1 = 4 + + model_type = proto.Field( + proto.ENUM, + number=1, + enum=ModelType, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_classification.py new file mode 100644 index 0000000000..d78158615a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_classification.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.trainingjob.definition', + manifest={ + 'AutoMlVideoClassification', + 'AutoMlVideoClassificationInputs', + }, +) + + +class AutoMlVideoClassification(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Video + Classification Model. + + Attributes: + inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoClassificationInputs): + The input parameters of this TrainingJob. 
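The ModelType enum above carries the two new Video Action Recognition constants this change advertises, presumably MOBILE_JETSON_VERSATILE_1 and MOBILE_CORAL_VERSATILE_1 given that they hold the highest tag numbers. A selection sketch, assuming the staged definition_v1 package is importable:

    from google.cloud.aiplatform.v1.schema.trainingjob import definition_v1

    Inputs = definition_v1.AutoMlVideoActionRecognitionInputs
    # CLOUD targets hosted serving; the MOBILE_* constants target edge devices.
    inputs = Inputs(model_type=Inputs.ModelType.MOBILE_CORAL_VERSATILE_1)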
+ """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlVideoClassificationInputs', + ) + + +class AutoMlVideoClassificationInputs(proto.Message): + r""" + Attributes: + model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoClassificationInputs.ModelType): + + """ + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD = 1 + MOBILE_VERSATILE_1 = 2 + MOBILE_JETSON_VERSATILE_1 = 3 + + model_type = proto.Field( + proto.ENUM, + number=1, + enum=ModelType, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_object_tracking.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_object_tracking.py new file mode 100644 index 0000000000..8ec377878b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_object_tracking.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.trainingjob.definition', + manifest={ + 'AutoMlVideoObjectTracking', + 'AutoMlVideoObjectTrackingInputs', + }, +) + + +class AutoMlVideoObjectTracking(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Video + ObjectTracking Model. + + Attributes: + inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoObjectTrackingInputs): + The input parameters of this TrainingJob. + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlVideoObjectTrackingInputs', + ) + + +class AutoMlVideoObjectTrackingInputs(proto.Message): + r""" + Attributes: + model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoObjectTrackingInputs.ModelType): + + """ + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD = 1 + MOBILE_VERSATILE_1 = 2 + MOBILE_CORAL_VERSATILE_1 = 3 + MOBILE_CORAL_LOW_LATENCY_1 = 4 + MOBILE_JETSON_VERSATILE_1 = 5 + MOBILE_JETSON_LOW_LATENCY_1 = 6 + + model_type = proto.Field( + proto.ENUM, + number=1, + enum=ModelType, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/export_evaluated_data_items_config.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/export_evaluated_data_items_config.py new file mode 100644 index 0000000000..8ac69278ee --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/export_evaluated_data_items_config.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1.schema.trainingjob.definition',
+    manifest={
+        'ExportEvaluatedDataItemsConfig',
+    },
+)
+
+
+class ExportEvaluatedDataItemsConfig(proto.Message):
+    r"""Configuration for exporting test set predictions to a
+    BigQuery table.
+
+    Attributes:
+        destination_bigquery_uri (str):
+            URI of desired destination BigQuery table. Expected format:
+            bq://<project_id>:<dataset_id>:<table>
+
+            If not specified, then results are exported to the following
+            auto-created BigQuery table:
+            <project_id>:export_evaluated_examples_<model_name>_<yyyy_MM_dd'T'HH_mm_ss_SSS'Z'>.evaluated_examples
+        override_existing_table (bool):
+            If true and an export destination is
+            specified, then the contents of the destination
+            are overwritten. Otherwise, if the export
+            destination already exists, then the export
+            operation fails.
+    """
+
+    destination_bigquery_uri = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    override_existing_table = proto.Field(
+        proto.BOOL,
+        number=2,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/__init__.py
new file mode 100644
index 0000000000..ec7999298c
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/__init__.py
@@ -0,0 +1,390 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
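A construction sketch for the export config defined above; the table URI is a hypothetical value in the bq://<project_id>:<dataset_id>:<table> form, and the staged definition_v1 package is assumed importable.

    from google.cloud.aiplatform.v1.schema.trainingjob import definition_v1

    export_config = definition_v1.ExportEvaluatedDataItemsConfig(
        destination_bigquery_uri="bq://my-project:my_dataset:evaluated_examples",
        override_existing_table=False,  # fail rather than overwrite an existing table
    )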
+# + +from .services.dataset_service import DatasetServiceClient +from .services.dataset_service import DatasetServiceAsyncClient +from .services.endpoint_service import EndpointServiceClient +from .services.endpoint_service import EndpointServiceAsyncClient +from .services.job_service import JobServiceClient +from .services.job_service import JobServiceAsyncClient +from .services.migration_service import MigrationServiceClient +from .services.migration_service import MigrationServiceAsyncClient +from .services.model_service import ModelServiceClient +from .services.model_service import ModelServiceAsyncClient +from .services.pipeline_service import PipelineServiceClient +from .services.pipeline_service import PipelineServiceAsyncClient +from .services.prediction_service import PredictionServiceClient +from .services.prediction_service import PredictionServiceAsyncClient +from .services.specialist_pool_service import SpecialistPoolServiceClient +from .services.specialist_pool_service import SpecialistPoolServiceAsyncClient + +from .types.accelerator_type import AcceleratorType +from .types.annotation import Annotation +from .types.annotation_spec import AnnotationSpec +from .types.artifact import Artifact +from .types.batch_prediction_job import BatchPredictionJob +from .types.completion_stats import CompletionStats +from .types.context import Context +from .types.custom_job import ContainerSpec +from .types.custom_job import CustomJob +from .types.custom_job import CustomJobSpec +from .types.custom_job import PythonPackageSpec +from .types.custom_job import Scheduling +from .types.custom_job import WorkerPoolSpec +from .types.data_item import DataItem +from .types.data_labeling_job import ActiveLearningConfig +from .types.data_labeling_job import DataLabelingJob +from .types.data_labeling_job import SampleConfig +from .types.data_labeling_job import TrainingConfig +from .types.dataset import Dataset +from .types.dataset import ExportDataConfig +from .types.dataset import ImportDataConfig +from .types.dataset_service import CreateDatasetOperationMetadata +from .types.dataset_service import CreateDatasetRequest +from .types.dataset_service import DeleteDatasetRequest +from .types.dataset_service import ExportDataOperationMetadata +from .types.dataset_service import ExportDataRequest +from .types.dataset_service import ExportDataResponse +from .types.dataset_service import GetAnnotationSpecRequest +from .types.dataset_service import GetDatasetRequest +from .types.dataset_service import ImportDataOperationMetadata +from .types.dataset_service import ImportDataRequest +from .types.dataset_service import ImportDataResponse +from .types.dataset_service import ListAnnotationsRequest +from .types.dataset_service import ListAnnotationsResponse +from .types.dataset_service import ListDataItemsRequest +from .types.dataset_service import ListDataItemsResponse +from .types.dataset_service import ListDatasetsRequest +from .types.dataset_service import ListDatasetsResponse +from .types.dataset_service import UpdateDatasetRequest +from .types.deployed_model_ref import DeployedModelRef +from .types.encryption_spec import EncryptionSpec +from .types.endpoint import DeployedModel +from .types.endpoint import Endpoint +from .types.endpoint_service import CreateEndpointOperationMetadata +from .types.endpoint_service import CreateEndpointRequest +from .types.endpoint_service import DeleteEndpointRequest +from .types.endpoint_service import DeployModelOperationMetadata +from .types.endpoint_service import 
DeployModelRequest +from .types.endpoint_service import DeployModelResponse +from .types.endpoint_service import GetEndpointRequest +from .types.endpoint_service import ListEndpointsRequest +from .types.endpoint_service import ListEndpointsResponse +from .types.endpoint_service import UndeployModelOperationMetadata +from .types.endpoint_service import UndeployModelRequest +from .types.endpoint_service import UndeployModelResponse +from .types.endpoint_service import UpdateEndpointRequest +from .types.env_var import EnvVar +from .types.execution import Execution +from .types.hyperparameter_tuning_job import HyperparameterTuningJob +from .types.io import BigQueryDestination +from .types.io import BigQuerySource +from .types.io import ContainerRegistryDestination +from .types.io import GcsDestination +from .types.io import GcsSource +from .types.job_service import CancelBatchPredictionJobRequest +from .types.job_service import CancelCustomJobRequest +from .types.job_service import CancelDataLabelingJobRequest +from .types.job_service import CancelHyperparameterTuningJobRequest +from .types.job_service import CreateBatchPredictionJobRequest +from .types.job_service import CreateCustomJobRequest +from .types.job_service import CreateDataLabelingJobRequest +from .types.job_service import CreateHyperparameterTuningJobRequest +from .types.job_service import DeleteBatchPredictionJobRequest +from .types.job_service import DeleteCustomJobRequest +from .types.job_service import DeleteDataLabelingJobRequest +from .types.job_service import DeleteHyperparameterTuningJobRequest +from .types.job_service import GetBatchPredictionJobRequest +from .types.job_service import GetCustomJobRequest +from .types.job_service import GetDataLabelingJobRequest +from .types.job_service import GetHyperparameterTuningJobRequest +from .types.job_service import ListBatchPredictionJobsRequest +from .types.job_service import ListBatchPredictionJobsResponse +from .types.job_service import ListCustomJobsRequest +from .types.job_service import ListCustomJobsResponse +from .types.job_service import ListDataLabelingJobsRequest +from .types.job_service import ListDataLabelingJobsResponse +from .types.job_service import ListHyperparameterTuningJobsRequest +from .types.job_service import ListHyperparameterTuningJobsResponse +from .types.job_state import JobState +from .types.machine_resources import AutomaticResources +from .types.machine_resources import AutoscalingMetricSpec +from .types.machine_resources import BatchDedicatedResources +from .types.machine_resources import DedicatedResources +from .types.machine_resources import DiskSpec +from .types.machine_resources import MachineSpec +from .types.machine_resources import ResourcesConsumed +from .types.manual_batch_tuning_parameters import ManualBatchTuningParameters +from .types.migratable_resource import MigratableResource +from .types.migration_service import BatchMigrateResourcesOperationMetadata +from .types.migration_service import BatchMigrateResourcesRequest +from .types.migration_service import BatchMigrateResourcesResponse +from .types.migration_service import MigrateResourceRequest +from .types.migration_service import MigrateResourceResponse +from .types.migration_service import SearchMigratableResourcesRequest +from .types.migration_service import SearchMigratableResourcesResponse +from .types.model import Model +from .types.model import ModelContainerSpec +from .types.model import Port +from .types.model import PredictSchemata +from .types.model_evaluation import 
ModelEvaluation +from .types.model_evaluation_slice import ModelEvaluationSlice +from .types.model_service import DeleteModelRequest +from .types.model_service import ExportModelOperationMetadata +from .types.model_service import ExportModelRequest +from .types.model_service import ExportModelResponse +from .types.model_service import GetModelEvaluationRequest +from .types.model_service import GetModelEvaluationSliceRequest +from .types.model_service import GetModelRequest +from .types.model_service import ListModelEvaluationSlicesRequest +from .types.model_service import ListModelEvaluationSlicesResponse +from .types.model_service import ListModelEvaluationsRequest +from .types.model_service import ListModelEvaluationsResponse +from .types.model_service import ListModelsRequest +from .types.model_service import ListModelsResponse +from .types.model_service import UpdateModelRequest +from .types.model_service import UploadModelOperationMetadata +from .types.model_service import UploadModelRequest +from .types.model_service import UploadModelResponse +from .types.operation import DeleteOperationMetadata +from .types.operation import GenericOperationMetadata +from .types.pipeline_job import PipelineJob +from .types.pipeline_job import PipelineJobDetail +from .types.pipeline_job import PipelineTaskDetail +from .types.pipeline_job import PipelineTaskExecutorDetail +from .types.pipeline_service import CancelPipelineJobRequest +from .types.pipeline_service import CancelTrainingPipelineRequest +from .types.pipeline_service import CreatePipelineJobRequest +from .types.pipeline_service import CreateTrainingPipelineRequest +from .types.pipeline_service import DeletePipelineJobRequest +from .types.pipeline_service import DeleteTrainingPipelineRequest +from .types.pipeline_service import GetPipelineJobRequest +from .types.pipeline_service import GetTrainingPipelineRequest +from .types.pipeline_service import ListPipelineJobsRequest +from .types.pipeline_service import ListPipelineJobsResponse +from .types.pipeline_service import ListTrainingPipelinesRequest +from .types.pipeline_service import ListTrainingPipelinesResponse +from .types.pipeline_state import PipelineState +from .types.prediction_service import PredictRequest +from .types.prediction_service import PredictResponse +from .types.specialist_pool import SpecialistPool +from .types.specialist_pool_service import CreateSpecialistPoolOperationMetadata +from .types.specialist_pool_service import CreateSpecialistPoolRequest +from .types.specialist_pool_service import DeleteSpecialistPoolRequest +from .types.specialist_pool_service import GetSpecialistPoolRequest +from .types.specialist_pool_service import ListSpecialistPoolsRequest +from .types.specialist_pool_service import ListSpecialistPoolsResponse +from .types.specialist_pool_service import UpdateSpecialistPoolOperationMetadata +from .types.specialist_pool_service import UpdateSpecialistPoolRequest +from .types.study import Measurement +from .types.study import StudySpec +from .types.study import Trial +from .types.training_pipeline import FilterSplit +from .types.training_pipeline import FractionSplit +from .types.training_pipeline import InputDataConfig +from .types.training_pipeline import PredefinedSplit +from .types.training_pipeline import TimestampSplit +from .types.training_pipeline import TrainingPipeline +from .types.user_action_reference import UserActionReference +from .types.value import Value + +__all__ = ( + 'DatasetServiceAsyncClient', + 'EndpointServiceAsyncClient', + 
'JobServiceAsyncClient', + 'MigrationServiceAsyncClient', + 'ModelServiceAsyncClient', + 'PipelineServiceAsyncClient', + 'PredictionServiceAsyncClient', + 'SpecialistPoolServiceAsyncClient', +'AcceleratorType', +'ActiveLearningConfig', +'Annotation', +'AnnotationSpec', +'Artifact', +'AutomaticResources', +'AutoscalingMetricSpec', +'BatchDedicatedResources', +'BatchMigrateResourcesOperationMetadata', +'BatchMigrateResourcesRequest', +'BatchMigrateResourcesResponse', +'BatchPredictionJob', +'BigQueryDestination', +'BigQuerySource', +'CancelBatchPredictionJobRequest', +'CancelCustomJobRequest', +'CancelDataLabelingJobRequest', +'CancelHyperparameterTuningJobRequest', +'CancelPipelineJobRequest', +'CancelTrainingPipelineRequest', +'CompletionStats', +'ContainerRegistryDestination', +'ContainerSpec', +'Context', +'CreateBatchPredictionJobRequest', +'CreateCustomJobRequest', +'CreateDataLabelingJobRequest', +'CreateDatasetOperationMetadata', +'CreateDatasetRequest', +'CreateEndpointOperationMetadata', +'CreateEndpointRequest', +'CreateHyperparameterTuningJobRequest', +'CreatePipelineJobRequest', +'CreateSpecialistPoolOperationMetadata', +'CreateSpecialistPoolRequest', +'CreateTrainingPipelineRequest', +'CustomJob', +'CustomJobSpec', +'DataItem', +'DataLabelingJob', +'Dataset', +'DatasetServiceClient', +'DedicatedResources', +'DeleteBatchPredictionJobRequest', +'DeleteCustomJobRequest', +'DeleteDataLabelingJobRequest', +'DeleteDatasetRequest', +'DeleteEndpointRequest', +'DeleteHyperparameterTuningJobRequest', +'DeleteModelRequest', +'DeleteOperationMetadata', +'DeletePipelineJobRequest', +'DeleteSpecialistPoolRequest', +'DeleteTrainingPipelineRequest', +'DeployModelOperationMetadata', +'DeployModelRequest', +'DeployModelResponse', +'DeployedModel', +'DeployedModelRef', +'DiskSpec', +'EncryptionSpec', +'Endpoint', +'EndpointServiceClient', +'EnvVar', +'Execution', +'ExportDataConfig', +'ExportDataOperationMetadata', +'ExportDataRequest', +'ExportDataResponse', +'ExportModelOperationMetadata', +'ExportModelRequest', +'ExportModelResponse', +'FilterSplit', +'FractionSplit', +'GcsDestination', +'GcsSource', +'GenericOperationMetadata', +'GetAnnotationSpecRequest', +'GetBatchPredictionJobRequest', +'GetCustomJobRequest', +'GetDataLabelingJobRequest', +'GetDatasetRequest', +'GetEndpointRequest', +'GetHyperparameterTuningJobRequest', +'GetModelEvaluationRequest', +'GetModelEvaluationSliceRequest', +'GetModelRequest', +'GetPipelineJobRequest', +'GetSpecialistPoolRequest', +'GetTrainingPipelineRequest', +'HyperparameterTuningJob', +'ImportDataConfig', +'ImportDataOperationMetadata', +'ImportDataRequest', +'ImportDataResponse', +'InputDataConfig', +'JobServiceClient', +'JobState', +'ListAnnotationsRequest', +'ListAnnotationsResponse', +'ListBatchPredictionJobsRequest', +'ListBatchPredictionJobsResponse', +'ListCustomJobsRequest', +'ListCustomJobsResponse', +'ListDataItemsRequest', +'ListDataItemsResponse', +'ListDataLabelingJobsRequest', +'ListDataLabelingJobsResponse', +'ListDatasetsRequest', +'ListDatasetsResponse', +'ListEndpointsRequest', +'ListEndpointsResponse', +'ListHyperparameterTuningJobsRequest', +'ListHyperparameterTuningJobsResponse', +'ListModelEvaluationSlicesRequest', +'ListModelEvaluationSlicesResponse', +'ListModelEvaluationsRequest', +'ListModelEvaluationsResponse', +'ListModelsRequest', +'ListModelsResponse', +'ListPipelineJobsRequest', +'ListPipelineJobsResponse', +'ListSpecialistPoolsRequest', +'ListSpecialistPoolsResponse', +'ListTrainingPipelinesRequest', 
+'ListTrainingPipelinesResponse', +'MachineSpec', +'ManualBatchTuningParameters', +'Measurement', +'MigratableResource', +'MigrateResourceRequest', +'MigrateResourceResponse', +'MigrationServiceClient', +'Model', +'ModelContainerSpec', +'ModelEvaluation', +'ModelEvaluationSlice', +'ModelServiceClient', +'PipelineJob', +'PipelineJobDetail', +'PipelineServiceClient', +'PipelineState', +'PipelineTaskDetail', +'PipelineTaskExecutorDetail', +'Port', +'PredefinedSplit', +'PredictRequest', +'PredictResponse', +'PredictSchemata', +'PredictionServiceClient', +'PythonPackageSpec', +'ResourcesConsumed', +'SampleConfig', +'Scheduling', +'SearchMigratableResourcesRequest', +'SearchMigratableResourcesResponse', +'SpecialistPool', +'SpecialistPoolServiceClient', +'StudySpec', +'TimestampSplit', +'TrainingConfig', +'TrainingPipeline', +'Trial', +'UndeployModelOperationMetadata', +'UndeployModelRequest', +'UndeployModelResponse', +'UpdateDatasetRequest', +'UpdateEndpointRequest', +'UpdateModelRequest', +'UpdateSpecialistPoolOperationMetadata', +'UpdateSpecialistPoolRequest', +'UploadModelOperationMetadata', +'UploadModelRequest', +'UploadModelResponse', +'UserActionReference', +'Value', +'WorkerPoolSpec', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/gapic_metadata.json b/owl-bot-staging/v1/google/cloud/aiplatform_v1/gapic_metadata.json new file mode 100644 index 0000000000..4b22c95676 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/gapic_metadata.json @@ -0,0 +1,771 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform_v1", + "protoPackage": "google.cloud.aiplatform.v1", + "schema": "1.0", + "services": { + "DatasetService": { + "clients": { + "grpc": { + "libraryClient": "DatasetServiceClient", + "rpcs": { + "CreateDataset": { + "methods": [ + "create_dataset" + ] + }, + "DeleteDataset": { + "methods": [ + "delete_dataset" + ] + }, + "ExportData": { + "methods": [ + "export_data" + ] + }, + "GetAnnotationSpec": { + "methods": [ + "get_annotation_spec" + ] + }, + "GetDataset": { + "methods": [ + "get_dataset" + ] + }, + "ImportData": { + "methods": [ + "import_data" + ] + }, + "ListAnnotations": { + "methods": [ + "list_annotations" + ] + }, + "ListDataItems": { + "methods": [ + "list_data_items" + ] + }, + "ListDatasets": { + "methods": [ + "list_datasets" + ] + }, + "UpdateDataset": { + "methods": [ + "update_dataset" + ] + } + } + }, + "grpc-async": { + "libraryClient": "DatasetServiceAsyncClient", + "rpcs": { + "CreateDataset": { + "methods": [ + "create_dataset" + ] + }, + "DeleteDataset": { + "methods": [ + "delete_dataset" + ] + }, + "ExportData": { + "methods": [ + "export_data" + ] + }, + "GetAnnotationSpec": { + "methods": [ + "get_annotation_spec" + ] + }, + "GetDataset": { + "methods": [ + "get_dataset" + ] + }, + "ImportData": { + "methods": [ + "import_data" + ] + }, + "ListAnnotations": { + "methods": [ + "list_annotations" + ] + }, + "ListDataItems": { + "methods": [ + "list_data_items" + ] + }, + "ListDatasets": { + "methods": [ + "list_datasets" + ] + }, + "UpdateDataset": { + "methods": [ + "update_dataset" + ] + } + } + } + } + }, + "EndpointService": { + "clients": { + "grpc": { + "libraryClient": "EndpointServiceClient", + "rpcs": { + "CreateEndpoint": { + "methods": [ + "create_endpoint" + ] + }, + "DeleteEndpoint": { + "methods": [ + "delete_endpoint" + ] + }, + "DeployModel": { + "methods": [ + "deploy_model" + ] + }, + 
"GetEndpoint": { + "methods": [ + "get_endpoint" + ] + }, + "ListEndpoints": { + "methods": [ + "list_endpoints" + ] + }, + "UndeployModel": { + "methods": [ + "undeploy_model" + ] + }, + "UpdateEndpoint": { + "methods": [ + "update_endpoint" + ] + } + } + }, + "grpc-async": { + "libraryClient": "EndpointServiceAsyncClient", + "rpcs": { + "CreateEndpoint": { + "methods": [ + "create_endpoint" + ] + }, + "DeleteEndpoint": { + "methods": [ + "delete_endpoint" + ] + }, + "DeployModel": { + "methods": [ + "deploy_model" + ] + }, + "GetEndpoint": { + "methods": [ + "get_endpoint" + ] + }, + "ListEndpoints": { + "methods": [ + "list_endpoints" + ] + }, + "UndeployModel": { + "methods": [ + "undeploy_model" + ] + }, + "UpdateEndpoint": { + "methods": [ + "update_endpoint" + ] + } + } + } + } + }, + "JobService": { + "clients": { + "grpc": { + "libraryClient": "JobServiceClient", + "rpcs": { + "CancelBatchPredictionJob": { + "methods": [ + "cancel_batch_prediction_job" + ] + }, + "CancelCustomJob": { + "methods": [ + "cancel_custom_job" + ] + }, + "CancelDataLabelingJob": { + "methods": [ + "cancel_data_labeling_job" + ] + }, + "CancelHyperparameterTuningJob": { + "methods": [ + "cancel_hyperparameter_tuning_job" + ] + }, + "CreateBatchPredictionJob": { + "methods": [ + "create_batch_prediction_job" + ] + }, + "CreateCustomJob": { + "methods": [ + "create_custom_job" + ] + }, + "CreateDataLabelingJob": { + "methods": [ + "create_data_labeling_job" + ] + }, + "CreateHyperparameterTuningJob": { + "methods": [ + "create_hyperparameter_tuning_job" + ] + }, + "DeleteBatchPredictionJob": { + "methods": [ + "delete_batch_prediction_job" + ] + }, + "DeleteCustomJob": { + "methods": [ + "delete_custom_job" + ] + }, + "DeleteDataLabelingJob": { + "methods": [ + "delete_data_labeling_job" + ] + }, + "DeleteHyperparameterTuningJob": { + "methods": [ + "delete_hyperparameter_tuning_job" + ] + }, + "GetBatchPredictionJob": { + "methods": [ + "get_batch_prediction_job" + ] + }, + "GetCustomJob": { + "methods": [ + "get_custom_job" + ] + }, + "GetDataLabelingJob": { + "methods": [ + "get_data_labeling_job" + ] + }, + "GetHyperparameterTuningJob": { + "methods": [ + "get_hyperparameter_tuning_job" + ] + }, + "ListBatchPredictionJobs": { + "methods": [ + "list_batch_prediction_jobs" + ] + }, + "ListCustomJobs": { + "methods": [ + "list_custom_jobs" + ] + }, + "ListDataLabelingJobs": { + "methods": [ + "list_data_labeling_jobs" + ] + }, + "ListHyperparameterTuningJobs": { + "methods": [ + "list_hyperparameter_tuning_jobs" + ] + } + } + }, + "grpc-async": { + "libraryClient": "JobServiceAsyncClient", + "rpcs": { + "CancelBatchPredictionJob": { + "methods": [ + "cancel_batch_prediction_job" + ] + }, + "CancelCustomJob": { + "methods": [ + "cancel_custom_job" + ] + }, + "CancelDataLabelingJob": { + "methods": [ + "cancel_data_labeling_job" + ] + }, + "CancelHyperparameterTuningJob": { + "methods": [ + "cancel_hyperparameter_tuning_job" + ] + }, + "CreateBatchPredictionJob": { + "methods": [ + "create_batch_prediction_job" + ] + }, + "CreateCustomJob": { + "methods": [ + "create_custom_job" + ] + }, + "CreateDataLabelingJob": { + "methods": [ + "create_data_labeling_job" + ] + }, + "CreateHyperparameterTuningJob": { + "methods": [ + "create_hyperparameter_tuning_job" + ] + }, + "DeleteBatchPredictionJob": { + "methods": [ + "delete_batch_prediction_job" + ] + }, + "DeleteCustomJob": { + "methods": [ + "delete_custom_job" + ] + }, + "DeleteDataLabelingJob": { + "methods": [ + "delete_data_labeling_job" + ] + }, + 
"DeleteHyperparameterTuningJob": { + "methods": [ + "delete_hyperparameter_tuning_job" + ] + }, + "GetBatchPredictionJob": { + "methods": [ + "get_batch_prediction_job" + ] + }, + "GetCustomJob": { + "methods": [ + "get_custom_job" + ] + }, + "GetDataLabelingJob": { + "methods": [ + "get_data_labeling_job" + ] + }, + "GetHyperparameterTuningJob": { + "methods": [ + "get_hyperparameter_tuning_job" + ] + }, + "ListBatchPredictionJobs": { + "methods": [ + "list_batch_prediction_jobs" + ] + }, + "ListCustomJobs": { + "methods": [ + "list_custom_jobs" + ] + }, + "ListDataLabelingJobs": { + "methods": [ + "list_data_labeling_jobs" + ] + }, + "ListHyperparameterTuningJobs": { + "methods": [ + "list_hyperparameter_tuning_jobs" + ] + } + } + } + } + }, + "MigrationService": { + "clients": { + "grpc": { + "libraryClient": "MigrationServiceClient", + "rpcs": { + "BatchMigrateResources": { + "methods": [ + "batch_migrate_resources" + ] + }, + "SearchMigratableResources": { + "methods": [ + "search_migratable_resources" + ] + } + } + }, + "grpc-async": { + "libraryClient": "MigrationServiceAsyncClient", + "rpcs": { + "BatchMigrateResources": { + "methods": [ + "batch_migrate_resources" + ] + }, + "SearchMigratableResources": { + "methods": [ + "search_migratable_resources" + ] + } + } + } + } + }, + "ModelService": { + "clients": { + "grpc": { + "libraryClient": "ModelServiceClient", + "rpcs": { + "DeleteModel": { + "methods": [ + "delete_model" + ] + }, + "ExportModel": { + "methods": [ + "export_model" + ] + }, + "GetModel": { + "methods": [ + "get_model" + ] + }, + "GetModelEvaluation": { + "methods": [ + "get_model_evaluation" + ] + }, + "GetModelEvaluationSlice": { + "methods": [ + "get_model_evaluation_slice" + ] + }, + "ListModelEvaluationSlices": { + "methods": [ + "list_model_evaluation_slices" + ] + }, + "ListModelEvaluations": { + "methods": [ + "list_model_evaluations" + ] + }, + "ListModels": { + "methods": [ + "list_models" + ] + }, + "UpdateModel": { + "methods": [ + "update_model" + ] + }, + "UploadModel": { + "methods": [ + "upload_model" + ] + } + } + }, + "grpc-async": { + "libraryClient": "ModelServiceAsyncClient", + "rpcs": { + "DeleteModel": { + "methods": [ + "delete_model" + ] + }, + "ExportModel": { + "methods": [ + "export_model" + ] + }, + "GetModel": { + "methods": [ + "get_model" + ] + }, + "GetModelEvaluation": { + "methods": [ + "get_model_evaluation" + ] + }, + "GetModelEvaluationSlice": { + "methods": [ + "get_model_evaluation_slice" + ] + }, + "ListModelEvaluationSlices": { + "methods": [ + "list_model_evaluation_slices" + ] + }, + "ListModelEvaluations": { + "methods": [ + "list_model_evaluations" + ] + }, + "ListModels": { + "methods": [ + "list_models" + ] + }, + "UpdateModel": { + "methods": [ + "update_model" + ] + }, + "UploadModel": { + "methods": [ + "upload_model" + ] + } + } + } + } + }, + "PipelineService": { + "clients": { + "grpc": { + "libraryClient": "PipelineServiceClient", + "rpcs": { + "CancelPipelineJob": { + "methods": [ + "cancel_pipeline_job" + ] + }, + "CancelTrainingPipeline": { + "methods": [ + "cancel_training_pipeline" + ] + }, + "CreatePipelineJob": { + "methods": [ + "create_pipeline_job" + ] + }, + "CreateTrainingPipeline": { + "methods": [ + "create_training_pipeline" + ] + }, + "DeletePipelineJob": { + "methods": [ + "delete_pipeline_job" + ] + }, + "DeleteTrainingPipeline": { + "methods": [ + "delete_training_pipeline" + ] + }, + "GetPipelineJob": { + "methods": [ + "get_pipeline_job" + ] + }, + "GetTrainingPipeline": { + "methods": [ + 
"get_training_pipeline" + ] + }, + "ListPipelineJobs": { + "methods": [ + "list_pipeline_jobs" + ] + }, + "ListTrainingPipelines": { + "methods": [ + "list_training_pipelines" + ] + } + } + }, + "grpc-async": { + "libraryClient": "PipelineServiceAsyncClient", + "rpcs": { + "CancelPipelineJob": { + "methods": [ + "cancel_pipeline_job" + ] + }, + "CancelTrainingPipeline": { + "methods": [ + "cancel_training_pipeline" + ] + }, + "CreatePipelineJob": { + "methods": [ + "create_pipeline_job" + ] + }, + "CreateTrainingPipeline": { + "methods": [ + "create_training_pipeline" + ] + }, + "DeletePipelineJob": { + "methods": [ + "delete_pipeline_job" + ] + }, + "DeleteTrainingPipeline": { + "methods": [ + "delete_training_pipeline" + ] + }, + "GetPipelineJob": { + "methods": [ + "get_pipeline_job" + ] + }, + "GetTrainingPipeline": { + "methods": [ + "get_training_pipeline" + ] + }, + "ListPipelineJobs": { + "methods": [ + "list_pipeline_jobs" + ] + }, + "ListTrainingPipelines": { + "methods": [ + "list_training_pipelines" + ] + } + } + } + } + }, + "PredictionService": { + "clients": { + "grpc": { + "libraryClient": "PredictionServiceClient", + "rpcs": { + "Predict": { + "methods": [ + "predict" + ] + } + } + }, + "grpc-async": { + "libraryClient": "PredictionServiceAsyncClient", + "rpcs": { + "Predict": { + "methods": [ + "predict" + ] + } + } + } + } + }, + "SpecialistPoolService": { + "clients": { + "grpc": { + "libraryClient": "SpecialistPoolServiceClient", + "rpcs": { + "CreateSpecialistPool": { + "methods": [ + "create_specialist_pool" + ] + }, + "DeleteSpecialistPool": { + "methods": [ + "delete_specialist_pool" + ] + }, + "GetSpecialistPool": { + "methods": [ + "get_specialist_pool" + ] + }, + "ListSpecialistPools": { + "methods": [ + "list_specialist_pools" + ] + }, + "UpdateSpecialistPool": { + "methods": [ + "update_specialist_pool" + ] + } + } + }, + "grpc-async": { + "libraryClient": "SpecialistPoolServiceAsyncClient", + "rpcs": { + "CreateSpecialistPool": { + "methods": [ + "create_specialist_pool" + ] + }, + "DeleteSpecialistPool": { + "methods": [ + "delete_specialist_pool" + ] + }, + "GetSpecialistPool": { + "methods": [ + "get_specialist_pool" + ] + }, + "ListSpecialistPools": { + "methods": [ + "list_specialist_pools" + ] + }, + "UpdateSpecialistPool": { + "methods": [ + "update_specialist_pool" + ] + } + } + } + } + } + } +} diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform_v1/py.typed new file mode 100644 index 0000000000..228f1c51c6 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/__init__.py new file mode 100644 index 0000000000..4de65971c2 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/__init__.py new file mode 100644 index 0000000000..44e8fb2115 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import DatasetServiceClient +from .async_client import DatasetServiceAsyncClient + +__all__ = ( + 'DatasetServiceClient', + 'DatasetServiceAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/async_client.py new file mode 100644 index 0000000000..1916feda8c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/async_client.py @@ -0,0 +1,1074 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.dataset_service import pagers +from google.cloud.aiplatform_v1.types import annotation +from google.cloud.aiplatform_v1.types import annotation_spec +from google.cloud.aiplatform_v1.types import data_item +from google.cloud.aiplatform_v1.types import dataset +from google.cloud.aiplatform_v1.types import dataset as gca_dataset +from google.cloud.aiplatform_v1.types import dataset_service +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import DatasetServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import DatasetServiceGrpcAsyncIOTransport +from .client import DatasetServiceClient + + +class DatasetServiceAsyncClient: + """""" + + _client: DatasetServiceClient + + DEFAULT_ENDPOINT = DatasetServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = DatasetServiceClient.DEFAULT_MTLS_ENDPOINT + + annotation_path = staticmethod(DatasetServiceClient.annotation_path) + parse_annotation_path = staticmethod(DatasetServiceClient.parse_annotation_path) + annotation_spec_path = staticmethod(DatasetServiceClient.annotation_spec_path) + parse_annotation_spec_path = staticmethod(DatasetServiceClient.parse_annotation_spec_path) + data_item_path = staticmethod(DatasetServiceClient.data_item_path) + parse_data_item_path = staticmethod(DatasetServiceClient.parse_data_item_path) + dataset_path = staticmethod(DatasetServiceClient.dataset_path) + parse_dataset_path = staticmethod(DatasetServiceClient.parse_dataset_path) + common_billing_account_path = staticmethod(DatasetServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(DatasetServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(DatasetServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(DatasetServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(DatasetServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(DatasetServiceClient.parse_common_organization_path) + common_project_path = staticmethod(DatasetServiceClient.common_project_path) + parse_common_project_path = staticmethod(DatasetServiceClient.parse_common_project_path) + common_location_path = staticmethod(DatasetServiceClient.common_location_path) + parse_common_location_path = staticmethod(DatasetServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. 
+ + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DatasetServiceAsyncClient: The constructed client. + """ + return DatasetServiceClient.from_service_account_info.__func__(DatasetServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DatasetServiceAsyncClient: The constructed client. + """ + return DatasetServiceClient.from_service_account_file.__func__(DatasetServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> DatasetServiceTransport: + """Returns the transport used by the client instance. + + Returns: + DatasetServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(DatasetServiceClient).get_transport_class, type(DatasetServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, DatasetServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the dataset service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.DatasetServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + self._client = DatasetServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_dataset(self, + request: dataset_service.CreateDatasetRequest = None, + *, + parent: str = None, + dataset: gca_dataset.Dataset = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a Dataset. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.CreateDatasetRequest`): + The request object. Request message for + [DatasetService.CreateDataset][google.cloud.aiplatform.v1.DatasetService.CreateDataset]. + parent (:class:`str`): + Required. The resource name of the Location to create + the Dataset in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + dataset (:class:`google.cloud.aiplatform_v1.types.Dataset`): + Required. The Dataset to create. + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.Dataset` A + collection of DataItems and Annotations on them. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, dataset]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.CreateDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if dataset is not None: + request.dataset = dataset + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_dataset, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_dataset.Dataset, + metadata_type=dataset_service.CreateDatasetOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_dataset(self, + request: dataset_service.GetDatasetRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> dataset.Dataset: + r"""Gets a Dataset. 
+ + Args: + request (:class:`google.cloud.aiplatform_v1.types.GetDatasetRequest`): + The request object. Request message for + [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset]. + name (:class:`str`): + Required. The name of the Dataset + resource. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Dataset: + A collection of DataItems and + Annotations on them. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.GetDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_dataset, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_dataset(self, + request: dataset_service.UpdateDatasetRequest = None, + *, + dataset: gca_dataset.Dataset = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_dataset.Dataset: + r"""Updates a Dataset. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.UpdateDatasetRequest`): + The request object. Request message for + [DatasetService.UpdateDataset][google.cloud.aiplatform.v1.DatasetService.UpdateDataset]. + dataset (:class:`google.cloud.aiplatform_v1.types.Dataset`): + Required. The Dataset which replaces + the resource on the server. + + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The update mask applies to the resource. For + the ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + Updatable fields: + + - ``display_name`` + - ``description`` + - ``labels`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1.types.Dataset: + A collection of DataItems and + Annotations on them. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([dataset, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.UpdateDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if dataset is not None: + request.dataset = dataset + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_dataset, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("dataset.name", request.dataset.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_datasets(self, + request: dataset_service.ListDatasetsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDatasetsAsyncPager: + r"""Lists Datasets in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.ListDatasetsRequest`): + The request object. Request message for + [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. + parent (:class:`str`): + Required. The name of the Dataset's parent resource. + Format: ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDatasetsAsyncPager: + Response message for + [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.ListDatasetsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.list_datasets,
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("parent", request.parent),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # This method is paged; wrap the response in a pager, which provides
+        # an `__aiter__` convenience method.
+        response = pagers.ListDatasetsAsyncPager(
+            method=rpc,
+            request=request,
+            response=response,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def delete_dataset(self,
+            request: dataset_service.DeleteDatasetRequest = None,
+            *,
+            name: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> operation_async.AsyncOperation:
+        r"""Deletes a Dataset.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1.types.DeleteDatasetRequest`):
+                The request object. Request message for
+                [DatasetService.DeleteDataset][google.cloud.aiplatform.v1.DatasetService.DeleteDataset].
+            name (:class:`str`):
+                Required. The resource name of the Dataset to delete.
+                Format:
+                ``projects/{project}/locations/{location}/datasets/{dataset}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation_async.AsyncOperation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`google.protobuf.empty_pb2.Empty`, a generic empty
+                message that you can re-use to avoid defining duplicated
+                empty messages in your APIs. A typical example is to
+                use it as the request or the response type of an API
+                method. For instance:
+
+                    service Foo {
+                        rpc Bar(google.protobuf.Empty) returns
+                        (google.protobuf.Empty);
+                    }
+
+                The JSON representation for ``Empty`` is an empty JSON
+                object ``{}``.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = dataset_service.DeleteDatasetRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.delete_dataset,
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
+        response = operation_async.from_gapic(
+            response,
+            self._client._transport.operations_client,
+            empty_pb2.Empty,
+            metadata_type=gca_operation.DeleteOperationMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def import_data(self,
+            request: dataset_service.ImportDataRequest = None,
+            *,
+            name: str = None,
+            import_configs: Sequence[dataset.ImportDataConfig] = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> operation_async.AsyncOperation:
+        r"""Imports data into a Dataset.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1.types.ImportDataRequest`):
+                The request object. Request message for
+                [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData].
+            name (:class:`str`):
+                Required. The name of the Dataset resource. Format:
+                ``projects/{project}/locations/{location}/datasets/{dataset}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            import_configs (:class:`Sequence[google.cloud.aiplatform_v1.types.ImportDataConfig]`):
+                Required. The desired input
+                locations. The contents of all input
+                locations will be imported in one batch.
+
+                This corresponds to the ``import_configs`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation_async.AsyncOperation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`google.cloud.aiplatform_v1.types.ImportDataResponse`,
+                the response message for
+                [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData].
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name, import_configs])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = dataset_service.ImportDataRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if name is not None:
+            request.name = name
+        if import_configs:
+            request.import_configs.extend(import_configs)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.import_data,
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
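+        # Caller-side, this surfaces as a two-step await, once to start the
+        # operation and once for its completion; a minimal sketch with
+        # placeholder arguments:
+        #
+        #     operation = await client.import_data(
+        #         name=dataset_name,
+        #         import_configs=[import_config],
+        #     )
+        #     result = await operation.result()  # ImportDataResponse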
+        response = operation_async.from_gapic(
+            response,
+            self._client._transport.operations_client,
+            dataset_service.ImportDataResponse,
+            metadata_type=dataset_service.ImportDataOperationMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def export_data(self,
+            request: dataset_service.ExportDataRequest = None,
+            *,
+            name: str = None,
+            export_config: dataset.ExportDataConfig = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> operation_async.AsyncOperation:
+        r"""Exports data from a Dataset.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1.types.ExportDataRequest`):
+                The request object. Request message for
+                [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData].
+            name (:class:`str`):
+                Required. The name of the Dataset resource. Format:
+                ``projects/{project}/locations/{location}/datasets/{dataset}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            export_config (:class:`google.cloud.aiplatform_v1.types.ExportDataConfig`):
+                Required. The desired output
+                location.
+
+                This corresponds to the ``export_config`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation_async.AsyncOperation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`google.cloud.aiplatform_v1.types.ExportDataResponse`,
+                the response message for
+                [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData].
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name, export_config])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = dataset_service.ExportDataRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if name is not None:
+            request.name = name
+        if export_config is not None:
+            request.export_config = export_config
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.export_data,
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
+        response = operation_async.from_gapic(
+            response,
+            self._client._transport.operations_client,
+            dataset_service.ExportDataResponse,
+            metadata_type=dataset_service.ExportDataOperationMetadata,
+        )
+
+        # Done; return the response.
+ return response + + async def list_data_items(self, + request: dataset_service.ListDataItemsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDataItemsAsyncPager: + r"""Lists DataItems in a Dataset. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.ListDataItemsRequest`): + The request object. Request message for + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. + parent (:class:`str`): + Required. The resource name of the Dataset to list + DataItems from. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDataItemsAsyncPager: + Response message for + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.ListDataItemsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_data_items, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListDataItemsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_annotation_spec(self, + request: dataset_service.GetAnnotationSpecRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> annotation_spec.AnnotationSpec: + r"""Gets an AnnotationSpec. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.GetAnnotationSpecRequest`): + The request object. Request message for + [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1.DatasetService.GetAnnotationSpec]. + name (:class:`str`): + Required. The name of the AnnotationSpec resource. 
+                Format:
+                ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1.types.AnnotationSpec:
+                Identifies a concept with which
+                DataItems may be annotated.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = dataset_service.GetAnnotationSpecRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.get_annotation_spec,
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def list_annotations(self,
+            request: dataset_service.ListAnnotationsRequest = None,
+            *,
+            parent: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> pagers.ListAnnotationsAsyncPager:
+        r"""Lists Annotations that belong to a DataItem.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1.types.ListAnnotationsRequest`):
+                The request object. Request message for
+                [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations].
+            parent (:class:`str`):
+                Required. The resource name of the DataItem to list
+                Annotations from. Format:
+                ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}``
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1.services.dataset_service.pagers.ListAnnotationsAsyncPager:
+                Response message for
+                [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations].
+
+                Iterating over this object will yield results and
+                resolve additional pages automatically.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.ListAnnotationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_annotations, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListAnnotationsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "DatasetServiceAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/client.py new file mode 100644 index 0000000000..05e5c9a88e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/client.py @@ -0,0 +1,1288 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+from collections import OrderedDict
+from distutils import util
+import os
+import re
+from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
+import pkg_resources
+
+from google.api_core import client_options as client_options_lib  # type: ignore
+from google.api_core import exceptions as core_exceptions  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import retry as retries  # type: ignore
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.transport import mtls  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+from google.auth.exceptions import MutualTLSChannelError  # type: ignore
+from google.oauth2 import service_account  # type: ignore
+
+from google.api_core import operation as gac_operation  # type: ignore
+from google.api_core import operation_async  # type: ignore
+from google.cloud.aiplatform_v1.services.dataset_service import pagers
+from google.cloud.aiplatform_v1.types import annotation
+from google.cloud.aiplatform_v1.types import annotation_spec
+from google.cloud.aiplatform_v1.types import data_item
+from google.cloud.aiplatform_v1.types import dataset
+from google.cloud.aiplatform_v1.types import dataset as gca_dataset
+from google.cloud.aiplatform_v1.types import dataset_service
+from google.cloud.aiplatform_v1.types import encryption_spec
+from google.cloud.aiplatform_v1.types import operation as gca_operation
+from google.protobuf import empty_pb2  # type: ignore
+from google.protobuf import field_mask_pb2  # type: ignore
+from google.protobuf import struct_pb2  # type: ignore
+from google.protobuf import timestamp_pb2  # type: ignore
+from .transports.base import DatasetServiceTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc import DatasetServiceGrpcTransport
+from .transports.grpc_asyncio import DatasetServiceGrpcAsyncIOTransport
+
+
+class DatasetServiceClientMeta(type):
+    """Metaclass for the DatasetService client.
+
+    This provides class-level methods for building and retrieving
+    support objects (e.g. transport) without polluting the client instance
+    objects.
+    """
+    _transport_registry = OrderedDict()  # type: Dict[str, Type[DatasetServiceTransport]]
+    _transport_registry["grpc"] = DatasetServiceGrpcTransport
+    _transport_registry["grpc_asyncio"] = DatasetServiceGrpcAsyncIOTransport
+
+    def get_transport_class(cls,
+            label: str = None,
+        ) -> Type[DatasetServiceTransport]:
+        """Returns an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class DatasetServiceClient(metaclass=DatasetServiceClientMeta):
+    """"""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            DatasetServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            DatasetServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(
+            filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> DatasetServiceTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            DatasetServiceTransport: The transport used by the client
+            instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def annotation_path(project: str, location: str, dataset: str, data_item: str, annotation: str,) -> str:
+        """Returns a fully-qualified annotation string."""
+        return "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}/annotations/{annotation}".format(project=project, location=location, dataset=dataset, data_item=data_item, annotation=annotation, )
+
+    @staticmethod
+    def parse_annotation_path(path: str) -> Dict[str, str]:
+        """Parses an annotation path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)/dataItems/(?P<data_item>.+?)/annotations/(?P<annotation>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def annotation_spec_path(project: str, location: str, dataset: str, annotation_spec: str,) -> str:
+        """Returns a fully-qualified annotation_spec string."""
+        return "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(project=project, location=location, dataset=dataset, annotation_spec=annotation_spec, )
+
+    @staticmethod
+    def parse_annotation_spec_path(path: str) -> Dict[str, str]:
+        """Parses an annotation_spec path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)/annotationSpecs/(?P<annotation_spec>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def data_item_path(project: str, location: str, dataset: str, data_item: str,) -> str:
+        """Returns a fully-qualified data_item string."""
+        return "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}".format(project=project, location=location, dataset=dataset, data_item=data_item, )
+
+    @staticmethod
+    def parse_data_item_path(path: str) -> Dict[str, str]:
+        """Parses a data_item path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)/dataItems/(?P<data_item>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def dataset_path(project: str, location: str, dataset: str,) -> str:
+        """Returns a fully-qualified dataset string."""
+        return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, )
+
+    @staticmethod
+    def parse_dataset_path(path: str) -> Dict[str, str]:
+        """Parses a dataset path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str, str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str, str]:
+        """Parse an organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str, str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str, str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, DatasetServiceTransport, None] = None,
+            client_options: Optional[client_options_lib.ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the dataset service client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, DatasetServiceTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. The GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if a client certificate is present; this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide a client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+                set, no client certificate will be used.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+        """
+        if isinstance(client_options, dict):
+            client_options = client_options_lib.from_dict(client_options)
+        if client_options is None:
+            client_options = client_options_lib.ClientOptions()
+
+        # Create SSL credentials for mutual TLS if needed.
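+        # For reference, a hypothetical way for callers to target a regional
+        # endpoint directly, bypassing the environment-variable logic below
+        # (the endpoint value is a placeholder):
+        #
+        #     options = client_options_lib.ClientOptions(
+        #         api_endpoint="us-central1-aiplatform.googleapis.com",
+        #     )
+        #     client = DatasetServiceClient(client_options=options)
+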
+ use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, DatasetServiceTransport): + # transport is a DatasetServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_dataset(self, + request: dataset_service.CreateDatasetRequest = None, + *, + parent: str = None, + dataset: gca_dataset.Dataset = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a Dataset. + + Args: + request (google.cloud.aiplatform_v1.types.CreateDatasetRequest): + The request object. Request message for + [DatasetService.CreateDataset][google.cloud.aiplatform.v1.DatasetService.CreateDataset]. + parent (str): + Required. The resource name of the Location to create + the Dataset in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + dataset (google.cloud.aiplatform_v1.types.Dataset): + Required. The Dataset to create. + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+
+                The result type for the operation will be
+                :class:`google.cloud.aiplatform_v1.types.Dataset`, a
+                collection of DataItems and Annotations on them.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent, dataset])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a dataset_service.CreateDatasetRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, dataset_service.CreateDatasetRequest):
+            request = dataset_service.CreateDatasetRequest(request)
+            # If we have keyword arguments corresponding to fields on the
+            # request, apply these.
+            if parent is not None:
+                request.parent = parent
+            if dataset is not None:
+                request.dataset = dataset
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.create_dataset]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("parent", request.parent),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
+        response = gac_operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            gca_dataset.Dataset,
+            metadata_type=dataset_service.CreateDatasetOperationMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def get_dataset(self,
+            request: dataset_service.GetDatasetRequest = None,
+            *,
+            name: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> dataset.Dataset:
+        r"""Gets a Dataset.
+
+        Args:
+            request (google.cloud.aiplatform_v1.types.GetDatasetRequest):
+                The request object. Request message for
+                [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset].
+            name (str):
+                Required. The name of the Dataset
+                resource.
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1.types.Dataset:
+                A collection of DataItems and
+                Annotations on them.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a dataset_service.GetDatasetRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
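+        # For reference, the two calling conventions the guard above keeps
+        # mutually exclusive (a minimal sketch; ``dataset_name`` is a
+        # placeholder):
+        #
+        #     client.get_dataset(
+        #         request=dataset_service.GetDatasetRequest(name=dataset_name),
+        #     )
+        #     # ...or, equivalently, the flattened field, but never both:
+        #     client.get_dataset(name=dataset_name)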
+ if not isinstance(request, dataset_service.GetDatasetRequest): + request = dataset_service.GetDatasetRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_dataset(self, + request: dataset_service.UpdateDatasetRequest = None, + *, + dataset: gca_dataset.Dataset = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_dataset.Dataset: + r"""Updates a Dataset. + + Args: + request (google.cloud.aiplatform_v1.types.UpdateDatasetRequest): + The request object. Request message for + [DatasetService.UpdateDataset][google.cloud.aiplatform.v1.DatasetService.UpdateDataset]. + dataset (google.cloud.aiplatform_v1.types.Dataset): + Required. The Dataset which replaces + the resource on the server. + + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the resource. For + the ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + Updatable fields: + + - ``display_name`` + - ``description`` + - ``labels`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Dataset: + A collection of DataItems and + Annotations on them. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([dataset, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.UpdateDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.UpdateDatasetRequest): + request = dataset_service.UpdateDatasetRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if dataset is not None: + request.dataset = dataset + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
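+        # A minimal caller-side sketch of building the required mask (the
+        # paths come from the updatable fields listed in the docstring above;
+        # ``existing_dataset`` is a placeholder):
+        #
+        #     from google.protobuf import field_mask_pb2
+        #     updated = client.update_dataset(
+        #         dataset=existing_dataset,  # with display_name already changed
+        #         update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
+        #     )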
+ rpc = self._transport._wrapped_methods[self._transport.update_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("dataset.name", request.dataset.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_datasets(self, + request: dataset_service.ListDatasetsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDatasetsPager: + r"""Lists Datasets in a Location. + + Args: + request (google.cloud.aiplatform_v1.types.ListDatasetsRequest): + The request object. Request message for + [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. + parent (str): + Required. The name of the Dataset's parent resource. + Format: ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDatasetsPager: + Response message for + [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.ListDatasetsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.ListDatasetsRequest): + request = dataset_service.ListDatasetsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_datasets] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListDatasetsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
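+        # Caller-side, the pager reads like a plain iterable and fetches
+        # further pages lazily; a minimal sketch with a placeholder parent:
+        #
+        #     for ds in client.list_datasets(
+        #             parent="projects/my-project/locations/us-central1"):
+        #         print(ds.name)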
+        return response
+
+    def delete_dataset(self,
+            request: dataset_service.DeleteDatasetRequest = None,
+            *,
+            name: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> gac_operation.Operation:
+        r"""Deletes a Dataset.
+
+        Args:
+            request (google.cloud.aiplatform_v1.types.DeleteDatasetRequest):
+                The request object. Request message for
+                [DatasetService.DeleteDataset][google.cloud.aiplatform.v1.DatasetService.DeleteDataset].
+            name (str):
+                Required. The resource name of the Dataset to delete.
+                Format:
+                ``projects/{project}/locations/{location}/datasets/{dataset}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`google.protobuf.empty_pb2.Empty`, a generic empty
+                message that you can re-use to avoid defining duplicated
+                empty messages in your APIs. A typical example is to
+                use it as the request or the response type of an API
+                method. For instance:
+
+                    service Foo {
+                        rpc Bar(google.protobuf.Empty) returns
+                        (google.protobuf.Empty);
+                    }
+
+                The JSON representation for ``Empty`` is an empty JSON
+                object ``{}``.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a dataset_service.DeleteDatasetRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, dataset_service.DeleteDatasetRequest):
+            request = dataset_service.DeleteDatasetRequest(request)
+            # If we have keyword arguments corresponding to fields on the
+            # request, apply these.
+            if name is not None:
+                request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.delete_dataset]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
+        response = gac_operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            empty_pb2.Empty,
+            metadata_type=gca_operation.DeleteOperationMetadata,
+        )
+
+        # Done; return the response.
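+        # Callers typically block on the returned future to confirm the
+        # deletion; a minimal sketch (``dataset_name`` is a placeholder):
+        #
+        #     operation = client.delete_dataset(name=dataset_name)
+        #     operation.result()  # empty_pb2.Empty once the LRO completes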
+        return response
+
+    def import_data(self,
+            request: dataset_service.ImportDataRequest = None,
+            *,
+            name: str = None,
+            import_configs: Sequence[dataset.ImportDataConfig] = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> gac_operation.Operation:
+        r"""Imports data into a Dataset.
+
+        Args:
+            request (google.cloud.aiplatform_v1.types.ImportDataRequest):
+                The request object. Request message for
+                [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData].
+            name (str):
+                Required. The name of the Dataset resource. Format:
+                ``projects/{project}/locations/{location}/datasets/{dataset}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            import_configs (Sequence[google.cloud.aiplatform_v1.types.ImportDataConfig]):
+                Required. The desired input
+                locations. The contents of all input
+                locations will be imported in one batch.
+
+                This corresponds to the ``import_configs`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`google.cloud.aiplatform_v1.types.ImportDataResponse`,
+                the response message for
+                [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData].
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name, import_configs])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a dataset_service.ImportDataRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, dataset_service.ImportDataRequest):
+            request = dataset_service.ImportDataRequest(request)
+            # If we have keyword arguments corresponding to fields on the
+            # request, apply these.
+            if name is not None:
+                request.name = name
+            if import_configs is not None:
+                request.import_configs = import_configs
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.import_data]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
+        response = gac_operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            dataset_service.ImportDataResponse,
+            metadata_type=dataset_service.ImportDataOperationMetadata,
+        )
+
+        # Done; return the response.
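+        # A hypothetical import request (bucket, schema URI, and resource
+        # name below are placeholders, not values from this change):
+        #
+        #     from google.cloud.aiplatform_v1.types import GcsSource, ImportDataConfig
+        #     config = ImportDataConfig(
+        #         gcs_source=GcsSource(uris=["gs://my-bucket/data.jsonl"]),
+        #         import_schema_uri="gs://my-bucket/import_schema.yaml",
+        #     )
+        #     client.import_data(name=dataset_name, import_configs=[config]).result()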
+        return response
+
+    def export_data(self,
+            request: dataset_service.ExportDataRequest = None,
+            *,
+            name: str = None,
+            export_config: dataset.ExportDataConfig = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> gac_operation.Operation:
+        r"""Exports data from a Dataset.
+
+        Args:
+            request (google.cloud.aiplatform_v1.types.ExportDataRequest):
+                The request object. Request message for
+                [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData].
+            name (str):
+                Required. The name of the Dataset resource. Format:
+                ``projects/{project}/locations/{location}/datasets/{dataset}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            export_config (google.cloud.aiplatform_v1.types.ExportDataConfig):
+                Required. The desired output
+                location.
+
+                This corresponds to the ``export_config`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`google.cloud.aiplatform_v1.types.ExportDataResponse`,
+                the response message for
+                [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData].
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name, export_config])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a dataset_service.ExportDataRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, dataset_service.ExportDataRequest):
+            request = dataset_service.ExportDataRequest(request)
+            # If we have keyword arguments corresponding to fields on the
+            # request, apply these.
+            if name is not None:
+                request.name = name
+            if export_config is not None:
+                request.export_config = export_config
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.export_data]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
+        response = gac_operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            dataset_service.ExportDataResponse,
+            metadata_type=dataset_service.ExportDataOperationMetadata,
+        )
+
+        # Done; return the response.
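+        # A hypothetical export request (the bucket and resource name are
+        # placeholders):
+        #
+        #     from google.cloud.aiplatform_v1.types import ExportDataConfig, GcsDestination
+        #     config = ExportDataConfig(
+        #         gcs_destination=GcsDestination(output_uri_prefix="gs://my-bucket/exports/"),
+        #     )
+        #     exported = client.export_data(name=dataset_name, export_config=config).result()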
+ return response + + def list_data_items(self, + request: dataset_service.ListDataItemsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDataItemsPager: + r"""Lists DataItems in a Dataset. + + Args: + request (google.cloud.aiplatform_v1.types.ListDataItemsRequest): + The request object. Request message for + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. + parent (str): + Required. The resource name of the Dataset to list + DataItems from. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDataItemsPager: + Response message for + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.ListDataItemsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.ListDataItemsRequest): + request = dataset_service.ListDataItemsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_data_items] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListDataItemsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_annotation_spec(self, + request: dataset_service.GetAnnotationSpecRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> annotation_spec.AnnotationSpec: + r"""Gets an AnnotationSpec. + + Args: + request (google.cloud.aiplatform_v1.types.GetAnnotationSpecRequest): + The request object. Request message for + [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1.DatasetService.GetAnnotationSpec]. 
+            name (str):
+                Required. The name of the AnnotationSpec resource.
+                Format:
+                ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1.types.AnnotationSpec:
+                Identifies a concept with which
+                DataItems may be annotated.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a dataset_service.GetAnnotationSpecRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, dataset_service.GetAnnotationSpecRequest):
+            request = dataset_service.GetAnnotationSpecRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.get_annotation_spec]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def list_annotations(self,
+            request: dataset_service.ListAnnotationsRequest = None,
+            *,
+            parent: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> pagers.ListAnnotationsPager:
+        r"""Lists Annotations belonging to a DataItem.
+
+        Args:
+            request (google.cloud.aiplatform_v1.types.ListAnnotationsRequest):
+                The request object. Request message for
+                [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations].
+            parent (str):
+                Required. The resource name of the DataItem to list
+                Annotations from. Format:
+                ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}``
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1.services.dataset_service.pagers.ListAnnotationsPager:
+                Response message for
+                [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations].
+
+                Iterating over this object will yield results and
+                resolve additional pages automatically.
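+
+                A minimal call sketch (hedged; the client construction and
+                the resource name below are illustrative only)::
+
+                    client = DatasetServiceClient()
+                    pager = client.list_annotations(
+                        parent="projects/my-project/locations/us-central1/datasets/123/dataItems/456",
+                    )
+                    for annotation in pager:
+                        print(annotation.name)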
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.ListAnnotationsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.ListAnnotationsRequest): + request = dataset_service.ListAnnotationsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_annotations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListAnnotationsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "DatasetServiceClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/pagers.py new file mode 100644 index 0000000000..6621ccddce --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/pagers.py @@ -0,0 +1,387 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.cloud.aiplatform_v1.types import annotation +from google.cloud.aiplatform_v1.types import data_item +from google.cloud.aiplatform_v1.types import dataset +from google.cloud.aiplatform_v1.types import dataset_service + + +class ListDatasetsPager: + """A pager for iterating through ``list_datasets`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListDatasetsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``datasets`` field. 
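+
+    A minimal iteration sketch (hedged; ``client`` is an assumed
+    ``DatasetServiceClient`` and the parent value is illustrative)::
+
+        pager = client.list_datasets(parent="projects/my-project/locations/us-central1")
+        for dataset in pager:
+            print(dataset.display_name)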
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListDatasets`` requests and continue to iterate + through the ``datasets`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListDatasetsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., dataset_service.ListDatasetsResponse], + request: dataset_service.ListDatasetsRequest, + response: dataset_service.ListDatasetsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListDatasetsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListDatasetsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = dataset_service.ListDatasetsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[dataset_service.ListDatasetsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[dataset.Dataset]: + for page in self.pages: + yield from page.datasets + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListDatasetsAsyncPager: + """A pager for iterating through ``list_datasets`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListDatasetsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``datasets`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListDatasets`` requests and continue to iterate + through the ``datasets`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListDatasetsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[dataset_service.ListDatasetsResponse]], + request: dataset_service.ListDatasetsRequest, + response: dataset_service.ListDatasetsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListDatasetsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListDatasetsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
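+
+        An async iteration sketch (hedged; ``client`` is an assumed
+        ``DatasetServiceAsyncClient``)::
+
+            pager = await client.list_datasets(parent="projects/my-project/locations/us-central1")
+            async for dataset in pager:
+                print(dataset.display_name)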
+ """ + self._method = method + self._request = dataset_service.ListDatasetsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[dataset_service.ListDatasetsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[dataset.Dataset]: + async def async_generator(): + async for page in self.pages: + for response in page.datasets: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListDataItemsPager: + """A pager for iterating through ``list_data_items`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListDataItemsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``data_items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListDataItems`` requests and continue to iterate + through the ``data_items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListDataItemsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., dataset_service.ListDataItemsResponse], + request: dataset_service.ListDataItemsRequest, + response: dataset_service.ListDataItemsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListDataItemsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListDataItemsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = dataset_service.ListDataItemsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[dataset_service.ListDataItemsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[data_item.DataItem]: + for page in self.pages: + yield from page.data_items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListDataItemsAsyncPager: + """A pager for iterating through ``list_data_items`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListDataItemsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``data_items`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListDataItems`` requests and continue to iterate + through the ``data_items`` field on the + corresponding responses. 
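+
+    A hedged note: ``pages`` is implemented as an async generator property,
+    so page-wise iteration is written as ``async for page in pager.pages``.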
+ + All the usual :class:`google.cloud.aiplatform_v1.types.ListDataItemsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[dataset_service.ListDataItemsResponse]], + request: dataset_service.ListDataItemsRequest, + response: dataset_service.ListDataItemsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListDataItemsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListDataItemsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = dataset_service.ListDataItemsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[dataset_service.ListDataItemsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[data_item.DataItem]: + async def async_generator(): + async for page in self.pages: + for response in page.data_items: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListAnnotationsPager: + """A pager for iterating through ``list_annotations`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListAnnotationsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``annotations`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListAnnotations`` requests and continue to iterate + through the ``annotations`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListAnnotationsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., dataset_service.ListAnnotationsResponse], + request: dataset_service.ListAnnotationsRequest, + response: dataset_service.ListAnnotationsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListAnnotationsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListAnnotationsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
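+
+        A page-wise iteration sketch (hedged; ``pager`` stands for the
+        object returned by a ``list_annotations`` call)::
+
+            for page in pager.pages:
+                print(page.next_page_token, len(page.annotations))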
+ """ + self._method = method + self._request = dataset_service.ListAnnotationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[dataset_service.ListAnnotationsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[annotation.Annotation]: + for page in self.pages: + yield from page.annotations + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListAnnotationsAsyncPager: + """A pager for iterating through ``list_annotations`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListAnnotationsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``annotations`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListAnnotations`` requests and continue to iterate + through the ``annotations`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListAnnotationsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[dataset_service.ListAnnotationsResponse]], + request: dataset_service.ListAnnotationsRequest, + response: dataset_service.ListAnnotationsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListAnnotationsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListAnnotationsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
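+
+        A hedged note: attribute access such as ``pager.next_page_token``
+        is delegated by ``__getattr__`` to the most recently fetched
+        ``ListAnnotationsResponse``.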
+ """ + self._method = method + self._request = dataset_service.ListAnnotationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[dataset_service.ListAnnotationsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[annotation.Annotation]: + async def async_generator(): + async for page in self.pages: + for response in page.annotations: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/__init__.py new file mode 100644 index 0000000000..561b0c5cfd --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import DatasetServiceTransport +from .grpc import DatasetServiceGrpcTransport +from .grpc_asyncio import DatasetServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[DatasetServiceTransport]] +_transport_registry['grpc'] = DatasetServiceGrpcTransport +_transport_registry['grpc_asyncio'] = DatasetServiceGrpcAsyncIOTransport + +__all__ = ( + 'DatasetServiceTransport', + 'DatasetServiceGrpcTransport', + 'DatasetServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py new file mode 100644 index 0000000000..4f895a87f0 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py @@ -0,0 +1,304 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1.types import annotation_spec +from google.cloud.aiplatform_v1.types import dataset +from google.cloud.aiplatform_v1.types import dataset as gca_dataset +from google.cloud.aiplatform_v1.types import dataset_service +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class DatasetServiceTransport(abc.ABC): + """Abstract transport class for DatasetService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. 
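+        # The checks below implement that precedence: passing both an
+        # explicit credentials object and a credentials_file raises
+        # DuplicateCredentialArgs; a credentials_file is loaded via
+        # google.auth.load_credentials_from_file(); and when neither is
+        # given, Application Default Credentials are resolved through
+        # google.auth.default().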
+ if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials is service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # TODO(busunkim): This method is in the base transport + # to avoid duplicating code across the transport classes. These functions + # should be deleted once the minimum required versions of google-auth is increased. + + # TODO: Remove this function once google-auth >= 1.25.0 is required + @classmethod + def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: + """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" + + scopes_kwargs = {} + + if _GOOGLE_AUTH_VERSION and ( + packaging.version.parse(_GOOGLE_AUTH_VERSION) + >= packaging.version.parse("1.25.0") + ): + scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} + else: + scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} + + return scopes_kwargs + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_dataset: gapic_v1.method.wrap_method( + self.create_dataset, + default_timeout=5.0, + client_info=client_info, + ), + self.get_dataset: gapic_v1.method.wrap_method( + self.get_dataset, + default_timeout=5.0, + client_info=client_info, + ), + self.update_dataset: gapic_v1.method.wrap_method( + self.update_dataset, + default_timeout=5.0, + client_info=client_info, + ), + self.list_datasets: gapic_v1.method.wrap_method( + self.list_datasets, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_dataset: gapic_v1.method.wrap_method( + self.delete_dataset, + default_timeout=5.0, + client_info=client_info, + ), + self.import_data: gapic_v1.method.wrap_method( + self.import_data, + default_timeout=5.0, + client_info=client_info, + ), + self.export_data: gapic_v1.method.wrap_method( + self.export_data, + default_timeout=5.0, + client_info=client_info, + ), + self.list_data_items: gapic_v1.method.wrap_method( + self.list_data_items, + default_timeout=5.0, + client_info=client_info, + ), + self.get_annotation_spec: gapic_v1.method.wrap_method( + self.get_annotation_spec, + default_timeout=5.0, + client_info=client_info, + ), + self.list_annotations: gapic_v1.method.wrap_method( + self.list_annotations, + default_timeout=5.0, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_dataset(self) -> Callable[ + [dataset_service.CreateDatasetRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_dataset(self) -> Callable[ + [dataset_service.GetDatasetRequest], + Union[ + 
dataset.Dataset, + Awaitable[dataset.Dataset] + ]]: + raise NotImplementedError() + + @property + def update_dataset(self) -> Callable[ + [dataset_service.UpdateDatasetRequest], + Union[ + gca_dataset.Dataset, + Awaitable[gca_dataset.Dataset] + ]]: + raise NotImplementedError() + + @property + def list_datasets(self) -> Callable[ + [dataset_service.ListDatasetsRequest], + Union[ + dataset_service.ListDatasetsResponse, + Awaitable[dataset_service.ListDatasetsResponse] + ]]: + raise NotImplementedError() + + @property + def delete_dataset(self) -> Callable[ + [dataset_service.DeleteDatasetRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def import_data(self) -> Callable[ + [dataset_service.ImportDataRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def export_data(self) -> Callable[ + [dataset_service.ExportDataRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def list_data_items(self) -> Callable[ + [dataset_service.ListDataItemsRequest], + Union[ + dataset_service.ListDataItemsResponse, + Awaitable[dataset_service.ListDataItemsResponse] + ]]: + raise NotImplementedError() + + @property + def get_annotation_spec(self) -> Callable[ + [dataset_service.GetAnnotationSpecRequest], + Union[ + annotation_spec.AnnotationSpec, + Awaitable[annotation_spec.AnnotationSpec] + ]]: + raise NotImplementedError() + + @property + def list_annotations(self) -> Callable[ + [dataset_service.ListAnnotationsRequest], + Union[ + dataset_service.ListAnnotationsResponse, + Awaitable[dataset_service.ListAnnotationsResponse] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'DatasetServiceTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py new file mode 100644 index 0000000000..d3b6f3ff83 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py @@ -0,0 +1,506 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import warnings
+from typing import Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import grpc_helpers   # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google.api_core import gapic_v1       # type: ignore
+import google.auth                         # type: ignore
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+
+import grpc  # type: ignore
+
+from google.cloud.aiplatform_v1.types import annotation_spec
+from google.cloud.aiplatform_v1.types import dataset
+from google.cloud.aiplatform_v1.types import dataset as gca_dataset
+from google.cloud.aiplatform_v1.types import dataset_service
+from google.longrunning import operations_pb2  # type: ignore
+from .base import DatasetServiceTransport, DEFAULT_CLIENT_INFO
+
+
+class DatasetServiceGrpcTransport(DatasetServiceTransport):
+    """gRPC backend transport for DatasetService.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+    _stubs: Dict[str, Callable]
+
+    def __init__(self, *,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: str = None,
+            scopes: Sequence[str] = None,
+            channel: grpc.Channel = None,
+            api_mtls_endpoint: str = None,
+            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+            ssl_channel_credentials: grpc.ChannelCredentials = None,
+            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+            quota_project_id: Optional[str] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure mutual TLS channel.
It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. 
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service.
+        """
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Sanity check: Only create a new client if we do not already have one.
+        if self._operations_client is None:
+            self._operations_client = operations_v1.OperationsClient(
+                self.grpc_channel
+            )
+
+        # Return the client from cache.
+        return self._operations_client
+
+    @property
+    def create_dataset(self) -> Callable[
+            [dataset_service.CreateDatasetRequest],
+            operations_pb2.Operation]:
+        r"""Return a callable for the create dataset method over gRPC.
+
+        Creates a Dataset.
+
+        Returns:
+            Callable[[~.CreateDatasetRequest],
+                    ~.Operation]:
+            A function that, when called, will call the underlying RPC
+            on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'create_dataset' not in self._stubs:
+            self._stubs['create_dataset'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1.DatasetService/CreateDataset',
+                request_serializer=dataset_service.CreateDatasetRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['create_dataset']
+
+    @property
+    def get_dataset(self) -> Callable[
+            [dataset_service.GetDatasetRequest],
+            dataset.Dataset]:
+        r"""Return a callable for the get dataset method over gRPC.
+
+        Gets a Dataset.
+
+        Returns:
+            Callable[[~.GetDatasetRequest],
+                    ~.Dataset]:
+            A function that, when called, will call the underlying RPC
+            on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
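+        # The stub is cached in self._stubs under the method name, so
+        # repeated property access reuses a single multicallable rather
+        # than re-registering the method on the channel.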
+ if 'get_dataset' not in self._stubs: + self._stubs['get_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/GetDataset', + request_serializer=dataset_service.GetDatasetRequest.serialize, + response_deserializer=dataset.Dataset.deserialize, + ) + return self._stubs['get_dataset'] + + @property + def update_dataset(self) -> Callable[ + [dataset_service.UpdateDatasetRequest], + gca_dataset.Dataset]: + r"""Return a callable for the update dataset method over gRPC. + + Updates a Dataset. + + Returns: + Callable[[~.UpdateDatasetRequest], + ~.Dataset]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_dataset' not in self._stubs: + self._stubs['update_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/UpdateDataset', + request_serializer=dataset_service.UpdateDatasetRequest.serialize, + response_deserializer=gca_dataset.Dataset.deserialize, + ) + return self._stubs['update_dataset'] + + @property + def list_datasets(self) -> Callable[ + [dataset_service.ListDatasetsRequest], + dataset_service.ListDatasetsResponse]: + r"""Return a callable for the list datasets method over gRPC. + + Lists Datasets in a Location. + + Returns: + Callable[[~.ListDatasetsRequest], + ~.ListDatasetsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_datasets' not in self._stubs: + self._stubs['list_datasets'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/ListDatasets', + request_serializer=dataset_service.ListDatasetsRequest.serialize, + response_deserializer=dataset_service.ListDatasetsResponse.deserialize, + ) + return self._stubs['list_datasets'] + + @property + def delete_dataset(self) -> Callable[ + [dataset_service.DeleteDatasetRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete dataset method over gRPC. + + Deletes a Dataset. + + Returns: + Callable[[~.DeleteDatasetRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_dataset' not in self._stubs: + self._stubs['delete_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/DeleteDataset', + request_serializer=dataset_service.DeleteDatasetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_dataset'] + + @property + def import_data(self) -> Callable[ + [dataset_service.ImportDataRequest], + operations_pb2.Operation]: + r"""Return a callable for the import data method over gRPC. + + Imports data into a Dataset. + + Returns: + Callable[[~.ImportDataRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'import_data' not in self._stubs: + self._stubs['import_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/ImportData', + request_serializer=dataset_service.ImportDataRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['import_data'] + + @property + def export_data(self) -> Callable[ + [dataset_service.ExportDataRequest], + operations_pb2.Operation]: + r"""Return a callable for the export data method over gRPC. + + Exports data from a Dataset. + + Returns: + Callable[[~.ExportDataRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'export_data' not in self._stubs: + self._stubs['export_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/ExportData', + request_serializer=dataset_service.ExportDataRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_data'] + + @property + def list_data_items(self) -> Callable[ + [dataset_service.ListDataItemsRequest], + dataset_service.ListDataItemsResponse]: + r"""Return a callable for the list data items method over gRPC. + + Lists DataItems in a Dataset. + + Returns: + Callable[[~.ListDataItemsRequest], + ~.ListDataItemsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_data_items' not in self._stubs: + self._stubs['list_data_items'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/ListDataItems', + request_serializer=dataset_service.ListDataItemsRequest.serialize, + response_deserializer=dataset_service.ListDataItemsResponse.deserialize, + ) + return self._stubs['list_data_items'] + + @property + def get_annotation_spec(self) -> Callable[ + [dataset_service.GetAnnotationSpecRequest], + annotation_spec.AnnotationSpec]: + r"""Return a callable for the get annotation spec method over gRPC. + + Gets an AnnotationSpec. + + Returns: + Callable[[~.GetAnnotationSpecRequest], + ~.AnnotationSpec]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_annotation_spec' not in self._stubs: + self._stubs['get_annotation_spec'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/GetAnnotationSpec', + request_serializer=dataset_service.GetAnnotationSpecRequest.serialize, + response_deserializer=annotation_spec.AnnotationSpec.deserialize, + ) + return self._stubs['get_annotation_spec'] + + @property + def list_annotations(self) -> Callable[ + [dataset_service.ListAnnotationsRequest], + dataset_service.ListAnnotationsResponse]: + r"""Return a callable for the list annotations method over gRPC. 
+
+        Lists Annotations belonging to a DataItem.
+
+        Returns:
+            Callable[[~.ListAnnotationsRequest],
+                    ~.ListAnnotationsResponse]:
+            A function that, when called, will call the underlying RPC
+            on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'list_annotations' not in self._stubs:
+            self._stubs['list_annotations'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1.DatasetService/ListAnnotations',
+                request_serializer=dataset_service.ListAnnotationsRequest.serialize,
+                response_deserializer=dataset_service.ListAnnotationsResponse.deserialize,
+            )
+        return self._stubs['list_annotations']
+
+
+__all__ = (
+    'DatasetServiceGrpcTransport',
+)
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py
new file mode 100644
index 0000000000..3bf60c814a
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py
@@ -0,0 +1,510 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import gapic_v1            # type: ignore
+from google.api_core import grpc_helpers_async  # type: ignore
+from google.api_core import operations_v1       # type: ignore
+from google.auth import credentials as ga_credentials   # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+import packaging.version
+
+import grpc                        # type: ignore
+from grpc.experimental import aio  # type: ignore
+
+from google.cloud.aiplatform_v1.types import annotation_spec
+from google.cloud.aiplatform_v1.types import dataset
+from google.cloud.aiplatform_v1.types import dataset as gca_dataset
+from google.cloud.aiplatform_v1.types import dataset_service
+from google.longrunning import operations_pb2  # type: ignore
+from .base import DatasetServiceTransport, DEFAULT_CLIENT_INFO
+from .grpc import DatasetServiceGrpcTransport
+
+
+class DatasetServiceGrpcAsyncIOTransport(DatasetServiceTransport):
+    """gRPC AsyncIO backend transport for DatasetService.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
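+
+    A construction sketch (hedged; in practice the async client builds this
+    transport itself, and Application Default Credentials are assumed to be
+    configured)::
+
+        transport = DatasetServiceGrpcAsyncIOTransport(
+            host='aiplatform.googleapis.com',
+        )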
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(cls,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            quota_project_id: Optional[str] = None,
+            **kwargs) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    def __init__(self, *,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            channel: aio.Channel = None,
+            api_mtls_endpoint: str = None,
+            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+            ssl_channel_credentials: grpc.ChannelCredentials = None,
+            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+            quota_project_id=None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        self._grpc_channel = None
+        self._ssl_channel_credentials = ssl_channel_credentials
+        self._stubs: Dict[str, Callable] = {}
+        self._operations_client = None
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Ignore credentials if a channel was passed.
+            credentials = False
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+        else:
+            if api_mtls_endpoint:
+                host = api_mtls_endpoint
+
+                # Create SSL credentials with client_cert_source or application
+                # default SSL credentials.
+                if client_cert_source:
+                    cert, key = client_cert_source()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+                else:
+                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+            else:
+                if client_cert_source_for_mtls and not ssl_channel_credentials:
+                    cert, key = client_cert_source_for_mtls()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+
+        # The base transport sets the host, credentials and scopes
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+        )
+
+        if not self._grpc_channel:
+            self._grpc_channel = type(self).create_channel(
+                self._host,
+                credentials=self._credentials,
+                credentials_file=credentials_file,
+                scopes=self._scopes,
+                ssl_credentials=self._ssl_channel_credentials,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        # Wrap messages. This must be done after self._grpc_channel exists
+        self._prep_wrapped_messages(client_info)
+
+    @property
+    def grpc_channel(self) -> aio.Channel:
+        """Create the channel designed to connect to this service.
+
+        This property caches on the instance; repeated calls return
+        the same channel.
+        """
+        # Return the channel from cache.
+ return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_dataset(self) -> Callable[ + [dataset_service.CreateDatasetRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create dataset method over gRPC. + + Creates a Dataset. + + Returns: + Callable[[~.CreateDatasetRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_dataset' not in self._stubs: + self._stubs['create_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/CreateDataset', + request_serializer=dataset_service.CreateDatasetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_dataset'] + + @property + def get_dataset(self) -> Callable[ + [dataset_service.GetDatasetRequest], + Awaitable[dataset.Dataset]]: + r"""Return a callable for the get dataset method over gRPC. + + Gets a Dataset. + + Returns: + Callable[[~.GetDatasetRequest], + Awaitable[~.Dataset]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_dataset' not in self._stubs: + self._stubs['get_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/GetDataset', + request_serializer=dataset_service.GetDatasetRequest.serialize, + response_deserializer=dataset.Dataset.deserialize, + ) + return self._stubs['get_dataset'] + + @property + def update_dataset(self) -> Callable[ + [dataset_service.UpdateDatasetRequest], + Awaitable[gca_dataset.Dataset]]: + r"""Return a callable for the update dataset method over gRPC. + + Updates a Dataset. + + Returns: + Callable[[~.UpdateDatasetRequest], + Awaitable[~.Dataset]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_dataset' not in self._stubs: + self._stubs['update_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/UpdateDataset', + request_serializer=dataset_service.UpdateDatasetRequest.serialize, + response_deserializer=gca_dataset.Dataset.deserialize, + ) + return self._stubs['update_dataset'] + + @property + def list_datasets(self) -> Callable[ + [dataset_service.ListDatasetsRequest], + Awaitable[dataset_service.ListDatasetsResponse]]: + r"""Return a callable for the list datasets method over gRPC. + + Lists Datasets in a Location. 
+ + Returns: + Callable[[~.ListDatasetsRequest], + Awaitable[~.ListDatasetsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_datasets' not in self._stubs: + self._stubs['list_datasets'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/ListDatasets', + request_serializer=dataset_service.ListDatasetsRequest.serialize, + response_deserializer=dataset_service.ListDatasetsResponse.deserialize, + ) + return self._stubs['list_datasets'] + + @property + def delete_dataset(self) -> Callable[ + [dataset_service.DeleteDatasetRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete dataset method over gRPC. + + Deletes a Dataset. + + Returns: + Callable[[~.DeleteDatasetRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_dataset' not in self._stubs: + self._stubs['delete_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/DeleteDataset', + request_serializer=dataset_service.DeleteDatasetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_dataset'] + + @property + def import_data(self) -> Callable[ + [dataset_service.ImportDataRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the import data method over gRPC. + + Imports data into a Dataset. + + Returns: + Callable[[~.ImportDataRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'import_data' not in self._stubs: + self._stubs['import_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/ImportData', + request_serializer=dataset_service.ImportDataRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['import_data'] + + @property + def export_data(self) -> Callable[ + [dataset_service.ExportDataRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the export data method over gRPC. + + Exports data from a Dataset. + + Returns: + Callable[[~.ExportDataRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if 'export_data' not in self._stubs:
+            self._stubs['export_data'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1.DatasetService/ExportData',
+                request_serializer=dataset_service.ExportDataRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['export_data']
+
+    @property
+    def list_data_items(self) -> Callable[
+            [dataset_service.ListDataItemsRequest],
+            Awaitable[dataset_service.ListDataItemsResponse]]:
+        r"""Return a callable for the list data items method over gRPC.
+
+        Lists DataItems in a Dataset.
+
+        Returns:
+            Callable[[~.ListDataItemsRequest],
+                    Awaitable[~.ListDataItemsResponse]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'list_data_items' not in self._stubs:
+            self._stubs['list_data_items'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1.DatasetService/ListDataItems',
+                request_serializer=dataset_service.ListDataItemsRequest.serialize,
+                response_deserializer=dataset_service.ListDataItemsResponse.deserialize,
+            )
+        return self._stubs['list_data_items']
+
+    @property
+    def get_annotation_spec(self) -> Callable[
+            [dataset_service.GetAnnotationSpecRequest],
+            Awaitable[annotation_spec.AnnotationSpec]]:
+        r"""Return a callable for the get annotation spec method over gRPC.
+
+        Gets an AnnotationSpec.
+
+        Returns:
+            Callable[[~.GetAnnotationSpecRequest],
+                    Awaitable[~.AnnotationSpec]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'get_annotation_spec' not in self._stubs:
+            self._stubs['get_annotation_spec'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1.DatasetService/GetAnnotationSpec',
+                request_serializer=dataset_service.GetAnnotationSpecRequest.serialize,
+                response_deserializer=annotation_spec.AnnotationSpec.deserialize,
+            )
+        return self._stubs['get_annotation_spec']
+
+    @property
+    def list_annotations(self) -> Callable[
+            [dataset_service.ListAnnotationsRequest],
+            Awaitable[dataset_service.ListAnnotationsResponse]]:
+        r"""Return a callable for the list annotations method over gRPC.
+
+        Lists Annotations belonging to a DataItem.
+
+        Returns:
+            Callable[[~.ListAnnotationsRequest],
+                    Awaitable[~.ListAnnotationsResponse]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if 'list_annotations' not in self._stubs: + self._stubs['list_annotations'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.DatasetService/ListAnnotations', + request_serializer=dataset_service.ListAnnotationsRequest.serialize, + response_deserializer=dataset_service.ListAnnotationsResponse.deserialize, + ) + return self._stubs['list_annotations'] + + +__all__ = ( + 'DatasetServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/__init__.py new file mode 100644 index 0000000000..7db43e768e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import EndpointServiceClient +from .async_client import EndpointServiceAsyncClient + +__all__ = ( + 'EndpointServiceClient', + 'EndpointServiceAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py new file mode 100644 index 0000000000..16b4fe3f32 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py @@ -0,0 +1,860 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
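A note on the transport code above: the generated DatasetServiceGrpcAsyncIOTransport builds each RPC stub lazily, so the first access to a property such as create_dataset registers a unary_unary handler on the shared channel and caches it in _stubs, and every later access reuses the cached stub. A minimal sketch of that idiom follows; it is not part of the patch, and the names (DemoTransport, channel) are illustrative only.

# Hedged sketch, not generated code: mirrors the lazy stub-caching pattern
# used by DatasetServiceGrpcAsyncIOTransport above.
from typing import Callable, Dict

class DemoTransport:
    def __init__(self, channel):
        self._channel = channel                 # shared gRPC channel
        self._stubs: Dict[str, Callable] = {}   # one cached stub per RPC

    @property
    def create_dataset(self) -> Callable:
        # Build the stub on first access; afterwards, return the cached one.
        if 'create_dataset' not in self._stubs:
            self._stubs['create_dataset'] = self._channel.unary_unary(
                '/google.cloud.aiplatform.v1.DatasetService/CreateDataset',
            )
        return self._stubs['create_dataset']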
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.endpoint_service import pagers +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import endpoint +from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint +from google.cloud.aiplatform_v1.types import endpoint_service +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import EndpointServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import EndpointServiceGrpcAsyncIOTransport +from .client import EndpointServiceClient + + +class EndpointServiceAsyncClient: + """""" + + _client: EndpointServiceClient + + DEFAULT_ENDPOINT = EndpointServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = EndpointServiceClient.DEFAULT_MTLS_ENDPOINT + + endpoint_path = staticmethod(EndpointServiceClient.endpoint_path) + parse_endpoint_path = staticmethod(EndpointServiceClient.parse_endpoint_path) + model_path = staticmethod(EndpointServiceClient.model_path) + parse_model_path = staticmethod(EndpointServiceClient.parse_model_path) + common_billing_account_path = staticmethod(EndpointServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(EndpointServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(EndpointServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(EndpointServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(EndpointServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(EndpointServiceClient.parse_common_organization_path) + common_project_path = staticmethod(EndpointServiceClient.common_project_path) + parse_common_project_path = staticmethod(EndpointServiceClient.parse_common_project_path) + common_location_path = staticmethod(EndpointServiceClient.common_location_path) + parse_common_location_path = staticmethod(EndpointServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + EndpointServiceAsyncClient: The constructed client. + """ + return EndpointServiceClient.from_service_account_info.__func__(EndpointServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. 
+ + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + EndpointServiceAsyncClient: The constructed client. + """ + return EndpointServiceClient.from_service_account_file.__func__(EndpointServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> EndpointServiceTransport: + """Returns the transport used by the client instance. + + Returns: + EndpointServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(EndpointServiceClient).get_transport_class, type(EndpointServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, EndpointServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the endpoint service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.EndpointServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = EndpointServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_endpoint(self, + request: endpoint_service.CreateEndpointRequest = None, + *, + parent: str = None, + endpoint: gca_endpoint.Endpoint = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates an Endpoint. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.CreateEndpointRequest`): + The request object. Request message for + [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1.EndpointService.CreateEndpoint]. + parent (:class:`str`): + Required. The resource name of the Location to create + the Endpoint in. 
Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + endpoint (:class:`google.cloud.aiplatform_v1.types.Endpoint`): + Required. The Endpoint to create. + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Endpoint` Models are deployed into it, and afterwards Endpoint is called to obtain + predictions and explanations. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, endpoint]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = endpoint_service.CreateEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if endpoint is not None: + request.endpoint = endpoint + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_endpoint, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_endpoint.Endpoint, + metadata_type=endpoint_service.CreateEndpointOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_endpoint(self, + request: endpoint_service.GetEndpointRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> endpoint.Endpoint: + r"""Gets an Endpoint. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.GetEndpointRequest`): + The request object. Request message for + [EndpointService.GetEndpoint][google.cloud.aiplatform.v1.EndpointService.GetEndpoint] + name (:class:`str`): + Required. The name of the Endpoint resource. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1.types.Endpoint: + Models are deployed into it, and + afterwards Endpoint is called to obtain + predictions and explanations. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = endpoint_service.GetEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_endpoint, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_endpoints(self, + request: endpoint_service.ListEndpointsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListEndpointsAsyncPager: + r"""Lists Endpoints in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.ListEndpointsRequest`): + The request object. Request message for + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints]. + parent (:class:`str`): + Required. The resource name of the Location from which + to list the Endpoints. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.endpoint_service.pagers.ListEndpointsAsyncPager: + Response message for + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = endpoint_service.ListEndpointsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_endpoints, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListEndpointsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_endpoint(self, + request: endpoint_service.UpdateEndpointRequest = None, + *, + endpoint: gca_endpoint.Endpoint = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_endpoint.Endpoint: + r"""Updates an Endpoint. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.UpdateEndpointRequest`): + The request object. Request message for + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint]. + endpoint (:class:`google.cloud.aiplatform_v1.types.Endpoint`): + Required. The Endpoint which replaces + the resource on the server. + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The update mask applies to the resource. See + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Endpoint: + Models are deployed into it, and + afterwards Endpoint is called to obtain + predictions and explanations. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([endpoint, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = endpoint_service.UpdateEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_endpoint, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint.name", request.endpoint.name), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_endpoint(self, + request: endpoint_service.DeleteEndpointRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes an Endpoint. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.DeleteEndpointRequest`): + The request object. Request message for + [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1.EndpointService.DeleteEndpoint]. + name (:class:`str`): + Required. The name of the Endpoint resource to be + deleted. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = endpoint_service.DeleteEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_endpoint, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+        return response
+
+    async def deploy_model(self,
+            request: endpoint_service.DeployModelRequest = None,
+            *,
+            endpoint: str = None,
+            deployed_model: gca_endpoint.DeployedModel = None,
+            traffic_split: Sequence[endpoint_service.DeployModelRequest.TrafficSplitEntry] = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> operation_async.AsyncOperation:
+        r"""Deploys a Model into this Endpoint, creating a
+        DeployedModel within it.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1.types.DeployModelRequest`):
+                The request object. Request message for
+                [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel].
+            endpoint (:class:`str`):
+                Required. The name of the Endpoint resource into which
+                to deploy a Model. Format:
+                ``projects/{project}/locations/{location}/endpoints/{endpoint}``
+
+                This corresponds to the ``endpoint`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            deployed_model (:class:`google.cloud.aiplatform_v1.types.DeployedModel`):
+                Required. The DeployedModel to be created within the
+                Endpoint. Note that
+                [Endpoint.traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]
+                must be updated for the DeployedModel to start receiving
+                traffic, either as part of this call, or via
+                [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint].
+
+                This corresponds to the ``deployed_model`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            traffic_split (:class:`Sequence[google.cloud.aiplatform_v1.types.DeployModelRequest.TrafficSplitEntry]`):
+                A map from a DeployedModel's ID to the percentage of
+                this Endpoint's traffic that should be forwarded to that
+                DeployedModel.
+
+                If this field is non-empty, then the Endpoint's
+                [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]
+                will be overwritten with it. To refer to the ID of the
+                Model being deployed by this call, "0" should be used; the
+                actual ID of the new DeployedModel will be filled in its
+                place by this method. The traffic percentage values must
+                add up to 100.
+
+                If this field is empty, then the Endpoint's
+                [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]
+                is not updated.
+
+                This corresponds to the ``traffic_split`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation_async.AsyncOperation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`google.cloud.aiplatform_v1.types.DeployModelResponse`
+                Response message for
+                [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel].
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([endpoint, deployed_model, traffic_split])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = endpoint_service.DeployModelRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if endpoint is not None:
+            request.endpoint = endpoint
+        if deployed_model is not None:
+            request.deployed_model = deployed_model
+
+        if traffic_split:
+            request.traffic_split.update(traffic_split)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.deploy_model,
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("endpoint", request.endpoint),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
+        response = operation_async.from_gapic(
+            response,
+            self._client._transport.operations_client,
+            endpoint_service.DeployModelResponse,
+            metadata_type=endpoint_service.DeployModelOperationMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def undeploy_model(self,
+            request: endpoint_service.UndeployModelRequest = None,
+            *,
+            endpoint: str = None,
+            deployed_model_id: str = None,
+            traffic_split: Sequence[endpoint_service.UndeployModelRequest.TrafficSplitEntry] = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> operation_async.AsyncOperation:
+        r"""Undeploys a Model from an Endpoint, removing a
+        DeployedModel from it, and freeing all resources it's
+        using.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1.types.UndeployModelRequest`):
+                The request object. Request message for
+                [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel].
+            endpoint (:class:`str`):
+                Required. The name of the Endpoint resource from which
+                to undeploy a Model. Format:
+                ``projects/{project}/locations/{location}/endpoints/{endpoint}``
+
+                This corresponds to the ``endpoint`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            deployed_model_id (:class:`str`):
+                Required. The ID of the DeployedModel
+                to be undeployed from the Endpoint.
+
+                This corresponds to the ``deployed_model_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            traffic_split (:class:`Sequence[google.cloud.aiplatform_v1.types.UndeployModelRequest.TrafficSplitEntry]`):
+                If this field is provided, then the Endpoint's
+                [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]
+                will be overwritten with it. If the last DeployedModel is
+                being undeployed from the Endpoint, the
+                [Endpoint.traffic_split] will always end up empty when
+                this call returns. A DeployedModel will be successfully
+                undeployed only if it doesn't have any traffic assigned
+                to it when this method executes, or if this field
+                unassigns any traffic from it.
+
+                This corresponds to the ``traffic_split`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.UndeployModelResponse` + Response message for + [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([endpoint, deployed_model_id, traffic_split]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = endpoint_service.UndeployModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if deployed_model_id is not None: + request.deployed_model_id = deployed_model_id + + if traffic_split: + request.traffic_split.update(traffic_split) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.undeploy_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + endpoint_service.UndeployModelResponse, + metadata_type=endpoint_service.UndeployModelOperationMetadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "EndpointServiceAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/client.py new file mode 100644 index 0000000000..eb2fa99da5 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/client.py @@ -0,0 +1,1054 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
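Before the synchronous client below, a hedged usage sketch for the EndpointServiceAsyncClient defined above. It is not part of the patch; "my-project" and "us-central1" are placeholders, and it assumes Application Default Credentials are configured in the environment.

# Sketch only -- demonstrates the async client and its ListEndpoints pager,
# which resolves additional pages transparently during async iteration.
import asyncio

from google.cloud import aiplatform_v1


async def main():
    client = aiplatform_v1.EndpointServiceAsyncClient()
    pager = await client.list_endpoints(
        parent="projects/my-project/locations/us-central1",  # placeholder
    )
    async for endpoint in pager:
        print(endpoint.name)


asyncio.run(main())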
+#
+from collections import OrderedDict
+from distutils import util
+import os
+import re
+from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
+import pkg_resources
+
+from google.api_core import client_options as client_options_lib # type: ignore
+from google.api_core import exceptions as core_exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
+from google.auth.transport import mtls # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+from google.auth.exceptions import MutualTLSChannelError # type: ignore
+from google.oauth2 import service_account # type: ignore
+
+from google.api_core import operation as gac_operation # type: ignore
+from google.api_core import operation_async # type: ignore
+from google.cloud.aiplatform_v1.services.endpoint_service import pagers
+from google.cloud.aiplatform_v1.types import encryption_spec
+from google.cloud.aiplatform_v1.types import endpoint
+from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint
+from google.cloud.aiplatform_v1.types import endpoint_service
+from google.cloud.aiplatform_v1.types import operation as gca_operation
+from google.protobuf import empty_pb2 # type: ignore
+from google.protobuf import field_mask_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
+from .transports.base import EndpointServiceTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc import EndpointServiceGrpcTransport
+from .transports.grpc_asyncio import EndpointServiceGrpcAsyncIOTransport
+
+
+class EndpointServiceClientMeta(type):
+    """Metaclass for the EndpointService client.
+
+    This provides class-level methods for building and retrieving
+    support objects (e.g. transport) without polluting the client instance
+    objects.
+    """
+    _transport_registry = OrderedDict() # type: Dict[str, Type[EndpointServiceTransport]]
+    _transport_registry["grpc"] = EndpointServiceGrpcTransport
+    _transport_registry["grpc_asyncio"] = EndpointServiceGrpcAsyncIOTransport
+
+    def get_transport_class(cls,
+            label: str = None,
+            ) -> Type[EndpointServiceTransport]:
+        """Returns an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class EndpointServiceClient(metaclass=EndpointServiceClientMeta):
+    """"""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + EndpointServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + EndpointServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> EndpointServiceTransport: + """Returns the transport used by the client instance. + + Returns: + EndpointServiceTransport: The transport used by the client + instance. 
+        """
+        return self._transport
+
+    @staticmethod
+    def endpoint_path(project: str,location: str,endpoint: str,) -> str:
+        """Returns a fully-qualified endpoint string."""
+        return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, )
+
+    @staticmethod
+    def parse_endpoint_path(path: str) -> Dict[str,str]:
+        """Parses an endpoint path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/endpoints/(?P<endpoint>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def model_path(project: str,location: str,model: str,) -> str:
+        """Returns a fully-qualified model string."""
+        return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, )
+
+    @staticmethod
+    def parse_model_path(path: str) -> Dict[str,str]:
+        """Parses a model path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse an organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, EndpointServiceTransport, None] = None,
+            client_options: Optional[client_options_lib.ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the endpoint service client.
+ + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, EndpointServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, EndpointServiceTransport): + # transport is a EndpointServiceTransport instance. 
+ if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_endpoint(self, + request: endpoint_service.CreateEndpointRequest = None, + *, + parent: str = None, + endpoint: gca_endpoint.Endpoint = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates an Endpoint. + + Args: + request (google.cloud.aiplatform_v1.types.CreateEndpointRequest): + The request object. Request message for + [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1.EndpointService.CreateEndpoint]. + parent (str): + Required. The resource name of the Location to create + the Endpoint in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + endpoint (google.cloud.aiplatform_v1.types.Endpoint): + Required. The Endpoint to create. + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Endpoint` Models are deployed into it, and afterwards Endpoint is called to obtain + predictions and explanations. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, endpoint]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a endpoint_service.CreateEndpointRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, endpoint_service.CreateEndpointRequest): + request = endpoint_service.CreateEndpointRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if endpoint is not None: + request.endpoint = endpoint + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_endpoint] + + # Certain fields should be provided within the metadata header; + # add these here. 
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("parent", request.parent),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
+        response = gac_operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            gca_endpoint.Endpoint,
+            metadata_type=endpoint_service.CreateEndpointOperationMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def get_endpoint(self,
+            request: endpoint_service.GetEndpointRequest = None,
+            *,
+            name: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> endpoint.Endpoint:
+        r"""Gets an Endpoint.
+
+        Args:
+            request (google.cloud.aiplatform_v1.types.GetEndpointRequest):
+                The request object. Request message for
+                [EndpointService.GetEndpoint][google.cloud.aiplatform.v1.EndpointService.GetEndpoint].
+            name (str):
+                Required. The name of the Endpoint resource. Format:
+                ``projects/{project}/locations/{location}/endpoints/{endpoint}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1.types.Endpoint:
+                Models are deployed into it, and
+                afterwards the Endpoint is called to obtain
+                predictions and explanations.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in an endpoint_service.GetEndpointRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, endpoint_service.GetEndpointRequest):
+            request = endpoint_service.GetEndpointRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.get_endpoint]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def list_endpoints(self,
+            request: endpoint_service.ListEndpointsRequest = None,
+            *,
+            parent: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> pagers.ListEndpointsPager:
+        r"""Lists Endpoints in a Location.
+
+        Args:
+            request (google.cloud.aiplatform_v1.types.ListEndpointsRequest):
+                The request object.
Request message for
+                [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints].
+            parent (str):
+                Required. The resource name of the Location from which
+                to list the Endpoints. Format:
+                ``projects/{project}/locations/{location}``
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1.services.endpoint_service.pagers.ListEndpointsPager:
+                Response message for
+                [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints].
+
+                Iterating over this object will yield results and
+                resolve additional pages automatically.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in an endpoint_service.ListEndpointsRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, endpoint_service.ListEndpointsRequest):
+            request = endpoint_service.ListEndpointsRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if parent is not None:
+            request.parent = parent
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.list_endpoints]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("parent", request.parent),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # This method is paged; wrap the response in a pager, which provides
+        # an `__iter__` convenience method.
+        response = pagers.ListEndpointsPager(
+            method=rpc,
+            request=request,
+            response=response,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def update_endpoint(self,
+            request: endpoint_service.UpdateEndpointRequest = None,
+            *,
+            endpoint: gca_endpoint.Endpoint = None,
+            update_mask: field_mask_pb2.FieldMask = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> gca_endpoint.Endpoint:
+        r"""Updates an Endpoint.
+
+        Args:
+            request (google.cloud.aiplatform_v1.types.UpdateEndpointRequest):
+                The request object. Request message for
+                [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint].
+            endpoint (google.cloud.aiplatform_v1.types.Endpoint):
+                Required. The Endpoint which replaces
+                the resource on the server.
+
+                This corresponds to the ``endpoint`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            update_mask (google.protobuf.field_mask_pb2.FieldMask):
+                Required. The update mask applies to the resource.
See
+                [google.protobuf.FieldMask][google.protobuf.FieldMask].
+
+                This corresponds to the ``update_mask`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1.types.Endpoint:
+                Models are deployed into it, and
+                afterwards the Endpoint is called to obtain
+                predictions and explanations.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([endpoint, update_mask])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in an endpoint_service.UpdateEndpointRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, endpoint_service.UpdateEndpointRequest):
+            request = endpoint_service.UpdateEndpointRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if endpoint is not None:
+            request.endpoint = endpoint
+        if update_mask is not None:
+            request.update_mask = update_mask
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.update_endpoint]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("endpoint.name", request.endpoint.name),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def delete_endpoint(self,
+            request: endpoint_service.DeleteEndpointRequest = None,
+            *,
+            name: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> gac_operation.Operation:
+        r"""Deletes an Endpoint.
+
+        Args:
+            request (google.cloud.aiplatform_v1.types.DeleteEndpointRequest):
+                The request object. Request message for
+                [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1.EndpointService.DeleteEndpoint].
+            name (str):
+                Required. The name of the Endpoint resource to be
+                deleted. Format:
+                ``projects/{project}/locations/{location}/endpoints/{endpoint}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`google.protobuf.empty_pb2.Empty`, a generic empty
+                message that you can re-use to avoid defining duplicated
+                empty messages in your APIs.
A typical example is to use it as the request or the
+                response type of an API method. For instance:
+
+                    service Foo {
+                        rpc Bar(google.protobuf.Empty) returns
+                        (google.protobuf.Empty);
+                    }
+
+                The JSON representation for Empty is an empty JSON
+                object ``{}``.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in an endpoint_service.DeleteEndpointRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, endpoint_service.DeleteEndpointRequest):
+            request = endpoint_service.DeleteEndpointRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.delete_endpoint]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
+        response = gac_operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            empty_pb2.Empty,
+            metadata_type=gca_operation.DeleteOperationMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def deploy_model(self,
+            request: endpoint_service.DeployModelRequest = None,
+            *,
+            endpoint: str = None,
+            deployed_model: gca_endpoint.DeployedModel = None,
+            traffic_split: Sequence[endpoint_service.DeployModelRequest.TrafficSplitEntry] = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> gac_operation.Operation:
+        r"""Deploys a Model into this Endpoint, creating a
+        DeployedModel within it.
+
+        Args:
+            request (google.cloud.aiplatform_v1.types.DeployModelRequest):
+                The request object. Request message for
+                [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel].
+            endpoint (str):
+                Required. The name of the Endpoint resource into which
+                to deploy a Model. Format:
+                ``projects/{project}/locations/{location}/endpoints/{endpoint}``
+
+                This corresponds to the ``endpoint`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            deployed_model (google.cloud.aiplatform_v1.types.DeployedModel):
+                Required. The DeployedModel to be created within the
+                Endpoint. Note that
+                [Endpoint.traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]
+                must be updated for the DeployedModel to start receiving
+                traffic, either as part of this call, or via
+                [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint].
+
+                This corresponds to the ``deployed_model`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+
+            traffic_split (Sequence[google.cloud.aiplatform_v1.types.DeployModelRequest.TrafficSplitEntry]):
+                A map from a DeployedModel's ID to the percentage of
+                this Endpoint's traffic that should be forwarded to that
+                DeployedModel.
+
+                If this field is non-empty, then the Endpoint's
+                [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]
+                will be overwritten with it. To refer to the ID of the
+                Model being deployed by this request, use "0"; the
+                actual ID of the new DeployedModel will be filled in its
+                place by this method. The traffic percentage values must
+                add up to 100.
+
+                If this field is empty, then the Endpoint's
+                [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]
+                is not updated.
+
+                This corresponds to the ``traffic_split`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`google.cloud.aiplatform_v1.types.DeployModelResponse`,
+                the response message for
+                [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel].
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([endpoint, deployed_model, traffic_split])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in an endpoint_service.DeployModelRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, endpoint_service.DeployModelRequest):
+            request = endpoint_service.DeployModelRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if endpoint is not None:
+            request.endpoint = endpoint
+        if deployed_model is not None:
+            request.deployed_model = deployed_model
+        if traffic_split is not None:
+            request.traffic_split = traffic_split
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.deploy_model]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("endpoint", request.endpoint),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
+        response = gac_operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            endpoint_service.DeployModelResponse,
+            metadata_type=endpoint_service.DeployModelOperationMetadata,
+        )
+
+        # Done; return the response.
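+        # Illustrative usage (hypothetical resource values, not generated
+        # code): the caller blocks on the returned future for the final
+        # DeployModelResponse, e.g.
+        #
+        #     operation = client.deploy_model(
+        #         endpoint="projects/my-project/locations/us-central1/endpoints/123",
+        #         deployed_model=my_deployed_model,
+        #         traffic_split={"0": 100},
+        #     )
+        #     deploy_response = operation.result()  # waits for the LRO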
+        return response
+
+    def undeploy_model(self,
+            request: endpoint_service.UndeployModelRequest = None,
+            *,
+            endpoint: str = None,
+            deployed_model_id: str = None,
+            traffic_split: Sequence[endpoint_service.UndeployModelRequest.TrafficSplitEntry] = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> gac_operation.Operation:
+        r"""Undeploys a Model from an Endpoint, removing a
+        DeployedModel from it, and freeing all resources it's
+        using.
+
+        Args:
+            request (google.cloud.aiplatform_v1.types.UndeployModelRequest):
+                The request object. Request message for
+                [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel].
+            endpoint (str):
+                Required. The name of the Endpoint resource from which
+                to undeploy a Model. Format:
+                ``projects/{project}/locations/{location}/endpoints/{endpoint}``
+
+                This corresponds to the ``endpoint`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            deployed_model_id (str):
+                Required. The ID of the DeployedModel
+                to be undeployed from the Endpoint.
+
+                This corresponds to the ``deployed_model_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            traffic_split (Sequence[google.cloud.aiplatform_v1.types.UndeployModelRequest.TrafficSplitEntry]):
+                If this field is provided, then the Endpoint's
+                [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]
+                will be overwritten with it. If the last DeployedModel is
+                being undeployed from the Endpoint, the
+                [Endpoint.traffic_split] will always end up empty when
+                this call returns. A DeployedModel will be successfully
+                undeployed only if it doesn't have any traffic assigned
+                to it when this method executes, or if this field
+                unassigns any traffic to it.
+
+                This corresponds to the ``traffic_split`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`google.cloud.aiplatform_v1.types.UndeployModelResponse`,
+                the response message for
+                [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel].
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([endpoint, deployed_model_id, traffic_split])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in an endpoint_service.UndeployModelRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, endpoint_service.UndeployModelRequest):
+            request = endpoint_service.UndeployModelRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
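+        # (Illustrative note, with hypothetical IDs: passing
+        # traffic_split={"4567": 100} here reassigns all traffic to the
+        # remaining DeployedModel "4567" as part of the same call.)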
+ if endpoint is not None: + request.endpoint = endpoint + if deployed_model_id is not None: + request.deployed_model_id = deployed_model_id + if traffic_split is not None: + request.traffic_split = traffic_split + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.undeploy_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + endpoint_service.UndeployModelResponse, + metadata_type=endpoint_service.UndeployModelOperationMetadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "EndpointServiceClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py new file mode 100644 index 0000000000..98d4f5eda7 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py @@ -0,0 +1,141 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.cloud.aiplatform_v1.types import endpoint +from google.cloud.aiplatform_v1.types import endpoint_service + + +class ListEndpointsPager: + """A pager for iterating through ``list_endpoints`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListEndpointsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``endpoints`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListEndpoints`` requests and continue to iterate + through the ``endpoints`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListEndpointsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., endpoint_service.ListEndpointsResponse], + request: endpoint_service.ListEndpointsRequest, + response: endpoint_service.ListEndpointsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListEndpointsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListEndpointsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = endpoint_service.ListEndpointsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[endpoint_service.ListEndpointsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[endpoint.Endpoint]: + for page in self.pages: + yield from page.endpoints + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListEndpointsAsyncPager: + """A pager for iterating through ``list_endpoints`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListEndpointsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``endpoints`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListEndpoints`` requests and continue to iterate + through the ``endpoints`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListEndpointsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[endpoint_service.ListEndpointsResponse]], + request: endpoint_service.ListEndpointsRequest, + response: endpoint_service.ListEndpointsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListEndpointsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListEndpointsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = endpoint_service.ListEndpointsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[endpoint_service.ListEndpointsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[endpoint.Endpoint]: + async def async_generator(): + async for page in self.pages: + for response in page.endpoints: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/__init__.py new file mode 100644 index 0000000000..a062fc074c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import EndpointServiceTransport +from .grpc import EndpointServiceGrpcTransport +from .grpc_asyncio import EndpointServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[EndpointServiceTransport]] +_transport_registry['grpc'] = EndpointServiceGrpcTransport +_transport_registry['grpc_asyncio'] = EndpointServiceGrpcAsyncIOTransport + +__all__ = ( + 'EndpointServiceTransport', + 'EndpointServiceGrpcTransport', + 'EndpointServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py new file mode 100644 index 0000000000..fa9af900e8 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py @@ -0,0 +1,261 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1.types import endpoint +from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint +from google.cloud.aiplatform_v1.types import endpoint_service +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class EndpointServiceTransport(abc.ABC): + """Abstract transport class for EndpointService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. 
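+        # (Illustrative note: when neither argument is given, the
+        # google.auth.default() call below resolves Application Default
+        # Credentials, e.g. from the GOOGLE_APPLICATION_CREDENTIALS
+        # environment variable or the environment's attached service account.)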
+        if credentials and credentials_file:
+            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
+
+        if credentials_file is not None:
+            credentials, _ = google.auth.load_credentials_from_file(
+                credentials_file,
+                **scopes_kwargs,
+                quota_project_id=quota_project_id
+            )
+
+        elif credentials is None:
+            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
+
+        # If the credentials are service account credentials, then always try to use self signed JWT.
+        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
+            credentials = credentials.with_always_use_jwt_access(True)
+
+        # Save the credentials.
+        self._credentials = credentials
+
+    # TODO(busunkim): This helper lives in the base transport
+    # to avoid duplicating code across the transport classes. It
+    # should be deleted once the minimum required version of google-auth is increased.
+
+    # TODO: Remove this function once google-auth >= 1.25.0 is required
+    @classmethod
+    def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
+        """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version."""
+
+        scopes_kwargs = {}
+
+        if _GOOGLE_AUTH_VERSION and (
+            packaging.version.parse(_GOOGLE_AUTH_VERSION)
+            >= packaging.version.parse("1.25.0")
+        ):
+            scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
+        else:
+            scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
+
+        return scopes_kwargs
+
+    def _prep_wrapped_messages(self, client_info):
+        # Precompute the wrapped methods.
+        self._wrapped_methods = {
+            self.create_endpoint: gapic_v1.method.wrap_method(
+                self.create_endpoint,
+                default_timeout=5.0,
+                client_info=client_info,
+            ),
+            self.get_endpoint: gapic_v1.method.wrap_method(
+                self.get_endpoint,
+                default_timeout=5.0,
+                client_info=client_info,
+            ),
+            self.list_endpoints: gapic_v1.method.wrap_method(
+                self.list_endpoints,
+                default_timeout=5.0,
+                client_info=client_info,
+            ),
+            self.update_endpoint: gapic_v1.method.wrap_method(
+                self.update_endpoint,
+                default_timeout=5.0,
+                client_info=client_info,
+            ),
+            self.delete_endpoint: gapic_v1.method.wrap_method(
+                self.delete_endpoint,
+                default_timeout=5.0,
+                client_info=client_info,
+            ),
+            self.deploy_model: gapic_v1.method.wrap_method(
+                self.deploy_model,
+                default_timeout=5.0,
+                client_info=client_info,
+            ),
+            self.undeploy_model: gapic_v1.method.wrap_method(
+                self.undeploy_model,
+                default_timeout=5.0,
+                client_info=client_info,
+            ),
+        }
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsClient:
+        """Return the client designed to process long-running operations."""
+        raise NotImplementedError()
+
+    @property
+    def create_endpoint(self) -> Callable[
+            [endpoint_service.CreateEndpointRequest],
+            Union[
+                operations_pb2.Operation,
+                Awaitable[operations_pb2.Operation]
+            ]]:
+        raise NotImplementedError()
+
+    @property
+    def get_endpoint(self) -> Callable[
+            [endpoint_service.GetEndpointRequest],
+            Union[
+                endpoint.Endpoint,
+                Awaitable[endpoint.Endpoint]
+            ]]:
+        raise NotImplementedError()
+
+    @property
+    def list_endpoints(self) -> Callable[
+            [endpoint_service.ListEndpointsRequest],
+            Union[
+                endpoint_service.ListEndpointsResponse,
+                Awaitable[endpoint_service.ListEndpointsResponse]
+            ]]:
+        raise NotImplementedError()
+
+    @property
+    def update_endpoint(self) -> Callable[
[endpoint_service.UpdateEndpointRequest], + Union[ + gca_endpoint.Endpoint, + Awaitable[gca_endpoint.Endpoint] + ]]: + raise NotImplementedError() + + @property + def delete_endpoint(self) -> Callable[ + [endpoint_service.DeleteEndpointRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def deploy_model(self) -> Callable[ + [endpoint_service.DeployModelRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def undeploy_model(self) -> Callable[ + [endpoint_service.UndeployModelRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'EndpointServiceTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py new file mode 100644 index 0000000000..c1f467a4db --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py @@ -0,0 +1,430 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1.types import endpoint +from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint +from google.cloud.aiplatform_v1.types import endpoint_service +from google.longrunning import operations_pb2 # type: ignore +from .base import EndpointServiceTransport, DEFAULT_CLIENT_INFO + + +class EndpointServiceGrpcTransport(EndpointServiceTransport): + """gRPC backend transport for EndpointService. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. 
+            credentials = False
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+
+        else:
+            if api_mtls_endpoint:
+                host = api_mtls_endpoint
+
+                # Create SSL credentials with client_cert_source or application
+                # default SSL credentials.
+                if client_cert_source:
+                    cert, key = client_cert_source()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+                else:
+                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+            else:
+                if client_cert_source_for_mtls and not ssl_channel_credentials:
+                    cert, key = client_cert_source_for_mtls()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+
+        # The base transport sets the host, credentials and scopes.
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+        )
+
+        if not self._grpc_channel:
+            self._grpc_channel = type(self).create_channel(
+                self._host,
+                credentials=self._credentials,
+                credentials_file=credentials_file,
+                scopes=self._scopes,
+                ssl_credentials=self._ssl_channel_credentials,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        # Wrap messages. This must be done after self._grpc_channel exists.
+        self._prep_wrapped_messages(client_info)
+
+    @classmethod
+    def create_channel(cls,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: str = None,
+            scopes: Optional[Sequence[str]] = None,
+            quota_project_id: Optional[str] = None,
+            **kwargs) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service.
+        """
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsClient:
+        """Create the client designed to process long-running operations.
+ + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_endpoint(self) -> Callable[ + [endpoint_service.CreateEndpointRequest], + operations_pb2.Operation]: + r"""Return a callable for the create endpoint method over gRPC. + + Creates an Endpoint. + + Returns: + Callable[[~.CreateEndpointRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_endpoint' not in self._stubs: + self._stubs['create_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.EndpointService/CreateEndpoint', + request_serializer=endpoint_service.CreateEndpointRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_endpoint'] + + @property + def get_endpoint(self) -> Callable[ + [endpoint_service.GetEndpointRequest], + endpoint.Endpoint]: + r"""Return a callable for the get endpoint method over gRPC. + + Gets an Endpoint. + + Returns: + Callable[[~.GetEndpointRequest], + ~.Endpoint]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_endpoint' not in self._stubs: + self._stubs['get_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.EndpointService/GetEndpoint', + request_serializer=endpoint_service.GetEndpointRequest.serialize, + response_deserializer=endpoint.Endpoint.deserialize, + ) + return self._stubs['get_endpoint'] + + @property + def list_endpoints(self) -> Callable[ + [endpoint_service.ListEndpointsRequest], + endpoint_service.ListEndpointsResponse]: + r"""Return a callable for the list endpoints method over gRPC. + + Lists Endpoints in a Location. + + Returns: + Callable[[~.ListEndpointsRequest], + ~.ListEndpointsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_endpoints' not in self._stubs: + self._stubs['list_endpoints'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.EndpointService/ListEndpoints', + request_serializer=endpoint_service.ListEndpointsRequest.serialize, + response_deserializer=endpoint_service.ListEndpointsResponse.deserialize, + ) + return self._stubs['list_endpoints'] + + @property + def update_endpoint(self) -> Callable[ + [endpoint_service.UpdateEndpointRequest], + gca_endpoint.Endpoint]: + r"""Return a callable for the update endpoint method over gRPC. + + Updates an Endpoint. + + Returns: + Callable[[~.UpdateEndpointRequest], + ~.Endpoint]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_endpoint' not in self._stubs: + self._stubs['update_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.EndpointService/UpdateEndpoint', + request_serializer=endpoint_service.UpdateEndpointRequest.serialize, + response_deserializer=gca_endpoint.Endpoint.deserialize, + ) + return self._stubs['update_endpoint'] + + @property + def delete_endpoint(self) -> Callable[ + [endpoint_service.DeleteEndpointRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete endpoint method over gRPC. + + Deletes an Endpoint. + + Returns: + Callable[[~.DeleteEndpointRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_endpoint' not in self._stubs: + self._stubs['delete_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.EndpointService/DeleteEndpoint', + request_serializer=endpoint_service.DeleteEndpointRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_endpoint'] + + @property + def deploy_model(self) -> Callable[ + [endpoint_service.DeployModelRequest], + operations_pb2.Operation]: + r"""Return a callable for the deploy model method over gRPC. + + Deploys a Model into this Endpoint, creating a + DeployedModel within it. + + Returns: + Callable[[~.DeployModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'deploy_model' not in self._stubs: + self._stubs['deploy_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.EndpointService/DeployModel', + request_serializer=endpoint_service.DeployModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['deploy_model'] + + @property + def undeploy_model(self) -> Callable[ + [endpoint_service.UndeployModelRequest], + operations_pb2.Operation]: + r"""Return a callable for the undeploy model method over gRPC. + + Undeploys a Model from an Endpoint, removing a + DeployedModel from it, and freeing all resources it's + using. + + Returns: + Callable[[~.UndeployModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
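+        # (Descriptive note: the stub is created once and cached in
+        # self._stubs, keyed by method name, so repeated property accesses
+        # reuse the same stub.)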
+ if 'undeploy_model' not in self._stubs: + self._stubs['undeploy_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.EndpointService/UndeployModel', + request_serializer=endpoint_service.UndeployModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['undeploy_model'] + + +__all__ = ( + 'EndpointServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..7c7d569905 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py @@ -0,0 +1,434 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +import packaging.version + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import endpoint +from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint +from google.cloud.aiplatform_v1.types import endpoint_service +from google.longrunning import operations_pb2 # type: ignore +from .base import EndpointServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import EndpointServiceGrpcTransport + + +class EndpointServiceGrpcAsyncIOTransport(EndpointServiceTransport): + """gRPC AsyncIO backend transport for EndpointService. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. 
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    def __init__(self, *,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            channel: aio.Channel = None,
+            api_mtls_endpoint: str = None,
+            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+            ssl_channel_credentials: grpc.ChannelCredentials = None,
+            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+            quota_project_id=None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure a mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests.
If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_endpoint(self) -> Callable[ + [endpoint_service.CreateEndpointRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create endpoint method over gRPC. + + Creates an Endpoint. 
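+
+        An editorial, illustrative sketch (assumes an already-constructed
+        transport; error handling omitted)::
+
+            operation = await transport.create_endpoint(request)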
+ + Returns: + Callable[[~.CreateEndpointRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_endpoint' not in self._stubs: + self._stubs['create_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.EndpointService/CreateEndpoint', + request_serializer=endpoint_service.CreateEndpointRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_endpoint'] + + @property + def get_endpoint(self) -> Callable[ + [endpoint_service.GetEndpointRequest], + Awaitable[endpoint.Endpoint]]: + r"""Return a callable for the get endpoint method over gRPC. + + Gets an Endpoint. + + Returns: + Callable[[~.GetEndpointRequest], + Awaitable[~.Endpoint]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_endpoint' not in self._stubs: + self._stubs['get_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.EndpointService/GetEndpoint', + request_serializer=endpoint_service.GetEndpointRequest.serialize, + response_deserializer=endpoint.Endpoint.deserialize, + ) + return self._stubs['get_endpoint'] + + @property + def list_endpoints(self) -> Callable[ + [endpoint_service.ListEndpointsRequest], + Awaitable[endpoint_service.ListEndpointsResponse]]: + r"""Return a callable for the list endpoints method over gRPC. + + Lists Endpoints in a Location. + + Returns: + Callable[[~.ListEndpointsRequest], + Awaitable[~.ListEndpointsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_endpoints' not in self._stubs: + self._stubs['list_endpoints'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.EndpointService/ListEndpoints', + request_serializer=endpoint_service.ListEndpointsRequest.serialize, + response_deserializer=endpoint_service.ListEndpointsResponse.deserialize, + ) + return self._stubs['list_endpoints'] + + @property + def update_endpoint(self) -> Callable[ + [endpoint_service.UpdateEndpointRequest], + Awaitable[gca_endpoint.Endpoint]]: + r"""Return a callable for the update endpoint method over gRPC. + + Updates an Endpoint. + + Returns: + Callable[[~.UpdateEndpointRequest], + Awaitable[~.Endpoint]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
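+        # Editorial note: because the callable is memoized in ``self._stubs``,
+        # repeated attribute access reuses a single multicallable per channel;
+        # e.g. ``transport.update_endpoint is transport.update_endpoint``
+        # holds for the lifetime of the transport.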
+ if 'update_endpoint' not in self._stubs: + self._stubs['update_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.EndpointService/UpdateEndpoint', + request_serializer=endpoint_service.UpdateEndpointRequest.serialize, + response_deserializer=gca_endpoint.Endpoint.deserialize, + ) + return self._stubs['update_endpoint'] + + @property + def delete_endpoint(self) -> Callable[ + [endpoint_service.DeleteEndpointRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete endpoint method over gRPC. + + Deletes an Endpoint. + + Returns: + Callable[[~.DeleteEndpointRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_endpoint' not in self._stubs: + self._stubs['delete_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.EndpointService/DeleteEndpoint', + request_serializer=endpoint_service.DeleteEndpointRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_endpoint'] + + @property + def deploy_model(self) -> Callable[ + [endpoint_service.DeployModelRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the deploy model method over gRPC. + + Deploys a Model into this Endpoint, creating a + DeployedModel within it. + + Returns: + Callable[[~.DeployModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'deploy_model' not in self._stubs: + self._stubs['deploy_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.EndpointService/DeployModel', + request_serializer=endpoint_service.DeployModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['deploy_model'] + + @property + def undeploy_model(self) -> Callable[ + [endpoint_service.UndeployModelRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the undeploy model method over gRPC. + + Undeploys a Model from an Endpoint, removing a + DeployedModel from it, and freeing all resources it's + using. + + Returns: + Callable[[~.UndeployModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
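+        # Editorial sketch: the awaitable returned by this callable resolves
+        # to a raw ``operations_pb2.Operation``; callers typically poll it to
+        # completion through ``transport.operations_client`` (see the property
+        # above) rather than decoding the operation by hand.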
+ if 'undeploy_model' not in self._stubs: + self._stubs['undeploy_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.EndpointService/UndeployModel', + request_serializer=endpoint_service.UndeployModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['undeploy_model'] + + +__all__ = ( + 'EndpointServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/__init__.py new file mode 100644 index 0000000000..817e1b49e2 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import JobServiceClient +from .async_client import JobServiceAsyncClient + +__all__ = ( + 'JobServiceClient', + 'JobServiceAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/async_client.py new file mode 100644 index 0000000000..46ce9f78fb --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/async_client.py @@ -0,0 +1,1913 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.job_service import pagers +from google.cloud.aiplatform_v1.types import batch_prediction_job +from google.cloud.aiplatform_v1.types import batch_prediction_job as gca_batch_prediction_job +from google.cloud.aiplatform_v1.types import completion_stats +from google.cloud.aiplatform_v1.types import custom_job +from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job +from google.cloud.aiplatform_v1.types import data_labeling_job +from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job +from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job +from google.cloud.aiplatform_v1.types import job_service +from google.cloud.aiplatform_v1.types import job_state +from google.cloud.aiplatform_v1.types import machine_resources +from google.cloud.aiplatform_v1.types import manual_batch_tuning_parameters +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import study +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from google.type import money_pb2 # type: ignore +from .transports.base import JobServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport +from .client import JobServiceClient + + +class JobServiceAsyncClient: + """A service for creating and managing Vertex AI's jobs.""" + + _client: JobServiceClient + + DEFAULT_ENDPOINT = JobServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = JobServiceClient.DEFAULT_MTLS_ENDPOINT + + batch_prediction_job_path = staticmethod(JobServiceClient.batch_prediction_job_path) + parse_batch_prediction_job_path = staticmethod(JobServiceClient.parse_batch_prediction_job_path) + custom_job_path = staticmethod(JobServiceClient.custom_job_path) + parse_custom_job_path = staticmethod(JobServiceClient.parse_custom_job_path) + data_labeling_job_path = staticmethod(JobServiceClient.data_labeling_job_path) + parse_data_labeling_job_path = staticmethod(JobServiceClient.parse_data_labeling_job_path) + dataset_path = staticmethod(JobServiceClient.dataset_path) + parse_dataset_path = staticmethod(JobServiceClient.parse_dataset_path) + hyperparameter_tuning_job_path = staticmethod(JobServiceClient.hyperparameter_tuning_job_path) + parse_hyperparameter_tuning_job_path = staticmethod(JobServiceClient.parse_hyperparameter_tuning_job_path) + model_path = staticmethod(JobServiceClient.model_path) + parse_model_path = staticmethod(JobServiceClient.parse_model_path) + network_path = 
staticmethod(JobServiceClient.network_path) + parse_network_path = staticmethod(JobServiceClient.parse_network_path) + trial_path = staticmethod(JobServiceClient.trial_path) + parse_trial_path = staticmethod(JobServiceClient.parse_trial_path) + common_billing_account_path = staticmethod(JobServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(JobServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(JobServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(JobServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(JobServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(JobServiceClient.parse_common_organization_path) + common_project_path = staticmethod(JobServiceClient.common_project_path) + parse_common_project_path = staticmethod(JobServiceClient.parse_common_project_path) + common_location_path = staticmethod(JobServiceClient.common_location_path) + parse_common_location_path = staticmethod(JobServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + JobServiceAsyncClient: The constructed client. + """ + return JobServiceClient.from_service_account_info.__func__(JobServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + JobServiceAsyncClient: The constructed client. + """ + return JobServiceClient.from_service_account_file.__func__(JobServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> JobServiceTransport: + """Returns the transport used by the client instance. + + Returns: + JobServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(JobServiceClient).get_transport_class, type(JobServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, JobServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the job service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.JobServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. 
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. The
+                GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be
+                used to override the endpoint: "always" (always use the default
+                mTLS endpoint), "never" (always use the default regular
+                endpoint) and "auto" (auto switch to the default mTLS endpoint
+                if a client certificate is present; this is the default value).
+                However, the ``api_endpoint`` property takes precedence if
+                provided.
+                (2) If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment
+                variable is "true", then the ``client_cert_source`` property
+                can be used to provide a client certificate for mutual TLS
+                transport. If not provided, the default SSL client certificate
+                will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE
+                is "false" or not set, no client certificate will be used.
+
+        Raises:
+            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
+                creation failed for any reason.
+        """
+        self._client = JobServiceClient(
+            credentials=credentials,
+            transport=transport,
+            client_options=client_options,
+            client_info=client_info,
+        )
+
+    async def create_custom_job(self,
+            request: job_service.CreateCustomJobRequest = None,
+            *,
+            parent: str = None,
+            custom_job: gca_custom_job.CustomJob = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> gca_custom_job.CustomJob:
+        r"""Creates a CustomJob. The service attempts to run a
+        CustomJob as soon as it is created.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1.types.CreateCustomJobRequest`):
+                The request object. Request message for
+                [JobService.CreateCustomJob][google.cloud.aiplatform.v1.JobService.CreateCustomJob].
+            parent (:class:`str`):
+                Required. The resource name of the Location to create
+                the CustomJob in. Format:
+                ``projects/{project}/locations/{location}``
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            custom_job (:class:`google.cloud.aiplatform_v1.types.CustomJob`):
+                Required. The CustomJob to create.
+                This corresponds to the ``custom_job`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1.types.CustomJob:
+                Represents a job that runs custom
+                workloads such as a Docker container or
+                a Python package. A CustomJob can have
+                multiple worker pools, and each worker
+                pool can have its own machine and input
+                spec. A CustomJob will be cleaned up
+                once the job enters a terminal state
+                (failed or succeeded).
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent, custom_job])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = job_service.CreateCustomJobRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
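+        # Illustrative flattened call that populates these fields (editorial
+        # sketch; the resource name is a placeholder, and a real CustomJob
+        # also requires a ``job_spec``):
+        #
+        #     job = gca_custom_job.CustomJob(display_name="example-job")
+        #     created = await client.create_custom_job(
+        #         parent="projects/my-project/locations/us-central1",
+        #         custom_job=job,
+        #     )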
+ if parent is not None: + request.parent = parent + if custom_job is not None: + request.custom_job = custom_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_custom_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_custom_job(self, + request: job_service.GetCustomJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> custom_job.CustomJob: + r"""Gets a CustomJob. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.GetCustomJobRequest`): + The request object. Request message for + [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob]. + name (:class:`str`): + Required. The name of the CustomJob resource. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.CustomJob: + Represents a job that runs custom + workloads such as a Docker container or + a Python package. A CustomJob can have + multiple worker pools and each worker + pool can have its own machine and input + spec. A CustomJob will be cleaned up + once the job enters terminal state + (failed or succeeded). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.GetCustomJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_custom_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
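+        # (This unary Get returns the ``custom_job.CustomJob`` message
+        # directly; no pager or long-running-operation wrapper applies.)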
+ return response + + async def list_custom_jobs(self, + request: job_service.ListCustomJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListCustomJobsAsyncPager: + r"""Lists CustomJobs in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.ListCustomJobsRequest`): + The request object. Request message for + [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs]. + parent (:class:`str`): + Required. The resource name of the Location to list the + CustomJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.job_service.pagers.ListCustomJobsAsyncPager: + Response message for + [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.ListCustomJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_custom_jobs, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListCustomJobsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_custom_job(self, + request: job_service.DeleteCustomJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a CustomJob. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.DeleteCustomJobRequest`): + The request object. Request message for + [JobService.DeleteCustomJob][google.cloud.aiplatform.v1.JobService.DeleteCustomJob]. + name (:class:`str`): + Required. The name of the CustomJob resource to be + deleted. 
Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.DeleteCustomJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_custom_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def cancel_custom_job(self, + request: job_service.CancelCustomJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a CustomJob. Starts asynchronous cancellation on the + CustomJob. The server makes a best effort to cancel the job, but + success is not guaranteed. Clients can use + [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On successful + cancellation, the CustomJob is not deleted; instead it becomes a + job with a + [CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is + set to ``CANCELLED``. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.CancelCustomJobRequest`): + The request object. 
Request message for + [JobService.CancelCustomJob][google.cloud.aiplatform.v1.JobService.CancelCustomJob]. + name (:class:`str`): + Required. The name of the CustomJob to cancel. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.CancelCustomJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_custom_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def create_data_labeling_job(self, + request: job_service.CreateDataLabelingJobRequest = None, + *, + parent: str = None, + data_labeling_job: gca_data_labeling_job.DataLabelingJob = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_data_labeling_job.DataLabelingJob: + r"""Creates a DataLabelingJob. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.CreateDataLabelingJobRequest`): + The request object. Request message for + [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1.JobService.CreateDataLabelingJob]. + parent (:class:`str`): + Required. The parent of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + data_labeling_job (:class:`google.cloud.aiplatform_v1.types.DataLabelingJob`): + Required. The DataLabelingJob to + create. + + This corresponds to the ``data_labeling_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.DataLabelingJob: + DataLabelingJob is used to trigger a + human labeling job on unlabeled data + from the following Dataset: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
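+        # Editorial note: ``request`` and the flattened fields are mutually
+        # exclusive; supplying both raises ``ValueError`` below, e.g.
+        #
+        #     await client.create_data_labeling_job(
+        #         request=job_service.CreateDataLabelingJobRequest(),
+        #         parent="projects/p/locations/us-central1",  # both set -> error
+        #     )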
+ has_flattened_params = any([parent, data_labeling_job]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.CreateDataLabelingJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if data_labeling_job is not None: + request.data_labeling_job = data_labeling_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_data_labeling_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_data_labeling_job(self, + request: job_service.GetDataLabelingJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> data_labeling_job.DataLabelingJob: + r"""Gets a DataLabelingJob. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.GetDataLabelingJobRequest`): + The request object. Request message for + [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1.JobService.GetDataLabelingJob]. + name (:class:`str`): + Required. The name of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.DataLabelingJob: + DataLabelingJob is used to trigger a + human labeling job on unlabeled data + from the following Dataset: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.GetDataLabelingJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_data_labeling_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
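+        # (With ``retry`` left as ``gapic_v1.method.DEFAULT`` and ``timeout``
+        # as ``None``, the defaults baked into the wrapped method above apply:
+        # a 5.0 second timeout and no retry policy.)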
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_data_labeling_jobs(self, + request: job_service.ListDataLabelingJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDataLabelingJobsAsyncPager: + r"""Lists DataLabelingJobs in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest`): + The request object. Request message for + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. + parent (:class:`str`): + Required. The parent of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.job_service.pagers.ListDataLabelingJobsAsyncPager: + Response message for + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.ListDataLabelingJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_data_labeling_jobs, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListDataLabelingJobsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_data_labeling_job(self, + request: job_service.DeleteDataLabelingJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a DataLabelingJob. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.DeleteDataLabelingJobRequest`): + The request object. 
Request message for + [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob]. + name (:class:`str`): + Required. The name of the DataLabelingJob to be deleted. + Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.DeleteDataLabelingJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_data_labeling_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def cancel_data_labeling_job(self, + request: job_service.CancelDataLabelingJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a DataLabelingJob. Success of cancellation is + not guaranteed. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.CancelDataLabelingJobRequest`): + The request object. Request message for + [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1.JobService.CancelDataLabelingJob]. + name (:class:`str`): + Required. The name of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.CancelDataLabelingJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_data_labeling_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def create_hyperparameter_tuning_job(self, + request: job_service.CreateHyperparameterTuningJobRequest = None, + *, + parent: str = None, + hyperparameter_tuning_job: gca_hyperparameter_tuning_job.HyperparameterTuningJob = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_hyperparameter_tuning_job.HyperparameterTuningJob: + r"""Creates a HyperparameterTuningJob + + Args: + request (:class:`google.cloud.aiplatform_v1.types.CreateHyperparameterTuningJobRequest`): + The request object. Request message for + [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CreateHyperparameterTuningJob]. + parent (:class:`str`): + Required. The resource name of the Location to create + the HyperparameterTuningJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + hyperparameter_tuning_job (:class:`google.cloud.aiplatform_v1.types.HyperparameterTuningJob`): + Required. The HyperparameterTuningJob + to create. + + This corresponds to the ``hyperparameter_tuning_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.HyperparameterTuningJob: + Represents a HyperparameterTuningJob. + A HyperparameterTuningJob has a Study + specification and multiple CustomJobs + with identical CustomJob specification. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
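+        # Illustrative flattened call (editorial sketch; a real job also
+        # needs ``study_spec``, ``trial_job_spec``, and trial counts, omitted
+        # here for brevity):
+        #
+        #     job = gca_hyperparameter_tuning_job.HyperparameterTuningJob(
+        #         display_name="hpt-example",
+        #     )
+        #     created = await client.create_hyperparameter_tuning_job(
+        #         parent="projects/my-project/locations/us-central1",
+        #         hyperparameter_tuning_job=job,
+        #     )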
+ has_flattened_params = any([parent, hyperparameter_tuning_job]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.CreateHyperparameterTuningJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if hyperparameter_tuning_job is not None: + request.hyperparameter_tuning_job = hyperparameter_tuning_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_hyperparameter_tuning_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_hyperparameter_tuning_job(self, + request: job_service.GetHyperparameterTuningJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> hyperparameter_tuning_job.HyperparameterTuningJob: + r"""Gets a HyperparameterTuningJob + + Args: + request (:class:`google.cloud.aiplatform_v1.types.GetHyperparameterTuningJobRequest`): + The request object. Request message for + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob]. + name (:class:`str`): + Required. The name of the HyperparameterTuningJob + resource. Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.HyperparameterTuningJob: + Represents a HyperparameterTuningJob. + A HyperparameterTuningJob has a Study + specification and multiple CustomJobs + with identical CustomJob specification. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.GetHyperparameterTuningJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
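+        # (``wrap_method`` also attaches the user-agent metadata carried by
+        # ``DEFAULT_CLIENT_INFO``; explicit per-call ``retry`` and ``timeout``
+        # arguments take precedence over these defaults.)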
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_hyperparameter_tuning_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_hyperparameter_tuning_jobs(self, + request: job_service.ListHyperparameterTuningJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListHyperparameterTuningJobsAsyncPager: + r"""Lists HyperparameterTuningJobs in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest`): + The request object. Request message for + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs]. + parent (:class:`str`): + Required. The resource name of the Location to list the + HyperparameterTuningJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.job_service.pagers.ListHyperparameterTuningJobsAsyncPager: + Response message for + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.ListHyperparameterTuningJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_hyperparameter_tuning_jobs, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListHyperparameterTuningJobsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
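The pager wrapping above is what makes listing feel like plain iteration; a sketch, with a placeholder parent resource:

```python
from google.cloud import aiplatform_v1

async def list_hpt_jobs():
    client = aiplatform_v1.JobServiceAsyncClient()
    pager = await client.list_hyperparameter_tuning_jobs(
        parent="projects/my-project/locations/us-central1"  # placeholder
    )
    # `async for` fetches further pages transparently as it iterates;
    # page-at-a-time access is also available via `pager.pages`.
    async for job in pager:
        print(job.name, job.state)
```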
+ return response + + async def delete_hyperparameter_tuning_job(self, + request: job_service.DeleteHyperparameterTuningJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a HyperparameterTuningJob. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.DeleteHyperparameterTuningJobRequest`): + The request object. Request message for + [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob]. + name (:class:`str`): + Required. The name of the HyperparameterTuningJob + resource to be deleted. Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.DeleteHyperparameterTuningJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_hyperparameter_tuning_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def cancel_hyperparameter_tuning_job(self, + request: job_service.CancelHyperparameterTuningJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a HyperparameterTuningJob. 
Starts asynchronous
+ cancellation on the HyperparameterTuningJob. The server makes a
+ best effort to cancel the job, but success is not guaranteed.
+ Clients can use
+ [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob]
+ or other methods to check whether the cancellation succeeded or
+ whether the job completed despite cancellation. On successful
+ cancellation, the HyperparameterTuningJob is not deleted;
+ instead it becomes a job with a
+ [HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error]
+ value with a [google.rpc.Status.code][google.rpc.Status.code] of
+ 1, corresponding to ``Code.CANCELLED``, and
+ [HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state]
+ is set to ``CANCELLED``.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.CancelHyperparameterTuningJobRequest`):
+ The request object. Request message for
+ [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CancelHyperparameterTuningJob].
+ name (:class:`str`):
+ Required. The name of the HyperparameterTuningJob to
+ cancel. Format:
+ ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError("If the `request` argument is set, then none of "
+ "the individual field arguments should be set.")
+
+ request = job_service.CancelHyperparameterTuningJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.cancel_hyperparameter_tuning_job,
+ default_timeout=5.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("name", request.name),
+ )),
+ )
+
+ # Send the request.
+ await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ async def create_batch_prediction_job(self,
+ request: job_service.CreateBatchPredictionJobRequest = None,
+ *,
+ parent: str = None,
+ batch_prediction_job: gca_batch_prediction_job.BatchPredictionJob = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gca_batch_prediction_job.BatchPredictionJob:
+ r"""Creates a BatchPredictionJob. The service attempts
+ to start a newly created BatchPredictionJob right away.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.CreateBatchPredictionJobRequest`):
+ The request object.
Request message for
+ [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CreateBatchPredictionJob].
+ parent (:class:`str`):
+ Required. The resource name of the Location to create
+ the BatchPredictionJob in. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ batch_prediction_job (:class:`google.cloud.aiplatform_v1.types.BatchPredictionJob`):
+ Required. The BatchPredictionJob to
+ create.
+
+ This corresponds to the ``batch_prediction_job`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.BatchPredictionJob:
+ A job that uses a [Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to produce predictions
+ on multiple [input
+ instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config].
+ If predictions for a significant portion of the
+ instances fail, the job may finish without attempting
+ predictions for all remaining instances.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, batch_prediction_job])
+ if request is not None and has_flattened_params:
+ raise ValueError("If the `request` argument is set, then none of "
+ "the individual field arguments should be set.")
+
+ request = job_service.CreateBatchPredictionJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if batch_prediction_job is not None:
+ request.batch_prediction_job = batch_prediction_job
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.create_batch_prediction_job,
+ default_timeout=5.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("parent", request.parent),
+ )),
+ )
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def get_batch_prediction_job(self,
+ request: job_service.GetBatchPredictionJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> batch_prediction_job.BatchPredictionJob:
+ r"""Gets a BatchPredictionJob.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.GetBatchPredictionJobRequest`):
+ The request object. Request message for
+ [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob].
+ name (:class:`str`):
+ Required. The name of the BatchPredictionJob resource.
+ Format:
+ ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.BatchPredictionJob:
+ A job that uses a [Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to produce predictions
+ on multiple [input
+ instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config].
+ If predictions for a significant portion of the
+ instances fail, the job may finish without attempting
+ predictions for all remaining instances.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError("If the `request` argument is set, then none of "
+ "the individual field arguments should be set.")
+
+ request = job_service.GetBatchPredictionJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.get_batch_prediction_job,
+ default_timeout=5.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("name", request.name),
+ )),
+ )
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def list_batch_prediction_jobs(self,
+ request: job_service.ListBatchPredictionJobsRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListBatchPredictionJobsAsyncPager:
+ r"""Lists BatchPredictionJobs in a Location.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest`):
+ The request object. Request message for
+ [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs].
+ parent (:class:`str`):
+ Required. The resource name of the Location to list the
+ BatchPredictionJobs from. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ + Returns: + google.cloud.aiplatform_v1.services.job_service.pagers.ListBatchPredictionJobsAsyncPager: + Response message for + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.ListBatchPredictionJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_batch_prediction_jobs, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListBatchPredictionJobsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_batch_prediction_job(self, + request: job_service.DeleteBatchPredictionJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a BatchPredictionJob. Can only be called on + jobs that already finished. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.DeleteBatchPredictionJobRequest`): + The request object. Request message for + [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1.JobService.DeleteBatchPredictionJob]. + name (:class:`str`): + Required. The name of the BatchPredictionJob resource to + be deleted. Format: + ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. 
For instance:
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns
+ (google.protobuf.Empty);
+
+ }
+
+ The JSON representation for Empty is empty JSON
+ object {}.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError("If the `request` argument is set, then none of "
+ "the individual field arguments should be set.")
+
+ request = job_service.DeleteBatchPredictionJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.delete_batch_prediction_job,
+ default_timeout=5.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("name", request.name),
+ )),
+ )
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ empty_pb2.Empty,
+ metadata_type=gca_operation.DeleteOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def cancel_batch_prediction_job(self,
+ request: job_service.CancelBatchPredictionJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> None:
+ r"""Cancels a BatchPredictionJob.
+
+ Starts asynchronous cancellation on the BatchPredictionJob. The
+ server makes a best effort to cancel the job, but success is
+ not guaranteed. Clients can use
+ [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob]
+ or other methods to check whether the cancellation succeeded or
+ whether the job completed despite cancellation. On a successful
+ cancellation, the BatchPredictionJob is not deleted; instead its
+ [BatchPredictionJob.state][google.cloud.aiplatform.v1.BatchPredictionJob.state]
+ is set to ``CANCELLED``. Any files already outputted by the job
+ are not deleted.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.CancelBatchPredictionJobRequest`):
+ The request object. Request message for
+ [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CancelBatchPredictionJob].
+ name (:class:`str`):
+ Required. The name of the BatchPredictionJob to cancel.
+ Format:
+ ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
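Delete methods such as `delete_batch_prediction_job` above return a long-running-operation future rather than a result; a sketch of awaiting it, with a placeholder resource name:

```python
from google.cloud import aiplatform_v1

async def delete_bp_job():
    client = aiplatform_v1.JobServiceAsyncClient()
    operation = await client.delete_batch_prediction_job(
        # Hypothetical resource name.
        name="projects/my-project/locations/us-central1/batchPredictionJobs/456"
    )
    # The future resolves to Empty once server-side deletion completes;
    # a failed operation surfaces here as an exception.
    await operation.result()
```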
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.CancelBatchPredictionJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_batch_prediction_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "JobServiceAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/client.py new file mode 100644 index 0000000000..76393c4891 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/client.py @@ -0,0 +1,2163 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
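The synchronous JobServiceClient defined in this new file mirrors the async surface method for method, and both construct the same way. A sketch of common instantiation paths (the key-file path and regional endpoint are placeholders):

```python
from google.cloud import aiplatform_v1

# Application Default Credentials (sync client; the async client
# constructs identically).
client = aiplatform_v1.JobServiceClient()

# From a service account key file (hypothetical path).
client = aiplatform_v1.JobServiceClient.from_service_account_file(
    "/path/to/key.json"
)

# With a regional endpoint override via client options.
client = aiplatform_v1.JobServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)
```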
+# +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.job_service import pagers +from google.cloud.aiplatform_v1.types import batch_prediction_job +from google.cloud.aiplatform_v1.types import batch_prediction_job as gca_batch_prediction_job +from google.cloud.aiplatform_v1.types import completion_stats +from google.cloud.aiplatform_v1.types import custom_job +from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job +from google.cloud.aiplatform_v1.types import data_labeling_job +from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job +from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job +from google.cloud.aiplatform_v1.types import job_service +from google.cloud.aiplatform_v1.types import job_state +from google.cloud.aiplatform_v1.types import machine_resources +from google.cloud.aiplatform_v1.types import manual_batch_tuning_parameters +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import study +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from google.type import money_pb2 # type: ignore +from .transports.base import JobServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import JobServiceGrpcTransport +from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport + + +class JobServiceClientMeta(type): + """Metaclass for the JobService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]] + _transport_registry["grpc"] = JobServiceGrpcTransport + _transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[JobServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). 
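The metaclass registry above lets callers pick a transport by label, or fall back to the first registered entry; a sketch:

```python
from google.cloud import aiplatform_v1

# Explicit label lookup resolves through the metaclass registry.
GrpcTransport = aiplatform_v1.JobServiceClient.get_transport_class("grpc")

# Equivalent: let the constructor resolve the label.
client = aiplatform_v1.JobServiceClient(transport="grpc")

# No label: falls back to the first registered transport ("grpc").
DefaultTransport = aiplatform_v1.JobServiceClient.get_transport_class()
```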
+ return next(iter(cls._transport_registry.values()))
+
+
+class JobServiceClient(metaclass=JobServiceClientMeta):
+ """A service for creating and managing Vertex AI's jobs."""
+
+ @staticmethod
+ def _get_default_mtls_endpoint(api_endpoint):
+ """Converts api endpoint to mTLS endpoint.
+
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+ Args:
+ api_endpoint (Optional[str]): the api endpoint to convert.
+ Returns:
+ str: converted mTLS api endpoint.
+ """
+ if not api_endpoint:
+ return api_endpoint
+
+ mtls_endpoint_re = re.compile(
+ r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ )
+
+ m = mtls_endpoint_re.match(api_endpoint)
+ name, mtls, sandbox, googledomain = m.groups()
+ if mtls or not googledomain:
+ return api_endpoint
+
+ if sandbox:
+ return api_endpoint.replace(
+ "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+ )
+
+ return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+ DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+ DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
+ DEFAULT_ENDPOINT
+ )
+
+ @classmethod
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ info.
+
+ Args:
+ info (dict): The service account private key info.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ JobServiceClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_info(info)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ @classmethod
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ file.
+
+ Args:
+ filename (str): The path to the service account private key json
+ file.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ JobServiceClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_file(
+ filename)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ from_service_account_json = from_service_account_file
+
+ @property
+ def transport(self) -> JobServiceTransport:
+ """Returns the transport used by the client instance.
+
+ Returns:
+ JobServiceTransport: The transport used by the client
+ instance.
+ """ + return self._transport + + @staticmethod + def batch_prediction_job_path(project: str,location: str,batch_prediction_job: str,) -> str: + """Returns a fully-qualified batch_prediction_job string.""" + return "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format(project=project, location=location, batch_prediction_job=batch_prediction_job, ) + + @staticmethod + def parse_batch_prediction_job_path(path: str) -> Dict[str,str]: + """Parses a batch_prediction_job path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/batchPredictionJobs/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def custom_job_path(project: str,location: str,custom_job: str,) -> str: + """Returns a fully-qualified custom_job string.""" + return "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) + + @staticmethod + def parse_custom_job_path(path: str) -> Dict[str,str]: + """Parses a custom_job path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/customJobs/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def data_labeling_job_path(project: str,location: str,data_labeling_job: str,) -> str: + """Returns a fully-qualified data_labeling_job string.""" + return "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}".format(project=project, location=location, data_labeling_job=data_labeling_job, ) + + @staticmethod + def parse_data_labeling_job_path(path: str) -> Dict[str,str]: + """Parses a data_labeling_job path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/dataLabelingJobs/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def dataset_path(project: str,location: str,dataset: str,) -> str: + """Returns a fully-qualified dataset string.""" + return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) + + @staticmethod + def parse_dataset_path(path: str) -> Dict[str,str]: + """Parses a dataset path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def hyperparameter_tuning_job_path(project: str,location: str,hyperparameter_tuning_job: str,) -> str: + """Returns a fully-qualified hyperparameter_tuning_job string.""" + return "projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}".format(project=project, location=location, hyperparameter_tuning_job=hyperparameter_tuning_job, ) + + @staticmethod + def parse_hyperparameter_tuning_job_path(path: str) -> Dict[str,str]: + """Parses a hyperparameter_tuning_job path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/hyperparameterTuningJobs/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def model_path(project: str,location: str,model: str,) -> str: + """Returns a fully-qualified model string.""" + return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + + @staticmethod + def parse_model_path(path: str) -> Dict[str,str]: + """Parses a model path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def 
network_path(project: str,network: str,) -> str: + """Returns a fully-qualified network string.""" + return "projects/{project}/global/networks/{network}".format(project=project, network=network, ) + + @staticmethod + def parse_network_path(path: str) -> Dict[str,str]: + """Parses a network path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/global/networks/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def trial_path(project: str,location: str,study: str,trial: str,) -> str: + """Returns a fully-qualified trial string.""" + return "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, ) + + @staticmethod + def parse_trial_path(path: str) -> Dict[str,str]: + """Parses a trial path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/studies/(?P.+?)/trials/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, JobServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the job service client. 
+ + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, JobServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, JobServiceTransport): + # transport is a JobServiceTransport instance. 
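The endpoint logic above is driven by two environment variables plus an optional explicit override; a sketch of the equivalent configuration in code (values illustrative):

```python
import os
from google.api_core.client_options import ClientOptions
from google.cloud import aiplatform_v1

# Endpoint selection: "never" | "always" | "auto" (the default).
# Must be set before the client is constructed.
os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "never"

# An explicit api_endpoint always takes precedence over the env var.
options = ClientOptions(api_endpoint="aiplatform.googleapis.com")
client = aiplatform_v1.JobServiceClient(client_options=options)

# The derived mTLS endpoint for this service.
assert (aiplatform_v1.JobServiceClient.DEFAULT_MTLS_ENDPOINT
        == "aiplatform.mtls.googleapis.com")
```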
+ if credentials or client_options.credentials_file:
+ raise ValueError("When providing a transport instance, "
+ "provide its credentials directly.")
+ if client_options.scopes:
+ raise ValueError(
+ "When providing a transport instance, provide its scopes "
+ "directly."
+ )
+ self._transport = transport
+ else:
+ Transport = type(self).get_transport_class(transport)
+ self._transport = Transport(
+ credentials=credentials,
+ credentials_file=client_options.credentials_file,
+ host=api_endpoint,
+ scopes=client_options.scopes,
+ client_cert_source_for_mtls=client_cert_source_func,
+ quota_project_id=client_options.quota_project_id,
+ client_info=client_info,
+ )
+
+ def create_custom_job(self,
+ request: job_service.CreateCustomJobRequest = None,
+ *,
+ parent: str = None,
+ custom_job: gca_custom_job.CustomJob = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gca_custom_job.CustomJob:
+ r"""Creates a CustomJob. The service attempts to run a
+ newly created CustomJob right away.
+
+ Args:
+ request (google.cloud.aiplatform_v1.types.CreateCustomJobRequest):
+ The request object. Request message for
+ [JobService.CreateCustomJob][google.cloud.aiplatform.v1.JobService.CreateCustomJob].
+ parent (str):
+ Required. The resource name of the Location to create
+ the CustomJob in. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ custom_job (google.cloud.aiplatform_v1.types.CustomJob):
+ Required. The CustomJob to create.
+ This corresponds to the ``custom_job`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1.types.CustomJob:
+ Represents a job that runs custom
+ workloads such as a Docker container or
+ a Python package. A CustomJob can have
+ multiple worker pools and each worker
+ pool can have its own machine and input
+ spec. A CustomJob will be cleaned up
+ once the job enters terminal state
+ (failed or succeeded).
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, custom_job])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a job_service.CreateCustomJobRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, job_service.CreateCustomJobRequest):
+ request = job_service.CreateCustomJobRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if custom_job is not None:
+ request.custom_job = custom_job
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
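A hedged sketch of calling `create_custom_job` as defined above, passing the CustomJob as a plain dict for proto-plus to coerce; the machine type, image URI, and resource names are illustrative:

```python
from google.cloud import aiplatform_v1

client = aiplatform_v1.JobServiceClient()
job = client.create_custom_job(
    parent="projects/my-project/locations/us-central1",  # placeholder
    custom_job={
        "display_name": "example-custom-job",
        "job_spec": {
            "worker_pool_specs": [{
                "machine_spec": {"machine_type": "n1-standard-4"},
                "replica_count": 1,
                "container_spec": {"image_uri": "gcr.io/my-project/train:latest"},
            }],
        },
    },
)
print(job.name, job.state)
```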
+ rpc = self._transport._wrapped_methods[self._transport.create_custom_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_custom_job(self, + request: job_service.GetCustomJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> custom_job.CustomJob: + r"""Gets a CustomJob. + + Args: + request (google.cloud.aiplatform_v1.types.GetCustomJobRequest): + The request object. Request message for + [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob]. + name (str): + Required. The name of the CustomJob resource. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.CustomJob: + Represents a job that runs custom + workloads such as a Docker container or + a Python package. A CustomJob can have + multiple worker pools and each worker + pool can have its own machine and input + spec. A CustomJob will be cleaned up + once the job enters terminal state + (failed or succeeded). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.GetCustomJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.GetCustomJobRequest): + request = job_service.GetCustomJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_custom_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_custom_jobs(self, + request: job_service.ListCustomJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListCustomJobsPager: + r"""Lists CustomJobs in a Location. 
+ + Args: + request (google.cloud.aiplatform_v1.types.ListCustomJobsRequest): + The request object. Request message for + [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs]. + parent (str): + Required. The resource name of the Location to list the + CustomJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.job_service.pagers.ListCustomJobsPager: + Response message for + [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.ListCustomJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.ListCustomJobsRequest): + request = job_service.ListCustomJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_custom_jobs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListCustomJobsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_custom_job(self, + request: job_service.DeleteCustomJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a CustomJob. + + Args: + request (google.cloud.aiplatform_v1.types.DeleteCustomJobRequest): + The request object. Request message for + [JobService.DeleteCustomJob][google.cloud.aiplatform.v1.JobService.DeleteCustomJob]. + name (str): + Required. The name of the CustomJob resource to be + deleted. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.DeleteCustomJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.DeleteCustomJobRequest): + request = job_service.DeleteCustomJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_custom_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def cancel_custom_job(self, + request: job_service.CancelCustomJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a CustomJob. Starts asynchronous cancellation on the + CustomJob. The server makes a best effort to cancel the job, but + success is not guaranteed. Clients can use + [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On successful + cancellation, the CustomJob is not deleted; instead it becomes a + job with a + [CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is + set to ``CANCELLED``. + + Args: + request (google.cloud.aiplatform_v1.types.CancelCustomJobRequest): + The request object. Request message for + [JobService.CancelCustomJob][google.cloud.aiplatform.v1.JobService.CancelCustomJob]. + name (str): + Required. 
The name of the CustomJob to cancel. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CancelCustomJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.CancelCustomJobRequest): + request = job_service.CancelCustomJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_custom_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def create_data_labeling_job(self, + request: job_service.CreateDataLabelingJobRequest = None, + *, + parent: str = None, + data_labeling_job: gca_data_labeling_job.DataLabelingJob = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_data_labeling_job.DataLabelingJob: + r"""Creates a DataLabelingJob. + + Args: + request (google.cloud.aiplatform_v1.types.CreateDataLabelingJobRequest): + The request object. Request message for + [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1.JobService.CreateDataLabelingJob]. + parent (str): + Required. The parent of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + data_labeling_job (google.cloud.aiplatform_v1.types.DataLabelingJob): + Required. The DataLabelingJob to + create. + + This corresponds to the ``data_labeling_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.DataLabelingJob: + DataLabelingJob is used to trigger a + human labeling job on unlabeled data + from the following Dataset: + + """ + # Create or coerce a protobuf request object. 
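+        #
+        # A minimal, hand-written usage sketch (not generated code) of the two
+        # mutually exclusive calling conventions enforced below; the project,
+        # location, and ``my_labeling_job`` values are placeholders:
+        #
+        #   client.create_data_labeling_job(
+        #       parent="projects/my-project/locations/us-central1",
+        #       data_labeling_job=my_labeling_job,
+        #   )
+        #
+        #   # ...or, equivalently, pass a fully populated request object
+        #   # (but never both styles in the same call):
+        #   client.create_data_labeling_job(
+        #       request=job_service.CreateDataLabelingJobRequest(
+        #           parent="projects/my-project/locations/us-central1",
+        #           data_labeling_job=my_labeling_job,
+        #       )
+        #   )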
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, data_labeling_job]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CreateDataLabelingJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.CreateDataLabelingJobRequest): + request = job_service.CreateDataLabelingJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if data_labeling_job is not None: + request.data_labeling_job = data_labeling_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_data_labeling_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_data_labeling_job(self, + request: job_service.GetDataLabelingJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> data_labeling_job.DataLabelingJob: + r"""Gets a DataLabelingJob. + + Args: + request (google.cloud.aiplatform_v1.types.GetDataLabelingJobRequest): + The request object. Request message for + [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1.JobService.GetDataLabelingJob]. + name (str): + Required. The name of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.DataLabelingJob: + DataLabelingJob is used to trigger a + human labeling job on unlabeled data + from the following Dataset: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.GetDataLabelingJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.GetDataLabelingJobRequest): + request = job_service.GetDataLabelingJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_data_labeling_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_data_labeling_jobs(self, + request: job_service.ListDataLabelingJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDataLabelingJobsPager: + r"""Lists DataLabelingJobs in a Location. + + Args: + request (google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest): + The request object. Request message for + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. + parent (str): + Required. The parent of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.job_service.pagers.ListDataLabelingJobsPager: + Response message for + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.ListDataLabelingJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.ListDataLabelingJobsRequest): + request = job_service.ListDataLabelingJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_data_labeling_jobs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+        response = pagers.ListDataLabelingJobsPager(
+            method=rpc,
+            request=request,
+            response=response,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def delete_data_labeling_job(self,
+            request: job_service.DeleteDataLabelingJobRequest = None,
+            *,
+            name: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> gac_operation.Operation:
+        r"""Deletes a DataLabelingJob.
+
+        Args:
+            request (google.cloud.aiplatform_v1.types.DeleteDataLabelingJobRequest):
+                The request object. Request message for
+                [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob].
+            name (str):
+                Required. The name of the DataLabelingJob to be deleted.
+                Format:
+                ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`google.protobuf.empty_pb2.Empty`, a generic empty message
+                that you can re-use to avoid defining duplicated empty messages
+                in your APIs. A typical example is to use it as the request or
+                the response type of an API method. For instance:
+
+                    service Foo {
+                      rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+                    }
+
+                The JSON representation for Empty is an empty JSON object ``{}``.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a job_service.DeleteDataLabelingJobRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, job_service.DeleteDataLabelingJobRequest):
+            request = job_service.DeleteDataLabelingJobRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.delete_data_labeling_job]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
+        response = gac_operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            empty_pb2.Empty,
+            metadata_type=gca_operation.DeleteOperationMetadata,
+        )
+
+        # Done; return the response.
+ return response + + def cancel_data_labeling_job(self, + request: job_service.CancelDataLabelingJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a DataLabelingJob. Success of cancellation is + not guaranteed. + + Args: + request (google.cloud.aiplatform_v1.types.CancelDataLabelingJobRequest): + The request object. Request message for + [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1.JobService.CancelDataLabelingJob]. + name (str): + Required. The name of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CancelDataLabelingJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.CancelDataLabelingJobRequest): + request = job_service.CancelDataLabelingJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_data_labeling_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def create_hyperparameter_tuning_job(self, + request: job_service.CreateHyperparameterTuningJobRequest = None, + *, + parent: str = None, + hyperparameter_tuning_job: gca_hyperparameter_tuning_job.HyperparameterTuningJob = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_hyperparameter_tuning_job.HyperparameterTuningJob: + r"""Creates a HyperparameterTuningJob + + Args: + request (google.cloud.aiplatform_v1.types.CreateHyperparameterTuningJobRequest): + The request object. Request message for + [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CreateHyperparameterTuningJob]. + parent (str): + Required. The resource name of the Location to create + the HyperparameterTuningJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ hyperparameter_tuning_job (google.cloud.aiplatform_v1.types.HyperparameterTuningJob): + Required. The HyperparameterTuningJob + to create. + + This corresponds to the ``hyperparameter_tuning_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.HyperparameterTuningJob: + Represents a HyperparameterTuningJob. + A HyperparameterTuningJob has a Study + specification and multiple CustomJobs + with identical CustomJob specification. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, hyperparameter_tuning_job]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CreateHyperparameterTuningJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.CreateHyperparameterTuningJobRequest): + request = job_service.CreateHyperparameterTuningJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if hyperparameter_tuning_job is not None: + request.hyperparameter_tuning_job = hyperparameter_tuning_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_hyperparameter_tuning_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_hyperparameter_tuning_job(self, + request: job_service.GetHyperparameterTuningJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> hyperparameter_tuning_job.HyperparameterTuningJob: + r"""Gets a HyperparameterTuningJob + + Args: + request (google.cloud.aiplatform_v1.types.GetHyperparameterTuningJobRequest): + The request object. Request message for + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob]. + name (str): + Required. The name of the HyperparameterTuningJob + resource. Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1.types.HyperparameterTuningJob: + Represents a HyperparameterTuningJob. + A HyperparameterTuningJob has a Study + specification and multiple CustomJobs + with identical CustomJob specification. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.GetHyperparameterTuningJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.GetHyperparameterTuningJobRequest): + request = job_service.GetHyperparameterTuningJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_hyperparameter_tuning_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_hyperparameter_tuning_jobs(self, + request: job_service.ListHyperparameterTuningJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListHyperparameterTuningJobsPager: + r"""Lists HyperparameterTuningJobs in a Location. + + Args: + request (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest): + The request object. Request message for + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs]. + parent (str): + Required. The resource name of the Location to list the + HyperparameterTuningJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.job_service.pagers.ListHyperparameterTuningJobsPager: + Response message for + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+        has_flattened_params = any([parent])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a job_service.ListHyperparameterTuningJobsRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, job_service.ListHyperparameterTuningJobsRequest):
+            request = job_service.ListHyperparameterTuningJobsRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if parent is not None:
+            request.parent = parent
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.list_hyperparameter_tuning_jobs]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("parent", request.parent),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # This method is paged; wrap the response in a pager, which provides
+        # an `__iter__` convenience method.
+        response = pagers.ListHyperparameterTuningJobsPager(
+            method=rpc,
+            request=request,
+            response=response,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def delete_hyperparameter_tuning_job(self,
+            request: job_service.DeleteHyperparameterTuningJobRequest = None,
+            *,
+            name: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> gac_operation.Operation:
+        r"""Deletes a HyperparameterTuningJob.
+
+        Args:
+            request (google.cloud.aiplatform_v1.types.DeleteHyperparameterTuningJobRequest):
+                The request object. Request message for
+                [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob].
+            name (str):
+                Required. The name of the HyperparameterTuningJob
+                resource to be deleted. Format:
+                ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`google.protobuf.empty_pb2.Empty`, a generic empty message
+                that you can re-use to avoid defining duplicated empty messages
+                in your APIs. A typical example is to use it as the request or
+                the response type of an API method. For instance:
+
+                    service Foo {
+                      rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+                    }
+
+                The JSON representation for Empty is an empty JSON object ``{}``.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.DeleteHyperparameterTuningJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.DeleteHyperparameterTuningJobRequest): + request = job_service.DeleteHyperparameterTuningJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_hyperparameter_tuning_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def cancel_hyperparameter_tuning_job(self, + request: job_service.CancelHyperparameterTuningJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a HyperparameterTuningJob. Starts asynchronous + cancellation on the HyperparameterTuningJob. The server makes a + best effort to cancel the job, but success is not guaranteed. + Clients can use + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On successful + cancellation, the HyperparameterTuningJob is not deleted; + instead it becomes a job with a + [HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state] + is set to ``CANCELLED``. + + Args: + request (google.cloud.aiplatform_v1.types.CancelHyperparameterTuningJobRequest): + The request object. Request message for + [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CancelHyperparameterTuningJob]. + name (str): + Required. The name of the HyperparameterTuningJob to + cancel. Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. 
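+        #
+        # A minimal, hand-written sketch (not generated code) of the
+        # cancel-then-poll pattern described in the docstring above;
+        # ``job_name`` is a placeholder resource name:
+        #
+        #   client.cancel_hyperparameter_tuning_job(name=job_name)
+        #   job = client.get_hyperparameter_tuning_job(name=job_name)
+        #   if job.state == aiplatform_v1.JobState.JOB_STATE_CANCELLED:
+        #       pass  # cancellation took effect; job.error.code is 1 (CANCELLED)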
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a job_service.CancelHyperparameterTuningJobRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, job_service.CancelHyperparameterTuningJobRequest):
+            request = job_service.CancelHyperparameterTuningJobRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.cancel_hyperparameter_tuning_job]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+    def create_batch_prediction_job(self,
+            request: job_service.CreateBatchPredictionJobRequest = None,
+            *,
+            parent: str = None,
+            batch_prediction_job: gca_batch_prediction_job.BatchPredictionJob = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> gca_batch_prediction_job.BatchPredictionJob:
+        r"""Creates a BatchPredictionJob. Once created, the server
+        immediately attempts to start the BatchPredictionJob.
+
+        Args:
+            request (google.cloud.aiplatform_v1.types.CreateBatchPredictionJobRequest):
+                The request object. Request message for
+                [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CreateBatchPredictionJob].
+            parent (str):
+                Required. The resource name of the Location to create
+                the BatchPredictionJob in. Format:
+                ``projects/{project}/locations/{location}``
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            batch_prediction_job (google.cloud.aiplatform_v1.types.BatchPredictionJob):
+                Required. The BatchPredictionJob to
+                create.
+
+                This corresponds to the ``batch_prediction_job`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1.types.BatchPredictionJob:
+                A job that uses a [Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to produce predictions
+                on multiple [input
+                instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config].
+                If predictions for a significant portion of the
+                instances fail, the job may finish without attempting
+                predictions for all remaining instances.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent, batch_prediction_job])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a job_service.CreateBatchPredictionJobRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, job_service.CreateBatchPredictionJobRequest):
+            request = job_service.CreateBatchPredictionJobRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if parent is not None:
+            request.parent = parent
+        if batch_prediction_job is not None:
+            request.batch_prediction_job = batch_prediction_job
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.create_batch_prediction_job]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("parent", request.parent),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def get_batch_prediction_job(self,
+            request: job_service.GetBatchPredictionJobRequest = None,
+            *,
+            name: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> batch_prediction_job.BatchPredictionJob:
+        r"""Gets a BatchPredictionJob.
+
+        Args:
+            request (google.cloud.aiplatform_v1.types.GetBatchPredictionJobRequest):
+                The request object. Request message for
+                [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob].
+            name (str):
+                Required. The name of the BatchPredictionJob resource.
+                Format:
+                ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1.types.BatchPredictionJob:
+                A job that uses a [Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to produce predictions
+                on multiple [input
+                instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config].
+                If predictions for a significant portion of the
+                instances fail, the job may finish without attempting
+                predictions for all remaining instances.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a job_service.GetBatchPredictionJobRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+ if not isinstance(request, job_service.GetBatchPredictionJobRequest): + request = job_service.GetBatchPredictionJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_batch_prediction_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_batch_prediction_jobs(self, + request: job_service.ListBatchPredictionJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListBatchPredictionJobsPager: + r"""Lists BatchPredictionJobs in a Location. + + Args: + request (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest): + The request object. Request message for + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs]. + parent (str): + Required. The resource name of the Location to list the + BatchPredictionJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.job_service.pagers.ListBatchPredictionJobsPager: + Response message for + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.ListBatchPredictionJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.ListBatchPredictionJobsRequest): + request = job_service.ListBatchPredictionJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_batch_prediction_jobs] + + # Certain fields should be provided within the metadata header; + # add these here. 
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("parent", request.parent),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # This method is paged; wrap the response in a pager, which provides
+        # an `__iter__` convenience method.
+        response = pagers.ListBatchPredictionJobsPager(
+            method=rpc,
+            request=request,
+            response=response,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def delete_batch_prediction_job(self,
+            request: job_service.DeleteBatchPredictionJobRequest = None,
+            *,
+            name: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> gac_operation.Operation:
+        r"""Deletes a BatchPredictionJob. Can only be called on
+        jobs that have already finished.
+
+        Args:
+            request (google.cloud.aiplatform_v1.types.DeleteBatchPredictionJobRequest):
+                The request object. Request message for
+                [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1.JobService.DeleteBatchPredictionJob].
+            name (str):
+                Required. The name of the BatchPredictionJob resource to
+                be deleted. Format:
+                ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`google.protobuf.empty_pb2.Empty`, a generic empty message
+                that you can re-use to avoid defining duplicated empty messages
+                in your APIs. A typical example is to use it as the request or
+                the response type of an API method. For instance:
+
+                    service Foo {
+                      rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+                    }
+
+                The JSON representation for Empty is an empty JSON object ``{}``.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a job_service.DeleteBatchPredictionJobRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, job_service.DeleteBatchPredictionJobRequest):
+            request = job_service.DeleteBatchPredictionJobRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.delete_batch_prediction_job]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
+        response = gac_operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            empty_pb2.Empty,
+            metadata_type=gca_operation.DeleteOperationMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def cancel_batch_prediction_job(self,
+            request: job_service.CancelBatchPredictionJobRequest = None,
+            *,
+            name: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> None:
+        r"""Cancels a BatchPredictionJob.
+
+        Starts asynchronous cancellation on the BatchPredictionJob. The
+        server makes a best effort to cancel the job, but success is
+        not guaranteed. Clients can use
+        [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob]
+        or other methods to check whether the cancellation succeeded or
+        whether the job completed despite cancellation. On a successful
+        cancellation, the BatchPredictionJob is not deleted; instead its
+        [BatchPredictionJob.state][google.cloud.aiplatform.v1.BatchPredictionJob.state]
+        is set to ``CANCELLED``. Any files already output by the job
+        are not deleted.
+
+        Args:
+            request (google.cloud.aiplatform_v1.types.CancelBatchPredictionJobRequest):
+                The request object. Request message for
+                [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CancelBatchPredictionJob].
+            name (str):
+                Required. The name of the BatchPredictionJob to cancel.
+                Format:
+                ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a job_service.CancelBatchPredictionJobRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, job_service.CancelBatchPredictionJobRequest):
+            request = job_service.CancelBatchPredictionJobRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.cancel_batch_prediction_job]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+ rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "JobServiceClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/pagers.py new file mode 100644 index 0000000000..2567abe945 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/pagers.py @@ -0,0 +1,510 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.cloud.aiplatform_v1.types import batch_prediction_job +from google.cloud.aiplatform_v1.types import custom_job +from google.cloud.aiplatform_v1.types import data_labeling_job +from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job +from google.cloud.aiplatform_v1.types import job_service + + +class ListCustomJobsPager: + """A pager for iterating through ``list_custom_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListCustomJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``custom_jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListCustomJobs`` requests and continue to iterate + through the ``custom_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListCustomJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., job_service.ListCustomJobsResponse], + request: job_service.ListCustomJobsRequest, + response: job_service.ListCustomJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListCustomJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListCustomJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = job_service.ListCustomJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[job_service.ListCustomJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[custom_job.CustomJob]: + for page in self.pages: + yield from page.custom_jobs + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListCustomJobsAsyncPager: + """A pager for iterating through ``list_custom_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListCustomJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``custom_jobs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListCustomJobs`` requests and continue to iterate + through the ``custom_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListCustomJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[job_service.ListCustomJobsResponse]], + request: job_service.ListCustomJobsRequest, + response: job_service.ListCustomJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListCustomJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListCustomJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = job_service.ListCustomJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[job_service.ListCustomJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[custom_job.CustomJob]: + async def async_generator(): + async for page in self.pages: + for response in page.custom_jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListDataLabelingJobsPager: + """A pager for iterating through ``list_data_labeling_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``data_labeling_jobs`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListDataLabelingJobs`` requests and continue to iterate + through the ``data_labeling_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., job_service.ListDataLabelingJobsResponse], + request: job_service.ListDataLabelingJobsRequest, + response: job_service.ListDataLabelingJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = job_service.ListDataLabelingJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[job_service.ListDataLabelingJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[data_labeling_job.DataLabelingJob]: + for page in self.pages: + yield from page.data_labeling_jobs + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListDataLabelingJobsAsyncPager: + """A pager for iterating through ``list_data_labeling_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``data_labeling_jobs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListDataLabelingJobs`` requests and continue to iterate + through the ``data_labeling_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[job_service.ListDataLabelingJobsResponse]], + request: job_service.ListDataLabelingJobsRequest, + response: job_service.ListDataLabelingJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = job_service.ListDataLabelingJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[job_service.ListDataLabelingJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[data_labeling_job.DataLabelingJob]: + async def async_generator(): + async for page in self.pages: + for response in page.data_labeling_jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListHyperparameterTuningJobsPager: + """A pager for iterating through ``list_hyperparameter_tuning_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``hyperparameter_tuning_jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListHyperparameterTuningJobs`` requests and continue to iterate + through the ``hyperparameter_tuning_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., job_service.ListHyperparameterTuningJobsResponse], + request: job_service.ListHyperparameterTuningJobsRequest, + response: job_service.ListHyperparameterTuningJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = job_service.ListHyperparameterTuningJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[job_service.ListHyperparameterTuningJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[hyperparameter_tuning_job.HyperparameterTuningJob]: + for page in self.pages: + yield from page.hyperparameter_tuning_jobs + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListHyperparameterTuningJobsAsyncPager: + """A pager for iterating through ``list_hyperparameter_tuning_jobs`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``hyperparameter_tuning_jobs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListHyperparameterTuningJobs`` requests and continue to iterate + through the ``hyperparameter_tuning_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[job_service.ListHyperparameterTuningJobsResponse]], + request: job_service.ListHyperparameterTuningJobsRequest, + response: job_service.ListHyperparameterTuningJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = job_service.ListHyperparameterTuningJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[job_service.ListHyperparameterTuningJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[hyperparameter_tuning_job.HyperparameterTuningJob]: + async def async_generator(): + async for page in self.pages: + for response in page.hyperparameter_tuning_jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListBatchPredictionJobsPager: + """A pager for iterating through ``list_batch_prediction_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``batch_prediction_jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListBatchPredictionJobs`` requests and continue to iterate + through the ``batch_prediction_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., job_service.ListBatchPredictionJobsResponse], + request: job_service.ListBatchPredictionJobsRequest, + response: job_service.ListBatchPredictionJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = job_service.ListBatchPredictionJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[job_service.ListBatchPredictionJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[batch_prediction_job.BatchPredictionJob]: + for page in self.pages: + yield from page.batch_prediction_jobs + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListBatchPredictionJobsAsyncPager: + """A pager for iterating through ``list_batch_prediction_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``batch_prediction_jobs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListBatchPredictionJobs`` requests and continue to iterate + through the ``batch_prediction_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[job_service.ListBatchPredictionJobsResponse]], + request: job_service.ListBatchPredictionJobsRequest, + response: job_service.ListBatchPredictionJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = job_service.ListBatchPredictionJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[job_service.ListBatchPredictionJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[batch_prediction_job.BatchPredictionJob]: + async def async_generator(): + async for page in self.pages: + for response in page.batch_prediction_jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/__init__.py new file mode 100644 index 0000000000..13c5f7ade5 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import JobServiceTransport +from .grpc import JobServiceGrpcTransport +from .grpc_asyncio import JobServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]] +_transport_registry['grpc'] = JobServiceGrpcTransport +_transport_registry['grpc_asyncio'] = JobServiceGrpcAsyncIOTransport + +__all__ = ( + 'JobServiceTransport', + 'JobServiceGrpcTransport', + 'JobServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/base.py new file mode 100644 index 0000000000..0f419ef18b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/base.py @@ -0,0 +1,450 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1.types import batch_prediction_job +from google.cloud.aiplatform_v1.types import batch_prediction_job as gca_batch_prediction_job +from google.cloud.aiplatform_v1.types import custom_job +from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job +from google.cloud.aiplatform_v1.types import data_labeling_job +from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job +from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job +from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job +from google.cloud.aiplatform_v1.types import job_service +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class JobServiceTransport(abc.ABC): + """Abstract transport class for JobService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. 
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+        """
+        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+        if ':' not in host:
+            host += ':443'
+        self._host = host
+
+        scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
+
+        # Save the scopes.
+        self._scopes = scopes
+
+        # If no credentials are provided, then determine the appropriate
+        # defaults.
+        if credentials and credentials_file:
+            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
+
+        if credentials_file is not None:
+            credentials, _ = google.auth.load_credentials_from_file(
+                credentials_file,
+                **scopes_kwargs,
+                quota_project_id=quota_project_id
+            )
+
+        elif credentials is None:
+            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
+
+        # If the credentials are service account credentials, then always try to use self signed JWT.
+        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
+            credentials = credentials.with_always_use_jwt_access(True)
+
+        # Save the credentials.
+        self._credentials = credentials
+
+    # TODO(busunkim): This method is in the base transport
+    # to avoid duplicating code across the transport classes. These functions
+    # should be deleted once the minimum required version of google-auth is increased.
+
+    # TODO: Remove this function once google-auth >= 1.25.0 is required
+    @classmethod
+    def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
+        """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
+
+        scopes_kwargs = {}
+
+        if _GOOGLE_AUTH_VERSION and (
+            packaging.version.parse(_GOOGLE_AUTH_VERSION)
+            >= packaging.version.parse("1.25.0")
+        ):
+            scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
+        else:
+            scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
+
+        return scopes_kwargs
+
+    def _prep_wrapped_messages(self, client_info):
+        # Precompute the wrapped methods.
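+        # Each entry below pairs an RPC with ``gapic_v1.method.wrap_method``,
+        # which applies the declared default timeout and attaches the
+        # ``client_info`` user-agent metadata to every outgoing call.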
+ self._wrapped_methods = { + self.create_custom_job: gapic_v1.method.wrap_method( + self.create_custom_job, + default_timeout=5.0, + client_info=client_info, + ), + self.get_custom_job: gapic_v1.method.wrap_method( + self.get_custom_job, + default_timeout=5.0, + client_info=client_info, + ), + self.list_custom_jobs: gapic_v1.method.wrap_method( + self.list_custom_jobs, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_custom_job: gapic_v1.method.wrap_method( + self.delete_custom_job, + default_timeout=5.0, + client_info=client_info, + ), + self.cancel_custom_job: gapic_v1.method.wrap_method( + self.cancel_custom_job, + default_timeout=5.0, + client_info=client_info, + ), + self.create_data_labeling_job: gapic_v1.method.wrap_method( + self.create_data_labeling_job, + default_timeout=5.0, + client_info=client_info, + ), + self.get_data_labeling_job: gapic_v1.method.wrap_method( + self.get_data_labeling_job, + default_timeout=5.0, + client_info=client_info, + ), + self.list_data_labeling_jobs: gapic_v1.method.wrap_method( + self.list_data_labeling_jobs, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_data_labeling_job: gapic_v1.method.wrap_method( + self.delete_data_labeling_job, + default_timeout=5.0, + client_info=client_info, + ), + self.cancel_data_labeling_job: gapic_v1.method.wrap_method( + self.cancel_data_labeling_job, + default_timeout=5.0, + client_info=client_info, + ), + self.create_hyperparameter_tuning_job: gapic_v1.method.wrap_method( + self.create_hyperparameter_tuning_job, + default_timeout=5.0, + client_info=client_info, + ), + self.get_hyperparameter_tuning_job: gapic_v1.method.wrap_method( + self.get_hyperparameter_tuning_job, + default_timeout=5.0, + client_info=client_info, + ), + self.list_hyperparameter_tuning_jobs: gapic_v1.method.wrap_method( + self.list_hyperparameter_tuning_jobs, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_hyperparameter_tuning_job: gapic_v1.method.wrap_method( + self.delete_hyperparameter_tuning_job, + default_timeout=5.0, + client_info=client_info, + ), + self.cancel_hyperparameter_tuning_job: gapic_v1.method.wrap_method( + self.cancel_hyperparameter_tuning_job, + default_timeout=5.0, + client_info=client_info, + ), + self.create_batch_prediction_job: gapic_v1.method.wrap_method( + self.create_batch_prediction_job, + default_timeout=5.0, + client_info=client_info, + ), + self.get_batch_prediction_job: gapic_v1.method.wrap_method( + self.get_batch_prediction_job, + default_timeout=5.0, + client_info=client_info, + ), + self.list_batch_prediction_jobs: gapic_v1.method.wrap_method( + self.list_batch_prediction_jobs, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_batch_prediction_job: gapic_v1.method.wrap_method( + self.delete_batch_prediction_job, + default_timeout=5.0, + client_info=client_info, + ), + self.cancel_batch_prediction_job: gapic_v1.method.wrap_method( + self.cancel_batch_prediction_job, + default_timeout=5.0, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_custom_job(self) -> Callable[ + [job_service.CreateCustomJobRequest], + Union[ + gca_custom_job.CustomJob, + Awaitable[gca_custom_job.CustomJob] + ]]: + raise NotImplementedError() + + @property + def get_custom_job(self) -> Callable[ + [job_service.GetCustomJobRequest], + Union[ + custom_job.CustomJob, + 
Awaitable[custom_job.CustomJob] + ]]: + raise NotImplementedError() + + @property + def list_custom_jobs(self) -> Callable[ + [job_service.ListCustomJobsRequest], + Union[ + job_service.ListCustomJobsResponse, + Awaitable[job_service.ListCustomJobsResponse] + ]]: + raise NotImplementedError() + + @property + def delete_custom_job(self) -> Callable[ + [job_service.DeleteCustomJobRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def cancel_custom_job(self) -> Callable[ + [job_service.CancelCustomJobRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def create_data_labeling_job(self) -> Callable[ + [job_service.CreateDataLabelingJobRequest], + Union[ + gca_data_labeling_job.DataLabelingJob, + Awaitable[gca_data_labeling_job.DataLabelingJob] + ]]: + raise NotImplementedError() + + @property + def get_data_labeling_job(self) -> Callable[ + [job_service.GetDataLabelingJobRequest], + Union[ + data_labeling_job.DataLabelingJob, + Awaitable[data_labeling_job.DataLabelingJob] + ]]: + raise NotImplementedError() + + @property + def list_data_labeling_jobs(self) -> Callable[ + [job_service.ListDataLabelingJobsRequest], + Union[ + job_service.ListDataLabelingJobsResponse, + Awaitable[job_service.ListDataLabelingJobsResponse] + ]]: + raise NotImplementedError() + + @property + def delete_data_labeling_job(self) -> Callable[ + [job_service.DeleteDataLabelingJobRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def cancel_data_labeling_job(self) -> Callable[ + [job_service.CancelDataLabelingJobRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def create_hyperparameter_tuning_job(self) -> Callable[ + [job_service.CreateHyperparameterTuningJobRequest], + Union[ + gca_hyperparameter_tuning_job.HyperparameterTuningJob, + Awaitable[gca_hyperparameter_tuning_job.HyperparameterTuningJob] + ]]: + raise NotImplementedError() + + @property + def get_hyperparameter_tuning_job(self) -> Callable[ + [job_service.GetHyperparameterTuningJobRequest], + Union[ + hyperparameter_tuning_job.HyperparameterTuningJob, + Awaitable[hyperparameter_tuning_job.HyperparameterTuningJob] + ]]: + raise NotImplementedError() + + @property + def list_hyperparameter_tuning_jobs(self) -> Callable[ + [job_service.ListHyperparameterTuningJobsRequest], + Union[ + job_service.ListHyperparameterTuningJobsResponse, + Awaitable[job_service.ListHyperparameterTuningJobsResponse] + ]]: + raise NotImplementedError() + + @property + def delete_hyperparameter_tuning_job(self) -> Callable[ + [job_service.DeleteHyperparameterTuningJobRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def cancel_hyperparameter_tuning_job(self) -> Callable[ + [job_service.CancelHyperparameterTuningJobRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def create_batch_prediction_job(self) -> Callable[ + [job_service.CreateBatchPredictionJobRequest], + Union[ + gca_batch_prediction_job.BatchPredictionJob, + Awaitable[gca_batch_prediction_job.BatchPredictionJob] + ]]: + raise NotImplementedError() + + @property + def get_batch_prediction_job(self) -> Callable[ + [job_service.GetBatchPredictionJobRequest], + Union[ + 
batch_prediction_job.BatchPredictionJob, + Awaitable[batch_prediction_job.BatchPredictionJob] + ]]: + raise NotImplementedError() + + @property + def list_batch_prediction_jobs(self) -> Callable[ + [job_service.ListBatchPredictionJobsRequest], + Union[ + job_service.ListBatchPredictionJobsResponse, + Awaitable[job_service.ListBatchPredictionJobsResponse] + ]]: + raise NotImplementedError() + + @property + def delete_batch_prediction_job(self) -> Callable[ + [job_service.DeleteBatchPredictionJobRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def cancel_batch_prediction_job(self) -> Callable[ + [job_service.CancelBatchPredictionJobRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'JobServiceTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py new file mode 100644 index 0000000000..7543a064cb --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py @@ -0,0 +1,818 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1.types import batch_prediction_job +from google.cloud.aiplatform_v1.types import batch_prediction_job as gca_batch_prediction_job +from google.cloud.aiplatform_v1.types import custom_job +from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job +from google.cloud.aiplatform_v1.types import data_labeling_job +from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job +from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job +from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job +from google.cloud.aiplatform_v1.types import job_service +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import JobServiceTransport, DEFAULT_CLIENT_INFO + + +class JobServiceGrpcTransport(JobServiceTransport): + """gRPC backend transport for JobService. + + A service for creating and managing Vertex AI's jobs. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+    _stubs: Dict[str, Callable]
+
+    def __init__(self, *,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: str = None,
+            scopes: Sequence[str] = None,
+            channel: grpc.Channel = None,
+            api_mtls_endpoint: str = None,
+            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+            ssl_channel_credentials: grpc.ChannelCredentials = None,
+            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+            quota_project_id: Optional[str] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure a mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+
+        Raises:
+          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+              creation failed for any reason.
+          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+              and ``credentials_file`` are passed.
+        """
+        self._grpc_channel = None
+        self._ssl_channel_credentials = ssl_channel_credentials
+        self._stubs: Dict[str, Callable] = {}
+        self._operations_client = None
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Ignore credentials if a channel was passed.
+            credentials = False
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+
+        else:
+            if api_mtls_endpoint:
+                host = api_mtls_endpoint
+
+                # Create SSL credentials with client_cert_source or application
+                # default SSL credentials.
+                if client_cert_source:
+                    cert, key = client_cert_source()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+                else:
+                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+            else:
+                if client_cert_source_for_mtls and not ssl_channel_credentials:
+                    cert, key = client_cert_source_for_mtls()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+
+        # The base transport sets the host, credentials and scopes.
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+        )
+
+        if not self._grpc_channel:
+            self._grpc_channel = type(self).create_channel(
+                self._host,
+                credentials=self._credentials,
+                credentials_file=credentials_file,
+                scopes=self._scopes,
+                ssl_credentials=self._ssl_channel_credentials,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        # Wrap messages. This must be done after self._grpc_channel exists.
+        self._prep_wrapped_messages(client_info)
+
+    @classmethod
+    def create_channel(cls,
+                       host: str = 'aiplatform.googleapis.com',
+                       credentials: ga_credentials.Credentials = None,
+                       credentials_file: str = None,
+                       scopes: Optional[Sequence[str]] = None,
+                       quota_project_id: Optional[str] = None,
+                       **kwargs) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service.
+        """
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Sanity check: Only create a new client if we do not already have one.
+        if self._operations_client is None:
+            self._operations_client = operations_v1.OperationsClient(
+                self.grpc_channel
+            )
+
+        # Return the client from cache.
+        return self._operations_client
+
+    @property
+    def create_custom_job(self) -> Callable[
+            [job_service.CreateCustomJobRequest],
+            gca_custom_job.CustomJob]:
+        r"""Return a callable for the create custom job method over gRPC.
+
+        Creates a CustomJob. A created CustomJob will be
+        attempted to run right away.
+
+        Returns:
+            Callable[[~.CreateCustomJobRequest],
+                    ~.CustomJob]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'create_custom_job' not in self._stubs:
+            self._stubs['create_custom_job'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1.JobService/CreateCustomJob',
+                request_serializer=job_service.CreateCustomJobRequest.serialize,
+                response_deserializer=gca_custom_job.CustomJob.deserialize,
+            )
+        return self._stubs['create_custom_job']
+
+    @property
+    def get_custom_job(self) -> Callable[
+            [job_service.GetCustomJobRequest],
+            custom_job.CustomJob]:
+        r"""Return a callable for the get custom job method over gRPC.
+
+        Gets a CustomJob.
+
+        Returns:
+            Callable[[~.GetCustomJobRequest],
+                    ~.CustomJob]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'get_custom_job' not in self._stubs:
+            self._stubs['get_custom_job'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1.JobService/GetCustomJob',
+                request_serializer=job_service.GetCustomJobRequest.serialize,
+                response_deserializer=custom_job.CustomJob.deserialize,
+            )
+        return self._stubs['get_custom_job']
+
+    @property
+    def list_custom_jobs(self) -> Callable[
+            [job_service.ListCustomJobsRequest],
+            job_service.ListCustomJobsResponse]:
+        r"""Return a callable for the list custom jobs method over gRPC.
+
+        Lists CustomJobs in a Location.
+
+        Returns:
+            Callable[[~.ListCustomJobsRequest],
+                    ~.ListCustomJobsResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
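+        # The resulting stub is cached in self._stubs, so each RPC callable
+        # is created once per transport instance and reused on later accesses.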
+ if 'list_custom_jobs' not in self._stubs: + self._stubs['list_custom_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/ListCustomJobs', + request_serializer=job_service.ListCustomJobsRequest.serialize, + response_deserializer=job_service.ListCustomJobsResponse.deserialize, + ) + return self._stubs['list_custom_jobs'] + + @property + def delete_custom_job(self) -> Callable[ + [job_service.DeleteCustomJobRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete custom job method over gRPC. + + Deletes a CustomJob. + + Returns: + Callable[[~.DeleteCustomJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_custom_job' not in self._stubs: + self._stubs['delete_custom_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/DeleteCustomJob', + request_serializer=job_service.DeleteCustomJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_custom_job'] + + @property + def cancel_custom_job(self) -> Callable[ + [job_service.CancelCustomJobRequest], + empty_pb2.Empty]: + r"""Return a callable for the cancel custom job method over gRPC. + + Cancels a CustomJob. Starts asynchronous cancellation on the + CustomJob. The server makes a best effort to cancel the job, but + success is not guaranteed. Clients can use + [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On successful + cancellation, the CustomJob is not deleted; instead it becomes a + job with a + [CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is + set to ``CANCELLED``. + + Returns: + Callable[[~.CancelCustomJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_custom_job' not in self._stubs: + self._stubs['cancel_custom_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/CancelCustomJob', + request_serializer=job_service.CancelCustomJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_custom_job'] + + @property + def create_data_labeling_job(self) -> Callable[ + [job_service.CreateDataLabelingJobRequest], + gca_data_labeling_job.DataLabelingJob]: + r"""Return a callable for the create data labeling job method over gRPC. + + Creates a DataLabelingJob. + + Returns: + Callable[[~.CreateDataLabelingJobRequest], + ~.DataLabelingJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_data_labeling_job' not in self._stubs: + self._stubs['create_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/CreateDataLabelingJob', + request_serializer=job_service.CreateDataLabelingJobRequest.serialize, + response_deserializer=gca_data_labeling_job.DataLabelingJob.deserialize, + ) + return self._stubs['create_data_labeling_job'] + + @property + def get_data_labeling_job(self) -> Callable[ + [job_service.GetDataLabelingJobRequest], + data_labeling_job.DataLabelingJob]: + r"""Return a callable for the get data labeling job method over gRPC. + + Gets a DataLabelingJob. + + Returns: + Callable[[~.GetDataLabelingJobRequest], + ~.DataLabelingJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_data_labeling_job' not in self._stubs: + self._stubs['get_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/GetDataLabelingJob', + request_serializer=job_service.GetDataLabelingJobRequest.serialize, + response_deserializer=data_labeling_job.DataLabelingJob.deserialize, + ) + return self._stubs['get_data_labeling_job'] + + @property + def list_data_labeling_jobs(self) -> Callable[ + [job_service.ListDataLabelingJobsRequest], + job_service.ListDataLabelingJobsResponse]: + r"""Return a callable for the list data labeling jobs method over gRPC. + + Lists DataLabelingJobs in a Location. + + Returns: + Callable[[~.ListDataLabelingJobsRequest], + ~.ListDataLabelingJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_data_labeling_jobs' not in self._stubs: + self._stubs['list_data_labeling_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/ListDataLabelingJobs', + request_serializer=job_service.ListDataLabelingJobsRequest.serialize, + response_deserializer=job_service.ListDataLabelingJobsResponse.deserialize, + ) + return self._stubs['list_data_labeling_jobs'] + + @property + def delete_data_labeling_job(self) -> Callable[ + [job_service.DeleteDataLabelingJobRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete data labeling job method over gRPC. + + Deletes a DataLabelingJob. + + Returns: + Callable[[~.DeleteDataLabelingJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
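+        # Note: delete returns a long-running operation, and
+        # ``operations_pb2.Operation`` is a plain protobuf message, so its
+        # deserializer is ``Operation.FromString`` rather than the proto-plus
+        # ``deserialize`` used for the aiplatform request/response types above.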
+ if 'delete_data_labeling_job' not in self._stubs: + self._stubs['delete_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/DeleteDataLabelingJob', + request_serializer=job_service.DeleteDataLabelingJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_data_labeling_job'] + + @property + def cancel_data_labeling_job(self) -> Callable[ + [job_service.CancelDataLabelingJobRequest], + empty_pb2.Empty]: + r"""Return a callable for the cancel data labeling job method over gRPC. + + Cancels a DataLabelingJob. Success of cancellation is + not guaranteed. + + Returns: + Callable[[~.CancelDataLabelingJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_data_labeling_job' not in self._stubs: + self._stubs['cancel_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/CancelDataLabelingJob', + request_serializer=job_service.CancelDataLabelingJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_data_labeling_job'] + + @property + def create_hyperparameter_tuning_job(self) -> Callable[ + [job_service.CreateHyperparameterTuningJobRequest], + gca_hyperparameter_tuning_job.HyperparameterTuningJob]: + r"""Return a callable for the create hyperparameter tuning + job method over gRPC. + + Creates a HyperparameterTuningJob + + Returns: + Callable[[~.CreateHyperparameterTuningJobRequest], + ~.HyperparameterTuningJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_hyperparameter_tuning_job' not in self._stubs: + self._stubs['create_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/CreateHyperparameterTuningJob', + request_serializer=job_service.CreateHyperparameterTuningJobRequest.serialize, + response_deserializer=gca_hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, + ) + return self._stubs['create_hyperparameter_tuning_job'] + + @property + def get_hyperparameter_tuning_job(self) -> Callable[ + [job_service.GetHyperparameterTuningJobRequest], + hyperparameter_tuning_job.HyperparameterTuningJob]: + r"""Return a callable for the get hyperparameter tuning job method over gRPC. + + Gets a HyperparameterTuningJob + + Returns: + Callable[[~.GetHyperparameterTuningJobRequest], + ~.HyperparameterTuningJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_hyperparameter_tuning_job' not in self._stubs: + self._stubs['get_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/GetHyperparameterTuningJob', + request_serializer=job_service.GetHyperparameterTuningJobRequest.serialize, + response_deserializer=hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, + ) + return self._stubs['get_hyperparameter_tuning_job'] + + @property + def list_hyperparameter_tuning_jobs(self) -> Callable[ + [job_service.ListHyperparameterTuningJobsRequest], + job_service.ListHyperparameterTuningJobsResponse]: + r"""Return a callable for the list hyperparameter tuning + jobs method over gRPC. + + Lists HyperparameterTuningJobs in a Location. + + Returns: + Callable[[~.ListHyperparameterTuningJobsRequest], + ~.ListHyperparameterTuningJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_hyperparameter_tuning_jobs' not in self._stubs: + self._stubs['list_hyperparameter_tuning_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/ListHyperparameterTuningJobs', + request_serializer=job_service.ListHyperparameterTuningJobsRequest.serialize, + response_deserializer=job_service.ListHyperparameterTuningJobsResponse.deserialize, + ) + return self._stubs['list_hyperparameter_tuning_jobs'] + + @property + def delete_hyperparameter_tuning_job(self) -> Callable[ + [job_service.DeleteHyperparameterTuningJobRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete hyperparameter tuning + job method over gRPC. + + Deletes a HyperparameterTuningJob. + + Returns: + Callable[[~.DeleteHyperparameterTuningJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_hyperparameter_tuning_job' not in self._stubs: + self._stubs['delete_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/DeleteHyperparameterTuningJob', + request_serializer=job_service.DeleteHyperparameterTuningJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_hyperparameter_tuning_job'] + + @property + def cancel_hyperparameter_tuning_job(self) -> Callable[ + [job_service.CancelHyperparameterTuningJobRequest], + empty_pb2.Empty]: + r"""Return a callable for the cancel hyperparameter tuning + job method over gRPC. + + Cancels a HyperparameterTuningJob. Starts asynchronous + cancellation on the HyperparameterTuningJob. The server makes a + best effort to cancel the job, but success is not guaranteed. + Clients can use + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. 
On successful
+        cancellation, the HyperparameterTuningJob is not deleted;
+        instead it becomes a job with a
+        [HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error]
+        value with a [google.rpc.Status.code][google.rpc.Status.code] of
+        1, corresponding to ``Code.CANCELLED``, and
+        [HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state]
+        is set to ``CANCELLED``.
+
+        Returns:
+            Callable[[~.CancelHyperparameterTuningJobRequest],
+                    ~.Empty]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'cancel_hyperparameter_tuning_job' not in self._stubs:
+            self._stubs['cancel_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1.JobService/CancelHyperparameterTuningJob',
+                request_serializer=job_service.CancelHyperparameterTuningJobRequest.serialize,
+                response_deserializer=empty_pb2.Empty.FromString,
+            )
+        return self._stubs['cancel_hyperparameter_tuning_job']
+
+    @property
+    def create_batch_prediction_job(self) -> Callable[
+            [job_service.CreateBatchPredictionJobRequest],
+            gca_batch_prediction_job.BatchPredictionJob]:
+        r"""Return a callable for the create batch prediction job method over gRPC.
+
+        Creates a BatchPredictionJob. Once created, a
+        BatchPredictionJob will be attempted to start right away.
+
+        Returns:
+            Callable[[~.CreateBatchPredictionJobRequest],
+                    ~.BatchPredictionJob]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'create_batch_prediction_job' not in self._stubs:
+            self._stubs['create_batch_prediction_job'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1.JobService/CreateBatchPredictionJob',
+                request_serializer=job_service.CreateBatchPredictionJobRequest.serialize,
+                response_deserializer=gca_batch_prediction_job.BatchPredictionJob.deserialize,
+            )
+        return self._stubs['create_batch_prediction_job']
+
+    @property
+    def get_batch_prediction_job(self) -> Callable[
+            [job_service.GetBatchPredictionJobRequest],
+            batch_prediction_job.BatchPredictionJob]:
+        r"""Return a callable for the get batch prediction job method over gRPC.
+
+        Gets a BatchPredictionJob
+
+        Returns:
+            Callable[[~.GetBatchPredictionJobRequest],
+                    ~.BatchPredictionJob]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'get_batch_prediction_job' not in self._stubs:
+            self._stubs['get_batch_prediction_job'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1.JobService/GetBatchPredictionJob',
+                request_serializer=job_service.GetBatchPredictionJobRequest.serialize,
+                response_deserializer=batch_prediction_job.BatchPredictionJob.deserialize,
+            )
+        return self._stubs['get_batch_prediction_job']
+
+    @property
+    def list_batch_prediction_jobs(self) -> Callable[
+            [job_service.ListBatchPredictionJobsRequest],
+            job_service.ListBatchPredictionJobsResponse]:
+        r"""Return a callable for the list batch prediction jobs method over gRPC.
+
+        Lists BatchPredictionJobs in a Location.
+
+        Returns:
+            Callable[[~.ListBatchPredictionJobsRequest],
+                    ~.ListBatchPredictionJobsResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'list_batch_prediction_jobs' not in self._stubs:
+            self._stubs['list_batch_prediction_jobs'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1.JobService/ListBatchPredictionJobs',
+                request_serializer=job_service.ListBatchPredictionJobsRequest.serialize,
+                response_deserializer=job_service.ListBatchPredictionJobsResponse.deserialize,
+            )
+        return self._stubs['list_batch_prediction_jobs']
+
+    @property
+    def delete_batch_prediction_job(self) -> Callable[
+            [job_service.DeleteBatchPredictionJobRequest],
+            operations_pb2.Operation]:
+        r"""Return a callable for the delete batch prediction job method over gRPC.
+
+        Deletes a BatchPredictionJob. Can only be called on
+        jobs that already finished.
+
+        Returns:
+            Callable[[~.DeleteBatchPredictionJobRequest],
+                    ~.Operation]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'delete_batch_prediction_job' not in self._stubs:
+            self._stubs['delete_batch_prediction_job'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1.JobService/DeleteBatchPredictionJob',
+                request_serializer=job_service.DeleteBatchPredictionJobRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['delete_batch_prediction_job']
+
+    @property
+    def cancel_batch_prediction_job(self) -> Callable[
+            [job_service.CancelBatchPredictionJobRequest],
+            empty_pb2.Empty]:
+        r"""Return a callable for the cancel batch prediction job method over gRPC.
+
+        Cancels a BatchPredictionJob.
+
+        Starts asynchronous cancellation on the BatchPredictionJob. The
+        server makes the best effort to cancel the job, but success is
+        not guaranteed. Clients can use
+        [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob]
+        or other methods to check whether the cancellation succeeded or
+        whether the job completed despite cancellation. On a successful
+        cancellation, the BatchPredictionJob is not deleted; instead its
+        [BatchPredictionJob.state][google.cloud.aiplatform.v1.BatchPredictionJob.state]
+        is set to ``CANCELLED``. Any files already outputted by the job
+        are not deleted.
+
+        Returns:
+            Callable[[~.CancelBatchPredictionJobRequest],
+                    ~.Empty]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_batch_prediction_job' not in self._stubs: + self._stubs['cancel_batch_prediction_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/CancelBatchPredictionJob', + request_serializer=job_service.CancelBatchPredictionJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_batch_prediction_job'] + + +__all__ = ( + 'JobServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..e684252c6b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py @@ -0,0 +1,822 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +import packaging.version + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import batch_prediction_job +from google.cloud.aiplatform_v1.types import batch_prediction_job as gca_batch_prediction_job +from google.cloud.aiplatform_v1.types import custom_job +from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job +from google.cloud.aiplatform_v1.types import data_labeling_job +from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job +from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job +from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job +from google.cloud.aiplatform_v1.types import job_service +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import JobServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import JobServiceGrpcTransport + + +class JobServiceGrpcAsyncIOTransport(JobServiceTransport): + """gRPC AsyncIO backend transport for JobService. + + A service for creating and managing Vertex AI's jobs. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """
+
+ _grpc_channel: aio.Channel
+ _stubs: Dict[str, Callable] = {}
+
+ @classmethod
+ def create_channel(cls,
+ host: str = 'aiplatform.googleapis.com',
+ credentials: ga_credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ **kwargs) -> aio.Channel:
+ """Create and return a gRPC AsyncIO channel object.
+ Args:
+ host (Optional[str]): The host for the channel to use.
+ credentials (Optional[~.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify this application to the service. If
+ none are specified, the client will attempt to ascertain
+ the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+ scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
+ channel creation.
+ Returns:
+ aio.Channel: A gRPC AsyncIO channel object.
+ """
+
+ return grpc_helpers_async.create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ quota_project_id=quota_project_id,
+ default_scopes=cls.AUTH_SCOPES,
+ scopes=scopes,
+ default_host=cls.DEFAULT_HOST,
+ **kwargs
+ )
+
+ def __init__(self, *,
+ host: str = 'aiplatform.googleapis.com',
+ credentials: ga_credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ channel: aio.Channel = None,
+ api_mtls_endpoint: str = None,
+ client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+ ssl_channel_credentials: grpc.ChannelCredentials = None,
+ client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ quota_project_id=None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]):
+ The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+ scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ channel (Optional[aio.Channel]): A ``Channel`` instance through
+ which to make calls.
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+ If provided, it overrides the ``host`` argument and tries to create
+ a mutual TLS channel with client SSL credentials from
+ ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated.
A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+ for grpc channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ A callback to provide client certificate bytes and private key bytes,
+ both in PEM format. It is used to configure a mutual TLS channel. It is
+ ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+ always_use_jwt_access (Optional[bool]): Whether a self-signed JWT should
+ be used for service account credentials.
+
+ Raises:
+ google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
+ creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ self._grpc_channel = None
+ self._ssl_channel_credentials = ssl_channel_credentials
+ self._stubs: Dict[str, Callable] = {}
+ self._operations_client = None
+
+ if api_mtls_endpoint:
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+ if client_cert_source:
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+ if channel:
+ # Ignore credentials if a channel was passed.
+ credentials = False
+ # If a channel was explicitly provided, set it.
+ self._grpc_channel = channel
+ self._ssl_channel_credentials = None
+ else:
+ if api_mtls_endpoint:
+ host = api_mtls_endpoint
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
+ if client_cert_source:
+ cert, key = client_cert_source()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+ else:
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
+ cert, key = client_cert_source_for_mtls()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+
+ # The base transport sets the host, credentials and scopes
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ always_use_jwt_access=always_use_jwt_access,
+ )
+
+ if not self._grpc_channel:
+ self._grpc_channel = type(self).create_channel(
+ self._host,
+ credentials=self._credentials,
+ credentials_file=credentials_file,
+ scopes=self._scopes,
+ ssl_credentials=self._ssl_channel_credentials,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Wrap messages. This must be done after self._grpc_channel exists
+ self._prep_wrapped_messages(client_info)
+
+ @property
+ def grpc_channel(self) -> aio.Channel:
+ """Create the channel designed to connect to this service.
+
+ This property caches on the instance; repeated calls return
+ the same channel.
+ """
+ # Return the channel from cache.
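+ # Because the channel is created once in ``__init__`` and cached here,
+ # every stub property on this transport shares a single HTTP/2
+ # connection. A hedged usage sketch (``creds`` is a placeholder
+ # google.auth credentials object):
+ #
+ #     channel = JobServiceGrpcAsyncIOTransport.create_channel(
+ #         "aiplatform.googleapis.com", credentials=creds)
+ #     transport = JobServiceGrpcAsyncIOTransport(channel=channel)
+ #     assert transport.grpc_channel is channel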
+ return self._grpc_channel
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsAsyncClient:
+ """Create the client designed to process long-running operations.
+
+ This property caches on the instance; repeated calls return the same
+ client.
+ """
+ # Sanity check: Only create a new client if we do not already have one.
+ if self._operations_client is None:
+ self._operations_client = operations_v1.OperationsAsyncClient(
+ self.grpc_channel
+ )
+
+ # Return the client from cache.
+ return self._operations_client
+
+ @property
+ def create_custom_job(self) -> Callable[
+ [job_service.CreateCustomJobRequest],
+ Awaitable[gca_custom_job.CustomJob]]:
+ r"""Return a callable for the create custom job method over gRPC.
+
+ Creates a CustomJob. The service immediately attempts
+ to run a newly created CustomJob.
+
+ Returns:
+ Callable[[~.CreateCustomJobRequest],
+ Awaitable[~.CustomJob]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if 'create_custom_job' not in self._stubs:
+ self._stubs['create_custom_job'] = self.grpc_channel.unary_unary(
+ '/google.cloud.aiplatform.v1.JobService/CreateCustomJob',
+ request_serializer=job_service.CreateCustomJobRequest.serialize,
+ response_deserializer=gca_custom_job.CustomJob.deserialize,
+ )
+ return self._stubs['create_custom_job']
+
+ @property
+ def get_custom_job(self) -> Callable[
+ [job_service.GetCustomJobRequest],
+ Awaitable[custom_job.CustomJob]]:
+ r"""Return a callable for the get custom job method over gRPC.
+
+ Gets a CustomJob.
+
+ Returns:
+ Callable[[~.GetCustomJobRequest],
+ Awaitable[~.CustomJob]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if 'get_custom_job' not in self._stubs:
+ self._stubs['get_custom_job'] = self.grpc_channel.unary_unary(
+ '/google.cloud.aiplatform.v1.JobService/GetCustomJob',
+ request_serializer=job_service.GetCustomJobRequest.serialize,
+ response_deserializer=custom_job.CustomJob.deserialize,
+ )
+ return self._stubs['get_custom_job']
+
+ @property
+ def list_custom_jobs(self) -> Callable[
+ [job_service.ListCustomJobsRequest],
+ Awaitable[job_service.ListCustomJobsResponse]]:
+ r"""Return a callable for the list custom jobs method over gRPC.
+
+ Lists CustomJobs in a Location.
+
+ Returns:
+ Callable[[~.ListCustomJobsRequest],
+ Awaitable[~.ListCustomJobsResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
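+ # Unlike the sync transport, each callable returned by this class is a
+ # gRPC ``aio`` multicallable whose invocation must be awaited. A hedged
+ # sketch, assuming an already constructed ``transport`` and running
+ # inside a coroutine (the project and location are illustrative):
+ #
+ #     request = job_service.ListCustomJobsRequest(
+ #         parent="projects/my-project/locations/us-central1")
+ #     response = await transport.list_custom_jobs(request)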
+ if 'list_custom_jobs' not in self._stubs: + self._stubs['list_custom_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/ListCustomJobs', + request_serializer=job_service.ListCustomJobsRequest.serialize, + response_deserializer=job_service.ListCustomJobsResponse.deserialize, + ) + return self._stubs['list_custom_jobs'] + + @property + def delete_custom_job(self) -> Callable[ + [job_service.DeleteCustomJobRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete custom job method over gRPC. + + Deletes a CustomJob. + + Returns: + Callable[[~.DeleteCustomJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_custom_job' not in self._stubs: + self._stubs['delete_custom_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/DeleteCustomJob', + request_serializer=job_service.DeleteCustomJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_custom_job'] + + @property + def cancel_custom_job(self) -> Callable[ + [job_service.CancelCustomJobRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the cancel custom job method over gRPC. + + Cancels a CustomJob. Starts asynchronous cancellation on the + CustomJob. The server makes a best effort to cancel the job, but + success is not guaranteed. Clients can use + [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On successful + cancellation, the CustomJob is not deleted; instead it becomes a + job with a + [CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is + set to ``CANCELLED``. + + Returns: + Callable[[~.CancelCustomJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_custom_job' not in self._stubs: + self._stubs['cancel_custom_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/CancelCustomJob', + request_serializer=job_service.CancelCustomJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_custom_job'] + + @property + def create_data_labeling_job(self) -> Callable[ + [job_service.CreateDataLabelingJobRequest], + Awaitable[gca_data_labeling_job.DataLabelingJob]]: + r"""Return a callable for the create data labeling job method over gRPC. + + Creates a DataLabelingJob. + + Returns: + Callable[[~.CreateDataLabelingJobRequest], + Awaitable[~.DataLabelingJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
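+ # Two deserializer conventions appear throughout these stubs: proto-plus
+ # message types expose ``.serialize``/``.deserialize`` hooks, while plain
+ # protobuf well-known types (``Empty``, ``Operation``) are decoded with
+ # their generated ``FromString`` classmethod, e.g.:
+ #
+ #     response_deserializer=empty_pb2.Empty.FromString
+ #     response_deserializer=operations_pb2.Operation.FromString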
+ if 'create_data_labeling_job' not in self._stubs: + self._stubs['create_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/CreateDataLabelingJob', + request_serializer=job_service.CreateDataLabelingJobRequest.serialize, + response_deserializer=gca_data_labeling_job.DataLabelingJob.deserialize, + ) + return self._stubs['create_data_labeling_job'] + + @property + def get_data_labeling_job(self) -> Callable[ + [job_service.GetDataLabelingJobRequest], + Awaitable[data_labeling_job.DataLabelingJob]]: + r"""Return a callable for the get data labeling job method over gRPC. + + Gets a DataLabelingJob. + + Returns: + Callable[[~.GetDataLabelingJobRequest], + Awaitable[~.DataLabelingJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_data_labeling_job' not in self._stubs: + self._stubs['get_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/GetDataLabelingJob', + request_serializer=job_service.GetDataLabelingJobRequest.serialize, + response_deserializer=data_labeling_job.DataLabelingJob.deserialize, + ) + return self._stubs['get_data_labeling_job'] + + @property + def list_data_labeling_jobs(self) -> Callable[ + [job_service.ListDataLabelingJobsRequest], + Awaitable[job_service.ListDataLabelingJobsResponse]]: + r"""Return a callable for the list data labeling jobs method over gRPC. + + Lists DataLabelingJobs in a Location. + + Returns: + Callable[[~.ListDataLabelingJobsRequest], + Awaitable[~.ListDataLabelingJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_data_labeling_jobs' not in self._stubs: + self._stubs['list_data_labeling_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/ListDataLabelingJobs', + request_serializer=job_service.ListDataLabelingJobsRequest.serialize, + response_deserializer=job_service.ListDataLabelingJobsResponse.deserialize, + ) + return self._stubs['list_data_labeling_jobs'] + + @property + def delete_data_labeling_job(self) -> Callable[ + [job_service.DeleteDataLabelingJobRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete data labeling job method over gRPC. + + Deletes a DataLabelingJob. + + Returns: + Callable[[~.DeleteDataLabelingJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_data_labeling_job' not in self._stubs:
+ self._stubs['delete_data_labeling_job'] = self.grpc_channel.unary_unary(
+ '/google.cloud.aiplatform.v1.JobService/DeleteDataLabelingJob',
+ request_serializer=job_service.DeleteDataLabelingJobRequest.serialize,
+ response_deserializer=operations_pb2.Operation.FromString,
+ )
+ return self._stubs['delete_data_labeling_job']
+
+ @property
+ def cancel_data_labeling_job(self) -> Callable[
+ [job_service.CancelDataLabelingJobRequest],
+ Awaitable[empty_pb2.Empty]]:
+ r"""Return a callable for the cancel data labeling job method over gRPC.
+
+ Cancels a DataLabelingJob. Success of cancellation is
+ not guaranteed.
+
+ Returns:
+ Callable[[~.CancelDataLabelingJobRequest],
+ Awaitable[~.Empty]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if 'cancel_data_labeling_job' not in self._stubs:
+ self._stubs['cancel_data_labeling_job'] = self.grpc_channel.unary_unary(
+ '/google.cloud.aiplatform.v1.JobService/CancelDataLabelingJob',
+ request_serializer=job_service.CancelDataLabelingJobRequest.serialize,
+ response_deserializer=empty_pb2.Empty.FromString,
+ )
+ return self._stubs['cancel_data_labeling_job']
+
+ @property
+ def create_hyperparameter_tuning_job(self) -> Callable[
+ [job_service.CreateHyperparameterTuningJobRequest],
+ Awaitable[gca_hyperparameter_tuning_job.HyperparameterTuningJob]]:
+ r"""Return a callable for the create hyperparameter tuning
+ job method over gRPC.
+
+ Creates a HyperparameterTuningJob.
+
+ Returns:
+ Callable[[~.CreateHyperparameterTuningJobRequest],
+ Awaitable[~.HyperparameterTuningJob]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if 'create_hyperparameter_tuning_job' not in self._stubs:
+ self._stubs['create_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary(
+ '/google.cloud.aiplatform.v1.JobService/CreateHyperparameterTuningJob',
+ request_serializer=job_service.CreateHyperparameterTuningJobRequest.serialize,
+ response_deserializer=gca_hyperparameter_tuning_job.HyperparameterTuningJob.deserialize,
+ )
+ return self._stubs['create_hyperparameter_tuning_job']
+
+ @property
+ def get_hyperparameter_tuning_job(self) -> Callable[
+ [job_service.GetHyperparameterTuningJobRequest],
+ Awaitable[hyperparameter_tuning_job.HyperparameterTuningJob]]:
+ r"""Return a callable for the get hyperparameter tuning job method over gRPC.
+
+ Gets a HyperparameterTuningJob.
+
+ Returns:
+ Callable[[~.GetHyperparameterTuningJobRequest],
+ Awaitable[~.HyperparameterTuningJob]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if 'get_hyperparameter_tuning_job' not in self._stubs: + self._stubs['get_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/GetHyperparameterTuningJob', + request_serializer=job_service.GetHyperparameterTuningJobRequest.serialize, + response_deserializer=hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, + ) + return self._stubs['get_hyperparameter_tuning_job'] + + @property + def list_hyperparameter_tuning_jobs(self) -> Callable[ + [job_service.ListHyperparameterTuningJobsRequest], + Awaitable[job_service.ListHyperparameterTuningJobsResponse]]: + r"""Return a callable for the list hyperparameter tuning + jobs method over gRPC. + + Lists HyperparameterTuningJobs in a Location. + + Returns: + Callable[[~.ListHyperparameterTuningJobsRequest], + Awaitable[~.ListHyperparameterTuningJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_hyperparameter_tuning_jobs' not in self._stubs: + self._stubs['list_hyperparameter_tuning_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/ListHyperparameterTuningJobs', + request_serializer=job_service.ListHyperparameterTuningJobsRequest.serialize, + response_deserializer=job_service.ListHyperparameterTuningJobsResponse.deserialize, + ) + return self._stubs['list_hyperparameter_tuning_jobs'] + + @property + def delete_hyperparameter_tuning_job(self) -> Callable[ + [job_service.DeleteHyperparameterTuningJobRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete hyperparameter tuning + job method over gRPC. + + Deletes a HyperparameterTuningJob. + + Returns: + Callable[[~.DeleteHyperparameterTuningJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_hyperparameter_tuning_job' not in self._stubs: + self._stubs['delete_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/DeleteHyperparameterTuningJob', + request_serializer=job_service.DeleteHyperparameterTuningJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_hyperparameter_tuning_job'] + + @property + def cancel_hyperparameter_tuning_job(self) -> Callable[ + [job_service.CancelHyperparameterTuningJobRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the cancel hyperparameter tuning + job method over gRPC. + + Cancels a HyperparameterTuningJob. Starts asynchronous + cancellation on the HyperparameterTuningJob. The server makes a + best effort to cancel the job, but success is not guaranteed. + Clients can use + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. 
On successful
+ cancellation, the HyperparameterTuningJob is not deleted;
+ instead it becomes a job with a
+ [HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error]
+ value with a [google.rpc.Status.code][google.rpc.Status.code] of
+ 1, corresponding to ``Code.CANCELLED``, and
+ [HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state]
+ is set to ``CANCELLED``.
+
+ Returns:
+ Callable[[~.CancelHyperparameterTuningJobRequest],
+ Awaitable[~.Empty]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if 'cancel_hyperparameter_tuning_job' not in self._stubs:
+ self._stubs['cancel_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary(
+ '/google.cloud.aiplatform.v1.JobService/CancelHyperparameterTuningJob',
+ request_serializer=job_service.CancelHyperparameterTuningJobRequest.serialize,
+ response_deserializer=empty_pb2.Empty.FromString,
+ )
+ return self._stubs['cancel_hyperparameter_tuning_job']
+
+ @property
+ def create_batch_prediction_job(self) -> Callable[
+ [job_service.CreateBatchPredictionJobRequest],
+ Awaitable[gca_batch_prediction_job.BatchPredictionJob]]:
+ r"""Return a callable for the create batch prediction job method over gRPC.
+
+ Creates a BatchPredictionJob. The service immediately
+ attempts to start a newly created BatchPredictionJob.
+
+ Returns:
+ Callable[[~.CreateBatchPredictionJobRequest],
+ Awaitable[~.BatchPredictionJob]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if 'create_batch_prediction_job' not in self._stubs:
+ self._stubs['create_batch_prediction_job'] = self.grpc_channel.unary_unary(
+ '/google.cloud.aiplatform.v1.JobService/CreateBatchPredictionJob',
+ request_serializer=job_service.CreateBatchPredictionJobRequest.serialize,
+ response_deserializer=gca_batch_prediction_job.BatchPredictionJob.deserialize,
+ )
+ return self._stubs['create_batch_prediction_job']
+
+ @property
+ def get_batch_prediction_job(self) -> Callable[
+ [job_service.GetBatchPredictionJobRequest],
+ Awaitable[batch_prediction_job.BatchPredictionJob]]:
+ r"""Return a callable for the get batch prediction job method over gRPC.
+
+ Gets a BatchPredictionJob.
+
+ Returns:
+ Callable[[~.GetBatchPredictionJobRequest],
+ Awaitable[~.BatchPredictionJob]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
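+ # The async transport mirrors the sync transport stub for stub; only the
+ # return side of each type signature changes, wrapping every response in
+ # ``Awaitable``. Shown side by side for comparison (types abbreviated):
+ #
+ #     sync:  Callable[[GetBatchPredictionJobRequest], BatchPredictionJob]
+ #     async: Callable[[GetBatchPredictionJobRequest], Awaitable[BatchPredictionJob]]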
+ if 'get_batch_prediction_job' not in self._stubs:
+ self._stubs['get_batch_prediction_job'] = self.grpc_channel.unary_unary(
+ '/google.cloud.aiplatform.v1.JobService/GetBatchPredictionJob',
+ request_serializer=job_service.GetBatchPredictionJobRequest.serialize,
+ response_deserializer=batch_prediction_job.BatchPredictionJob.deserialize,
+ )
+ return self._stubs['get_batch_prediction_job']
+
+ @property
+ def list_batch_prediction_jobs(self) -> Callable[
+ [job_service.ListBatchPredictionJobsRequest],
+ Awaitable[job_service.ListBatchPredictionJobsResponse]]:
+ r"""Return a callable for the list batch prediction jobs method over gRPC.
+
+ Lists BatchPredictionJobs in a Location.
+
+ Returns:
+ Callable[[~.ListBatchPredictionJobsRequest],
+ Awaitable[~.ListBatchPredictionJobsResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if 'list_batch_prediction_jobs' not in self._stubs:
+ self._stubs['list_batch_prediction_jobs'] = self.grpc_channel.unary_unary(
+ '/google.cloud.aiplatform.v1.JobService/ListBatchPredictionJobs',
+ request_serializer=job_service.ListBatchPredictionJobsRequest.serialize,
+ response_deserializer=job_service.ListBatchPredictionJobsResponse.deserialize,
+ )
+ return self._stubs['list_batch_prediction_jobs']
+
+ @property
+ def delete_batch_prediction_job(self) -> Callable[
+ [job_service.DeleteBatchPredictionJobRequest],
+ Awaitable[operations_pb2.Operation]]:
+ r"""Return a callable for the delete batch prediction job method over gRPC.
+
+ Deletes a BatchPredictionJob. Can only be called on
+ jobs that have already finished.
+
+ Returns:
+ Callable[[~.DeleteBatchPredictionJobRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if 'delete_batch_prediction_job' not in self._stubs:
+ self._stubs['delete_batch_prediction_job'] = self.grpc_channel.unary_unary(
+ '/google.cloud.aiplatform.v1.JobService/DeleteBatchPredictionJob',
+ request_serializer=job_service.DeleteBatchPredictionJobRequest.serialize,
+ response_deserializer=operations_pb2.Operation.FromString,
+ )
+ return self._stubs['delete_batch_prediction_job']
+
+ @property
+ def cancel_batch_prediction_job(self) -> Callable[
+ [job_service.CancelBatchPredictionJobRequest],
+ Awaitable[empty_pb2.Empty]]:
+ r"""Return a callable for the cancel batch prediction job method over gRPC.
+
+ Cancels a BatchPredictionJob.
+
+ Starts asynchronous cancellation on the BatchPredictionJob. The
+ server makes the best effort to cancel the job, but success is
+ not guaranteed. Clients can use
+ [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob]
+ or other methods to check whether the cancellation succeeded or
+ whether the job completed despite cancellation. On a successful
+ cancellation, the BatchPredictionJob is not deleted; instead its
+ [BatchPredictionJob.state][google.cloud.aiplatform.v1.BatchPredictionJob.state]
+ is set to ``CANCELLED``. Any files already outputted by the job
+ are not deleted.
+ + Returns: + Callable[[~.CancelBatchPredictionJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_batch_prediction_job' not in self._stubs: + self._stubs['cancel_batch_prediction_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.JobService/CancelBatchPredictionJob', + request_serializer=job_service.CancelBatchPredictionJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_batch_prediction_job'] + + +__all__ = ( + 'JobServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/__init__.py new file mode 100644 index 0000000000..b32b10b1d7 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import MigrationServiceClient +from .async_client import MigrationServiceAsyncClient + +__all__ = ( + 'MigrationServiceClient', + 'MigrationServiceAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/async_client.py new file mode 100644 index 0000000000..c2d497872e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/async_client.py @@ -0,0 +1,376 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.migration_service import pagers +from google.cloud.aiplatform_v1.types import migratable_resource +from google.cloud.aiplatform_v1.types import migration_service +from .transports.base import MigrationServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import MigrationServiceGrpcAsyncIOTransport +from .client import MigrationServiceClient + + +class MigrationServiceAsyncClient: + """A service that migrates resources from automl.googleapis.com, + datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. + """ + + _client: MigrationServiceClient + + DEFAULT_ENDPOINT = MigrationServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = MigrationServiceClient.DEFAULT_MTLS_ENDPOINT + + annotated_dataset_path = staticmethod(MigrationServiceClient.annotated_dataset_path) + parse_annotated_dataset_path = staticmethod(MigrationServiceClient.parse_annotated_dataset_path) + dataset_path = staticmethod(MigrationServiceClient.dataset_path) + parse_dataset_path = staticmethod(MigrationServiceClient.parse_dataset_path) + dataset_path = staticmethod(MigrationServiceClient.dataset_path) + parse_dataset_path = staticmethod(MigrationServiceClient.parse_dataset_path) + dataset_path = staticmethod(MigrationServiceClient.dataset_path) + parse_dataset_path = staticmethod(MigrationServiceClient.parse_dataset_path) + model_path = staticmethod(MigrationServiceClient.model_path) + parse_model_path = staticmethod(MigrationServiceClient.parse_model_path) + model_path = staticmethod(MigrationServiceClient.model_path) + parse_model_path = staticmethod(MigrationServiceClient.parse_model_path) + version_path = staticmethod(MigrationServiceClient.version_path) + parse_version_path = staticmethod(MigrationServiceClient.parse_version_path) + common_billing_account_path = staticmethod(MigrationServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(MigrationServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(MigrationServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(MigrationServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(MigrationServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(MigrationServiceClient.parse_common_organization_path) + common_project_path = staticmethod(MigrationServiceClient.common_project_path) + parse_common_project_path = staticmethod(MigrationServiceClient.parse_common_project_path) + common_location_path = staticmethod(MigrationServiceClient.common_location_path) + parse_common_location_path = staticmethod(MigrationServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. 
+ + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MigrationServiceAsyncClient: The constructed client. + """ + return MigrationServiceClient.from_service_account_info.__func__(MigrationServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MigrationServiceAsyncClient: The constructed client. + """ + return MigrationServiceClient.from_service_account_file.__func__(MigrationServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> MigrationServiceTransport: + """Returns the transport used by the client instance. + + Returns: + MigrationServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(MigrationServiceClient).get_transport_class, type(MigrationServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, MigrationServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the migration service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.MigrationServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + self._client = MigrationServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def search_migratable_resources(self, + request: migration_service.SearchMigratableResourcesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.SearchMigratableResourcesAsyncPager: + r"""Searches all of the resources in + automl.googleapis.com, datalabeling.googleapis.com and + ml.googleapis.com that can be migrated to Vertex AI's + given location. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest`): + The request object. Request message for + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources]. + parent (:class:`str`): + Required. The location that the migratable resources + should be searched from. It's the Vertex AI location + that the resources can be migrated to, not the + resources' original location. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.migration_service.pagers.SearchMigratableResourcesAsyncPager: + Response message for + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = migration_service.SearchMigratableResourcesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.search_migratable_resources, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.SearchMigratableResourcesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
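+ # A hedged consumption sketch for the pager returned below (the
+ # ``client`` and parent path are illustrative):
+ #
+ #     pager = await client.search_migratable_resources(
+ #         parent="projects/my-project/locations/us-central1")
+ #     async for resource in pager:
+ #         print(resource)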
+ return response
+
+ async def batch_migrate_resources(self,
+ request: migration_service.BatchMigrateResourcesRequest = None,
+ *,
+ parent: str = None,
+ migrate_resource_requests: Sequence[migration_service.MigrateResourceRequest] = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Batch migrates resources from ml.googleapis.com,
+ automl.googleapis.com, and datalabeling.googleapis.com
+ to Vertex AI.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1.types.BatchMigrateResourcesRequest`):
+ The request object. Request message for
+ [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources].
+ parent (:class:`str`):
+ Required. The location that the migrated resources will
+ live in. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ migrate_resource_requests (:class:`Sequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest]`):
+ Required. The request messages
+ specifying the resources to migrate.
+ They must be in the same location as the
+ destination. Up to 50 resources can be
+ migrated in one batch.
+
+ This corresponds to the ``migrate_resource_requests`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.aiplatform_v1.types.BatchMigrateResourcesResponse`
+ Response message for
+ [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources].
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, migrate_resource_requests])
+ if request is not None and has_flattened_params:
+ raise ValueError("If the `request` argument is set, then none of "
+ "the individual field arguments should be set.")
+
+ request = migration_service.BatchMigrateResourcesRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if migrate_resource_requests:
+ request.migrate_resource_requests.extend(migrate_resource_requests)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.batch_migrate_resources,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("parent", request.parent),
+ )),
+ )
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
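+ # ``operation_async.from_gapic`` below wraps the raw long-running
+ # Operation in an awaitable future. A hedged caller-side sketch
+ # (``parent`` and ``reqs`` are illustrative):
+ #
+ #     lro = await client.batch_migrate_resources(
+ #         parent=parent, migrate_resource_requests=reqs)
+ #     result = await lro.result()  # BatchMigrateResourcesResponse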
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + migration_service.BatchMigrateResourcesResponse, + metadata_type=migration_service.BatchMigrateResourcesOperationMetadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "MigrationServiceAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/client.py new file mode 100644 index 0000000000..0c288329ab --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/client.py @@ -0,0 +1,617 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.migration_service import pagers +from google.cloud.aiplatform_v1.types import migratable_resource +from google.cloud.aiplatform_v1.types import migration_service +from .transports.base import MigrationServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import MigrationServiceGrpcTransport +from .transports.grpc_asyncio import MigrationServiceGrpcAsyncIOTransport + + +class MigrationServiceClientMeta(type): + """Metaclass for the MigrationService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[MigrationServiceTransport]] + _transport_registry["grpc"] = MigrationServiceGrpcTransport + _transport_registry["grpc_asyncio"] = MigrationServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[MigrationServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. 
If none is
+ provided, then the first transport in the registry is used.
+
+ Returns:
+ The transport class to use.
+ """
+ # If a specific transport is requested, return that one.
+ if label:
+ return cls._transport_registry[label]
+
+ # No transport is requested; return the default (that is, the first one
+ # in the dictionary).
+ return next(iter(cls._transport_registry.values()))
+
+
+class MigrationServiceClient(metaclass=MigrationServiceClientMeta):
+ """A service that migrates resources from automl.googleapis.com,
+ datalabeling.googleapis.com and ml.googleapis.com to Vertex AI.
+ """
+
+ @staticmethod
+ def _get_default_mtls_endpoint(api_endpoint):
+ """Converts api endpoint to mTLS endpoint.
+
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+ Args:
+ api_endpoint (Optional[str]): the api endpoint to convert.
+ Returns:
+ str: converted mTLS api endpoint.
+ """
+ if not api_endpoint:
+ return api_endpoint
+
+ mtls_endpoint_re = re.compile(
+ r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ )
+
+ m = mtls_endpoint_re.match(api_endpoint)
+ name, mtls, sandbox, googledomain = m.groups()
+ if mtls or not googledomain:
+ return api_endpoint
+
+ if sandbox:
+ return api_endpoint.replace(
+ "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+ )
+
+ return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+ DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+ DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
+ DEFAULT_ENDPOINT
+ )
+
+ @classmethod
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ info.
+
+ Args:
+ info (dict): The service account private key info.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ MigrationServiceClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_info(info)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ @classmethod
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ file.
+
+ Args:
+ filename (str): The path to the service account private key json
+ file.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ MigrationServiceClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_file(
+ filename)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ from_service_account_json = from_service_account_file
+
+ @property
+ def transport(self) -> MigrationServiceTransport:
+ """Returns the transport used by the client instance.
+
+ Returns:
+ MigrationServiceTransport: The transport used by the client
+ instance.
+ """
+ return self._transport
+
+ @staticmethod
+ def annotated_dataset_path(project: str,dataset: str,annotated_dataset: str,) -> str:
+ """Returns a fully-qualified annotated_dataset string."""
+ return "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}".format(project=project, dataset=dataset, annotated_dataset=annotated_dataset, )
+
+ @staticmethod
+ def parse_annotated_dataset_path(path: str) -> Dict[str,str]:
+ """Parses an annotated_dataset path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)/annotatedDatasets/(?P<annotated_dataset>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def dataset_path(project: str,location: str,dataset: str,) -> str:
+ """Returns a fully-qualified dataset string."""
+ return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, )
+
+ @staticmethod
+ def parse_dataset_path(path: str) -> Dict[str,str]:
+ """Parses a dataset path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def dataset_path(project: str,location: str,dataset: str,) -> str:
+ """Returns a fully-qualified dataset string."""
+ return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, )
+
+ @staticmethod
+ def parse_dataset_path(path: str) -> Dict[str,str]:
+ """Parses a dataset path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def dataset_path(project: str,dataset: str,) -> str:
+ """Returns a fully-qualified dataset string."""
+ return "projects/{project}/datasets/{dataset}".format(project=project, dataset=dataset, )
+
+ @staticmethod
+ def parse_dataset_path(path: str) -> Dict[str,str]:
+ """Parses a dataset path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def model_path(project: str,location: str,model: str,) -> str:
+ """Returns a fully-qualified model string."""
+ return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, )
+
+ @staticmethod
+ def parse_model_path(path: str) -> Dict[str,str]:
+ """Parses a model path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def model_path(project: str,location: str,model: str,) -> str:
+ """Returns a fully-qualified model string."""
+ return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, )
+
+ @staticmethod
+ def parse_model_path(path: str) -> Dict[str,str]:
+ """Parses a model path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def version_path(project: str,model: str,version: str,) -> str:
+ """Returns a fully-qualified version string."""
+ return "projects/{project}/models/{model}/versions/{version}".format(project=project, model=model, version=version, )
+
+ @staticmethod
+ def parse_version_path(path: str) -> Dict[str,str]:
+ """Parses a version path into its component segments."""
+ m =
re.match(r"^projects/(?P<project>.+?)/models/(?P<model>.+?)/versions/(?P<version>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse an organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, MigrationServiceTransport, None] = None,
+            client_options: Optional[client_options_lib.ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the migration service client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, MigrationServiceTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client.
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, MigrationServiceTransport): + # transport is a MigrationServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def search_migratable_resources(self, + request: migration_service.SearchMigratableResourcesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.SearchMigratableResourcesPager: + r"""Searches all of the resources in + automl.googleapis.com, datalabeling.googleapis.com and + ml.googleapis.com that can be migrated to Vertex AI's + given location. + + Args: + request (google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest): + The request object. Request message for + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources]. + parent (str): + Required. The location that the migratable resources + should be searched from. It's the Vertex AI location + that the resources can be migrated to, not the + resources' original location. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.migration_service.pagers.SearchMigratableResourcesPager: + Response message for + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a migration_service.SearchMigratableResourcesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, migration_service.SearchMigratableResourcesRequest): + request = migration_service.SearchMigratableResourcesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.search_migratable_resources] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
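+        # A note on the metadata assembled above (illustrative; the value
+        # shown is hypothetical): ``gapic_v1.routing_header.to_grpc_metadata``
+        # emits an ``x-goog-request-params`` entry that the backend uses for
+        # request routing, e.g.:
+        #
+        #   ("x-goog-request-params", "parent=projects/my-project/locations/us-central1")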
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # This method is paged; wrap the response in a pager, which provides
+        # an `__iter__` convenience method.
+        response = pagers.SearchMigratableResourcesPager(
+            method=rpc,
+            request=request,
+            response=response,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def batch_migrate_resources(self,
+            request: migration_service.BatchMigrateResourcesRequest = None,
+            *,
+            parent: str = None,
+            migrate_resource_requests: Sequence[migration_service.MigrateResourceRequest] = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> operation.Operation:
+        r"""Batch migrates resources from ml.googleapis.com,
+        automl.googleapis.com, and datalabeling.googleapis.com
+        to Vertex AI.
+
+        Args:
+            request (google.cloud.aiplatform_v1.types.BatchMigrateResourcesRequest):
+                The request object. Request message for
+                [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources].
+            parent (str):
+                Required. The location the migrated resources will
+                live in. Format:
+                ``projects/{project}/locations/{location}``
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            migrate_resource_requests (Sequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest]):
+                Required. The request messages
+                specifying the resources to migrate.
+                They must be in the same location as the
+                destination. Up to 50 resources can be
+                migrated in one batch.
+
+                This corresponds to the ``migrate_resource_requests`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`google.cloud.aiplatform_v1.types.BatchMigrateResourcesResponse`
+                Response message for
+                [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources].
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent, migrate_resource_requests])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a migration_service.BatchMigrateResourcesRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, migration_service.BatchMigrateResourcesRequest):
+            request = migration_service.BatchMigrateResourcesRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+ if parent is not None: + request.parent = parent + if migrate_resource_requests is not None: + request.migrate_resource_requests = migrate_resource_requests + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_migrate_resources] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + migration_service.BatchMigrateResourcesResponse, + metadata_type=migration_service.BatchMigrateResourcesOperationMetadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "MigrationServiceClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/pagers.py new file mode 100644 index 0000000000..b173c77431 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/pagers.py @@ -0,0 +1,141 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.cloud.aiplatform_v1.types import migratable_resource +from google.cloud.aiplatform_v1.types import migration_service + + +class SearchMigratableResourcesPager: + """A pager for iterating through ``search_migratable_resources`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``migratable_resources`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``SearchMigratableResources`` requests and continue to iterate + through the ``migratable_resources`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
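+
+    A minimal usage sketch (assuming an initialized ``MigrationServiceClient``;
+    the project and location below are placeholders)::
+
+        client = MigrationServiceClient()
+        pager = client.search_migratable_resources(
+            parent="projects/my-project/locations/us-central1",
+        )
+        for resource in pager:
+            print(resource.last_migrate_time)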
+ """ + def __init__(self, + method: Callable[..., migration_service.SearchMigratableResourcesResponse], + request: migration_service.SearchMigratableResourcesRequest, + response: migration_service.SearchMigratableResourcesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = migration_service.SearchMigratableResourcesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[migration_service.SearchMigratableResourcesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[migratable_resource.MigratableResource]: + for page in self.pages: + yield from page.migratable_resources + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class SearchMigratableResourcesAsyncPager: + """A pager for iterating through ``search_migratable_resources`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``migratable_resources`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``SearchMigratableResources`` requests and continue to iterate + through the ``migratable_resources`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[migration_service.SearchMigratableResourcesResponse]], + request: migration_service.SearchMigratableResourcesRequest, + response: migration_service.SearchMigratableResourcesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = migration_service.SearchMigratableResourcesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[migration_service.SearchMigratableResourcesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[migratable_resource.MigratableResource]: + async def async_generator(): + async for page in self.pages: + for response in page.migratable_resources: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/__init__.py new file mode 100644 index 0000000000..8f036c410e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import MigrationServiceTransport +from .grpc import MigrationServiceGrpcTransport +from .grpc_asyncio import MigrationServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[MigrationServiceTransport]] +_transport_registry['grpc'] = MigrationServiceGrpcTransport +_transport_registry['grpc_asyncio'] = MigrationServiceGrpcAsyncIOTransport + +__all__ = ( + 'MigrationServiceTransport', + 'MigrationServiceGrpcTransport', + 'MigrationServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/base.py new file mode 100644 index 0000000000..054992b551 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/base.py @@ -0,0 +1,189 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1.types import migration_service +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class MigrationServiceTransport(abc.ABC): + """Abstract transport class for MigrationService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. 
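+        # Resolution order, summarizing the branches below: an explicit
+        # ``credentials`` object wins; otherwise ``credentials_file`` is
+        # loaded; otherwise Application Default Credentials are used.
+        # Supplying both ``credentials`` and ``credentials_file`` raises.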
+ if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials is service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # TODO(busunkim): This method is in the base transport + # to avoid duplicating code across the transport classes. These functions + # should be deleted once the minimum required versions of google-auth is increased. + + # TODO: Remove this function once google-auth >= 1.25.0 is required + @classmethod + def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: + """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" + + scopes_kwargs = {} + + if _GOOGLE_AUTH_VERSION and ( + packaging.version.parse(_GOOGLE_AUTH_VERSION) + >= packaging.version.parse("1.25.0") + ): + scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} + else: + scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} + + return scopes_kwargs + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.search_migratable_resources: gapic_v1.method.wrap_method( + self.search_migratable_resources, + default_timeout=None, + client_info=client_info, + ), + self.batch_migrate_resources: gapic_v1.method.wrap_method( + self.batch_migrate_resources, + default_timeout=None, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def search_migratable_resources(self) -> Callable[ + [migration_service.SearchMigratableResourcesRequest], + Union[ + migration_service.SearchMigratableResourcesResponse, + Awaitable[migration_service.SearchMigratableResourcesResponse] + ]]: + raise NotImplementedError() + + @property + def batch_migrate_resources(self) -> Callable[ + [migration_service.BatchMigrateResourcesRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'MigrationServiceTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py new file mode 100644 index 0000000000..f1eaa10238 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py @@ -0,0 +1,303 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import warnings
+from typing import Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import grpc_helpers  # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+import google.auth  # type: ignore
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+
+import grpc  # type: ignore
+
+from google.cloud.aiplatform_v1.types import migration_service
+from google.longrunning import operations_pb2  # type: ignore
+from .base import MigrationServiceTransport, DEFAULT_CLIENT_INFO
+
+
+class MigrationServiceGrpcTransport(MigrationServiceTransport):
+    """gRPC backend transport for MigrationService.
+
+    A service that migrates resources from automl.googleapis.com,
+    datalabeling.googleapis.com and ml.googleapis.com to Vertex AI.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+    _stubs: Dict[str, Callable]
+
+    def __init__(self, *,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: str = None,
+            scopes: Sequence[str] = None,
+            channel: grpc.Channel = None,
+            api_mtls_endpoint: str = None,
+            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+            ssl_channel_credentials: grpc.ChannelCredentials = None,
+            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+            quota_project_id: Optional[str] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format.
It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure a mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+
+        Raises:
+          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+              creation failed for any reason.
+          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+              and ``credentials_file`` are passed.
+        """
+        self._grpc_channel = None
+        self._ssl_channel_credentials = ssl_channel_credentials
+        self._stubs: Dict[str, Callable] = {}
+        self._operations_client = None
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Ignore credentials if a channel was passed.
+            credentials = False
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+
+        else:
+            if api_mtls_endpoint:
+                host = api_mtls_endpoint
+
+                # Create SSL credentials with client_cert_source or application
+                # default SSL credentials.
+                if client_cert_source:
+                    cert, key = client_cert_source()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+                else:
+                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+            else:
+                if client_cert_source_for_mtls and not ssl_channel_credentials:
+                    cert, key = client_cert_source_for_mtls()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+
+        # The base transport sets the host, credentials and scopes
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+        )
+
+        if not self._grpc_channel:
+            self._grpc_channel = type(self).create_channel(
+                self._host,
+                credentials=self._credentials,
+                credentials_file=credentials_file,
+                scopes=self._scopes,
+                ssl_credentials=self._ssl_channel_credentials,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        # Wrap messages. This must be done after self._grpc_channel exists
+        self._prep_wrapped_messages(client_info)
+
+    @classmethod
+    def create_channel(cls,
+                       host: str = 'aiplatform.googleapis.com',
+                       credentials: ga_credentials.Credentials = None,
+                       credentials_file: str = None,
+                       scopes: Optional[Sequence[str]] = None,
+                       quota_project_id: Optional[str] = None,
+                       **kwargs) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service.
+        """
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Sanity check: Only create a new client if we do not already have one.
+        if self._operations_client is None:
+            self._operations_client = operations_v1.OperationsClient(
+                self.grpc_channel
+            )
+
+        # Return the client from cache.
+        return self._operations_client
+
+    @property
+    def search_migratable_resources(self) -> Callable[
+            [migration_service.SearchMigratableResourcesRequest],
+            migration_service.SearchMigratableResourcesResponse]:
+        r"""Return a callable for the search migratable resources method over gRPC.
+
+        Searches all of the resources in
+        automl.googleapis.com, datalabeling.googleapis.com and
+        ml.googleapis.com that can be migrated to Vertex AI's
+        given location.
+
+        Returns:
+            Callable[[~.SearchMigratableResourcesRequest],
+                    ~.SearchMigratableResourcesResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'search_migratable_resources' not in self._stubs:
+            self._stubs['search_migratable_resources'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1.MigrationService/SearchMigratableResources',
+                request_serializer=migration_service.SearchMigratableResourcesRequest.serialize,
+                response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize,
+            )
+        return self._stubs['search_migratable_resources']
+
+    @property
+    def batch_migrate_resources(self) -> Callable[
+            [migration_service.BatchMigrateResourcesRequest],
+            operations_pb2.Operation]:
+        r"""Return a callable for the batch migrate resources method over gRPC.
+
+        Batch migrates resources from ml.googleapis.com,
+        automl.googleapis.com, and datalabeling.googleapis.com
+        to Vertex AI.
+ + Returns: + Callable[[~.BatchMigrateResourcesRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'batch_migrate_resources' not in self._stubs: + self._stubs['batch_migrate_resources'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MigrationService/BatchMigrateResources', + request_serializer=migration_service.BatchMigrateResourcesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['batch_migrate_resources'] + + +__all__ = ( + 'MigrationServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..15acd530c6 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py @@ -0,0 +1,307 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +import packaging.version + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import migration_service +from google.longrunning import operations_pb2 # type: ignore +from .base import MigrationServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import MigrationServiceGrpcTransport + + +class MigrationServiceGrpcAsyncIOTransport(MigrationServiceTransport): + """gRPC AsyncIO backend transport for MigrationService. + + A service that migrates resources from automl.googleapis.com, + datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. 
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    def __init__(self, *,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            channel: aio.Channel = None,
+            api_mtls_endpoint: str = None,
+            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+            ssl_channel_credentials: grpc.ChannelCredentials = None,
+            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+            quota_project_id=None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format.
It is used to configure a mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        self._grpc_channel = None
+        self._ssl_channel_credentials = ssl_channel_credentials
+        self._stubs: Dict[str, Callable] = {}
+        self._operations_client = None
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Ignore credentials if a channel was passed.
+            credentials = False
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+        else:
+            if api_mtls_endpoint:
+                host = api_mtls_endpoint
+
+                # Create SSL credentials with client_cert_source or application
+                # default SSL credentials.
+                if client_cert_source:
+                    cert, key = client_cert_source()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+                else:
+                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+            else:
+                if client_cert_source_for_mtls and not ssl_channel_credentials:
+                    cert, key = client_cert_source_for_mtls()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+
+        # The base transport sets the host, credentials and scopes
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+        )
+
+        if not self._grpc_channel:
+            self._grpc_channel = type(self).create_channel(
+                self._host,
+                credentials=self._credentials,
+                credentials_file=credentials_file,
+                scopes=self._scopes,
+                ssl_credentials=self._ssl_channel_credentials,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        # Wrap messages. This must be done after self._grpc_channel exists
+        self._prep_wrapped_messages(client_info)
+
+    @property
+    def grpc_channel(self) -> aio.Channel:
+        """Create the channel designed to connect to this service.
+
+        This property caches on the instance; repeated calls return
+        the same channel.
+        """
+        # Return the channel from cache.
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsAsyncClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Sanity check: Only create a new client if we do not already have one.
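+        # The cached client wraps this transport's channel, so long-running
+        # operations are polled over the same connection as regular calls.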
+ if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def search_migratable_resources(self) -> Callable[ + [migration_service.SearchMigratableResourcesRequest], + Awaitable[migration_service.SearchMigratableResourcesResponse]]: + r"""Return a callable for the search migratable resources method over gRPC. + + Searches all of the resources in + automl.googleapis.com, datalabeling.googleapis.com and + ml.googleapis.com that can be migrated to Vertex AI's + given location. + + Returns: + Callable[[~.SearchMigratableResourcesRequest], + Awaitable[~.SearchMigratableResourcesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'search_migratable_resources' not in self._stubs: + self._stubs['search_migratable_resources'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MigrationService/SearchMigratableResources', + request_serializer=migration_service.SearchMigratableResourcesRequest.serialize, + response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize, + ) + return self._stubs['search_migratable_resources'] + + @property + def batch_migrate_resources(self) -> Callable[ + [migration_service.BatchMigrateResourcesRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the batch migrate resources method over gRPC. + + Batch migrates resources from ml.googleapis.com, + automl.googleapis.com, and datalabeling.googleapis.com + to Vertex AI. + + Returns: + Callable[[~.BatchMigrateResourcesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'batch_migrate_resources' not in self._stubs: + self._stubs['batch_migrate_resources'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.MigrationService/BatchMigrateResources', + request_serializer=migration_service.BatchMigrateResourcesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['batch_migrate_resources'] + + +__all__ = ( + 'MigrationServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/__init__.py new file mode 100644 index 0000000000..5c4d570d15 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import ModelServiceClient +from .async_client import ModelServiceAsyncClient + +__all__ = ( + 'ModelServiceClient', + 'ModelServiceAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/async_client.py new file mode 100644 index 0000000000..f4ca7cf4c8 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/async_client.py @@ -0,0 +1,1059 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.model_service import pagers +from google.cloud.aiplatform_v1.types import deployed_model_ref +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import model +from google.cloud.aiplatform_v1.types import model as gca_model +from google.cloud.aiplatform_v1.types import model_evaluation +from google.cloud.aiplatform_v1.types import model_evaluation_slice +from google.cloud.aiplatform_v1.types import model_service +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import ModelServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport +from .client import ModelServiceClient + + +class ModelServiceAsyncClient: + """A service for managing Vertex AI's machine learning Models.""" + + _client: ModelServiceClient + + DEFAULT_ENDPOINT = ModelServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = ModelServiceClient.DEFAULT_MTLS_ENDPOINT + + endpoint_path = staticmethod(ModelServiceClient.endpoint_path) + parse_endpoint_path = staticmethod(ModelServiceClient.parse_endpoint_path) + model_path = staticmethod(ModelServiceClient.model_path) + parse_model_path = staticmethod(ModelServiceClient.parse_model_path) + model_evaluation_path = staticmethod(ModelServiceClient.model_evaluation_path) + parse_model_evaluation_path = 
staticmethod(ModelServiceClient.parse_model_evaluation_path) + model_evaluation_slice_path = staticmethod(ModelServiceClient.model_evaluation_slice_path) + parse_model_evaluation_slice_path = staticmethod(ModelServiceClient.parse_model_evaluation_slice_path) + training_pipeline_path = staticmethod(ModelServiceClient.training_pipeline_path) + parse_training_pipeline_path = staticmethod(ModelServiceClient.parse_training_pipeline_path) + common_billing_account_path = staticmethod(ModelServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(ModelServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(ModelServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(ModelServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(ModelServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(ModelServiceClient.parse_common_organization_path) + common_project_path = staticmethod(ModelServiceClient.common_project_path) + parse_common_project_path = staticmethod(ModelServiceClient.parse_common_project_path) + common_location_path = staticmethod(ModelServiceClient.common_location_path) + parse_common_location_path = staticmethod(ModelServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ModelServiceAsyncClient: The constructed client. + """ + return ModelServiceClient.from_service_account_info.__func__(ModelServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ModelServiceAsyncClient: The constructed client. + """ + return ModelServiceClient.from_service_account_file.__func__(ModelServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> ModelServiceTransport: + """Returns the transport used by the client instance. + + Returns: + ModelServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(ModelServiceClient).get_transport_class, type(ModelServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, ModelServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the model service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.ModelServiceTransport]): The + transport to use. 
If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = ModelServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def upload_model(self, + request: model_service.UploadModelRequest = None, + *, + parent: str = None, + model: gca_model.Model = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Uploads a Model artifact into Vertex AI. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.UploadModelRequest`): + The request object. Request message for + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel]. + parent (:class:`str`): + Required. The resource name of the Location into which + to upload the Model. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model (:class:`google.cloud.aiplatform_v1.types.Model`): + Required. The Model to create. + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.UploadModelResponse` + Response message of + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel] + operation. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.UploadModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
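+        # Editorial note (illustrative, not generated output): the flattened
+        # `parent` / `model` arguments are a convenience; the following two
+        # hypothetical calls are equivalent:
+        #   await client.upload_model(parent=parent, model=model)
+        #   await client.upload_model(
+        #       request=model_service.UploadModelRequest(parent=parent, model=model))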
+ if parent is not None: + request.parent = parent + if model is not None: + request.model = model + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.upload_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + model_service.UploadModelResponse, + metadata_type=model_service.UploadModelOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_model(self, + request: model_service.GetModelRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model.Model: + r"""Gets a Model. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.GetModelRequest`): + The request object. Request message for + [ModelService.GetModel][google.cloud.aiplatform.v1.ModelService.GetModel]. + name (:class:`str`): + Required. The name of the Model resource. Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Model: + A trained machine learning Model. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.GetModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_models(self, + request: model_service.ListModelsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelsAsyncPager: + r"""Lists Models in a Location. 
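+
+        Example (editorial sketch, not generated output; assumes Application
+        Default Credentials, and the project/location below are placeholders):
+
+        .. code-block:: python
+
+            from google.cloud import aiplatform_v1
+
+            async def sample_list_models():
+                client = aiplatform_v1.ModelServiceAsyncClient()
+                request = aiplatform_v1.ListModelsRequest(
+                    parent="projects/my-project/locations/us-central1",
+                )
+                # The pager resolves additional pages transparently.
+                page_result = await client.list_models(request=request)
+                async for model in page_result:
+                    print(model.name)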
+ + Args: + request (:class:`google.cloud.aiplatform_v1.types.ListModelsRequest`): + The request object. Request message for + [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels]. + parent (:class:`str`): + Required. The resource name of the Location to list the + Models from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.model_service.pagers.ListModelsAsyncPager: + Response message for + [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.ListModelsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_models, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_model(self, + request: model_service.UpdateModelRequest = None, + *, + model: gca_model.Model = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_model.Model: + r"""Updates a Model. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.UpdateModelRequest`): + The request object. Request message for + [ModelService.UpdateModel][google.cloud.aiplatform.v1.ModelService.UpdateModel]. + model (:class:`google.cloud.aiplatform_v1.types.Model`): + Required. The Model which replaces + the resource on the server. + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The update mask applies to the resource. For + the ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. 
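+
+                For example (illustrative), passing
+                ``field_mask_pb2.FieldMask(paths=["display_name"])`` restricts
+                the update to the Model's display name.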
+ + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Model: + A trained machine learning Model. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.UpdateModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("model.name", request.model.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_model(self, + request: model_service.DeleteModelRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a Model. + Note: Model can only be deleted if there are no + DeployedModels created from it. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.DeleteModelRequest`): + The request object. Request message for + [ModelService.DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel]. + name (:class:`str`): + Required. The name of the Model resource to be deleted. + Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
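+        # Editorial sketch (not generated output); `model_name` is a
+        # placeholder full resource name:
+        #   operation = await client.delete_model(name=model_name)
+        #   await operation.result()  # resolves once the delete LRO finishes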
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.DeleteModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def export_model(self, + request: model_service.ExportModelRequest = None, + *, + name: str = None, + output_config: model_service.ExportModelRequest.OutputConfig = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Exports a trained, exportable, Model to a location specified by + the user. A Model is considered to be exportable if it has at + least one [supported export + format][google.cloud.aiplatform.v1.Model.supported_export_formats]. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.ExportModelRequest`): + The request object. Request message for + [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel]. + name (:class:`str`): + Required. The resource name of the Model to export. + Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + output_config (:class:`google.cloud.aiplatform_v1.types.ExportModelRequest.OutputConfig`): + Required. The desired output location + and configuration. + + This corresponds to the ``output_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.ExportModelResponse` + Response message of + [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel] + operation. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
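+        # Editorial sketch (not generated output); the bucket and model name
+        # are placeholders, and `io` refers to google.cloud.aiplatform_v1.types.io:
+        #   output_config = model_service.ExportModelRequest.OutputConfig(
+        #       artifact_destination=io.GcsDestination(
+        #           output_uri_prefix="gs://my-bucket/model-export/"))
+        #   operation = await client.export_model(
+        #       name=model_name, output_config=output_config)
+        #   await operation.result()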
+ has_flattened_params = any([name, output_config]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.ExportModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if output_config is not None: + request.output_config = output_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.export_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + model_service.ExportModelResponse, + metadata_type=model_service.ExportModelOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_model_evaluation(self, + request: model_service.GetModelEvaluationRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_evaluation.ModelEvaluation: + r"""Gets a ModelEvaluation. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.GetModelEvaluationRequest`): + The request object. Request message for + [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1.ModelService.GetModelEvaluation]. + name (:class:`str`): + Required. The name of the ModelEvaluation resource. + Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.ModelEvaluation: + A collection of metrics calculated by + comparing Model's predictions on all of + the test data against annotations from + the test data. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.GetModelEvaluationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
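+        # Editorial note: wrap_method attaches the default timeout and client
+        # info below; an explicit `retry` or `timeout` passed by the caller
+        # still overrides these defaults when the RPC is invoked.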
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_model_evaluation, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_model_evaluations(self, + request: model_service.ListModelEvaluationsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelEvaluationsAsyncPager: + r"""Lists ModelEvaluations in a Model. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest`): + The request object. Request message for + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]. + parent (:class:`str`): + Required. The resource name of the Model to list the + ModelEvaluations from. Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationsAsyncPager: + Response message for + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.ListModelEvaluationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_model_evaluations, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelEvaluationsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
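+        # Editorial sketch (not generated output); `model_name` is a placeholder:
+        #   pager = await client.list_model_evaluations(parent=model_name)
+        #   async for evaluation in pager:
+        #       print(evaluation.name)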
+ return response + + async def get_model_evaluation_slice(self, + request: model_service.GetModelEvaluationSliceRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_evaluation_slice.ModelEvaluationSlice: + r"""Gets a ModelEvaluationSlice. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.GetModelEvaluationSliceRequest`): + The request object. Request message for + [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1.ModelService.GetModelEvaluationSlice]. + name (:class:`str`): + Required. The name of the ModelEvaluationSlice resource. + Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.ModelEvaluationSlice: + A collection of metrics calculated by + comparing Model's predictions on a slice + of the test data against ground truth + annotations. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.GetModelEvaluationSliceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_model_evaluation_slice, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_model_evaluation_slices(self, + request: model_service.ListModelEvaluationSlicesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelEvaluationSlicesAsyncPager: + r"""Lists ModelEvaluationSlices in a ModelEvaluation. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest`): + The request object. Request message for + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]. + parent (:class:`str`): + Required. The resource name of the ModelEvaluation to + list the ModelEvaluationSlices from. 
Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationSlicesAsyncPager: + Response message for + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.ListModelEvaluationSlicesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_model_evaluation_slices, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelEvaluationSlicesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "ModelServiceAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/client.py new file mode 100644 index 0000000000..80401b2ec2 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/client.py @@ -0,0 +1,1282 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.model_service import pagers +from google.cloud.aiplatform_v1.types import deployed_model_ref +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import model +from google.cloud.aiplatform_v1.types import model as gca_model +from google.cloud.aiplatform_v1.types import model_evaluation +from google.cloud.aiplatform_v1.types import model_evaluation_slice +from google.cloud.aiplatform_v1.types import model_service +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import ModelServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import ModelServiceGrpcTransport +from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport + + +class ModelServiceClientMeta(type): + """Metaclass for the ModelService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[ModelServiceTransport]] + _transport_registry["grpc"] = ModelServiceGrpcTransport + _transport_registry["grpc_asyncio"] = ModelServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[ModelServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class ModelServiceClient(metaclass=ModelServiceClientMeta): + """A service for managing Vertex AI's machine learning Models.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. 
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            ModelServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            ModelServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(
+            filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> ModelServiceTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            ModelServiceTransport: The transport used by the client
+                instance.
+ """ + return self._transport + + @staticmethod + def endpoint_path(project: str,location: str,endpoint: str,) -> str: + """Returns a fully-qualified endpoint string.""" + return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + + @staticmethod + def parse_endpoint_path(path: str) -> Dict[str,str]: + """Parses a endpoint path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def model_path(project: str,location: str,model: str,) -> str: + """Returns a fully-qualified model string.""" + return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + + @staticmethod + def parse_model_path(path: str) -> Dict[str,str]: + """Parses a model path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def model_evaluation_path(project: str,location: str,model: str,evaluation: str,) -> str: + """Returns a fully-qualified model_evaluation string.""" + return "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}".format(project=project, location=location, model=model, evaluation=evaluation, ) + + @staticmethod + def parse_model_evaluation_path(path: str) -> Dict[str,str]: + """Parses a model_evaluation path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)/evaluations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def model_evaluation_slice_path(project: str,location: str,model: str,evaluation: str,slice: str,) -> str: + """Returns a fully-qualified model_evaluation_slice string.""" + return "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}".format(project=project, location=location, model=model, evaluation=evaluation, slice=slice, ) + + @staticmethod + def parse_model_evaluation_slice_path(path: str) -> Dict[str,str]: + """Parses a model_evaluation_slice path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)/evaluations/(?P.+?)/slices/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def training_pipeline_path(project: str,location: str,training_pipeline: str,) -> str: + """Returns a fully-qualified training_pipeline string.""" + return "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, ) + + @staticmethod + def parse_training_pipeline_path(path: str) -> Dict[str,str]: + """Parses a training_pipeline path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/trainingPipelines/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified 
folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, ModelServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the model service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ModelServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. 
If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, ModelServiceTransport): + # transport is a ModelServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def upload_model(self, + request: model_service.UploadModelRequest = None, + *, + parent: str = None, + model: gca_model.Model = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Uploads a Model artifact into Vertex AI. + + Args: + request (google.cloud.aiplatform_v1.types.UploadModelRequest): + The request object. Request message for + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel]. + parent (str): + Required. The resource name of the Location into which + to upload the Model. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model (google.cloud.aiplatform_v1.types.Model): + Required. The Model to create. 
+ This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.UploadModelResponse` + Response message of + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel] + operation. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.UploadModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.UploadModelRequest): + request = model_service.UploadModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if model is not None: + request.model = model + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.upload_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + model_service.UploadModelResponse, + metadata_type=model_service.UploadModelOperationMetadata, + ) + + # Done; return the response. + return response + + def get_model(self, + request: model_service.GetModelRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model.Model: + r"""Gets a Model. + + Args: + request (google.cloud.aiplatform_v1.types.GetModelRequest): + The request object. Request message for + [ModelService.GetModel][google.cloud.aiplatform.v1.ModelService.GetModel]. + name (str): + Required. The name of the Model resource. Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Model: + A trained machine learning Model. + """ + # Create or coerce a protobuf request object. 
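+        # Editorial sketch (not generated output); the resource name is a
+        # placeholder:
+        #   client = ModelServiceClient()
+        #   model = client.get_model(
+        #       name="projects/my-project/locations/us-central1/models/123")
+        #   print(model.display_name)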
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.GetModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.GetModelRequest): + request = model_service.GetModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_models(self, + request: model_service.ListModelsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelsPager: + r"""Lists Models in a Location. + + Args: + request (google.cloud.aiplatform_v1.types.ListModelsRequest): + The request object. Request message for + [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels]. + parent (str): + Required. The resource name of the Location to list the + Models from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.model_service.pagers.ListModelsPager: + Response message for + [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.ListModelsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.ListModelsRequest): + request = model_service.ListModelsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
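+        # Editorial note (illustrative): the ListModelsPager returned below
+        # yields Models directly and also exposes whole pages, e.g.:
+        #   for model in client.list_models(parent=parent_name): ...
+        #   for page in client.list_models(parent=parent_name).pages: ...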
+ if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_models] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListModelsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_model(self, + request: model_service.UpdateModelRequest = None, + *, + model: gca_model.Model = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_model.Model: + r"""Updates a Model. + + Args: + request (google.cloud.aiplatform_v1.types.UpdateModelRequest): + The request object. Request message for + [ModelService.UpdateModel][google.cloud.aiplatform.v1.ModelService.UpdateModel]. + model (google.cloud.aiplatform_v1.types.Model): + Required. The Model which replaces + the resource on the server. + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the resource. For + the ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.Model: + A trained machine learning Model. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.UpdateModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.UpdateModelRequest): + request = model_service.UpdateModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_model] + + # Certain fields should be provided within the metadata header; + # add these here. 
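+        # to_grpc_metadata builds the "x-goog-request-params" header, which the
+        # backend uses to route the call by the model's resource name.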
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("model.name", request.model.name),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def delete_model(self,
+            request: model_service.DeleteModelRequest = None,
+            *,
+            name: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> gac_operation.Operation:
+        r"""Deletes a Model.
+        Note: Model can only be deleted if there are no
+        DeployedModels created from it.
+
+        Args:
+            request (google.cloud.aiplatform_v1.types.DeleteModelRequest):
+                The request object. Request message for
+                [ModelService.DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel].
+            name (str):
+                Required. The name of the Model resource to be deleted.
+                Format:
+                ``projects/{project}/locations/{location}/models/{model}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`google.protobuf.empty_pb2.Empty`, a generic empty
+                message that you can re-use to avoid defining duplicated
+                empty messages in your APIs. A typical example is to
+                use it as the request or the response type of an API
+                method. For instance:
+
+                    service Foo {
+                        rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+                    }
+
+                The JSON representation for ``Empty`` is an empty JSON
+                object, ``{}``.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a model_service.DeleteModelRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, model_service.DeleteModelRequest):
+            request = model_service.DeleteModelRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.delete_model]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
+        response = gac_operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            empty_pb2.Empty,
+            metadata_type=gca_operation.DeleteOperationMetadata,
+        )
+
+        # Done; return the response.
+ return response + + def export_model(self, + request: model_service.ExportModelRequest = None, + *, + name: str = None, + output_config: model_service.ExportModelRequest.OutputConfig = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Exports a trained, exportable, Model to a location specified by + the user. A Model is considered to be exportable if it has at + least one [supported export + format][google.cloud.aiplatform.v1.Model.supported_export_formats]. + + Args: + request (google.cloud.aiplatform_v1.types.ExportModelRequest): + The request object. Request message for + [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel]. + name (str): + Required. The resource name of the Model to export. + Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + output_config (google.cloud.aiplatform_v1.types.ExportModelRequest.OutputConfig): + Required. The desired output location + and configuration. + + This corresponds to the ``output_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.ExportModelResponse` + Response message of + [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel] + operation. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, output_config]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.ExportModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.ExportModelRequest): + request = model_service.ExportModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if output_config is not None: + request.output_config = output_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.export_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
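+        # from_gapic pairs the raw Operation with the operations client so that
+        # result() can poll until the ExportModelResponse becomes available.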
+ response = gac_operation.from_gapic( + response, + self._transport.operations_client, + model_service.ExportModelResponse, + metadata_type=model_service.ExportModelOperationMetadata, + ) + + # Done; return the response. + return response + + def get_model_evaluation(self, + request: model_service.GetModelEvaluationRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_evaluation.ModelEvaluation: + r"""Gets a ModelEvaluation. + + Args: + request (google.cloud.aiplatform_v1.types.GetModelEvaluationRequest): + The request object. Request message for + [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1.ModelService.GetModelEvaluation]. + name (str): + Required. The name of the ModelEvaluation resource. + Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.ModelEvaluation: + A collection of metrics calculated by + comparing Model's predictions on all of + the test data against annotations from + the test data. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.GetModelEvaluationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.GetModelEvaluationRequest): + request = model_service.GetModelEvaluationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_model_evaluation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_model_evaluations(self, + request: model_service.ListModelEvaluationsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelEvaluationsPager: + r"""Lists ModelEvaluations in a Model. + + Args: + request (google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest): + The request object. Request message for + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]. + parent (str): + Required. 
The resource name of the Model to list the + ModelEvaluations from. Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationsPager: + Response message for + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.ListModelEvaluationsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.ListModelEvaluationsRequest): + request = model_service.ListModelEvaluationsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_model_evaluations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListModelEvaluationsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_model_evaluation_slice(self, + request: model_service.GetModelEvaluationSliceRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_evaluation_slice.ModelEvaluationSlice: + r"""Gets a ModelEvaluationSlice. + + Args: + request (google.cloud.aiplatform_v1.types.GetModelEvaluationSliceRequest): + The request object. Request message for + [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1.ModelService.GetModelEvaluationSlice]. + name (str): + Required. The name of the ModelEvaluationSlice resource. + Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.ModelEvaluationSlice: + A collection of metrics calculated by + comparing Model's predictions on a slice + of the test data against ground truth + annotations. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.GetModelEvaluationSliceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.GetModelEvaluationSliceRequest): + request = model_service.GetModelEvaluationSliceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_model_evaluation_slice] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_model_evaluation_slices(self, + request: model_service.ListModelEvaluationSlicesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelEvaluationSlicesPager: + r"""Lists ModelEvaluationSlices in a ModelEvaluation. + + Args: + request (google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest): + The request object. Request message for + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]. + parent (str): + Required. The resource name of the ModelEvaluation to + list the ModelEvaluationSlices from. Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationSlicesPager: + Response message for + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
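+        # (Passing both a request object and flattened fields would make the
+        # effective request ambiguous, so that combination is rejected below.)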
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.ListModelEvaluationSlicesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.ListModelEvaluationSlicesRequest): + request = model_service.ListModelEvaluationSlicesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_model_evaluation_slices] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListModelEvaluationSlicesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "ModelServiceClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/pagers.py new file mode 100644 index 0000000000..908b0fe9ac --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/pagers.py @@ -0,0 +1,387 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.cloud.aiplatform_v1.types import model +from google.cloud.aiplatform_v1.types import model_evaluation +from google.cloud.aiplatform_v1.types import model_evaluation_slice +from google.cloud.aiplatform_v1.types import model_service + + +class ListModelsPager: + """A pager for iterating through ``list_models`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListModelsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``models`` field. 
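+
+    An illustrative usage sketch (``client`` stands for an instantiated
+    ``ModelServiceClient``; the parent value is hypothetical)::
+
+        for model in client.list_models(parent="projects/my-project/locations/us-central1"):
+            print(model.display_name)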
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListModels`` requests and continue to iterate + through the ``models`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListModelsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., model_service.ListModelsResponse], + request: model_service.ListModelsRequest, + response: model_service.ListModelsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListModelsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListModelsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = model_service.ListModelsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[model_service.ListModelsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[model.Model]: + for page in self.pages: + yield from page.models + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelsAsyncPager: + """A pager for iterating through ``list_models`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListModelsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``models`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListModels`` requests and continue to iterate + through the ``models`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListModelsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[model_service.ListModelsResponse]], + request: model_service.ListModelsRequest, + response: model_service.ListModelsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListModelsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListModelsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = model_service.ListModelsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[model_service.ListModelsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[model.Model]: + async def async_generator(): + async for page in self.pages: + for response in page.models: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelEvaluationsPager: + """A pager for iterating through ``list_model_evaluations`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``model_evaluations`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListModelEvaluations`` requests and continue to iterate + through the ``model_evaluations`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., model_service.ListModelEvaluationsResponse], + request: model_service.ListModelEvaluationsRequest, + response: model_service.ListModelEvaluationsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = model_service.ListModelEvaluationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[model_service.ListModelEvaluationsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[model_evaluation.ModelEvaluation]: + for page in self.pages: + yield from page.model_evaluations + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelEvaluationsAsyncPager: + """A pager for iterating through ``list_model_evaluations`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``model_evaluations`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListModelEvaluations`` requests and continue to iterate + through the ``model_evaluations`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[model_service.ListModelEvaluationsResponse]], + request: model_service.ListModelEvaluationsRequest, + response: model_service.ListModelEvaluationsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = model_service.ListModelEvaluationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[model_service.ListModelEvaluationsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[model_evaluation.ModelEvaluation]: + async def async_generator(): + async for page in self.pages: + for response in page.model_evaluations: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelEvaluationSlicesPager: + """A pager for iterating through ``list_model_evaluation_slices`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``model_evaluation_slices`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListModelEvaluationSlices`` requests and continue to iterate + through the ``model_evaluation_slices`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., model_service.ListModelEvaluationSlicesResponse], + request: model_service.ListModelEvaluationSlicesRequest, + response: model_service.ListModelEvaluationSlicesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse): + The initial response object. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = model_service.ListModelEvaluationSlicesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[model_service.ListModelEvaluationSlicesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[model_evaluation_slice.ModelEvaluationSlice]: + for page in self.pages: + yield from page.model_evaluation_slices + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelEvaluationSlicesAsyncPager: + """A pager for iterating through ``list_model_evaluation_slices`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``model_evaluation_slices`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListModelEvaluationSlices`` requests and continue to iterate + through the ``model_evaluation_slices`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[model_service.ListModelEvaluationSlicesResponse]], + request: model_service.ListModelEvaluationSlicesRequest, + response: model_service.ListModelEvaluationSlicesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = model_service.ListModelEvaluationSlicesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[model_service.ListModelEvaluationSlicesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[model_evaluation_slice.ModelEvaluationSlice]: + async def async_generator(): + async for page in self.pages: + for response in page.model_evaluation_slices: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/__init__.py new file mode 100644 index 0000000000..0f09224d3c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import ModelServiceTransport +from .grpc import ModelServiceGrpcTransport +from .grpc_asyncio import ModelServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[ModelServiceTransport]] +_transport_registry['grpc'] = ModelServiceGrpcTransport +_transport_registry['grpc_asyncio'] = ModelServiceGrpcAsyncIOTransport + +__all__ = ( + 'ModelServiceTransport', + 'ModelServiceGrpcTransport', + 'ModelServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/base.py new file mode 100644 index 0000000000..96d0c92e92 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/base.py @@ -0,0 +1,305 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1.types import model +from google.cloud.aiplatform_v1.types import model as gca_model +from google.cloud.aiplatform_v1.types import model_evaluation +from google.cloud.aiplatform_v1.types import model_evaluation_slice +from google.cloud.aiplatform_v1.types import model_service +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class ModelServiceTransport(abc.ABC): + """Abstract transport class for ModelService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + + # Save the scopes. 
+        self._scopes = scopes
+
+        # If no credentials are provided, then determine the appropriate
+        # defaults.
+        if credentials and credentials_file:
+            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
+
+        if credentials_file is not None:
+            credentials, _ = google.auth.load_credentials_from_file(
+                credentials_file,
+                **scopes_kwargs,
+                quota_project_id=quota_project_id
+            )
+
+        elif credentials is None:
+            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
+
+        # If the credentials are service account credentials, then always try to use a self-signed JWT.
+        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
+            credentials = credentials.with_always_use_jwt_access(True)
+
+        # Save the credentials.
+        self._credentials = credentials
+
+    # TODO(busunkim): This method is in the base transport
+    # to avoid duplicating code across the transport classes. These functions
+    # should be deleted once the minimum required version of google-auth is increased.
+
+    # TODO: Remove this function once google-auth >= 1.25.0 is required
+    @classmethod
+    def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
+        """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version."""
+
+        scopes_kwargs = {}
+
+        if _GOOGLE_AUTH_VERSION and (
+            packaging.version.parse(_GOOGLE_AUTH_VERSION)
+            >= packaging.version.parse("1.25.0")
+        ):
+            scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
+        else:
+            scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
+
+        return scopes_kwargs
+
+    def _prep_wrapped_messages(self, client_info):
+        # Precompute the wrapped methods.
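+        # Each entry pairs an RPC with its default timeout so the public client
+        # methods can still accept per-call retry/timeout overrides.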
+ self._wrapped_methods = { + self.upload_model: gapic_v1.method.wrap_method( + self.upload_model, + default_timeout=5.0, + client_info=client_info, + ), + self.get_model: gapic_v1.method.wrap_method( + self.get_model, + default_timeout=5.0, + client_info=client_info, + ), + self.list_models: gapic_v1.method.wrap_method( + self.list_models, + default_timeout=5.0, + client_info=client_info, + ), + self.update_model: gapic_v1.method.wrap_method( + self.update_model, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_model: gapic_v1.method.wrap_method( + self.delete_model, + default_timeout=5.0, + client_info=client_info, + ), + self.export_model: gapic_v1.method.wrap_method( + self.export_model, + default_timeout=5.0, + client_info=client_info, + ), + self.get_model_evaluation: gapic_v1.method.wrap_method( + self.get_model_evaluation, + default_timeout=5.0, + client_info=client_info, + ), + self.list_model_evaluations: gapic_v1.method.wrap_method( + self.list_model_evaluations, + default_timeout=5.0, + client_info=client_info, + ), + self.get_model_evaluation_slice: gapic_v1.method.wrap_method( + self.get_model_evaluation_slice, + default_timeout=5.0, + client_info=client_info, + ), + self.list_model_evaluation_slices: gapic_v1.method.wrap_method( + self.list_model_evaluation_slices, + default_timeout=5.0, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def upload_model(self) -> Callable[ + [model_service.UploadModelRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_model(self) -> Callable[ + [model_service.GetModelRequest], + Union[ + model.Model, + Awaitable[model.Model] + ]]: + raise NotImplementedError() + + @property + def list_models(self) -> Callable[ + [model_service.ListModelsRequest], + Union[ + model_service.ListModelsResponse, + Awaitable[model_service.ListModelsResponse] + ]]: + raise NotImplementedError() + + @property + def update_model(self) -> Callable[ + [model_service.UpdateModelRequest], + Union[ + gca_model.Model, + Awaitable[gca_model.Model] + ]]: + raise NotImplementedError() + + @property + def delete_model(self) -> Callable[ + [model_service.DeleteModelRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def export_model(self) -> Callable[ + [model_service.ExportModelRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_model_evaluation(self) -> Callable[ + [model_service.GetModelEvaluationRequest], + Union[ + model_evaluation.ModelEvaluation, + Awaitable[model_evaluation.ModelEvaluation] + ]]: + raise NotImplementedError() + + @property + def list_model_evaluations(self) -> Callable[ + [model_service.ListModelEvaluationsRequest], + Union[ + model_service.ListModelEvaluationsResponse, + Awaitable[model_service.ListModelEvaluationsResponse] + ]]: + raise NotImplementedError() + + @property + def get_model_evaluation_slice(self) -> Callable[ + [model_service.GetModelEvaluationSliceRequest], + Union[ + model_evaluation_slice.ModelEvaluationSlice, + Awaitable[model_evaluation_slice.ModelEvaluationSlice] + ]]: + raise NotImplementedError() + + @property + def list_model_evaluation_slices(self) -> Callable[ + 
[model_service.ListModelEvaluationSlicesRequest], + Union[ + model_service.ListModelEvaluationSlicesResponse, + Awaitable[model_service.ListModelEvaluationSlicesResponse] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'ModelServiceTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py new file mode 100644 index 0000000000..c3aa1b5a38 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py @@ -0,0 +1,514 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1.types import model +from google.cloud.aiplatform_v1.types import model as gca_model +from google.cloud.aiplatform_v1.types import model_evaluation +from google.cloud.aiplatform_v1.types import model_evaluation_slice +from google.cloud.aiplatform_v1.types import model_service +from google.longrunning import operations_pb2 # type: ignore +from .base import ModelServiceTransport, DEFAULT_CLIENT_INFO + + +class ModelServiceGrpcTransport(ModelServiceTransport): + """gRPC backend transport for ModelService. + + A service for managing Vertex AI's machine learning Models. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the gRPC channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure a mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether a self-signed JWT should
+                be used for service account credentials.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        self._grpc_channel = None
+        self._ssl_channel_credentials = ssl_channel_credentials
+        self._stubs: Dict[str, Callable] = {}
+        self._operations_client = None
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Ignore credentials if a channel was passed.
+            credentials = False
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+
+        else:
+            if api_mtls_endpoint:
+                host = api_mtls_endpoint
+
+                # Create SSL credentials with client_cert_source or application
+                # default SSL credentials.
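+                # (Mutual TLS: the channel presents a client certificate so the
+                # server can authenticate the caller as well.)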
+                if client_cert_source:
+                    cert, key = client_cert_source()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+                else:
+                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+            else:
+                if client_cert_source_for_mtls and not ssl_channel_credentials:
+                    cert, key = client_cert_source_for_mtls()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+
+        # The base transport sets the host, credentials and scopes
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+        )
+
+        if not self._grpc_channel:
+            self._grpc_channel = type(self).create_channel(
+                self._host,
+                credentials=self._credentials,
+                credentials_file=credentials_file,
+                scopes=self._scopes,
+                ssl_credentials=self._ssl_channel_credentials,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        # Wrap messages. This must be done after self._grpc_channel exists
+        self._prep_wrapped_messages(client_info)
+
+    @classmethod
+    def create_channel(cls,
+                       host: str = 'aiplatform.googleapis.com',
+                       credentials: ga_credentials.Credentials = None,
+                       credentials_file: str = None,
+                       scopes: Optional[Sequence[str]] = None,
+                       quota_project_id: Optional[str] = None,
+                       **kwargs) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service.
+        """
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Sanity check: Only create a new client if we do not already have one.
+        if self._operations_client is None:
+            self._operations_client = operations_v1.OperationsClient(
+                self.grpc_channel
+            )
+
+        # Return the client from cache.
+ return self._operations_client + + @property + def upload_model(self) -> Callable[ + [model_service.UploadModelRequest], + operations_pb2.Operation]: + r"""Return a callable for the upload model method over gRPC. + + Uploads a Model artifact into Vertex AI. + + Returns: + Callable[[~.UploadModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'upload_model' not in self._stubs: + self._stubs['upload_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/UploadModel', + request_serializer=model_service.UploadModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['upload_model'] + + @property + def get_model(self) -> Callable[ + [model_service.GetModelRequest], + model.Model]: + r"""Return a callable for the get model method over gRPC. + + Gets a Model. + + Returns: + Callable[[~.GetModelRequest], + ~.Model]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model' not in self._stubs: + self._stubs['get_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/GetModel', + request_serializer=model_service.GetModelRequest.serialize, + response_deserializer=model.Model.deserialize, + ) + return self._stubs['get_model'] + + @property + def list_models(self) -> Callable[ + [model_service.ListModelsRequest], + model_service.ListModelsResponse]: + r"""Return a callable for the list models method over gRPC. + + Lists Models in a Location. + + Returns: + Callable[[~.ListModelsRequest], + ~.ListModelsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_models' not in self._stubs: + self._stubs['list_models'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/ListModels', + request_serializer=model_service.ListModelsRequest.serialize, + response_deserializer=model_service.ListModelsResponse.deserialize, + ) + return self._stubs['list_models'] + + @property + def update_model(self) -> Callable[ + [model_service.UpdateModelRequest], + gca_model.Model]: + r"""Return a callable for the update model method over gRPC. + + Updates a Model. + + Returns: + Callable[[~.UpdateModelRequest], + ~.Model]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
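+ # A hedged sketch of direct stub use (the higher-level client normally
+ # mediates this; `creds` and `request` are assumed to be prebuilt):
+ #
+ #     transport = ModelServiceGrpcTransport(credentials=creds)
+ #     updated_model = transport.update_model(request)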
+ if 'update_model' not in self._stubs: + self._stubs['update_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/UpdateModel', + request_serializer=model_service.UpdateModelRequest.serialize, + response_deserializer=gca_model.Model.deserialize, + ) + return self._stubs['update_model'] + + @property + def delete_model(self) -> Callable[ + [model_service.DeleteModelRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete model method over gRPC. + + Deletes a Model. + Note: Model can only be deleted if there are no + DeployedModels created from it. + + Returns: + Callable[[~.DeleteModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_model' not in self._stubs: + self._stubs['delete_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/DeleteModel', + request_serializer=model_service.DeleteModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_model'] + + @property + def export_model(self) -> Callable[ + [model_service.ExportModelRequest], + operations_pb2.Operation]: + r"""Return a callable for the export model method over gRPC. + + Exports a trained, exportable, Model to a location specified by + the user. A Model is considered to be exportable if it has at + least one [supported export + format][google.cloud.aiplatform.v1.Model.supported_export_formats]. + + Returns: + Callable[[~.ExportModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'export_model' not in self._stubs: + self._stubs['export_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/ExportModel', + request_serializer=model_service.ExportModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_model'] + + @property + def get_model_evaluation(self) -> Callable[ + [model_service.GetModelEvaluationRequest], + model_evaluation.ModelEvaluation]: + r"""Return a callable for the get model evaluation method over gRPC. + + Gets a ModelEvaluation. + + Returns: + Callable[[~.GetModelEvaluationRequest], + ~.ModelEvaluation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model_evaluation' not in self._stubs: + self._stubs['get_model_evaluation'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/GetModelEvaluation', + request_serializer=model_service.GetModelEvaluationRequest.serialize, + response_deserializer=model_evaluation.ModelEvaluation.deserialize, + ) + return self._stubs['get_model_evaluation'] + + @property + def list_model_evaluations(self) -> Callable[ + [model_service.ListModelEvaluationsRequest], + model_service.ListModelEvaluationsResponse]: + r"""Return a callable for the list model evaluations method over gRPC. 
+ + Lists ModelEvaluations in a Model. + + Returns: + Callable[[~.ListModelEvaluationsRequest], + ~.ListModelEvaluationsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_model_evaluations' not in self._stubs: + self._stubs['list_model_evaluations'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/ListModelEvaluations', + request_serializer=model_service.ListModelEvaluationsRequest.serialize, + response_deserializer=model_service.ListModelEvaluationsResponse.deserialize, + ) + return self._stubs['list_model_evaluations'] + + @property + def get_model_evaluation_slice(self) -> Callable[ + [model_service.GetModelEvaluationSliceRequest], + model_evaluation_slice.ModelEvaluationSlice]: + r"""Return a callable for the get model evaluation slice method over gRPC. + + Gets a ModelEvaluationSlice. + + Returns: + Callable[[~.GetModelEvaluationSliceRequest], + ~.ModelEvaluationSlice]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model_evaluation_slice' not in self._stubs: + self._stubs['get_model_evaluation_slice'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/GetModelEvaluationSlice', + request_serializer=model_service.GetModelEvaluationSliceRequest.serialize, + response_deserializer=model_evaluation_slice.ModelEvaluationSlice.deserialize, + ) + return self._stubs['get_model_evaluation_slice'] + + @property + def list_model_evaluation_slices(self) -> Callable[ + [model_service.ListModelEvaluationSlicesRequest], + model_service.ListModelEvaluationSlicesResponse]: + r"""Return a callable for the list model evaluation slices method over gRPC. + + Lists ModelEvaluationSlices in a ModelEvaluation. + + Returns: + Callable[[~.ListModelEvaluationSlicesRequest], + ~.ListModelEvaluationSlicesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_model_evaluation_slices' not in self._stubs: + self._stubs['list_model_evaluation_slices'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/ListModelEvaluationSlices', + request_serializer=model_service.ListModelEvaluationSlicesRequest.serialize, + response_deserializer=model_service.ListModelEvaluationSlicesResponse.deserialize, + ) + return self._stubs['list_model_evaluation_slices'] + + +__all__ = ( + 'ModelServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..429ad907b1 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py @@ -0,0 +1,518 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +import packaging.version + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import model +from google.cloud.aiplatform_v1.types import model as gca_model +from google.cloud.aiplatform_v1.types import model_evaluation +from google.cloud.aiplatform_v1.types import model_evaluation_slice +from google.cloud.aiplatform_v1.types import model_service +from google.longrunning import operations_pb2 # type: ignore +from .base import ModelServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import ModelServiceGrpcTransport + + +class ModelServiceGrpcAsyncIOTransport(ModelServiceTransport): + """gRPC AsyncIO backend transport for ModelService. + + A service for managing Vertex AI's machine learning Models. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. 
These
+ credentials identify this application to the service. If
+ none are specified, the client will attempt to ascertain
+ the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+ scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
+ channel creation.
+ Returns:
+ aio.Channel: A gRPC AsyncIO channel object.
+ """
+
+ return grpc_helpers_async.create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ quota_project_id=quota_project_id,
+ default_scopes=cls.AUTH_SCOPES,
+ scopes=scopes,
+ default_host=cls.DEFAULT_HOST,
+ **kwargs
+ )
+
+ def __init__(self, *,
+ host: str = 'aiplatform.googleapis.com',
+ credentials: ga_credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ channel: aio.Channel = None,
+ api_mtls_endpoint: str = None,
+ client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+ ssl_channel_credentials: grpc.ChannelCredentials = None,
+ client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ quota_project_id=None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]):
+ The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+ scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ channel (Optional[aio.Channel]): A ``Channel`` instance through
+ which to make calls.
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+ If provided, it overrides the ``host`` argument and tries to create
+ a mutual TLS channel with client SSL credentials from
+ ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated. A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+ for the grpc channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ A callback to provide client certificate bytes and private key bytes,
+ both in PEM format. It is used to configure a mutual TLS channel. It is
+ ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+ always_use_jwt_access (Optional[bool]): Whether a self-signed JWT should
+ be used for service account credentials.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ self._grpc_channel = None
+ self._ssl_channel_credentials = ssl_channel_credentials
+ self._stubs: Dict[str, Callable] = {}
+ self._operations_client = None
+
+ if api_mtls_endpoint:
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+ if client_cert_source:
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+ if channel:
+ # Ignore credentials if a channel was passed.
+ credentials = False
+ # If a channel was explicitly provided, set it.
+ self._grpc_channel = channel
+ self._ssl_channel_credentials = None
+ else:
+ if api_mtls_endpoint:
+ host = api_mtls_endpoint
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
+ if client_cert_source:
+ cert, key = client_cert_source()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+ else:
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
+ cert, key = client_cert_source_for_mtls()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+
+ # The base transport sets the host, credentials and scopes
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ always_use_jwt_access=always_use_jwt_access,
+ )
+
+ if not self._grpc_channel:
+ self._grpc_channel = type(self).create_channel(
+ self._host,
+ credentials=self._credentials,
+ credentials_file=credentials_file,
+ scopes=self._scopes,
+ ssl_credentials=self._ssl_channel_credentials,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Wrap messages. This must be done after self._grpc_channel exists
+ self._prep_wrapped_messages(client_info)
+
+ @property
+ def grpc_channel(self) -> aio.Channel:
+ """Return the channel designed to connect to this service.
+
+ This property caches on the instance; repeated calls return
+ the same channel.
+ """
+ # Return the channel from cache.
+ return self._grpc_channel
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsAsyncClient:
+ """Create the client designed to process long-running operations.
+
+ This property caches on the instance; repeated calls return the same
+ client.
+ """
+ # Sanity check: Only create a new client if we do not already have one.
+ if self._operations_client is None:
+ self._operations_client = operations_v1.OperationsAsyncClient(
+ self.grpc_channel
+ )
+
+ # Return the client from cache.
+ return self._operations_client + + @property + def upload_model(self) -> Callable[ + [model_service.UploadModelRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the upload model method over gRPC. + + Uploads a Model artifact into Vertex AI. + + Returns: + Callable[[~.UploadModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'upload_model' not in self._stubs: + self._stubs['upload_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/UploadModel', + request_serializer=model_service.UploadModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['upload_model'] + + @property + def get_model(self) -> Callable[ + [model_service.GetModelRequest], + Awaitable[model.Model]]: + r"""Return a callable for the get model method over gRPC. + + Gets a Model. + + Returns: + Callable[[~.GetModelRequest], + Awaitable[~.Model]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model' not in self._stubs: + self._stubs['get_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/GetModel', + request_serializer=model_service.GetModelRequest.serialize, + response_deserializer=model.Model.deserialize, + ) + return self._stubs['get_model'] + + @property + def list_models(self) -> Callable[ + [model_service.ListModelsRequest], + Awaitable[model_service.ListModelsResponse]]: + r"""Return a callable for the list models method over gRPC. + + Lists Models in a Location. + + Returns: + Callable[[~.ListModelsRequest], + Awaitable[~.ListModelsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_models' not in self._stubs: + self._stubs['list_models'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/ListModels', + request_serializer=model_service.ListModelsRequest.serialize, + response_deserializer=model_service.ListModelsResponse.deserialize, + ) + return self._stubs['list_models'] + + @property + def update_model(self) -> Callable[ + [model_service.UpdateModelRequest], + Awaitable[gca_model.Model]]: + r"""Return a callable for the update model method over gRPC. + + Updates a Model. + + Returns: + Callable[[~.UpdateModelRequest], + Awaitable[~.Model]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'update_model' not in self._stubs: + self._stubs['update_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/UpdateModel', + request_serializer=model_service.UpdateModelRequest.serialize, + response_deserializer=gca_model.Model.deserialize, + ) + return self._stubs['update_model'] + + @property + def delete_model(self) -> Callable[ + [model_service.DeleteModelRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete model method over gRPC. + + Deletes a Model. + Note: Model can only be deleted if there are no + DeployedModels created from it. + + Returns: + Callable[[~.DeleteModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_model' not in self._stubs: + self._stubs['delete_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/DeleteModel', + request_serializer=model_service.DeleteModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_model'] + + @property + def export_model(self) -> Callable[ + [model_service.ExportModelRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the export model method over gRPC. + + Exports a trained, exportable, Model to a location specified by + the user. A Model is considered to be exportable if it has at + least one [supported export + format][google.cloud.aiplatform.v1.Model.supported_export_formats]. + + Returns: + Callable[[~.ExportModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'export_model' not in self._stubs: + self._stubs['export_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/ExportModel', + request_serializer=model_service.ExportModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_model'] + + @property + def get_model_evaluation(self) -> Callable[ + [model_service.GetModelEvaluationRequest], + Awaitable[model_evaluation.ModelEvaluation]]: + r"""Return a callable for the get model evaluation method over gRPC. + + Gets a ModelEvaluation. + + Returns: + Callable[[~.GetModelEvaluationRequest], + Awaitable[~.ModelEvaluation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_model_evaluation' not in self._stubs: + self._stubs['get_model_evaluation'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/GetModelEvaluation', + request_serializer=model_service.GetModelEvaluationRequest.serialize, + response_deserializer=model_evaluation.ModelEvaluation.deserialize, + ) + return self._stubs['get_model_evaluation'] + + @property + def list_model_evaluations(self) -> Callable[ + [model_service.ListModelEvaluationsRequest], + Awaitable[model_service.ListModelEvaluationsResponse]]: + r"""Return a callable for the list model evaluations method over gRPC. + + Lists ModelEvaluations in a Model. + + Returns: + Callable[[~.ListModelEvaluationsRequest], + Awaitable[~.ListModelEvaluationsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_model_evaluations' not in self._stubs: + self._stubs['list_model_evaluations'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/ListModelEvaluations', + request_serializer=model_service.ListModelEvaluationsRequest.serialize, + response_deserializer=model_service.ListModelEvaluationsResponse.deserialize, + ) + return self._stubs['list_model_evaluations'] + + @property + def get_model_evaluation_slice(self) -> Callable[ + [model_service.GetModelEvaluationSliceRequest], + Awaitable[model_evaluation_slice.ModelEvaluationSlice]]: + r"""Return a callable for the get model evaluation slice method over gRPC. + + Gets a ModelEvaluationSlice. + + Returns: + Callable[[~.GetModelEvaluationSliceRequest], + Awaitable[~.ModelEvaluationSlice]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model_evaluation_slice' not in self._stubs: + self._stubs['get_model_evaluation_slice'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/GetModelEvaluationSlice', + request_serializer=model_service.GetModelEvaluationSliceRequest.serialize, + response_deserializer=model_evaluation_slice.ModelEvaluationSlice.deserialize, + ) + return self._stubs['get_model_evaluation_slice'] + + @property + def list_model_evaluation_slices(self) -> Callable[ + [model_service.ListModelEvaluationSlicesRequest], + Awaitable[model_service.ListModelEvaluationSlicesResponse]]: + r"""Return a callable for the list model evaluation slices method over gRPC. + + Lists ModelEvaluationSlices in a ModelEvaluation. + + Returns: + Callable[[~.ListModelEvaluationSlicesRequest], + Awaitable[~.ListModelEvaluationSlicesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
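+ # Note: unlike the sync transport, each stub on this class returns an
+ # awaitable. A hedged sketch of direct use (`transport` and `request`
+ # are assumed to be prebuilt):
+ #
+ #     response = await transport.list_model_evaluation_slices(request)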
+ if 'list_model_evaluation_slices' not in self._stubs: + self._stubs['list_model_evaluation_slices'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.ModelService/ListModelEvaluationSlices', + request_serializer=model_service.ListModelEvaluationSlicesRequest.serialize, + response_deserializer=model_service.ListModelEvaluationSlicesResponse.deserialize, + ) + return self._stubs['list_model_evaluation_slices'] + + +__all__ = ( + 'ModelServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/__init__.py new file mode 100644 index 0000000000..539616023d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import PipelineServiceClient +from .async_client import PipelineServiceAsyncClient + +__all__ = ( + 'PipelineServiceClient', + 'PipelineServiceAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py new file mode 100644 index 0000000000..afe7a6b8e0 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py @@ -0,0 +1,1069 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
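+#
+# A hedged usage sketch for the async client defined below; the resource
+# name is hypothetical and error handling is omitted:
+#
+#     import asyncio
+#     from google.cloud.aiplatform_v1.services.pipeline_service import (
+#         PipelineServiceAsyncClient,
+#     )
+#
+#     async def main():
+#         client = PipelineServiceAsyncClient()
+#         pipeline = await client.get_training_pipeline(
+#             name="projects/my-project/locations/us-central1/trainingPipelines/123",
+#         )
+#         print(pipeline.display_name)
+#
+#     asyncio.run(main())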
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.pipeline_service import pagers +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import model +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_service +from google.cloud.aiplatform_v1.types import pipeline_state +from google.cloud.aiplatform_v1.types import training_pipeline +from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from .transports.base import PipelineServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import PipelineServiceGrpcAsyncIOTransport +from .client import PipelineServiceClient + + +class PipelineServiceAsyncClient: + """A service for creating and managing Vertex AI's pipelines. This + includes both ``TrainingPipeline`` resources (used for AutoML and + custom training) and ``PipelineJob`` resources (used for Vertex + Pipelines). 
+ """ + + _client: PipelineServiceClient + + DEFAULT_ENDPOINT = PipelineServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = PipelineServiceClient.DEFAULT_MTLS_ENDPOINT + + artifact_path = staticmethod(PipelineServiceClient.artifact_path) + parse_artifact_path = staticmethod(PipelineServiceClient.parse_artifact_path) + context_path = staticmethod(PipelineServiceClient.context_path) + parse_context_path = staticmethod(PipelineServiceClient.parse_context_path) + custom_job_path = staticmethod(PipelineServiceClient.custom_job_path) + parse_custom_job_path = staticmethod(PipelineServiceClient.parse_custom_job_path) + endpoint_path = staticmethod(PipelineServiceClient.endpoint_path) + parse_endpoint_path = staticmethod(PipelineServiceClient.parse_endpoint_path) + execution_path = staticmethod(PipelineServiceClient.execution_path) + parse_execution_path = staticmethod(PipelineServiceClient.parse_execution_path) + model_path = staticmethod(PipelineServiceClient.model_path) + parse_model_path = staticmethod(PipelineServiceClient.parse_model_path) + network_path = staticmethod(PipelineServiceClient.network_path) + parse_network_path = staticmethod(PipelineServiceClient.parse_network_path) + pipeline_job_path = staticmethod(PipelineServiceClient.pipeline_job_path) + parse_pipeline_job_path = staticmethod(PipelineServiceClient.parse_pipeline_job_path) + training_pipeline_path = staticmethod(PipelineServiceClient.training_pipeline_path) + parse_training_pipeline_path = staticmethod(PipelineServiceClient.parse_training_pipeline_path) + common_billing_account_path = staticmethod(PipelineServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(PipelineServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(PipelineServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(PipelineServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(PipelineServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(PipelineServiceClient.parse_common_organization_path) + common_project_path = staticmethod(PipelineServiceClient.common_project_path) + parse_common_project_path = staticmethod(PipelineServiceClient.parse_common_project_path) + common_location_path = staticmethod(PipelineServiceClient.common_location_path) + parse_common_location_path = staticmethod(PipelineServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PipelineServiceAsyncClient: The constructed client. + """ + return PipelineServiceClient.from_service_account_info.__func__(PipelineServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PipelineServiceAsyncClient: The constructed client. 
+ """ + return PipelineServiceClient.from_service_account_file.__func__(PipelineServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> PipelineServiceTransport: + """Returns the transport used by the client instance. + + Returns: + PipelineServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(PipelineServiceClient).get_transport_class, type(PipelineServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, PipelineServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the pipeline service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.PipelineServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = PipelineServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_training_pipeline(self, + request: pipeline_service.CreateTrainingPipelineRequest = None, + *, + parent: str = None, + training_pipeline: gca_training_pipeline.TrainingPipeline = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_training_pipeline.TrainingPipeline: + r"""Creates a TrainingPipeline. A created + TrainingPipeline right away will be attempted to be run. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.CreateTrainingPipelineRequest`): + The request object. Request message for + [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CreateTrainingPipeline]. + parent (:class:`str`): + Required. The resource name of the Location to create + the TrainingPipeline in. 
Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + training_pipeline (:class:`google.cloud.aiplatform_v1.types.TrainingPipeline`): + Required. The TrainingPipeline to + create. + + This corresponds to the ``training_pipeline`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.TrainingPipeline: + The TrainingPipeline orchestrates tasks associated with training a Model. It + always executes the training task, and optionally may + also export data from Vertex AI's Dataset which + becomes the training input, + [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] + the Model to Vertex AI, and evaluate the Model. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, training_pipeline]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.CreateTrainingPipelineRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if training_pipeline is not None: + request.training_pipeline = training_pipeline + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_training_pipeline, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_training_pipeline(self, + request: pipeline_service.GetTrainingPipelineRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> training_pipeline.TrainingPipeline: + r"""Gets a TrainingPipeline. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.GetTrainingPipelineRequest`): + The request object. Request message for + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline]. + name (:class:`str`): + Required. The name of the TrainingPipeline resource. + Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1.types.TrainingPipeline: + The TrainingPipeline orchestrates tasks associated with training a Model. It + always executes the training task, and optionally may + also export data from Vertex AI's Dataset which + becomes the training input, + [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] + the Model to Vertex AI, and evaluate the Model. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.GetTrainingPipelineRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_training_pipeline, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_training_pipelines(self, + request: pipeline_service.ListTrainingPipelinesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTrainingPipelinesAsyncPager: + r"""Lists TrainingPipelines in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest`): + The request object. Request message for + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines]. + parent (:class:`str`): + Required. The resource name of the Location to list the + TrainingPipelines from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListTrainingPipelinesAsyncPager: + Response message for + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
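+ # A hedged usage sketch of the pager this method returns (the parent
+ # value is hypothetical):
+ #
+ #     pager = await client.list_training_pipelines(
+ #         parent="projects/my-project/locations/us-central1",
+ #     )
+ #     async for training_pipeline in pager:
+ #         print(training_pipeline.name)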
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.ListTrainingPipelinesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_training_pipelines, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTrainingPipelinesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_training_pipeline(self, + request: pipeline_service.DeleteTrainingPipelineRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a TrainingPipeline. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.DeleteTrainingPipelineRequest`): + The request object. Request message for + [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.DeleteTrainingPipeline]. + name (:class:`str`): + Required. The name of the TrainingPipeline resource to + be deleted. Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
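+ # A hedged usage sketch: deletion returns a long-running operation
+ # whose result is Empty (`pipeline_name` is assumed to exist):
+ #
+ #     lro = await client.delete_training_pipeline(name=pipeline_name)
+ #     await lro.result()  # resolves once the server finishes the delete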
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.DeleteTrainingPipelineRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_training_pipeline, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def cancel_training_pipeline(self, + request: pipeline_service.CancelTrainingPipelineRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a TrainingPipeline. Starts asynchronous cancellation on + the TrainingPipeline. The server makes a best effort to cancel + the pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the TrainingPipeline is not deleted; + instead it becomes a pipeline with a + [TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state] + is set to ``CANCELLED``. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.CancelTrainingPipelineRequest`): + The request object. Request message for + [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CancelTrainingPipeline]. + name (:class:`str`): + Required. The name of the TrainingPipeline to cancel. + Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.CancelTrainingPipelineRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_training_pipeline, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def create_pipeline_job(self, + request: pipeline_service.CreatePipelineJobRequest = None, + *, + parent: str = None, + pipeline_job: gca_pipeline_job.PipelineJob = None, + pipeline_job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_pipeline_job.PipelineJob: + r"""Creates a PipelineJob. A PipelineJob will run + immediately when created. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.CreatePipelineJobRequest`): + The request object. Request message for + [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1.PipelineService.CreatePipelineJob]. + parent (:class:`str`): + Required. The resource name of the Location to create + the PipelineJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + pipeline_job (:class:`google.cloud.aiplatform_v1.types.PipelineJob`): + Required. The PipelineJob to create. + This corresponds to the ``pipeline_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + pipeline_job_id (:class:`str`): + The ID to use for the PipelineJob, which will become the + final component of the PipelineJob name. If not + provided, an ID will be automatically generated. + + This value should be less than 128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``pipeline_job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.PipelineJob: + An instance of a machine learning + PipelineJob. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
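+ # A hedged usage sketch (all field values are hypothetical):
+ #
+ #     job = await client.create_pipeline_job(
+ #         parent="projects/my-project/locations/us-central1",
+ #         pipeline_job=gca_pipeline_job.PipelineJob(display_name="demo"),
+ #         pipeline_job_id="demo-job-001",  # matches /[a-z][0-9]-/, < 128 chars
+ #     )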
+ has_flattened_params = any([parent, pipeline_job, pipeline_job_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.CreatePipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if pipeline_job is not None: + request.pipeline_job = pipeline_job + if pipeline_job_id is not None: + request.pipeline_job_id = pipeline_job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_pipeline_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_pipeline_job(self, + request: pipeline_service.GetPipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pipeline_job.PipelineJob: + r"""Gets a PipelineJob. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.GetPipelineJobRequest`): + The request object. Request message for + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob]. + name (:class:`str`): + Required. The name of the PipelineJob resource. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.PipelineJob: + An instance of a machine learning + PipelineJob. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.GetPipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_pipeline_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
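+        # (The metadata assembled above includes the routing header entry
+        # keyed "x-goog-request-params", carrying the url-encoded ``name``
+        # pair so the backend can route the request to the right location.)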
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_pipeline_jobs(self, + request: pipeline_service.ListPipelineJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPipelineJobsAsyncPager: + r"""Lists PipelineJobs in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.ListPipelineJobsRequest`): + The request object. Request message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs]. + parent (:class:`str`): + Required. The resource name of the Location to list the + PipelineJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListPipelineJobsAsyncPager: + Response message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.ListPipelineJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_pipeline_jobs, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListPipelineJobsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_pipeline_job(self, + request: pipeline_service.DeletePipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a PipelineJob. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.DeletePipelineJobRequest`): + The request object. 
Request message for + [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1.PipelineService.DeletePipelineJob]. + name (:class:`str`): + Required. The name of the PipelineJob resource to be + deleted. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.DeletePipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_pipeline_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def cancel_pipeline_job(self, + request: pipeline_service.CancelPipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a PipelineJob. Starts asynchronous cancellation on the + PipelineJob. The server makes a best effort to cancel the + pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. 
On + successful cancellation, the PipelineJob is not deleted; instead + it becomes a pipeline with a + [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] + is set to ``CANCELLED``. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.CancelPipelineJobRequest`): + The request object. Request message for + [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1.PipelineService.CancelPipelineJob]. + name (:class:`str`): + Required. The name of the PipelineJob to cancel. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.CancelPipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_pipeline_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "PipelineServiceAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/client.py new file mode 100644 index 0000000000..1fa4015ac6 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/client.py @@ -0,0 +1,1328 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.pipeline_service import pagers +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import model +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_service +from google.cloud.aiplatform_v1.types import pipeline_state +from google.cloud.aiplatform_v1.types import training_pipeline +from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from .transports.base import PipelineServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import PipelineServiceGrpcTransport +from .transports.grpc_asyncio import PipelineServiceGrpcAsyncIOTransport + + +class PipelineServiceClientMeta(type): + """Metaclass for the PipelineService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[PipelineServiceTransport]] + _transport_registry["grpc"] = PipelineServiceGrpcTransport + _transport_registry["grpc_asyncio"] = PipelineServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[PipelineServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class PipelineServiceClient(metaclass=PipelineServiceClientMeta): + """A service for creating and managing Vertex AI's pipelines. This + includes both ``TrainingPipeline`` resources (used for AutoML and + custom training) and ``PipelineJob`` resources (used for Vertex + Pipelines). 
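+
+    A minimal, hypothetical usage sketch (all resource names below are
+    placeholders)::
+
+        client = PipelineServiceClient()
+        pipeline = client.get_training_pipeline(
+            name="projects/my-project/locations/us-central1/trainingPipelines/123",
+        )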
+ """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PipelineServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PipelineServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> PipelineServiceTransport: + """Returns the transport used by the client instance. + + Returns: + PipelineServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def artifact_path(project: str,location: str,metadata_store: str,artifact: str,) -> str: + """Returns a fully-qualified artifact string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(project=project, location=location, metadata_store=metadata_store, artifact=artifact, ) + + @staticmethod + def parse_artifact_path(path: str) -> Dict[str,str]: + """Parses a artifact path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/artifacts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def context_path(project: str,location: str,metadata_store: str,context: str,) -> str: + """Returns a fully-qualified context string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(project=project, location=location, metadata_store=metadata_store, context=context, ) + + @staticmethod + def parse_context_path(path: str) -> Dict[str,str]: + """Parses a context path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/contexts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def custom_job_path(project: str,location: str,custom_job: str,) -> str: + """Returns a fully-qualified custom_job string.""" + return "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) + + @staticmethod + def parse_custom_job_path(path: str) -> Dict[str,str]: + """Parses a custom_job path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/customJobs/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def endpoint_path(project: str,location: str,endpoint: str,) -> str: + """Returns a fully-qualified endpoint string.""" + return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + + @staticmethod + def parse_endpoint_path(path: str) -> Dict[str,str]: + """Parses a endpoint path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def execution_path(project: str,location: str,metadata_store: str,execution: str,) -> str: + """Returns a fully-qualified execution string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(project=project, location=location, metadata_store=metadata_store, execution=execution, ) + + @staticmethod + def parse_execution_path(path: str) -> Dict[str,str]: + """Parses a execution path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/executions/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def model_path(project: str,location: str,model: str,) -> str: + """Returns a fully-qualified model string.""" + return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + + @staticmethod + def parse_model_path(path: str) -> Dict[str,str]: + """Parses a model path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def network_path(project: str,network: str,) -> 
+        """Returns a fully-qualified network string."""
+        return "projects/{project}/global/networks/{network}".format(project=project, network=network, )
+
+    @staticmethod
+    def parse_network_path(path: str) -> Dict[str,str]:
+        """Parses a network path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/global/networks/(?P<network>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def pipeline_job_path(project: str,location: str,pipeline_job: str,) -> str:
+        """Returns a fully-qualified pipeline_job string."""
+        return "projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}".format(project=project, location=location, pipeline_job=pipeline_job, )
+
+    @staticmethod
+    def parse_pipeline_job_path(path: str) -> Dict[str,str]:
+        """Parses a pipeline_job path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/pipelineJobs/(?P<pipeline_job>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def training_pipeline_path(project: str,location: str,training_pipeline: str,) -> str:
+        """Returns a fully-qualified training_pipeline string."""
+        return "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, )
+
+    @staticmethod
+    def parse_training_pipeline_path(path: str) -> Dict[str,str]:
+        """Parses a training_pipeline path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/trainingPipelines/(?P<training_pipeline>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse an organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, PipelineServiceTransport, None] = None,
+            client_options: Optional[client_options_lib.ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the pipeline service client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, PipelineServiceTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+                set, no client certificate will be used.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+        """
+        if isinstance(client_options, dict):
+            client_options = client_options_lib.from_dict(client_options)
+        if client_options is None:
+            client_options = client_options_lib.ClientOptions()
+
+        # Create SSL credentials for mutual TLS if needed.
+        use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
+
+        client_cert_source_func = None
+        is_mtls = False
+        if use_client_cert:
+            if client_options.client_cert_source:
+                is_mtls = True
+                client_cert_source_func = client_options.client_cert_source
+            else:
+                is_mtls = mtls.has_default_client_cert_source()
+                if is_mtls:
+                    client_cert_source_func = mtls.default_client_cert_source()
+                else:
+                    client_cert_source_func = None
+
+        # Figure out which api endpoint to use.
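+        # (Under the default GOOGLE_API_USE_MTLS_ENDPOINT="auto" handled
+        # below, the mTLS endpoint "aiplatform.mtls.googleapis.com" is chosen
+        # only when a client certificate was found above; "never" and
+        # "always" force the regular and mTLS endpoints respectively.)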
+        if client_options.api_endpoint is not None:
+            api_endpoint = client_options.api_endpoint
+        else:
+            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+            if use_mtls_env == "never":
+                api_endpoint = self.DEFAULT_ENDPOINT
+            elif use_mtls_env == "always":
+                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
+            elif use_mtls_env == "auto":
+                if is_mtls:
+                    api_endpoint = self.DEFAULT_MTLS_ENDPOINT
+                else:
+                    api_endpoint = self.DEFAULT_ENDPOINT
+            else:
+                raise MutualTLSChannelError(
+                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
+                    "values: never, auto, always"
+                )
+
+        # Save or instantiate the transport.
+        # Ordinarily, we provide the transport, but allowing a custom transport
+        # instance provides an extensibility point for unusual situations.
+        if isinstance(transport, PipelineServiceTransport):
+            # transport is a PipelineServiceTransport instance.
+            if credentials or client_options.credentials_file:
+                raise ValueError("When providing a transport instance, "
+                                 "provide its credentials directly.")
+            if client_options.scopes:
+                raise ValueError(
+                    "When providing a transport instance, provide its scopes "
+                    "directly."
+                )
+            self._transport = transport
+        else:
+            Transport = type(self).get_transport_class(transport)
+            self._transport = Transport(
+                credentials=credentials,
+                credentials_file=client_options.credentials_file,
+                host=api_endpoint,
+                scopes=client_options.scopes,
+                client_cert_source_for_mtls=client_cert_source_func,
+                quota_project_id=client_options.quota_project_id,
+                client_info=client_info,
+            )
+
+    def create_training_pipeline(self,
+            request: pipeline_service.CreateTrainingPipelineRequest = None,
+            *,
+            parent: str = None,
+            training_pipeline: gca_training_pipeline.TrainingPipeline = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> gca_training_pipeline.TrainingPipeline:
+        r"""Creates a TrainingPipeline. A created TrainingPipeline
+        will immediately be attempted to run.
+
+        Args:
+            request (google.cloud.aiplatform_v1.types.CreateTrainingPipelineRequest):
+                The request object. Request message for
+                [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CreateTrainingPipeline].
+            parent (str):
+                Required. The resource name of the Location to create
+                the TrainingPipeline in. Format:
+                ``projects/{project}/locations/{location}``
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            training_pipeline (google.cloud.aiplatform_v1.types.TrainingPipeline):
+                Required. The TrainingPipeline to
+                create.
+
+                This corresponds to the ``training_pipeline`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1.types.TrainingPipeline:
+                The TrainingPipeline orchestrates tasks associated with training a Model. It
+                   always executes the training task, and optionally may
+                   also export data from Vertex AI's Dataset which
+                   becomes the training input,
+                   [upload][google.cloud.aiplatform.v1.ModelService.UploadModel]
+                   the Model to Vertex AI, and evaluate the Model.
+
+        """
+        # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, training_pipeline]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.CreateTrainingPipelineRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.CreateTrainingPipelineRequest): + request = pipeline_service.CreateTrainingPipelineRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if training_pipeline is not None: + request.training_pipeline = training_pipeline + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_training_pipeline] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_training_pipeline(self, + request: pipeline_service.GetTrainingPipelineRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> training_pipeline.TrainingPipeline: + r"""Gets a TrainingPipeline. + + Args: + request (google.cloud.aiplatform_v1.types.GetTrainingPipelineRequest): + The request object. Request message for + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline]. + name (str): + Required. The name of the TrainingPipeline resource. + Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.TrainingPipeline: + The TrainingPipeline orchestrates tasks associated with training a Model. It + always executes the training task, and optionally may + also export data from Vertex AI's Dataset which + becomes the training input, + [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] + the Model to Vertex AI, and evaluate the Model. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.GetTrainingPipelineRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.GetTrainingPipelineRequest): + request = pipeline_service.GetTrainingPipelineRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_training_pipeline] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_training_pipelines(self, + request: pipeline_service.ListTrainingPipelinesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTrainingPipelinesPager: + r"""Lists TrainingPipelines in a Location. + + Args: + request (google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest): + The request object. Request message for + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines]. + parent (str): + Required. The resource name of the Location to list the + TrainingPipelines from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListTrainingPipelinesPager: + Response message for + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.ListTrainingPipelinesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.ListTrainingPipelinesRequest): + request = pipeline_service.ListTrainingPipelinesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_training_pipelines] + + # Certain fields should be provided within the metadata header; + # add these here. 
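+        # (For a hypothetical parent of "projects/p/locations/l", the lines
+        # below add the gRPC metadata entry keyed "x-goog-request-params"
+        # with the url-encoded pair parent=projects/p/locations/l.)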
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListTrainingPipelinesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_training_pipeline(self, + request: pipeline_service.DeleteTrainingPipelineRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a TrainingPipeline. + + Args: + request (google.cloud.aiplatform_v1.types.DeleteTrainingPipelineRequest): + The request object. Request message for + [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.DeleteTrainingPipeline]. + name (str): + Required. The name of the TrainingPipeline resource to + be deleted. Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.DeleteTrainingPipelineRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.DeleteTrainingPipelineRequest): + request = pipeline_service.DeleteTrainingPipelineRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_training_pipeline] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
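+        # (The raw response is a google.longrunning.Operation; it is wrapped
+        # into an operation future below, so a caller can block on deletion
+        # with, e.g., a hypothetical ``response.result(timeout=300)``.)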
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def cancel_training_pipeline(self, + request: pipeline_service.CancelTrainingPipelineRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a TrainingPipeline. Starts asynchronous cancellation on + the TrainingPipeline. The server makes a best effort to cancel + the pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the TrainingPipeline is not deleted; + instead it becomes a pipeline with a + [TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state] + is set to ``CANCELLED``. + + Args: + request (google.cloud.aiplatform_v1.types.CancelTrainingPipelineRequest): + The request object. Request message for + [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CancelTrainingPipeline]. + name (str): + Required. The name of the TrainingPipeline to cancel. + Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.CancelTrainingPipelineRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.CancelTrainingPipelineRequest): + request = pipeline_service.CancelTrainingPipelineRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_training_pipeline] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
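+        # (Cancellation returns google.protobuf.Empty, so nothing is returned
+        # to the caller; the pipeline's eventual CANCELLED state can be
+        # observed via get_training_pipeline.)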
+ rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def create_pipeline_job(self, + request: pipeline_service.CreatePipelineJobRequest = None, + *, + parent: str = None, + pipeline_job: gca_pipeline_job.PipelineJob = None, + pipeline_job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_pipeline_job.PipelineJob: + r"""Creates a PipelineJob. A PipelineJob will run + immediately when created. + + Args: + request (google.cloud.aiplatform_v1.types.CreatePipelineJobRequest): + The request object. Request message for + [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1.PipelineService.CreatePipelineJob]. + parent (str): + Required. The resource name of the Location to create + the PipelineJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + pipeline_job (google.cloud.aiplatform_v1.types.PipelineJob): + Required. The PipelineJob to create. + This corresponds to the ``pipeline_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + pipeline_job_id (str): + The ID to use for the PipelineJob, which will become the + final component of the PipelineJob name. If not + provided, an ID will be automatically generated. + + This value should be less than 128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``pipeline_job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.PipelineJob: + An instance of a machine learning + PipelineJob. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, pipeline_job, pipeline_job_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.CreatePipelineJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.CreatePipelineJobRequest): + request = pipeline_service.CreatePipelineJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if pipeline_job is not None: + request.pipeline_job = pipeline_job + if pipeline_job_id is not None: + request.pipeline_job_id = pipeline_job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_pipeline_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
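+        # (As a worked example of the ID constraint documented above, a
+        # hypothetical pipeline_job_id such as "train-eval-42" uses only the
+        # allowed [a-z][0-9]- characters and is well under 128 characters.)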
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_pipeline_job(self, + request: pipeline_service.GetPipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pipeline_job.PipelineJob: + r"""Gets a PipelineJob. + + Args: + request (google.cloud.aiplatform_v1.types.GetPipelineJobRequest): + The request object. Request message for + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob]. + name (str): + Required. The name of the PipelineJob resource. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.PipelineJob: + An instance of a machine learning + PipelineJob. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.GetPipelineJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.GetPipelineJobRequest): + request = pipeline_service.GetPipelineJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_pipeline_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_pipeline_jobs(self, + request: pipeline_service.ListPipelineJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPipelineJobsPager: + r"""Lists PipelineJobs in a Location. + + Args: + request (google.cloud.aiplatform_v1.types.ListPipelineJobsRequest): + The request object. Request message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs]. + parent (str): + Required. The resource name of the Location to list the + PipelineJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListPipelineJobsPager: + Response message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.ListPipelineJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.ListPipelineJobsRequest): + request = pipeline_service.ListPipelineJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_pipeline_jobs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPipelineJobsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_pipeline_job(self, + request: pipeline_service.DeletePipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a PipelineJob. + + Args: + request (google.cloud.aiplatform_v1.types.DeletePipelineJobRequest): + The request object. Request message for + [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1.PipelineService.DeletePipelineJob]. + name (str): + Required. The name of the PipelineJob resource to be + deleted. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+
+                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty`. A generic empty message that you can re-use to avoid defining duplicated
+                   empty messages in your APIs. A typical example is to
+                   use it as the request or the response type of an API
+                   method. For instance:
+
+                      service Foo {
+                         rpc Bar(google.protobuf.Empty) returns
+                         (google.protobuf.Empty);
+
+                      }
+
+                   The JSON representation for Empty is an empty JSON
+                   object ``{}``.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a pipeline_service.DeletePipelineJobRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, pipeline_service.DeletePipelineJobRequest):
+            request = pipeline_service.DeletePipelineJobRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.delete_pipeline_job]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
+        response = gac_operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            empty_pb2.Empty,
+            metadata_type=gca_operation.DeleteOperationMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def cancel_pipeline_job(self,
+            request: pipeline_service.CancelPipelineJobRequest = None,
+            *,
+            name: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> None:
+        r"""Cancels a PipelineJob. Starts asynchronous cancellation on the
+        PipelineJob. The server makes a best effort to cancel the
+        pipeline, but success is not guaranteed. Clients can use
+        [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob]
+        or other methods to check whether the cancellation succeeded or
+        whether the pipeline completed despite cancellation. On
+        successful cancellation, the PipelineJob is not deleted; instead
+        it becomes a pipeline with a
+        [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error]
+        value with a [google.rpc.Status.code][google.rpc.Status.code] of
+        1, corresponding to ``Code.CANCELLED``, and
+        [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state]
+        is set to ``CANCELLED``.
+
+        Args:
+            request (google.cloud.aiplatform_v1.types.CancelPipelineJobRequest):
+                The request object. Request message for
+                [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1.PipelineService.CancelPipelineJob].
+            name (str):
+                Required. The name of the PipelineJob to cancel.
Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.CancelPipelineJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.CancelPipelineJobRequest): + request = pipeline_service.CancelPipelineJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_pipeline_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "PipelineServiceClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py new file mode 100644 index 0000000000..faa4e19346 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py @@ -0,0 +1,264 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.cloud.aiplatform_v1.types import pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_service +from google.cloud.aiplatform_v1.types import training_pipeline + + +class ListTrainingPipelinesPager: + """A pager for iterating through ``list_training_pipelines`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``training_pipelines`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTrainingPipelines`` requests and continue to iterate + through the ``training_pipelines`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., pipeline_service.ListTrainingPipelinesResponse], + request: pipeline_service.ListTrainingPipelinesRequest, + response: pipeline_service.ListTrainingPipelinesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = pipeline_service.ListTrainingPipelinesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[pipeline_service.ListTrainingPipelinesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[training_pipeline.TrainingPipeline]: + for page in self.pages: + yield from page.training_pipelines + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTrainingPipelinesAsyncPager: + """A pager for iterating through ``list_training_pipelines`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``training_pipelines`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListTrainingPipelines`` requests and continue to iterate + through the ``training_pipelines`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[pipeline_service.ListTrainingPipelinesResponse]], + request: pipeline_service.ListTrainingPipelinesRequest, + response: pipeline_service.ListTrainingPipelinesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest): + The initial request object. 
+ response (google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = pipeline_service.ListTrainingPipelinesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[pipeline_service.ListTrainingPipelinesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[training_pipeline.TrainingPipeline]: + async def async_generator(): + async for page in self.pages: + for response in page.training_pipelines: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPipelineJobsPager: + """A pager for iterating through ``list_pipeline_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListPipelineJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``pipeline_jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListPipelineJobs`` requests and continue to iterate + through the ``pipeline_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListPipelineJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., pipeline_service.ListPipelineJobsResponse], + request: pipeline_service.ListPipelineJobsRequest, + response: pipeline_service.ListPipelineJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListPipelineJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListPipelineJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = pipeline_service.ListPipelineJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[pipeline_service.ListPipelineJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[pipeline_job.PipelineJob]: + for page in self.pages: + yield from page.pipeline_jobs + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPipelineJobsAsyncPager: + """A pager for iterating through ``list_pipeline_jobs`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListPipelineJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``pipeline_jobs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListPipelineJobs`` requests and continue to iterate + through the ``pipeline_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListPipelineJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[pipeline_service.ListPipelineJobsResponse]], + request: pipeline_service.ListPipelineJobsRequest, + response: pipeline_service.ListPipelineJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListPipelineJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListPipelineJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = pipeline_service.ListPipelineJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[pipeline_service.ListPipelineJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[pipeline_job.PipelineJob]: + async def async_generator(): + async for page in self.pages: + for response in page.pipeline_jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/__init__.py new file mode 100644 index 0000000000..77051d8254 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import PipelineServiceTransport +from .grpc import PipelineServiceGrpcTransport +from .grpc_asyncio import PipelineServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. 
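+# The registry maps the ``transport`` argument accepted by the client
+# (e.g. ``PipelineServiceClient(transport='grpc')``) to a concrete
+# transport class. Illustrative lookup:
+#
+#     transport_cls = _transport_registry['grpc']   # PipelineServiceGrpcTransport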
+_transport_registry = OrderedDict() # type: Dict[str, Type[PipelineServiceTransport]] +_transport_registry['grpc'] = PipelineServiceGrpcTransport +_transport_registry['grpc_asyncio'] = PipelineServiceGrpcAsyncIOTransport + +__all__ = ( + 'PipelineServiceTransport', + 'PipelineServiceGrpcTransport', + 'PipelineServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py new file mode 100644 index 0000000000..f2f3cc392e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py @@ -0,0 +1,306 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1.types import pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_service +from google.cloud.aiplatform_v1.types import training_pipeline +from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class PipelineServiceTransport(abc.ABC): + """Abstract transport class for PipelineService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: 
Optional[bool] = False,
+            **kwargs,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): A list of scopes.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self-signed JWT should
+                be used for service account credentials.
+        """
+        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+        if ':' not in host:
+            host += ':443'
+        self._host = host
+
+        scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
+
+        # Save the scopes.
+        self._scopes = scopes
+
+        # If no credentials are provided, then determine the appropriate
+        # defaults.
+        if credentials and credentials_file:
+            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
+
+        if credentials_file is not None:
+            credentials, _ = google.auth.load_credentials_from_file(
+                                credentials_file,
+                                **scopes_kwargs,
+                                quota_project_id=quota_project_id
+                            )
+
+        elif credentials is None:
+            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
+
+        # If the credentials are service account credentials, then always try to use self-signed JWT.
+        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
+            credentials = credentials.with_always_use_jwt_access(True)
+
+        # Save the credentials.
+        self._credentials = credentials
+
+    # TODO(busunkim): This method is in the base transport
+    # to avoid duplicating code across the transport classes. These functions
+    # should be deleted once the minimum required version of google-auth is increased.
+
+    # TODO: Remove this function once google-auth >= 1.25.0 is required
+    @classmethod
+    def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
+        """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
+
+        scopes_kwargs = {}
+
+        if _GOOGLE_AUTH_VERSION and (
+            packaging.version.parse(_GOOGLE_AUTH_VERSION)
+            >= packaging.version.parse("1.25.0")
+        ):
+            scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
+        else:
+            scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
+
+        return scopes_kwargs
+
+    def _prep_wrapped_messages(self, client_info):
+        # Precompute the wrapped methods.
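+        # Each entry below pairs a transport stub with its default retry and
+        # timeout policy; the client methods reach RPCs through
+        # ``self._transport._wrapped_methods[self._transport.<rpc_name>]``
+        # rather than calling the raw stub directly.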
+ self._wrapped_methods = { + self.create_training_pipeline: gapic_v1.method.wrap_method( + self.create_training_pipeline, + default_timeout=5.0, + client_info=client_info, + ), + self.get_training_pipeline: gapic_v1.method.wrap_method( + self.get_training_pipeline, + default_timeout=5.0, + client_info=client_info, + ), + self.list_training_pipelines: gapic_v1.method.wrap_method( + self.list_training_pipelines, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_training_pipeline: gapic_v1.method.wrap_method( + self.delete_training_pipeline, + default_timeout=5.0, + client_info=client_info, + ), + self.cancel_training_pipeline: gapic_v1.method.wrap_method( + self.cancel_training_pipeline, + default_timeout=5.0, + client_info=client_info, + ), + self.create_pipeline_job: gapic_v1.method.wrap_method( + self.create_pipeline_job, + default_timeout=None, + client_info=client_info, + ), + self.get_pipeline_job: gapic_v1.method.wrap_method( + self.get_pipeline_job, + default_timeout=None, + client_info=client_info, + ), + self.list_pipeline_jobs: gapic_v1.method.wrap_method( + self.list_pipeline_jobs, + default_timeout=None, + client_info=client_info, + ), + self.delete_pipeline_job: gapic_v1.method.wrap_method( + self.delete_pipeline_job, + default_timeout=None, + client_info=client_info, + ), + self.cancel_pipeline_job: gapic_v1.method.wrap_method( + self.cancel_pipeline_job, + default_timeout=None, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_training_pipeline(self) -> Callable[ + [pipeline_service.CreateTrainingPipelineRequest], + Union[ + gca_training_pipeline.TrainingPipeline, + Awaitable[gca_training_pipeline.TrainingPipeline] + ]]: + raise NotImplementedError() + + @property + def get_training_pipeline(self) -> Callable[ + [pipeline_service.GetTrainingPipelineRequest], + Union[ + training_pipeline.TrainingPipeline, + Awaitable[training_pipeline.TrainingPipeline] + ]]: + raise NotImplementedError() + + @property + def list_training_pipelines(self) -> Callable[ + [pipeline_service.ListTrainingPipelinesRequest], + Union[ + pipeline_service.ListTrainingPipelinesResponse, + Awaitable[pipeline_service.ListTrainingPipelinesResponse] + ]]: + raise NotImplementedError() + + @property + def delete_training_pipeline(self) -> Callable[ + [pipeline_service.DeleteTrainingPipelineRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def cancel_training_pipeline(self) -> Callable[ + [pipeline_service.CancelTrainingPipelineRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def create_pipeline_job(self) -> Callable[ + [pipeline_service.CreatePipelineJobRequest], + Union[ + gca_pipeline_job.PipelineJob, + Awaitable[gca_pipeline_job.PipelineJob] + ]]: + raise NotImplementedError() + + @property + def get_pipeline_job(self) -> Callable[ + [pipeline_service.GetPipelineJobRequest], + Union[ + pipeline_job.PipelineJob, + Awaitable[pipeline_job.PipelineJob] + ]]: + raise NotImplementedError() + + @property + def list_pipeline_jobs(self) -> Callable[ + [pipeline_service.ListPipelineJobsRequest], + Union[ + pipeline_service.ListPipelineJobsResponse, + Awaitable[pipeline_service.ListPipelineJobsResponse] + ]]: + raise NotImplementedError() + + @property + 
def delete_pipeline_job(self) -> Callable[ + [pipeline_service.DeletePipelineJobRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def cancel_pipeline_job(self) -> Callable[ + [pipeline_service.CancelPipelineJobRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'PipelineServiceTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py new file mode 100644 index 0000000000..10bae61fa1 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py @@ -0,0 +1,539 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1.types import pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_service +from google.cloud.aiplatform_v1.types import training_pipeline +from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import PipelineServiceTransport, DEFAULT_CLIENT_INFO + + +class PipelineServiceGrpcTransport(PipelineServiceTransport): + """gRPC backend transport for PipelineService. + + A service for creating and managing Vertex AI's pipelines. This + includes both ``TrainingPipeline`` resources (used for AutoML and + custom training) and ``PipelineJob`` resources (used for Vertex + Pipelines). + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
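+
+    Example (illustrative sketch, not generated API surface; assumes
+    application default credentials are configured and the resource name
+    is a placeholder):
+
+    .. code-block:: python
+
+        from google.cloud.aiplatform_v1.services.pipeline_service import (
+            PipelineServiceClient,
+            transports,
+        )
+
+        transport = transports.PipelineServiceGrpcTransport()
+        client = PipelineServiceClient(transport=transport)
+        for job in client.list_pipeline_jobs(
+                parent="projects/my-project/locations/us-central1"):
+            print(job.name)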
+ """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. 
+            credentials = False
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+
+        else:
+            if api_mtls_endpoint:
+                host = api_mtls_endpoint
+
+                # Create SSL credentials with client_cert_source or application
+                # default SSL credentials.
+                if client_cert_source:
+                    cert, key = client_cert_source()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+                else:
+                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+            else:
+                if client_cert_source_for_mtls and not ssl_channel_credentials:
+                    cert, key = client_cert_source_for_mtls()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+
+        # The base transport sets the host, credentials and scopes
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+        )
+
+        if not self._grpc_channel:
+            self._grpc_channel = type(self).create_channel(
+                self._host,
+                credentials=self._credentials,
+                credentials_file=credentials_file,
+                scopes=self._scopes,
+                ssl_credentials=self._ssl_channel_credentials,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        # Wrap messages. This must be done after self._grpc_channel exists
+        self._prep_wrapped_messages(client_info)
+
+    @classmethod
+    def create_channel(cls,
+             host: str = 'aiplatform.googleapis.com',
+             credentials: ga_credentials.Credentials = None,
+             credentials_file: str = None,
+             scopes: Optional[Sequence[str]] = None,
+             quota_project_id: Optional[str] = None,
+             **kwargs) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service.
+        """
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Sanity check: Only create a new client if we do not already have one.
+        if self._operations_client is None:
+            self._operations_client = operations_v1.OperationsClient(
+                self.grpc_channel
+            )
+
+        # Return the client from cache.
+        return self._operations_client
+
+    @property
+    def create_training_pipeline(self) -> Callable[
+            [pipeline_service.CreateTrainingPipelineRequest],
+            gca_training_pipeline.TrainingPipeline]:
+        r"""Return a callable for the create training pipeline method over gRPC.
+
+        Creates a TrainingPipeline. The server will attempt to run a
+        newly created TrainingPipeline right away.
+
+        Returns:
+            Callable[[~.CreateTrainingPipelineRequest],
+                    ~.TrainingPipeline]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'create_training_pipeline' not in self._stubs:
+            self._stubs['create_training_pipeline'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1.PipelineService/CreateTrainingPipeline',
+                request_serializer=pipeline_service.CreateTrainingPipelineRequest.serialize,
+                response_deserializer=gca_training_pipeline.TrainingPipeline.deserialize,
+            )
+        return self._stubs['create_training_pipeline']
+
+    @property
+    def get_training_pipeline(self) -> Callable[
+            [pipeline_service.GetTrainingPipelineRequest],
+            training_pipeline.TrainingPipeline]:
+        r"""Return a callable for the get training pipeline method over gRPC.
+
+        Gets a TrainingPipeline.
+
+        Returns:
+            Callable[[~.GetTrainingPipelineRequest],
+                    ~.TrainingPipeline]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'get_training_pipeline' not in self._stubs:
+            self._stubs['get_training_pipeline'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1.PipelineService/GetTrainingPipeline',
+                request_serializer=pipeline_service.GetTrainingPipelineRequest.serialize,
+                response_deserializer=training_pipeline.TrainingPipeline.deserialize,
+            )
+        return self._stubs['get_training_pipeline']
+
+    @property
+    def list_training_pipelines(self) -> Callable[
+            [pipeline_service.ListTrainingPipelinesRequest],
+            pipeline_service.ListTrainingPipelinesResponse]:
+        r"""Return a callable for the list training pipelines method over gRPC.
+
+        Lists TrainingPipelines in a Location.
+
+        Returns:
+            Callable[[~.ListTrainingPipelinesRequest],
+                    ~.ListTrainingPipelinesResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
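+        # The stub is created once per channel and cached in ``self._stubs``,
+        # so repeated property accesses reuse the same callable.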
+ if 'list_training_pipelines' not in self._stubs: + self._stubs['list_training_pipelines'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/ListTrainingPipelines', + request_serializer=pipeline_service.ListTrainingPipelinesRequest.serialize, + response_deserializer=pipeline_service.ListTrainingPipelinesResponse.deserialize, + ) + return self._stubs['list_training_pipelines'] + + @property + def delete_training_pipeline(self) -> Callable[ + [pipeline_service.DeleteTrainingPipelineRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete training pipeline method over gRPC. + + Deletes a TrainingPipeline. + + Returns: + Callable[[~.DeleteTrainingPipelineRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_training_pipeline' not in self._stubs: + self._stubs['delete_training_pipeline'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/DeleteTrainingPipeline', + request_serializer=pipeline_service.DeleteTrainingPipelineRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_training_pipeline'] + + @property + def cancel_training_pipeline(self) -> Callable[ + [pipeline_service.CancelTrainingPipelineRequest], + empty_pb2.Empty]: + r"""Return a callable for the cancel training pipeline method over gRPC. + + Cancels a TrainingPipeline. Starts asynchronous cancellation on + the TrainingPipeline. The server makes a best effort to cancel + the pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the TrainingPipeline is not deleted; + instead it becomes a pipeline with a + [TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state] + is set to ``CANCELLED``. + + Returns: + Callable[[~.CancelTrainingPipelineRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_training_pipeline' not in self._stubs: + self._stubs['cancel_training_pipeline'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/CancelTrainingPipeline', + request_serializer=pipeline_service.CancelTrainingPipelineRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_training_pipeline'] + + @property + def create_pipeline_job(self) -> Callable[ + [pipeline_service.CreatePipelineJobRequest], + gca_pipeline_job.PipelineJob]: + r"""Return a callable for the create pipeline job method over gRPC. + + Creates a PipelineJob. A PipelineJob will run + immediately when created. 
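+
+        Illustrative usage via the client layer (a sketch; ``request`` stands
+        in for a fully populated CreatePipelineJobRequest):
+
+        .. code-block:: python
+
+            job = client.create_pipeline_job(request=request)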
+ + Returns: + Callable[[~.CreatePipelineJobRequest], + ~.PipelineJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_pipeline_job' not in self._stubs: + self._stubs['create_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/CreatePipelineJob', + request_serializer=pipeline_service.CreatePipelineJobRequest.serialize, + response_deserializer=gca_pipeline_job.PipelineJob.deserialize, + ) + return self._stubs['create_pipeline_job'] + + @property + def get_pipeline_job(self) -> Callable[ + [pipeline_service.GetPipelineJobRequest], + pipeline_job.PipelineJob]: + r"""Return a callable for the get pipeline job method over gRPC. + + Gets a PipelineJob. + + Returns: + Callable[[~.GetPipelineJobRequest], + ~.PipelineJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_pipeline_job' not in self._stubs: + self._stubs['get_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/GetPipelineJob', + request_serializer=pipeline_service.GetPipelineJobRequest.serialize, + response_deserializer=pipeline_job.PipelineJob.deserialize, + ) + return self._stubs['get_pipeline_job'] + + @property + def list_pipeline_jobs(self) -> Callable[ + [pipeline_service.ListPipelineJobsRequest], + pipeline_service.ListPipelineJobsResponse]: + r"""Return a callable for the list pipeline jobs method over gRPC. + + Lists PipelineJobs in a Location. + + Returns: + Callable[[~.ListPipelineJobsRequest], + ~.ListPipelineJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_pipeline_jobs' not in self._stubs: + self._stubs['list_pipeline_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/ListPipelineJobs', + request_serializer=pipeline_service.ListPipelineJobsRequest.serialize, + response_deserializer=pipeline_service.ListPipelineJobsResponse.deserialize, + ) + return self._stubs['list_pipeline_jobs'] + + @property + def delete_pipeline_job(self) -> Callable[ + [pipeline_service.DeletePipelineJobRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete pipeline job method over gRPC. + + Deletes a PipelineJob. + + Returns: + Callable[[~.DeletePipelineJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_pipeline_job' not in self._stubs: + self._stubs['delete_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/DeletePipelineJob', + request_serializer=pipeline_service.DeletePipelineJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_pipeline_job'] + + @property + def cancel_pipeline_job(self) -> Callable[ + [pipeline_service.CancelPipelineJobRequest], + empty_pb2.Empty]: + r"""Return a callable for the cancel pipeline job method over gRPC. + + Cancels a PipelineJob. Starts asynchronous cancellation on the + PipelineJob. The server makes a best effort to cancel the + pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the PipelineJob is not deleted; instead + it becomes a pipeline with a + [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] + is set to ``CANCELLED``. + + Returns: + Callable[[~.CancelPipelineJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_pipeline_job' not in self._stubs: + self._stubs['cancel_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/CancelPipelineJob', + request_serializer=pipeline_service.CancelPipelineJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_pipeline_job'] + + +__all__ = ( + 'PipelineServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..db6ff3625b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py @@ -0,0 +1,543 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import grpc_helpers_async  # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+import packaging.version
+
+import grpc  # type: ignore
+from grpc.experimental import aio  # type: ignore
+
+from google.cloud.aiplatform_v1.types import pipeline_job
+from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job
+from google.cloud.aiplatform_v1.types import pipeline_service
+from google.cloud.aiplatform_v1.types import training_pipeline
+from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline
+from google.longrunning import operations_pb2  # type: ignore
+from google.protobuf import empty_pb2  # type: ignore
+from .base import PipelineServiceTransport, DEFAULT_CLIENT_INFO
+from .grpc import PipelineServiceGrpcTransport
+
+
+class PipelineServiceGrpcAsyncIOTransport(PipelineServiceTransport):
+    """gRPC AsyncIO backend transport for PipelineService.
+
+    A service for creating and managing Vertex AI's pipelines. This
+    includes both ``TrainingPipeline`` resources (used for AutoML and
+    custom training) and ``PipelineJob`` resources (used for Vertex
+    Pipelines).
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(cls,
+             host: str = 'aiplatform.googleapis.com',
+             credentials: ga_credentials.Credentials = None,
+             credentials_file: Optional[str] = None,
+             scopes: Optional[Sequence[str]] = None,
+             quota_project_id: Optional[str] = None,
+             **kwargs) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
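+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.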
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_training_pipeline(self) -> Callable[ + [pipeline_service.CreateTrainingPipelineRequest], + Awaitable[gca_training_pipeline.TrainingPipeline]]: + r"""Return a callable for the create training pipeline method over gRPC. + + Creates a TrainingPipeline. A created + TrainingPipeline right away will be attempted to be run. + + Returns: + Callable[[~.CreateTrainingPipelineRequest], + Awaitable[~.TrainingPipeline]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_training_pipeline' not in self._stubs: + self._stubs['create_training_pipeline'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/CreateTrainingPipeline', + request_serializer=pipeline_service.CreateTrainingPipelineRequest.serialize, + response_deserializer=gca_training_pipeline.TrainingPipeline.deserialize, + ) + return self._stubs['create_training_pipeline'] + + @property + def get_training_pipeline(self) -> Callable[ + [pipeline_service.GetTrainingPipelineRequest], + Awaitable[training_pipeline.TrainingPipeline]]: + r"""Return a callable for the get training pipeline method over gRPC. + + Gets a TrainingPipeline. + + Returns: + Callable[[~.GetTrainingPipelineRequest], + Awaitable[~.TrainingPipeline]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_training_pipeline' not in self._stubs: + self._stubs['get_training_pipeline'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/GetTrainingPipeline', + request_serializer=pipeline_service.GetTrainingPipelineRequest.serialize, + response_deserializer=training_pipeline.TrainingPipeline.deserialize, + ) + return self._stubs['get_training_pipeline'] + + @property + def list_training_pipelines(self) -> Callable[ + [pipeline_service.ListTrainingPipelinesRequest], + Awaitable[pipeline_service.ListTrainingPipelinesResponse]]: + r"""Return a callable for the list training pipelines method over gRPC. + + Lists TrainingPipelines in a Location. + + Returns: + Callable[[~.ListTrainingPipelinesRequest], + Awaitable[~.ListTrainingPipelinesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_training_pipelines' not in self._stubs: + self._stubs['list_training_pipelines'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/ListTrainingPipelines', + request_serializer=pipeline_service.ListTrainingPipelinesRequest.serialize, + response_deserializer=pipeline_service.ListTrainingPipelinesResponse.deserialize, + ) + return self._stubs['list_training_pipelines'] + + @property + def delete_training_pipeline(self) -> Callable[ + [pipeline_service.DeleteTrainingPipelineRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete training pipeline method over gRPC. + + Deletes a TrainingPipeline. + + Returns: + Callable[[~.DeleteTrainingPipelineRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
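+        # NOTE: an illustrative sketch only. Because this RPC resolves to a
+        # ``google.longrunning.Operation``, callers normally poll it through
+        # ``operations_client`` rather than reading the raw proto;
+        # ``transport`` and ``name`` below are hypothetical.
+        #
+        #     operation = await transport.delete_training_pipeline(
+        #         pipeline_service.DeleteTrainingPipelineRequest(name=name)
+        #     )
+        #     # operation.name can then be polled with
+        #     # await transport.operations_client.get_operation(operation.name)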
+ if 'delete_training_pipeline' not in self._stubs: + self._stubs['delete_training_pipeline'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/DeleteTrainingPipeline', + request_serializer=pipeline_service.DeleteTrainingPipelineRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_training_pipeline'] + + @property + def cancel_training_pipeline(self) -> Callable[ + [pipeline_service.CancelTrainingPipelineRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the cancel training pipeline method over gRPC. + + Cancels a TrainingPipeline. Starts asynchronous cancellation on + the TrainingPipeline. The server makes a best effort to cancel + the pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the TrainingPipeline is not deleted; + instead it becomes a pipeline with a + [TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state] + is set to ``CANCELLED``. + + Returns: + Callable[[~.CancelTrainingPipelineRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_training_pipeline' not in self._stubs: + self._stubs['cancel_training_pipeline'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/CancelTrainingPipeline', + request_serializer=pipeline_service.CancelTrainingPipelineRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_training_pipeline'] + + @property + def create_pipeline_job(self) -> Callable[ + [pipeline_service.CreatePipelineJobRequest], + Awaitable[gca_pipeline_job.PipelineJob]]: + r"""Return a callable for the create pipeline job method over gRPC. + + Creates a PipelineJob. A PipelineJob will run + immediately when created. + + Returns: + Callable[[~.CreatePipelineJobRequest], + Awaitable[~.PipelineJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_pipeline_job' not in self._stubs: + self._stubs['create_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/CreatePipelineJob', + request_serializer=pipeline_service.CreatePipelineJobRequest.serialize, + response_deserializer=gca_pipeline_job.PipelineJob.deserialize, + ) + return self._stubs['create_pipeline_job'] + + @property + def get_pipeline_job(self) -> Callable[ + [pipeline_service.GetPipelineJobRequest], + Awaitable[pipeline_job.PipelineJob]]: + r"""Return a callable for the get pipeline job method over gRPC. + + Gets a PipelineJob. 
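+
+        Example (an illustrative sketch; ``transport`` and ``job_name``
+        are hypothetical)::
+
+            request = pipeline_service.GetPipelineJobRequest(name=job_name)
+            job = await transport.get_pipeline_job(request)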
+ + Returns: + Callable[[~.GetPipelineJobRequest], + Awaitable[~.PipelineJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_pipeline_job' not in self._stubs: + self._stubs['get_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/GetPipelineJob', + request_serializer=pipeline_service.GetPipelineJobRequest.serialize, + response_deserializer=pipeline_job.PipelineJob.deserialize, + ) + return self._stubs['get_pipeline_job'] + + @property + def list_pipeline_jobs(self) -> Callable[ + [pipeline_service.ListPipelineJobsRequest], + Awaitable[pipeline_service.ListPipelineJobsResponse]]: + r"""Return a callable for the list pipeline jobs method over gRPC. + + Lists PipelineJobs in a Location. + + Returns: + Callable[[~.ListPipelineJobsRequest], + Awaitable[~.ListPipelineJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_pipeline_jobs' not in self._stubs: + self._stubs['list_pipeline_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/ListPipelineJobs', + request_serializer=pipeline_service.ListPipelineJobsRequest.serialize, + response_deserializer=pipeline_service.ListPipelineJobsResponse.deserialize, + ) + return self._stubs['list_pipeline_jobs'] + + @property + def delete_pipeline_job(self) -> Callable[ + [pipeline_service.DeletePipelineJobRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete pipeline job method over gRPC. + + Deletes a PipelineJob. + + Returns: + Callable[[~.DeletePipelineJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_pipeline_job' not in self._stubs: + self._stubs['delete_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/DeletePipelineJob', + request_serializer=pipeline_service.DeletePipelineJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_pipeline_job'] + + @property + def cancel_pipeline_job(self) -> Callable[ + [pipeline_service.CancelPipelineJobRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the cancel pipeline job method over gRPC. + + Cancels a PipelineJob. Starts asynchronous cancellation on the + PipelineJob. The server makes a best effort to cancel the + pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. 
On + successful cancellation, the PipelineJob is not deleted; instead + it becomes a pipeline with a + [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] + is set to ``CANCELLED``. + + Returns: + Callable[[~.CancelPipelineJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_pipeline_job' not in self._stubs: + self._stubs['cancel_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PipelineService/CancelPipelineJob', + request_serializer=pipeline_service.CancelPipelineJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_pipeline_job'] + + +__all__ = ( + 'PipelineServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/__init__.py new file mode 100644 index 0000000000..13c5d11c66 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import PredictionServiceClient +from .async_client import PredictionServiceAsyncClient + +__all__ = ( + 'PredictionServiceClient', + 'PredictionServiceAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/async_client.py new file mode 100644 index 0000000000..39d2b4fb93 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/async_client.py @@ -0,0 +1,272 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1.types import prediction_service +from google.protobuf import struct_pb2 # type: ignore +from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport +from .client import PredictionServiceClient + + +class PredictionServiceAsyncClient: + """A service for online predictions and explanations.""" + + _client: PredictionServiceClient + + DEFAULT_ENDPOINT = PredictionServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = PredictionServiceClient.DEFAULT_MTLS_ENDPOINT + + endpoint_path = staticmethod(PredictionServiceClient.endpoint_path) + parse_endpoint_path = staticmethod(PredictionServiceClient.parse_endpoint_path) + common_billing_account_path = staticmethod(PredictionServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(PredictionServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(PredictionServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(PredictionServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(PredictionServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(PredictionServiceClient.parse_common_organization_path) + common_project_path = staticmethod(PredictionServiceClient.common_project_path) + parse_common_project_path = staticmethod(PredictionServiceClient.parse_common_project_path) + common_location_path = staticmethod(PredictionServiceClient.common_location_path) + parse_common_location_path = staticmethod(PredictionServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PredictionServiceAsyncClient: The constructed client. + """ + return PredictionServiceClient.from_service_account_info.__func__(PredictionServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PredictionServiceAsyncClient: The constructed client. + """ + return PredictionServiceClient.from_service_account_file.__func__(PredictionServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> PredictionServiceTransport: + """Returns the transport used by the client instance. 
+ + Returns: + PredictionServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(PredictionServiceClient).get_transport_class, type(PredictionServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, PredictionServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the prediction service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.PredictionServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = PredictionServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def predict(self, + request: prediction_service.PredictRequest = None, + *, + endpoint: str = None, + parameters: struct_pb2.Value = None, + instances: Sequence[struct_pb2.Value] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> prediction_service.PredictResponse: + r"""Perform an online prediction. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.PredictRequest`): + The request object. Request message for + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. + endpoint (:class:`str`): + Required. The name of the Endpoint requested to serve + the prediction. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parameters (:class:`google.protobuf.struct_pb2.Value`): + The parameters that govern the prediction. 
The schema of + the parameters may be specified via Endpoint's + DeployedModels' [Model's + ][google.cloud.aiplatform.v1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. + + This corresponds to the ``parameters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instances (:class:`Sequence[google.protobuf.struct_pb2.Value]`): + Required. The instances that are the input to the + prediction call. A DeployedModel may have an upper limit + on the number of instances it supports per request, and + when it is exceeded the prediction call errors in case + of AutoML Models, or, in case of customer created + Models, the behaviour is as documented by that Model. + The schema of any single instance may be specified via + Endpoint's DeployedModels' + [Model's][google.cloud.aiplatform.v1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri]. + + This corresponds to the ``instances`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.PredictResponse: + Response message for + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([endpoint, parameters, instances]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = prediction_service.PredictRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if parameters is not None: + request.parameters = parameters + if instances: + request.instances.extend(instances) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.predict, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
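+        # NOTE: an illustrative sketch of the corresponding user-level call;
+        # ``client`` and ``endpoint_name`` are hypothetical.
+        #
+        #     from google.protobuf import json_format, struct_pb2
+        #     instance = json_format.ParseDict({"values": [1, 2, 3]}, struct_pb2.Value())
+        #     response = await client.predict(
+        #         endpoint=endpoint_name, instances=[instance])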
+ return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "PredictionServiceAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/client.py new file mode 100644 index 0000000000..70658eb86c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/client.py @@ -0,0 +1,459 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1.types import prediction_service +from google.protobuf import struct_pb2 # type: ignore +from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import PredictionServiceGrpcTransport +from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport + + +class PredictionServiceClientMeta(type): + """Metaclass for the PredictionService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[PredictionServiceTransport]] + _transport_registry["grpc"] = PredictionServiceGrpcTransport + _transport_registry["grpc_asyncio"] = PredictionServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[PredictionServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). 
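+        # NOTE: an illustrative sketch; requesting a label returns that
+        # transport class, e.g.:
+        #
+        #     Transport = PredictionServiceClient.get_transport_class("grpc_asyncio")
+        #     assert Transport is PredictionServiceGrpcAsyncIOTransport
+        #
+        # With no label, the first registered entry ("grpc") is returned: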
+        return next(iter(cls._transport_registry.values()))
+
+
+class PredictionServiceClient(metaclass=PredictionServiceClientMeta):
+    """A service for online predictions and explanations."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            PredictionServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            PredictionServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(
+            filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> PredictionServiceTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            PredictionServiceTransport: The transport used by the client
+                instance.
+ """ + return self._transport + + @staticmethod + def endpoint_path(project: str,location: str,endpoint: str,) -> str: + """Returns a fully-qualified endpoint string.""" + return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + + @staticmethod + def parse_endpoint_path(path: str) -> Dict[str,str]: + """Parses a endpoint path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, PredictionServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the prediction service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, PredictionServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. 
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, PredictionServiceTransport): + # transport is a PredictionServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def predict(self, + request: prediction_service.PredictRequest = None, + *, + endpoint: str = None, + parameters: struct_pb2.Value = None, + instances: Sequence[struct_pb2.Value] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> prediction_service.PredictResponse: + r"""Perform an online prediction. + + Args: + request (google.cloud.aiplatform_v1.types.PredictRequest): + The request object. Request message for + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. + endpoint (str): + Required. The name of the Endpoint requested to serve + the prediction. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parameters (google.protobuf.struct_pb2.Value): + The parameters that govern the prediction. The schema of + the parameters may be specified via Endpoint's + DeployedModels' [Model's + ][google.cloud.aiplatform.v1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. + + This corresponds to the ``parameters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instances (Sequence[google.protobuf.struct_pb2.Value]): + Required. The instances that are the input to the + prediction call. A DeployedModel may have an upper limit + on the number of instances it supports per request, and + when it is exceeded the prediction call errors in case + of AutoML Models, or, in case of customer created + Models, the behaviour is as documented by that Model. + The schema of any single instance may be specified via + Endpoint's DeployedModels' + [Model's][google.cloud.aiplatform.v1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri]. + + This corresponds to the ``instances`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.PredictResponse: + Response message for + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
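+        # NOTE: an illustrative sketch of the two supported calling styles;
+        # ``client``, ``endpoint_name``, and ``instance`` are hypothetical.
+        #
+        #     # Either pass a fully formed request object...
+        #     client.predict(request=prediction_service.PredictRequest(
+        #         endpoint=endpoint_name, instances=[instance]))
+        #     # ...or the flattened fields, but never both at once.
+        #     client.predict(endpoint=endpoint_name, instances=[instance])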
+ has_flattened_params = any([endpoint, parameters, instances]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a prediction_service.PredictRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, prediction_service.PredictRequest): + request = prediction_service.PredictRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if parameters is not None: + request.parameters = parameters + if instances is not None: + request.instances = instances + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.predict] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "PredictionServiceClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/__init__.py new file mode 100644 index 0000000000..d747de2ce9 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import PredictionServiceTransport +from .grpc import PredictionServiceGrpcTransport +from .grpc_asyncio import PredictionServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. 
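+# NOTE: an illustrative sketch; client code rarely touches this registry
+# directly. It is consumed by the client metaclass, roughly as in:
+#
+#     transport_cls = _transport_registry["grpc_asyncio"]
+#     transport = transport_cls(host="aiplatform.googleapis.com")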
+_transport_registry = OrderedDict() # type: Dict[str, Type[PredictionServiceTransport]] +_transport_registry['grpc'] = PredictionServiceGrpcTransport +_transport_registry['grpc_asyncio'] = PredictionServiceGrpcAsyncIOTransport + +__all__ = ( + 'PredictionServiceTransport', + 'PredictionServiceGrpcTransport', + 'PredictionServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py new file mode 100644 index 0000000000..c40011876e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py @@ -0,0 +1,168 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1.types import prediction_service + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class PredictionServiceTransport(abc.ABC): + """Abstract transport class for PredictionService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials is service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # TODO(busunkim): This method is in the base transport + # to avoid duplicating code across the transport classes. These functions + # should be deleted once the minimum required versions of google-auth is increased. + + # TODO: Remove this function once google-auth >= 1.25.0 is required + @classmethod + def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: + """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" + + scopes_kwargs = {} + + if _GOOGLE_AUTH_VERSION and ( + packaging.version.parse(_GOOGLE_AUTH_VERSION) + >= packaging.version.parse("1.25.0") + ): + scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} + else: + scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} + + return scopes_kwargs + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
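+        # NOTE: an illustrative sketch; gapic_v1.method.wrap_method layers
+        # default timeout, retry, and user-agent metadata onto the raw
+        # transport callable, so a wrapped method can be invoked as
+        # (hypothetical request):
+        #
+        #     response = self._wrapped_methods[self.predict](request, timeout=5.0)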
+ self._wrapped_methods = { + self.predict: gapic_v1.method.wrap_method( + self.predict, + default_timeout=5.0, + client_info=client_info, + ), + } + + @property + def predict(self) -> Callable[ + [prediction_service.PredictRequest], + Union[ + prediction_service.PredictResponse, + Awaitable[prediction_service.PredictResponse] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'PredictionServiceTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py new file mode 100644 index 0000000000..29f8097bb6 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py @@ -0,0 +1,252 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import gapic_v1 # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1.types import prediction_service +from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO + + +class PredictionServiceGrpcTransport(PredictionServiceTransport): + """gRPC backend transport for PredictionService. + + A service for online predictions and explanations. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. 
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure a mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self-signed JWT should
+                be used for service account credentials.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        self._grpc_channel = None
+        self._ssl_channel_credentials = ssl_channel_credentials
+        self._stubs: Dict[str, Callable] = {}
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Ignore credentials if a channel was passed.
+            credentials = False
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+
+        else:
+            if api_mtls_endpoint:
+                host = api_mtls_endpoint
+
+                # Create SSL credentials with client_cert_source or application
+                # default SSL credentials.
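+                # NOTE: an illustrative sketch; a ``client_cert_source``
+                # callback simply returns ``(cert_bytes, key_bytes)``, e.g.
+                # (hypothetical paths):
+                #
+                #     def client_cert_source():
+                #         with open("client.pem", "rb") as c, open("client.key", "rb") as k:
+                #             return c.read(), k.read()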
+ if client_cert_source:
+ cert, key = client_cert_source()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+ else:
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
+ cert, key = client_cert_source_for_mtls()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+
+ # The base transport sets the host, credentials and scopes.
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ always_use_jwt_access=always_use_jwt_access,
+ )
+
+ if not self._grpc_channel:
+ self._grpc_channel = type(self).create_channel(
+ self._host,
+ credentials=self._credentials,
+ credentials_file=credentials_file,
+ scopes=self._scopes,
+ ssl_credentials=self._ssl_channel_credentials,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Wrap messages. This must be done after self._grpc_channel exists.
+ self._prep_wrapped_messages(client_info)
+
+ @classmethod
+ def create_channel(cls,
+ host: str = 'aiplatform.googleapis.com',
+ credentials: ga_credentials.Credentials = None,
+ credentials_file: str = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ **kwargs) -> grpc.Channel:
+ """Create and return a gRPC channel object.
+ Args:
+ host (Optional[str]): The host for the channel to use.
+ credentials (Optional[~.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify this application to the service. If
+ none are specified, the client will attempt to ascertain
+ the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+ scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
+ channel creation.
+ Returns:
+ grpc.Channel: A gRPC channel object.
+
+ Raises:
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+
+ return grpc_helpers.create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ quota_project_id=quota_project_id,
+ default_scopes=cls.AUTH_SCOPES,
+ scopes=scopes,
+ default_host=cls.DEFAULT_HOST,
+ **kwargs
+ )
+
+ @property
+ def grpc_channel(self) -> grpc.Channel:
+ """Return the channel designed to connect to this service.
+ """
+ return self._grpc_channel
+
+ @property
+ def predict(self) -> Callable[
+ [prediction_service.PredictRequest],
+ prediction_service.PredictResponse]:
+ r"""Return a callable for the predict method over gRPC.
+
+ Perform an online prediction.
+
+ Returns:
+ Callable[[~.PredictRequest],
+ ~.PredictResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'predict' not in self._stubs: + self._stubs['predict'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PredictionService/Predict', + request_serializer=prediction_service.PredictRequest.serialize, + response_deserializer=prediction_service.PredictResponse.deserialize, + ) + return self._stubs['predict'] + + +__all__ = ( + 'PredictionServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..3b2163fe26 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py @@ -0,0 +1,256 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +import packaging.version + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import prediction_service +from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import PredictionServiceGrpcTransport + + +class PredictionServiceGrpcAsyncIOTransport(PredictionServiceTransport): + """gRPC AsyncIO backend transport for PredictionService. + + A service for online predictions and explanations. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. 
+ This argument is mutually exclusive with credentials.
+ scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
+ channel creation.
+ Returns:
+ aio.Channel: A gRPC AsyncIO channel object.
+ """
+
+ return grpc_helpers_async.create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ quota_project_id=quota_project_id,
+ default_scopes=cls.AUTH_SCOPES,
+ scopes=scopes,
+ default_host=cls.DEFAULT_HOST,
+ **kwargs
+ )
+
+ def __init__(self, *,
+ host: str = 'aiplatform.googleapis.com',
+ credentials: ga_credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ channel: aio.Channel = None,
+ api_mtls_endpoint: str = None,
+ client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+ ssl_channel_credentials: grpc.ChannelCredentials = None,
+ client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ quota_project_id=None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]):
+ The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+ scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ channel (Optional[aio.Channel]): A ``Channel`` instance through
+ which to make calls.
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+ If provided, it overrides the ``host`` argument and tries to create
+ a mutual TLS channel with client SSL credentials from
+ ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated. A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+ for the gRPC channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ A callback to provide client certificate bytes and private key bytes,
+ both in PEM format. It is used to configure a mutual TLS channel. It is
+ ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+ always_use_jwt_access (Optional[bool]): Whether self-signed JWT should
+ be used for service account credentials.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ self._grpc_channel = None
+ self._ssl_channel_credentials = ssl_channel_credentials
+ self._stubs: Dict[str, Callable] = {}
+
+ if api_mtls_endpoint:
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+ if client_cert_source:
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+ if channel:
+ # Ignore credentials if a channel was passed.
+ credentials = False
+ # If a channel was explicitly provided, set it.
+ self._grpc_channel = channel
+ self._ssl_channel_credentials = None
+ else:
+ if api_mtls_endpoint:
+ host = api_mtls_endpoint
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
+ if client_cert_source:
+ cert, key = client_cert_source()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+ else:
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
+ cert, key = client_cert_source_for_mtls()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+
+ # The base transport sets the host, credentials and scopes.
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ always_use_jwt_access=always_use_jwt_access,
+ )
+
+ if not self._grpc_channel:
+ self._grpc_channel = type(self).create_channel(
+ self._host,
+ credentials=self._credentials,
+ credentials_file=credentials_file,
+ scopes=self._scopes,
+ ssl_credentials=self._ssl_channel_credentials,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Wrap messages. This must be done after self._grpc_channel exists.
+ self._prep_wrapped_messages(client_info)
+
+ @property
+ def grpc_channel(self) -> aio.Channel:
+ """Create the channel designed to connect to this service.
+
+ This property caches on the instance; repeated calls return
+ the same channel.
+ """
+ # Return the channel from cache.
+ return self._grpc_channel
+
+ @property
+ def predict(self) -> Callable[
+ [prediction_service.PredictRequest],
+ Awaitable[prediction_service.PredictResponse]]:
+ r"""Return a callable for the predict method over gRPC.
+
+ Perform an online prediction.
+
+ Returns:
+ Callable[[~.PredictRequest],
+ Awaitable[~.PredictResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
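+ # The stub is created at most once and cached in self._stubs, so
+ # repeated accesses of this property reuse the same callable.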
+ if 'predict' not in self._stubs: + self._stubs['predict'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.PredictionService/Predict', + request_serializer=prediction_service.PredictRequest.serialize, + response_deserializer=prediction_service.PredictResponse.deserialize, + ) + return self._stubs['predict'] + + +__all__ = ( + 'PredictionServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/__init__.py new file mode 100644 index 0000000000..04af59e5fa --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import SpecialistPoolServiceClient +from .async_client import SpecialistPoolServiceAsyncClient + +__all__ = ( + 'SpecialistPoolServiceClient', + 'SpecialistPoolServiceAsyncClient', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py new file mode 100644 index 0000000000..b622e7c910 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py @@ -0,0 +1,650 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.specialist_pool_service import pagers +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import specialist_pool +from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool +from google.cloud.aiplatform_v1.types import specialist_pool_service +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from .transports.base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport +from .client import SpecialistPoolServiceClient + + +class SpecialistPoolServiceAsyncClient: + """A service for creating and managing Customer SpecialistPools. + When customers start Data Labeling jobs, they can reuse/create + Specialist Pools to bring their own Specialists to label the + data. Customers can add/remove Managers for the Specialist Pool + on Cloud console, then Managers will get email notifications to + manage Specialists and tasks on CrowdCompute console. + """ + + _client: SpecialistPoolServiceClient + + DEFAULT_ENDPOINT = SpecialistPoolServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = SpecialistPoolServiceClient.DEFAULT_MTLS_ENDPOINT + + specialist_pool_path = staticmethod(SpecialistPoolServiceClient.specialist_pool_path) + parse_specialist_pool_path = staticmethod(SpecialistPoolServiceClient.parse_specialist_pool_path) + common_billing_account_path = staticmethod(SpecialistPoolServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(SpecialistPoolServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(SpecialistPoolServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(SpecialistPoolServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(SpecialistPoolServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(SpecialistPoolServiceClient.parse_common_organization_path) + common_project_path = staticmethod(SpecialistPoolServiceClient.common_project_path) + parse_common_project_path = staticmethod(SpecialistPoolServiceClient.parse_common_project_path) + common_location_path = staticmethod(SpecialistPoolServiceClient.common_location_path) + parse_common_location_path = staticmethod(SpecialistPoolServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. 
+
+ Returns:
+ SpecialistPoolServiceAsyncClient: The constructed client.
+ """
+ return SpecialistPoolServiceClient.from_service_account_info.__func__(SpecialistPoolServiceAsyncClient, info, *args, **kwargs) # type: ignore
+
+ @classmethod
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ file.
+
+ Args:
+ filename (str): The path to the service account private key json
+ file.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ SpecialistPoolServiceAsyncClient: The constructed client.
+ """
+ return SpecialistPoolServiceClient.from_service_account_file.__func__(SpecialistPoolServiceAsyncClient, filename, *args, **kwargs) # type: ignore
+
+ from_service_account_json = from_service_account_file
+
+ @property
+ def transport(self) -> SpecialistPoolServiceTransport:
+ """Returns the transport used by the client instance.
+
+ Returns:
+ SpecialistPoolServiceTransport: The transport used by the client instance.
+ """
+ return self._client.transport
+
+ get_transport_class = functools.partial(type(SpecialistPoolServiceClient).get_transport_class, type(SpecialistPoolServiceClient))
+
+ def __init__(self, *,
+ credentials: ga_credentials.Credentials = None,
+ transport: Union[str, SpecialistPoolServiceTransport] = "grpc_asyncio",
+ client_options: ClientOptions = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiates the specialist pool service client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Union[str, ~.SpecialistPoolServiceTransport]): The
+ transport to use. If set to None, a transport is chosen
+ automatically.
+ client_options (ClientOptions): Custom options for the client. It
+ won't take effect if a ``transport`` instance is provided.
+ (1) The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+ environment variable can also be used to override the endpoint:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto switch to the
+ default mTLS endpoint if client certificate is present, this is
+ the default value). However, the ``api_endpoint`` property takes
+ precedence if provided.
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide client certificate for mutual TLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """ + self._client = SpecialistPoolServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_specialist_pool(self, + request: specialist_pool_service.CreateSpecialistPoolRequest = None, + *, + parent: str = None, + specialist_pool: gca_specialist_pool.SpecialistPool = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a SpecialistPool. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.CreateSpecialistPoolRequest`): + The request object. Request message for + [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool]. + parent (:class:`str`): + Required. The parent Project name for the new + SpecialistPool. The form is + ``projects/{project}/locations/{location}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + specialist_pool (:class:`google.cloud.aiplatform_v1.types.SpecialistPool`): + Required. The SpecialistPool to + create. + + This corresponds to the ``specialist_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data + labeling jobs. It includes a group of specialist + managers who are responsible for managing the + labelers in this pool as well as customers' data + labeling jobs associated with this pool. Customers + create specialist pool as well as start data labeling + jobs on Cloud, managers and labelers work with the + jobs using CrowdCompute console. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, specialist_pool]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = specialist_pool_service.CreateSpecialistPoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if specialist_pool is not None: + request.specialist_pool = specialist_pool + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_specialist_pool, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_specialist_pool.SpecialistPool, + metadata_type=specialist_pool_service.CreateSpecialistPoolOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_specialist_pool(self, + request: specialist_pool_service.GetSpecialistPoolRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> specialist_pool.SpecialistPool: + r"""Gets a SpecialistPool. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.GetSpecialistPoolRequest`): + The request object. Request message for + [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.GetSpecialistPool]. + name (:class:`str`): + Required. The name of the SpecialistPool resource. The + form is + ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.SpecialistPool: + SpecialistPool represents customers' + own workforce to work on their data + labeling jobs. It includes a group of + specialist managers who are responsible + for managing the labelers in this pool + as well as customers' data labeling jobs + associated with this pool. + Customers create specialist pool as well + as start data labeling jobs on Cloud, + managers and labelers work with the jobs + using CrowdCompute console. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = specialist_pool_service.GetSpecialistPoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_specialist_pool, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def list_specialist_pools(self, + request: specialist_pool_service.ListSpecialistPoolsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListSpecialistPoolsAsyncPager: + r"""Lists SpecialistPools in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest`): + The request object. Request message for + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools]. + parent (:class:`str`): + Required. The name of the SpecialistPool's parent + resource. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.specialist_pool_service.pagers.ListSpecialistPoolsAsyncPager: + Response message for + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = specialist_pool_service.ListSpecialistPoolsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_specialist_pools, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListSpecialistPoolsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_specialist_pool(self, + request: specialist_pool_service.DeleteSpecialistPoolRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a SpecialistPool as well as all Specialists + in the pool. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.DeleteSpecialistPoolRequest`): + The request object. 
Request message for + [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.DeleteSpecialistPool]. + name (:class:`str`): + Required. The resource name of the SpecialistPool to + delete. Format: + ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = specialist_pool_service.DeleteSpecialistPoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_specialist_pool, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def update_specialist_pool(self, + request: specialist_pool_service.UpdateSpecialistPoolRequest = None, + *, + specialist_pool: gca_specialist_pool.SpecialistPool = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates a SpecialistPool. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.UpdateSpecialistPoolRequest`): + The request object. Request message for + [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool]. + specialist_pool (:class:`google.cloud.aiplatform_v1.types.SpecialistPool`): + Required. The SpecialistPool which + replaces the resource on the server. 
+ + This corresponds to the ``specialist_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The update mask applies to + the resource. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data + labeling jobs. It includes a group of specialist + managers who are responsible for managing the + labelers in this pool as well as customers' data + labeling jobs associated with this pool. Customers + create specialist pool as well as start data labeling + jobs on Cloud, managers and labelers work with the + jobs using CrowdCompute console. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([specialist_pool, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = specialist_pool_service.UpdateSpecialistPoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if specialist_pool is not None: + request.specialist_pool = specialist_pool + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_specialist_pool, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("specialist_pool.name", request.specialist_pool.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_specialist_pool.SpecialistPool, + metadata_type=specialist_pool_service.UpdateSpecialistPoolOperationMetadata, + ) + + # Done; return the response. 
+ return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "SpecialistPoolServiceAsyncClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py new file mode 100644 index 0000000000..609815e067 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py @@ -0,0 +1,837 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.specialist_pool_service import pagers +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import specialist_pool +from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool +from google.cloud.aiplatform_v1.types import specialist_pool_service +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from .transports.base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import SpecialistPoolServiceGrpcTransport +from .transports.grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport + + +class SpecialistPoolServiceClientMeta(type): + """Metaclass for the SpecialistPoolService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + _transport_registry = OrderedDict() # type: Dict[str, Type[SpecialistPoolServiceTransport]] + _transport_registry["grpc"] = SpecialistPoolServiceGrpcTransport + _transport_registry["grpc_asyncio"] = SpecialistPoolServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[SpecialistPoolServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class SpecialistPoolServiceClient(metaclass=SpecialistPoolServiceClientMeta): + """A service for creating and managing Customer SpecialistPools. + When customers start Data Labeling jobs, they can reuse/create + Specialist Pools to bring their own Specialists to label the + data. Customers can add/remove Managers for the Specialist Pool + on Cloud console, then Managers will get email notifications to + manage Specialists and tasks on CrowdCompute console. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + SpecialistPoolServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + SpecialistPoolServiceClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> SpecialistPoolServiceTransport: + """Returns the transport used by the client instance. + + Returns: + SpecialistPoolServiceTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def specialist_pool_path(project: str,location: str,specialist_pool: str,) -> str: + """Returns a fully-qualified specialist_pool string.""" + return "projects/{project}/locations/{location}/specialistPools/{specialist_pool}".format(project=project, location=location, specialist_pool=specialist_pool, ) + + @staticmethod + def parse_specialist_pool_path(path: str) -> Dict[str,str]: + """Parses a specialist_pool path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/specialistPools/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, SpecialistPoolServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the specialist pool 
service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, SpecialistPoolServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, SpecialistPoolServiceTransport): + # transport is a SpecialistPoolServiceTransport instance. 
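+ # A prebuilt transport already carries its own credentials and
+ # scopes, so conflicting arguments are rejected below. A
+ # hypothetical usage sketch:
+ #   transport = SpecialistPoolServiceGrpcTransport(credentials=creds)
+ #   client = SpecialistPoolServiceClient(transport=transport)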
+ if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_specialist_pool(self, + request: specialist_pool_service.CreateSpecialistPoolRequest = None, + *, + parent: str = None, + specialist_pool: gca_specialist_pool.SpecialistPool = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a SpecialistPool. + + Args: + request (google.cloud.aiplatform_v1.types.CreateSpecialistPoolRequest): + The request object. Request message for + [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool]. + parent (str): + Required. The parent Project name for the new + SpecialistPool. The form is + ``projects/{project}/locations/{location}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + specialist_pool (google.cloud.aiplatform_v1.types.SpecialistPool): + Required. The SpecialistPool to + create. + + This corresponds to the ``specialist_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data + labeling jobs. It includes a group of specialist + managers who are responsible for managing the + labelers in this pool as well as customers' data + labeling jobs associated with this pool. Customers + create specialist pool as well as start data labeling + jobs on Cloud, managers and labelers work with the + jobs using CrowdCompute console. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, specialist_pool]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a specialist_pool_service.CreateSpecialistPoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, specialist_pool_service.CreateSpecialistPoolRequest): + request = specialist_pool_service.CreateSpecialistPoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if specialist_pool is not None: + request.specialist_pool = specialist_pool + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_specialist_pool] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_specialist_pool.SpecialistPool, + metadata_type=specialist_pool_service.CreateSpecialistPoolOperationMetadata, + ) + + # Done; return the response. + return response + + def get_specialist_pool(self, + request: specialist_pool_service.GetSpecialistPoolRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> specialist_pool.SpecialistPool: + r"""Gets a SpecialistPool. + + Args: + request (google.cloud.aiplatform_v1.types.GetSpecialistPoolRequest): + The request object. Request message for + [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.GetSpecialistPool]. + name (str): + Required. The name of the SpecialistPool resource. The + form is + ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.SpecialistPool: + SpecialistPool represents customers' + own workforce to work on their data + labeling jobs. It includes a group of + specialist managers who are responsible + for managing the labelers in this pool + as well as customers' data labeling jobs + associated with this pool. + Customers create specialist pool as well + as start data labeling jobs on Cloud, + managers and labelers work with the jobs + using CrowdCompute console. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a specialist_pool_service.GetSpecialistPoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, specialist_pool_service.GetSpecialistPoolRequest): + request = specialist_pool_service.GetSpecialistPoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_specialist_pool] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_specialist_pools(self, + request: specialist_pool_service.ListSpecialistPoolsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListSpecialistPoolsPager: + r"""Lists SpecialistPools in a Location. + + Args: + request (google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest): + The request object. Request message for + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools]. + parent (str): + Required. The name of the SpecialistPool's parent + resource. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.specialist_pool_service.pagers.ListSpecialistPoolsPager: + Response message for + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a specialist_pool_service.ListSpecialistPoolsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, specialist_pool_service.ListSpecialistPoolsRequest): + request = specialist_pool_service.ListSpecialistPoolsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_specialist_pools] + + # Certain fields should be provided within the metadata header; + # add these here. 
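The metadata tuple assembled just below carries the routing parameter to the server in the x-goog-request-params header. A small sketch of what google.api_core's helper produces; the resource name is a placeholder:

    from google.api_core import gapic_v1

    header = gapic_v1.routing_header.to_grpc_metadata(
        (("parent", "projects/my-project/locations/us-central1"),)
    )
    # header is a single ('x-goog-request-params', 'parent=...') pair that
    # gRPC sends alongside any user-supplied metadata.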
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("parent", request.parent),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # This method is paged; wrap the response in a pager, which provides
+        # an `__iter__` convenience method.
+        response = pagers.ListSpecialistPoolsPager(
+            method=rpc,
+            request=request,
+            response=response,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def delete_specialist_pool(self,
+            request: specialist_pool_service.DeleteSpecialistPoolRequest = None,
+            *,
+            name: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> gac_operation.Operation:
+        r"""Deletes a SpecialistPool as well as all Specialists
+        in the pool.
+
+        Args:
+            request (google.cloud.aiplatform_v1.types.DeleteSpecialistPoolRequest):
+                The request object. Request message for
+                [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.DeleteSpecialistPool].
+            name (str):
+                Required. The resource name of the SpecialistPool to
+                delete. Format:
+                ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
+                empty messages in your APIs. A typical example is to
+                use it as the request or the response type of an API
+                method. For instance::
+
+                    service Foo {
+                        rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+                    }
+
+                The JSON representation for Empty is an empty JSON
+                object ``{}``.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a specialist_pool_service.DeleteSpecialistPoolRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, specialist_pool_service.DeleteSpecialistPoolRequest):
+            request = specialist_pool_service.DeleteSpecialistPoolRequest(request)
+            # If we have keyword arguments corresponding to fields on the
+            # request, apply these.
+            if name is not None:
+                request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.delete_specialist_pool]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
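Stepping back to the list call wrapped above: the pager hides page tokens entirely. A hedged usage sketch, reusing the client from the earlier sketches:

    pager = client.list_specialist_pools(
        parent="projects/my-project/locations/us-central1")
    for pool in pager:  # follow-up pages are fetched on demand
        print(pool.display_name)
    # pager.pages yields the raw ListSpecialistPoolsResponse objects instead,
    # and attribute access (e.g. pager.next_page_token) is delegated to the
    # most recent response via __getattr__.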
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
+        response = gac_operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            empty_pb2.Empty,
+            metadata_type=gca_operation.DeleteOperationMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def update_specialist_pool(self,
+            request: specialist_pool_service.UpdateSpecialistPoolRequest = None,
+            *,
+            specialist_pool: gca_specialist_pool.SpecialistPool = None,
+            update_mask: field_mask_pb2.FieldMask = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> gac_operation.Operation:
+        r"""Updates a SpecialistPool.
+
+        Args:
+            request (google.cloud.aiplatform_v1.types.UpdateSpecialistPoolRequest):
+                The request object. Request message for
+                [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool].
+            specialist_pool (google.cloud.aiplatform_v1.types.SpecialistPool):
+                Required. The SpecialistPool which
+                replaces the resource on the server.
+
+                This corresponds to the ``specialist_pool`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            update_mask (google.protobuf.field_mask_pb2.FieldMask):
+                Required. The update mask applies to
+                the resource.
+
+                This corresponds to the ``update_mask`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data
+                labeling jobs. It includes a group of specialist
+                managers who are responsible for managing the
+                labelers in this pool as well as customers' data
+                labeling jobs associated with this pool. Customers
+                create a specialist pool as well as start data labeling
+                jobs on Cloud; managers and labelers work with the
+                jobs using the CrowdCompute console.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([specialist_pool, update_mask])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a specialist_pool_service.UpdateSpecialistPoolRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, specialist_pool_service.UpdateSpecialistPoolRequest):
+            request = specialist_pool_service.UpdateSpecialistPoolRequest(request)
+            # If we have keyword arguments corresponding to fields on the
+            # request, apply these.
+            if specialist_pool is not None:
+                request.specialist_pool = specialist_pool
+            if update_mask is not None:
+                request.update_mask = update_mask
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
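A hedged sketch of exercising the flattened update path handled above; the FieldMask limits which fields of the passed SpecialistPool the server applies. The resource name and field values are placeholders:

    from google.protobuf import field_mask_pb2

    pool = aiplatform_v1.SpecialistPool(
        name="projects/my-project/locations/us-central1/specialistPools/123",
        display_name="renamed-pool",
    )
    operation = client.update_specialist_pool(
        specialist_pool=pool,
        update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
    )
    updated = operation.result()  # the SpecialistPool after the update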
+ rpc = self._transport._wrapped_methods[self._transport.update_specialist_pool] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("specialist_pool.name", request.specialist_pool.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_specialist_pool.SpecialistPool, + metadata_type=specialist_pool_service.UpdateSpecialistPoolOperationMetadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "SpecialistPoolServiceClient", +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/pagers.py new file mode 100644 index 0000000000..8123ab4e42 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/pagers.py @@ -0,0 +1,141 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.cloud.aiplatform_v1.types import specialist_pool +from google.cloud.aiplatform_v1.types import specialist_pool_service + + +class ListSpecialistPoolsPager: + """A pager for iterating through ``list_specialist_pools`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``specialist_pools`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListSpecialistPools`` requests and continue to iterate + through the ``specialist_pools`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., specialist_pool_service.ListSpecialistPoolsResponse], + request: specialist_pool_service.ListSpecialistPoolsRequest, + response: specialist_pool_service.ListSpecialistPoolsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = specialist_pool_service.ListSpecialistPoolsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[specialist_pool_service.ListSpecialistPoolsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[specialist_pool.SpecialistPool]: + for page in self.pages: + yield from page.specialist_pools + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListSpecialistPoolsAsyncPager: + """A pager for iterating through ``list_specialist_pools`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``specialist_pools`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListSpecialistPools`` requests and continue to iterate + through the ``specialist_pools`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[specialist_pool_service.ListSpecialistPoolsResponse]], + request: specialist_pool_service.ListSpecialistPoolsRequest, + response: specialist_pool_service.ListSpecialistPoolsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = specialist_pool_service.ListSpecialistPoolsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[specialist_pool_service.ListSpecialistPoolsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[specialist_pool.SpecialistPool]: + async def async_generator(): + async for page in self.pages: + for response in page.specialist_pools: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/__init__.py new file mode 100644 index 0000000000..ba8c9d7eb5 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import SpecialistPoolServiceTransport +from .grpc import SpecialistPoolServiceGrpcTransport +from .grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[SpecialistPoolServiceTransport]] +_transport_registry['grpc'] = SpecialistPoolServiceGrpcTransport +_transport_registry['grpc_asyncio'] = SpecialistPoolServiceGrpcAsyncIOTransport + +__all__ = ( + 'SpecialistPoolServiceTransport', + 'SpecialistPoolServiceGrpcTransport', + 'SpecialistPoolServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py new file mode 100644 index 0000000000..25948e8039 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py @@ -0,0 +1,232 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1.types import specialist_pool +from google.cloud.aiplatform_v1.types import specialist_pool_service +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class SpecialistPoolServiceTransport(abc.ABC): + """Abstract transport class for SpecialistPoolService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. 
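The block that follows implements a simple precedence: explicit credentials win, then credentials_file, then Application Default Credentials. A sketch of the final fallback in isolation, using only public google-auth calls:

    import google.auth

    # google.auth.default() discovers credentials from the environment
    # (GOOGLE_APPLICATION_CREDENTIALS, gcloud config, or the metadata server).
    credentials, project_id = google.auth.default(
        scopes=["https://www.googleapis.com/auth/cloud-platform"])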
+ if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials is service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # TODO(busunkim): This method is in the base transport + # to avoid duplicating code across the transport classes. These functions + # should be deleted once the minimum required versions of google-auth is increased. + + # TODO: Remove this function once google-auth >= 1.25.0 is required + @classmethod + def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: + """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" + + scopes_kwargs = {} + + if _GOOGLE_AUTH_VERSION and ( + packaging.version.parse(_GOOGLE_AUTH_VERSION) + >= packaging.version.parse("1.25.0") + ): + scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} + else: + scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} + + return scopes_kwargs + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_specialist_pool: gapic_v1.method.wrap_method( + self.create_specialist_pool, + default_timeout=5.0, + client_info=client_info, + ), + self.get_specialist_pool: gapic_v1.method.wrap_method( + self.get_specialist_pool, + default_timeout=5.0, + client_info=client_info, + ), + self.list_specialist_pools: gapic_v1.method.wrap_method( + self.list_specialist_pools, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_specialist_pool: gapic_v1.method.wrap_method( + self.delete_specialist_pool, + default_timeout=5.0, + client_info=client_info, + ), + self.update_specialist_pool: gapic_v1.method.wrap_method( + self.update_specialist_pool, + default_timeout=5.0, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_specialist_pool(self) -> Callable[ + [specialist_pool_service.CreateSpecialistPoolRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_specialist_pool(self) -> Callable[ + [specialist_pool_service.GetSpecialistPoolRequest], + Union[ + specialist_pool.SpecialistPool, + Awaitable[specialist_pool.SpecialistPool] + ]]: + raise NotImplementedError() + + @property + def list_specialist_pools(self) -> Callable[ + [specialist_pool_service.ListSpecialistPoolsRequest], + Union[ + specialist_pool_service.ListSpecialistPoolsResponse, + Awaitable[specialist_pool_service.ListSpecialistPoolsResponse] + ]]: + raise NotImplementedError() + + @property + def delete_specialist_pool(self) -> Callable[ + [specialist_pool_service.DeleteSpecialistPoolRequest], + Union[ + 
operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def update_specialist_pool(self) -> Callable[ + [specialist_pool_service.UpdateSpecialistPoolRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'SpecialistPoolServiceTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py new file mode 100644 index 0000000000..799a4a3e8d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py @@ -0,0 +1,382 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1.types import specialist_pool +from google.cloud.aiplatform_v1.types import specialist_pool_service +from google.longrunning import operations_pb2 # type: ignore +from .base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO + + +class SpecialistPoolServiceGrpcTransport(SpecialistPoolServiceTransport): + """gRPC backend transport for SpecialistPoolService. + + A service for creating and managing Customer SpecialistPools. + When customers start Data Labeling jobs, they can reuse/create + Specialist Pools to bring their own Specialists to label the + data. Customers can add/remove Managers for the Specialist Pool + on Cloud console, then Managers will get email notifications to + manage Specialists and tasks on CrowdCompute console. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. 
+            credentials = False
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+
+        else:
+            if api_mtls_endpoint:
+                host = api_mtls_endpoint
+
+                # Create SSL credentials with client_cert_source or application
+                # default SSL credentials.
+                if client_cert_source:
+                    cert, key = client_cert_source()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+                else:
+                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+            else:
+                if client_cert_source_for_mtls and not ssl_channel_credentials:
+                    cert, key = client_cert_source_for_mtls()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+
+        # The base transport sets the host, credentials and scopes
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+        )
+
+        if not self._grpc_channel:
+            self._grpc_channel = type(self).create_channel(
+                self._host,
+                credentials=self._credentials,
+                credentials_file=credentials_file,
+                scopes=self._scopes,
+                ssl_credentials=self._ssl_channel_credentials,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        # Wrap messages. This must be done after self._grpc_channel exists
+        self._prep_wrapped_messages(client_info)
+
+    @classmethod
+    def create_channel(cls,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: str = None,
+            scopes: Optional[Sequence[str]] = None,
+            quota_project_id: Optional[str] = None,
+            **kwargs) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service.
+        """
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsClient:
+        """Create the client designed to process long-running operations.
+ + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_specialist_pool(self) -> Callable[ + [specialist_pool_service.CreateSpecialistPoolRequest], + operations_pb2.Operation]: + r"""Return a callable for the create specialist pool method over gRPC. + + Creates a SpecialistPool. + + Returns: + Callable[[~.CreateSpecialistPoolRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_specialist_pool' not in self._stubs: + self._stubs['create_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.SpecialistPoolService/CreateSpecialistPool', + request_serializer=specialist_pool_service.CreateSpecialistPoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_specialist_pool'] + + @property + def get_specialist_pool(self) -> Callable[ + [specialist_pool_service.GetSpecialistPoolRequest], + specialist_pool.SpecialistPool]: + r"""Return a callable for the get specialist pool method over gRPC. + + Gets a SpecialistPool. + + Returns: + Callable[[~.GetSpecialistPoolRequest], + ~.SpecialistPool]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_specialist_pool' not in self._stubs: + self._stubs['get_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.SpecialistPoolService/GetSpecialistPool', + request_serializer=specialist_pool_service.GetSpecialistPoolRequest.serialize, + response_deserializer=specialist_pool.SpecialistPool.deserialize, + ) + return self._stubs['get_specialist_pool'] + + @property + def list_specialist_pools(self) -> Callable[ + [specialist_pool_service.ListSpecialistPoolsRequest], + specialist_pool_service.ListSpecialistPoolsResponse]: + r"""Return a callable for the list specialist pools method over gRPC. + + Lists SpecialistPools in a Location. + + Returns: + Callable[[~.ListSpecialistPoolsRequest], + ~.ListSpecialistPoolsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
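Every RPC property on this transport repeats the same memoized-stub idiom shown here. A generic, self-contained sketch of the pattern; the service path and method name are invented for illustration:

    import grpc

    class LazyStubs:
        # Create each gRPC callable on first access, then reuse it.
        def __init__(self, channel: grpc.Channel):
            self._channel = channel
            self._stubs = {}

        @property
        def my_method(self):
            if "my_method" not in self._stubs:
                # unary_unary returns a callable that invokes the RPC.
                self._stubs["my_method"] = self._channel.unary_unary(
                    "/example.v1.ExampleService/MyMethod")
            return self._stubs["my_method"]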
+ if 'list_specialist_pools' not in self._stubs: + self._stubs['list_specialist_pools'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.SpecialistPoolService/ListSpecialistPools', + request_serializer=specialist_pool_service.ListSpecialistPoolsRequest.serialize, + response_deserializer=specialist_pool_service.ListSpecialistPoolsResponse.deserialize, + ) + return self._stubs['list_specialist_pools'] + + @property + def delete_specialist_pool(self) -> Callable[ + [specialist_pool_service.DeleteSpecialistPoolRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete specialist pool method over gRPC. + + Deletes a SpecialistPool as well as all Specialists + in the pool. + + Returns: + Callable[[~.DeleteSpecialistPoolRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_specialist_pool' not in self._stubs: + self._stubs['delete_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.SpecialistPoolService/DeleteSpecialistPool', + request_serializer=specialist_pool_service.DeleteSpecialistPoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_specialist_pool'] + + @property + def update_specialist_pool(self) -> Callable[ + [specialist_pool_service.UpdateSpecialistPoolRequest], + operations_pb2.Operation]: + r"""Return a callable for the update specialist pool method over gRPC. + + Updates a SpecialistPool. + + Returns: + Callable[[~.UpdateSpecialistPoolRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_specialist_pool' not in self._stubs: + self._stubs['update_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.SpecialistPoolService/UpdateSpecialistPool', + request_serializer=specialist_pool_service.UpdateSpecialistPoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_specialist_pool'] + + +__all__ = ( + 'SpecialistPoolServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..8073b23e02 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py @@ -0,0 +1,386 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import grpc_helpers_async  # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+import packaging.version
+
+import grpc  # type: ignore
+from grpc.experimental import aio  # type: ignore
+
+from google.cloud.aiplatform_v1.types import specialist_pool
+from google.cloud.aiplatform_v1.types import specialist_pool_service
+from google.longrunning import operations_pb2  # type: ignore
+from .base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO
+from .grpc import SpecialistPoolServiceGrpcTransport
+
+
+class SpecialistPoolServiceGrpcAsyncIOTransport(SpecialistPoolServiceTransport):
+    """gRPC AsyncIO backend transport for SpecialistPoolService.
+
+    A service for creating and managing Customer SpecialistPools.
+    When customers start Data Labeling jobs, they can reuse/create
+    Specialist Pools to bring their own Specialists to label the
+    data. Customers can add/remove Managers for the Specialist Pool
+    on Cloud console, then Managers will get email notifications to
+    manage Specialists and tasks on CrowdCompute console.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(cls,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            quota_project_id: Optional[str] = None,
+            **kwargs) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_specialist_pool(self) -> Callable[ + [specialist_pool_service.CreateSpecialistPoolRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create specialist pool method over gRPC. + + Creates a SpecialistPool. + + Returns: + Callable[[~.CreateSpecialistPoolRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_specialist_pool' not in self._stubs: + self._stubs['create_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.SpecialistPoolService/CreateSpecialistPool', + request_serializer=specialist_pool_service.CreateSpecialistPoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_specialist_pool'] + + @property + def get_specialist_pool(self) -> Callable[ + [specialist_pool_service.GetSpecialistPoolRequest], + Awaitable[specialist_pool.SpecialistPool]]: + r"""Return a callable for the get specialist pool method over gRPC. + + Gets a SpecialistPool. + + Returns: + Callable[[~.GetSpecialistPoolRequest], + Awaitable[~.SpecialistPool]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_specialist_pool' not in self._stubs: + self._stubs['get_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.SpecialistPoolService/GetSpecialistPool', + request_serializer=specialist_pool_service.GetSpecialistPoolRequest.serialize, + response_deserializer=specialist_pool.SpecialistPool.deserialize, + ) + return self._stubs['get_specialist_pool'] + + @property + def list_specialist_pools(self) -> Callable[ + [specialist_pool_service.ListSpecialistPoolsRequest], + Awaitable[specialist_pool_service.ListSpecialistPoolsResponse]]: + r"""Return a callable for the list specialist pools method over gRPC. + + Lists SpecialistPools in a Location. + + Returns: + Callable[[~.ListSpecialistPoolsRequest], + Awaitable[~.ListSpecialistPoolsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_specialist_pools' not in self._stubs: + self._stubs['list_specialist_pools'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.SpecialistPoolService/ListSpecialistPools', + request_serializer=specialist_pool_service.ListSpecialistPoolsRequest.serialize, + response_deserializer=specialist_pool_service.ListSpecialistPoolsResponse.deserialize, + ) + return self._stubs['list_specialist_pools'] + + @property + def delete_specialist_pool(self) -> Callable[ + [specialist_pool_service.DeleteSpecialistPoolRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete specialist pool method over gRPC. + + Deletes a SpecialistPool as well as all Specialists + in the pool. + + Returns: + Callable[[~.DeleteSpecialistPoolRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_specialist_pool' not in self._stubs: + self._stubs['delete_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.SpecialistPoolService/DeleteSpecialistPool', + request_serializer=specialist_pool_service.DeleteSpecialistPoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_specialist_pool'] + + @property + def update_specialist_pool(self) -> Callable[ + [specialist_pool_service.UpdateSpecialistPoolRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the update specialist pool method over gRPC. + + Updates a SpecialistPool. + + Returns: + Callable[[~.UpdateSpecialistPoolRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_specialist_pool' not in self._stubs: + self._stubs['update_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1.SpecialistPoolService/UpdateSpecialistPool', + request_serializer=specialist_pool_service.UpdateSpecialistPoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_specialist_pool'] + + +__all__ = ( + 'SpecialistPoolServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/__init__.py new file mode 100644 index 0000000000..22169ac9a9 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/__init__.py @@ -0,0 +1,429 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .annotation import ( + Annotation, +) +from .annotation_spec import ( + AnnotationSpec, +) +from .artifact import ( + Artifact, +) +from .batch_prediction_job import ( + BatchPredictionJob, +) +from .completion_stats import ( + CompletionStats, +) +from .context import ( + Context, +) +from .custom_job import ( + ContainerSpec, + CustomJob, + CustomJobSpec, + PythonPackageSpec, + Scheduling, + WorkerPoolSpec, +) +from .data_item import ( + DataItem, +) +from .data_labeling_job import ( + ActiveLearningConfig, + DataLabelingJob, + SampleConfig, + TrainingConfig, +) +from .dataset import ( + Dataset, + ExportDataConfig, + ImportDataConfig, +) +from .dataset_service import ( + CreateDatasetOperationMetadata, + CreateDatasetRequest, + DeleteDatasetRequest, + ExportDataOperationMetadata, + ExportDataRequest, + ExportDataResponse, + GetAnnotationSpecRequest, + GetDatasetRequest, + ImportDataOperationMetadata, + ImportDataRequest, + ImportDataResponse, + ListAnnotationsRequest, + ListAnnotationsResponse, + ListDataItemsRequest, + ListDataItemsResponse, + ListDatasetsRequest, + ListDatasetsResponse, + UpdateDatasetRequest, +) +from .deployed_model_ref import ( + DeployedModelRef, +) +from .encryption_spec import ( + EncryptionSpec, +) +from .endpoint import ( + DeployedModel, + Endpoint, +) +from .endpoint_service import ( + CreateEndpointOperationMetadata, + CreateEndpointRequest, + DeleteEndpointRequest, + DeployModelOperationMetadata, + DeployModelRequest, + DeployModelResponse, + GetEndpointRequest, + ListEndpointsRequest, + ListEndpointsResponse, + UndeployModelOperationMetadata, + UndeployModelRequest, + UndeployModelResponse, + UpdateEndpointRequest, +) +from .env_var import ( + EnvVar, +) +from .execution import ( + Execution, +) +from .hyperparameter_tuning_job import ( + HyperparameterTuningJob, +) +from .io import ( + BigQueryDestination, + BigQuerySource, + ContainerRegistryDestination, + GcsDestination, + GcsSource, +) +from .job_service import ( + CancelBatchPredictionJobRequest, + CancelCustomJobRequest, + CancelDataLabelingJobRequest, + CancelHyperparameterTuningJobRequest, + CreateBatchPredictionJobRequest, + CreateCustomJobRequest, + CreateDataLabelingJobRequest, + CreateHyperparameterTuningJobRequest, + DeleteBatchPredictionJobRequest, + DeleteCustomJobRequest, + DeleteDataLabelingJobRequest, + DeleteHyperparameterTuningJobRequest, + GetBatchPredictionJobRequest, + GetCustomJobRequest, + GetDataLabelingJobRequest, + GetHyperparameterTuningJobRequest, + ListBatchPredictionJobsRequest, + ListBatchPredictionJobsResponse, + ListCustomJobsRequest, + ListCustomJobsResponse, + ListDataLabelingJobsRequest, + ListDataLabelingJobsResponse, + ListHyperparameterTuningJobsRequest, + ListHyperparameterTuningJobsResponse, +) +from .machine_resources import ( + AutomaticResources, + AutoscalingMetricSpec, + BatchDedicatedResources, + DedicatedResources, + DiskSpec, + MachineSpec, + ResourcesConsumed, +) +from .manual_batch_tuning_parameters import ( + ManualBatchTuningParameters, +) +from .migratable_resource import ( + MigratableResource, +) +from .migration_service import ( + BatchMigrateResourcesOperationMetadata, + BatchMigrateResourcesRequest, + BatchMigrateResourcesResponse, + MigrateResourceRequest, + MigrateResourceResponse, + SearchMigratableResourcesRequest, + SearchMigratableResourcesResponse, +) +from .model import ( + Model, + ModelContainerSpec, + Port, + PredictSchemata, +) +from .model_evaluation import ( + ModelEvaluation, +) +from .model_evaluation_slice import ( + 
ModelEvaluationSlice, +) +from .model_service import ( + DeleteModelRequest, + ExportModelOperationMetadata, + ExportModelRequest, + ExportModelResponse, + GetModelEvaluationRequest, + GetModelEvaluationSliceRequest, + GetModelRequest, + ListModelEvaluationSlicesRequest, + ListModelEvaluationSlicesResponse, + ListModelEvaluationsRequest, + ListModelEvaluationsResponse, + ListModelsRequest, + ListModelsResponse, + UpdateModelRequest, + UploadModelOperationMetadata, + UploadModelRequest, + UploadModelResponse, +) +from .operation import ( + DeleteOperationMetadata, + GenericOperationMetadata, +) +from .pipeline_job import ( + PipelineJob, + PipelineJobDetail, + PipelineTaskDetail, + PipelineTaskExecutorDetail, +) +from .pipeline_service import ( + CancelPipelineJobRequest, + CancelTrainingPipelineRequest, + CreatePipelineJobRequest, + CreateTrainingPipelineRequest, + DeletePipelineJobRequest, + DeleteTrainingPipelineRequest, + GetPipelineJobRequest, + GetTrainingPipelineRequest, + ListPipelineJobsRequest, + ListPipelineJobsResponse, + ListTrainingPipelinesRequest, + ListTrainingPipelinesResponse, +) +from .prediction_service import ( + PredictRequest, + PredictResponse, +) +from .specialist_pool import ( + SpecialistPool, +) +from .specialist_pool_service import ( + CreateSpecialistPoolOperationMetadata, + CreateSpecialistPoolRequest, + DeleteSpecialistPoolRequest, + GetSpecialistPoolRequest, + ListSpecialistPoolsRequest, + ListSpecialistPoolsResponse, + UpdateSpecialistPoolOperationMetadata, + UpdateSpecialistPoolRequest, +) +from .study import ( + Measurement, + StudySpec, + Trial, +) +from .training_pipeline import ( + FilterSplit, + FractionSplit, + InputDataConfig, + PredefinedSplit, + TimestampSplit, + TrainingPipeline, +) +from .user_action_reference import ( + UserActionReference, +) +from .value import ( + Value, +) + +__all__ = ( + 'AcceleratorType', + 'Annotation', + 'AnnotationSpec', + 'Artifact', + 'BatchPredictionJob', + 'CompletionStats', + 'Context', + 'ContainerSpec', + 'CustomJob', + 'CustomJobSpec', + 'PythonPackageSpec', + 'Scheduling', + 'WorkerPoolSpec', + 'DataItem', + 'ActiveLearningConfig', + 'DataLabelingJob', + 'SampleConfig', + 'TrainingConfig', + 'Dataset', + 'ExportDataConfig', + 'ImportDataConfig', + 'CreateDatasetOperationMetadata', + 'CreateDatasetRequest', + 'DeleteDatasetRequest', + 'ExportDataOperationMetadata', + 'ExportDataRequest', + 'ExportDataResponse', + 'GetAnnotationSpecRequest', + 'GetDatasetRequest', + 'ImportDataOperationMetadata', + 'ImportDataRequest', + 'ImportDataResponse', + 'ListAnnotationsRequest', + 'ListAnnotationsResponse', + 'ListDataItemsRequest', + 'ListDataItemsResponse', + 'ListDatasetsRequest', + 'ListDatasetsResponse', + 'UpdateDatasetRequest', + 'DeployedModelRef', + 'EncryptionSpec', + 'DeployedModel', + 'Endpoint', + 'CreateEndpointOperationMetadata', + 'CreateEndpointRequest', + 'DeleteEndpointRequest', + 'DeployModelOperationMetadata', + 'DeployModelRequest', + 'DeployModelResponse', + 'GetEndpointRequest', + 'ListEndpointsRequest', + 'ListEndpointsResponse', + 'UndeployModelOperationMetadata', + 'UndeployModelRequest', + 'UndeployModelResponse', + 'UpdateEndpointRequest', + 'EnvVar', + 'Execution', + 'HyperparameterTuningJob', + 'BigQueryDestination', + 'BigQuerySource', + 'ContainerRegistryDestination', + 'GcsDestination', + 'GcsSource', + 'CancelBatchPredictionJobRequest', + 'CancelCustomJobRequest', + 'CancelDataLabelingJobRequest', + 'CancelHyperparameterTuningJobRequest', + 'CreateBatchPredictionJobRequest', + 
'CreateCustomJobRequest', + 'CreateDataLabelingJobRequest', + 'CreateHyperparameterTuningJobRequest', + 'DeleteBatchPredictionJobRequest', + 'DeleteCustomJobRequest', + 'DeleteDataLabelingJobRequest', + 'DeleteHyperparameterTuningJobRequest', + 'GetBatchPredictionJobRequest', + 'GetCustomJobRequest', + 'GetDataLabelingJobRequest', + 'GetHyperparameterTuningJobRequest', + 'ListBatchPredictionJobsRequest', + 'ListBatchPredictionJobsResponse', + 'ListCustomJobsRequest', + 'ListCustomJobsResponse', + 'ListDataLabelingJobsRequest', + 'ListDataLabelingJobsResponse', + 'ListHyperparameterTuningJobsRequest', + 'ListHyperparameterTuningJobsResponse', + 'JobState', + 'AutomaticResources', + 'AutoscalingMetricSpec', + 'BatchDedicatedResources', + 'DedicatedResources', + 'DiskSpec', + 'MachineSpec', + 'ResourcesConsumed', + 'ManualBatchTuningParameters', + 'MigratableResource', + 'BatchMigrateResourcesOperationMetadata', + 'BatchMigrateResourcesRequest', + 'BatchMigrateResourcesResponse', + 'MigrateResourceRequest', + 'MigrateResourceResponse', + 'SearchMigratableResourcesRequest', + 'SearchMigratableResourcesResponse', + 'Model', + 'ModelContainerSpec', + 'Port', + 'PredictSchemata', + 'ModelEvaluation', + 'ModelEvaluationSlice', + 'DeleteModelRequest', + 'ExportModelOperationMetadata', + 'ExportModelRequest', + 'ExportModelResponse', + 'GetModelEvaluationRequest', + 'GetModelEvaluationSliceRequest', + 'GetModelRequest', + 'ListModelEvaluationSlicesRequest', + 'ListModelEvaluationSlicesResponse', + 'ListModelEvaluationsRequest', + 'ListModelEvaluationsResponse', + 'ListModelsRequest', + 'ListModelsResponse', + 'UpdateModelRequest', + 'UploadModelOperationMetadata', + 'UploadModelRequest', + 'UploadModelResponse', + 'DeleteOperationMetadata', + 'GenericOperationMetadata', + 'PipelineJob', + 'PipelineJobDetail', + 'PipelineTaskDetail', + 'PipelineTaskExecutorDetail', + 'CancelPipelineJobRequest', + 'CancelTrainingPipelineRequest', + 'CreatePipelineJobRequest', + 'CreateTrainingPipelineRequest', + 'DeletePipelineJobRequest', + 'DeleteTrainingPipelineRequest', + 'GetPipelineJobRequest', + 'GetTrainingPipelineRequest', + 'ListPipelineJobsRequest', + 'ListPipelineJobsResponse', + 'ListTrainingPipelinesRequest', + 'ListTrainingPipelinesResponse', + 'PipelineState', + 'PredictRequest', + 'PredictResponse', + 'SpecialistPool', + 'CreateSpecialistPoolOperationMetadata', + 'CreateSpecialistPoolRequest', + 'DeleteSpecialistPoolRequest', + 'GetSpecialistPoolRequest', + 'ListSpecialistPoolsRequest', + 'ListSpecialistPoolsResponse', + 'UpdateSpecialistPoolOperationMetadata', + 'UpdateSpecialistPoolRequest', + 'Measurement', + 'StudySpec', + 'Trial', + 'FilterSplit', + 'FractionSplit', + 'InputDataConfig', + 'PredefinedSplit', + 'TimestampSplit', + 'TrainingPipeline', + 'UserActionReference', + 'Value', +) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/accelerator_type.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/accelerator_type.py new file mode 100644 index 0000000000..2c89d89653 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/accelerator_type.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
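Review note: the `__all__` re-export above is what makes every generated message importable from one place. A small sketch of that import surface once this lands (the resource names are placeholders; the metadata schema URI is the published image-dataset schema, included here only as an assumption):

```python
from google.cloud.aiplatform_v1.types import (
    CreateDatasetRequest,
    Dataset,
)

# Build a request message directly from the re-exported types.
request = CreateDatasetRequest(
    parent="projects/my-project/locations/us-central1",  # placeholder
    dataset=Dataset(
        display_name="my-dataset",  # placeholder
        metadata_schema_uri=(
            "gs://google-cloud-aiplatform/schema/dataset/metadata/image_1.0.0.yaml"
        ),
    ),
)
```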
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'AcceleratorType', + }, +) + + +class AcceleratorType(proto.Enum): + r"""Represents a hardware accelerator type.""" + ACCELERATOR_TYPE_UNSPECIFIED = 0 + NVIDIA_TESLA_K80 = 1 + NVIDIA_TESLA_P100 = 2 + NVIDIA_TESLA_V100 = 3 + NVIDIA_TESLA_P4 = 4 + NVIDIA_TESLA_T4 = 5 + NVIDIA_TESLA_A100 = 8 + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation.py new file mode 100644 index 0000000000..342d33e976 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation.py @@ -0,0 +1,129 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import user_action_reference +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'Annotation', + }, +) + + +class Annotation(proto.Message): + r"""Used to assign specific AnnotationSpec to a particular area + of a DataItem or the whole part of the DataItem. + + Attributes: + name (str): + Output only. Resource name of the Annotation. + payload_schema_uri (str): + Required. Google Cloud Storage URI points to a YAML file + describing + [payload][google.cloud.aiplatform.v1.Annotation.payload]. + The schema is defined as an `OpenAPI 3.0.2 Schema + Object `__. + The schema files that can be used here are found in + gs://google-cloud-aiplatform/schema/dataset/annotation/, + note that the chosen schema must be consistent with the + parent Dataset's + [metadata][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri]. + payload (google.protobuf.struct_pb2.Value): + Required. The schema of the payload can be found in + [payload_schema][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Annotation + was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Annotation + was last updated. + etag (str): + Optional. Used to perform consistent read-modify-write + updates. If not set, a blind + "overwrite" update happens. + annotation_source (google.cloud.aiplatform_v1.types.UserActionReference): + Output only. The source of the Annotation.
+ labels (Sequence[google.cloud.aiplatform_v1.types.Annotation.LabelsEntry]): + Optional. The labels with user-defined metadata to organize + your Annotations. + + Label keys and values can be no longer than 64 characters + (Unicode codepoints), can only contain lowercase letters, + numeric characters, underscores and dashes. International + characters are allowed. No more than 64 user labels can be + associated with one Annotation (System labels are excluded). + + See https://goo.gl/xmQnxf for more information and examples + of labels. System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. Following + system labels exist for each Annotation: + + - "aiplatform.googleapis.com/annotation_set_name": + optional, name of the UI's annotation set this Annotation + belongs to. If not set, the Annotation is not visible in + the UI. + + - "aiplatform.googleapis.com/payload_schema": output only, + its value is the + [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri] + title. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + payload_schema_uri = proto.Field( + proto.STRING, + number=2, + ) + payload = proto.Field( + proto.MESSAGE, + number=3, + message=struct_pb2.Value, + ) + create_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + etag = proto.Field( + proto.STRING, + number=8, + ) + annotation_source = proto.Field( + proto.MESSAGE, + number=5, + message=user_action_reference.UserActionReference, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=6, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation_spec.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation_spec.py new file mode 100644 index 0000000000..950abfe6c4 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation_spec.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'AnnotationSpec', + }, +) + + +class AnnotationSpec(proto.Message): + r"""Identifies a concept with which DataItems may be annotated. + + Attributes: + name (str): + Output only. Resource name of the + AnnotationSpec. + display_name (str): + Required. The user-defined name of the + AnnotationSpec. The name can be up to 128 + characters long and can consist of any UTF-8 + characters. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + AnnotationSpec was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when AnnotationSpec + was last updated. + etag (str): + Optional.
Used to perform consistent read-modify-write + updates. If not set, a blind + "overwrite" update happens. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + create_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + etag = proto.Field( + proto.STRING, + number=5, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/artifact.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/artifact.py new file mode 100644 index 0000000000..6bd0512962 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/artifact.py @@ -0,0 +1,114 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'Artifact', + }, +) + + +class Artifact(proto.Message): + r"""Instance of a general artifact. + Attributes: + name (str): + Output only. The resource name of the + Artifact. + display_name (str): + User provided display name of the Artifact. + May be up to 128 Unicode characters. + uri (str): + The uniform resource identifier of the + artifact file. May be empty if there is no + actual artifact file. + etag (str): + An eTag used to perform consistent read-modify-write + updates. If not set, a blind + "overwrite" update happens. + labels (Sequence[google.cloud.aiplatform_v1.types.Artifact.LabelsEntry]): + The labels with user-defined metadata to + organize your Artifacts. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. No more than 64 user labels can be + associated with one Artifact (System labels are + excluded). + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Artifact was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Artifact was + last updated. + state (google.cloud.aiplatform_v1.types.Artifact.State): + The state of this Artifact. This is a + property of the Artifact, and does not imply or + capture any ongoing process. This property is + managed by clients (such as Vertex Pipelines), + and the system does not prescribe or check the + validity of state transitions.
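Review note: several messages in this PR (Annotation, AnnotationSpec, Artifact, Context, DataItem) carry the same etag contract for consistent read-modify-write updates. A toy sketch of that contract, purely illustrative and not an API in this package:

```python
from typing import Optional


class ConflictError(Exception):
    """Raised when the resource changed since it was last read."""


def apply_update(stored_etag: str, request_etag: Optional[str]) -> None:
    # Mimics the server-side check behind the etag fields documented above.
    if request_etag is not None and request_etag != stored_etag:
        raise ConflictError("resource changed since it was read; re-read and retry")
    # Either the etags match (a consistent read-modify-write) or no etag
    # was supplied (a blind "overwrite"), so the write proceeds.


apply_update(stored_etag="abc123", request_etag="abc123")  # consistent update
apply_update(stored_etag="abc123", request_etag=None)      # blind overwrite
```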
+ """ + class State(proto.Enum): + r"""Describes the state of the Artifact.""" + STATE_UNSPECIFIED = 0 + PENDING = 1 + LIVE = 2 + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + uri = proto.Field( + proto.STRING, + number=6, + ) + etag = proto.Field( + proto.STRING, + number=9, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=10, + ) + create_time = proto.Field( + proto.MESSAGE, + number=11, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=12, + message=timestamp_pb2.Timestamp, + ) + state = proto.Field( + proto.ENUM, + number=13, + enum=State, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/batch_prediction_job.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/batch_prediction_job.py new file mode 100644 index 0000000000..abeb95f208 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/batch_prediction_job.py @@ -0,0 +1,407 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import completion_stats as gca_completion_stats +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1.types import io +from google.cloud.aiplatform_v1.types import job_state +from google.cloud.aiplatform_v1.types import machine_resources +from google.cloud.aiplatform_v1.types import manual_batch_tuning_parameters as gca_manual_batch_tuning_parameters +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'BatchPredictionJob', + }, +) + + +class BatchPredictionJob(proto.Message): + r"""A job that uses a + [Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to + produce predictions on multiple [input + instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. + If predictions for significant portion of the instances fail, the + job may finish without attempting predictions for all remaining + instances. + + Attributes: + name (str): + Output only. Resource name of the + BatchPredictionJob. + display_name (str): + Required. The user-defined name of this + BatchPredictionJob. + model (str): + Required. The name of the Model that produces + the predictions via this job, must share the + same ancestor Location. Starting this job has no + impact on any existing deployments of the Model + and their resources. + input_config (google.cloud.aiplatform_v1.types.BatchPredictionJob.InputConfig): + Required. Input configuration of the instances on which + predictions are performed. 
The schema of any single instance + may be specified via the + [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri]. + model_parameters (google.protobuf.struct_pb2.Value): + The parameters that govern the predictions. The schema of + the parameters may be specified via the + [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. + output_config (google.cloud.aiplatform_v1.types.BatchPredictionJob.OutputConfig): + Required. The configuration specifying where output + predictions should be written. The schema of any single + prediction may be specified as a concatenation of + [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] + and + [prediction_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.prediction_schema_uri]. + dedicated_resources (google.cloud.aiplatform_v1.types.BatchDedicatedResources): + The config of resources used by the Model during the batch + prediction. If the Model + [supports][google.cloud.aiplatform.v1.Model.supported_deployment_resources_types] + DEDICATED_RESOURCES, this config may be provided (and the job + will use these resources); if the Model doesn't support + AUTOMATIC_RESOURCES, this config must be provided. + manual_batch_tuning_parameters (google.cloud.aiplatform_v1.types.ManualBatchTuningParameters): + Immutable. Parameters configuring the batch behavior. + Currently only applicable when + [dedicated_resources][google.cloud.aiplatform.v1.BatchPredictionJob.dedicated_resources] + are used (in other cases Vertex AI does the tuning itself). + output_info (google.cloud.aiplatform_v1.types.BatchPredictionJob.OutputInfo): + Output only. Information further describing + the output of this job. + state (google.cloud.aiplatform_v1.types.JobState): + Output only. The detailed state of the job. + error (google.rpc.status_pb2.Status): + Output only. Only populated when the job's state is + JOB_STATE_FAILED or JOB_STATE_CANCELLED. + partial_failures (Sequence[google.rpc.status_pb2.Status]): + Output only. Partial failures encountered. + For example, single files that can't be read. + This field never exceeds 20 entries. + Status details fields contain standard GCP error + details. + resources_consumed (google.cloud.aiplatform_v1.types.ResourcesConsumed): + Output only. Information about resources that + had been consumed by this job. Provided in real + time on a best-effort basis, as well as a final + value once the job completes. + + Note: This field currently may not be populated + for batch predictions that use AutoML Models. + completion_stats (google.cloud.aiplatform_v1.types.CompletionStats): + Output only. Statistics on completed and + failed prediction instances. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the BatchPredictionJob + was created. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the BatchPredictionJob for the first + time entered the ``JOB_STATE_RUNNING`` state. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only.
Time when the BatchPredictionJob entered any of + the following states: ``JOB_STATE_SUCCEEDED``, + ``JOB_STATE_FAILED``, ``JOB_STATE_CANCELLED``. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the BatchPredictionJob + was most recently updated. + labels (Sequence[google.cloud.aiplatform_v1.types.BatchPredictionJob.LabelsEntry]): + The labels with user-defined metadata to + organize BatchPredictionJobs. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): + Customer-managed encryption key options for a + BatchPredictionJob. If this is set, then all + resources created by the BatchPredictionJob will + be encrypted with the provided encryption key. + """ + + class InputConfig(proto.Message): + r"""Configures the input to + [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. + See + [Model.supported_input_storage_formats][google.cloud.aiplatform.v1.Model.supported_input_storage_formats] + for Model's supported input formats, and how instances should be + expressed via any of them. + + Attributes: + gcs_source (google.cloud.aiplatform_v1.types.GcsSource): + The Cloud Storage location for the input + instances. + bigquery_source (google.cloud.aiplatform_v1.types.BigQuerySource): + The BigQuery location of the input table. + The schema of the table should be in the format + described by the given context OpenAPI Schema, + if one is provided. The table may contain + additional columns that are not described by the + schema, and they will be ignored. + instances_format (str): + Required. The format in which instances are given, must be + one of the + [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model] + [supported_input_storage_formats][google.cloud.aiplatform.v1.Model.supported_input_storage_formats]. + """ + + gcs_source = proto.Field( + proto.MESSAGE, + number=2, + oneof='source', + message=io.GcsSource, + ) + bigquery_source = proto.Field( + proto.MESSAGE, + number=3, + oneof='source', + message=io.BigQuerySource, + ) + instances_format = proto.Field( + proto.STRING, + number=1, + ) + + class OutputConfig(proto.Message): + r"""Configures the output of + [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. + See + [Model.supported_output_storage_formats][google.cloud.aiplatform.v1.Model.supported_output_storage_formats] + for supported output formats, and how predictions are expressed via + any of them. + + Attributes: + gcs_destination (google.cloud.aiplatform_v1.types.GcsDestination): + The Cloud Storage location of the directory where the output + is to be written to. In the given directory a new directory + is created. Its name is + ``prediction--``, where + timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. + Inside of it files ``predictions_0001.``, + ``predictions_0002.``, ..., + ``predictions_N.`` are created where + ```` depends on chosen + [predictions_format][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.predictions_format], + and N may equal 0001 and depends on the total number of + successfully predicted instances. 
If the Model has both + [instance][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] + and + [prediction][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri] + schemata defined then each such file contains predictions as + per the + [predictions_format][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.predictions_format]. + If prediction for any instance failed (partially or + completely), then an additional ``errors_0001.``, + ``errors_0002.``,..., ``errors_N.`` + files are created (N depends on total number of failed + predictions). These files contain the failed instances, as + per their schema, followed by an additional ``error`` field + which as value has [google.rpc.Status][google.rpc.Status] + containing only ``code`` and ``message`` fields. + bigquery_destination (google.cloud.aiplatform_v1.types.BigQueryDestination): + The BigQuery project or dataset location where the output is + to be written to. If project is provided, a new dataset is + created with name + ``prediction__`` where + is made BigQuery-dataset-name compatible (for example, most + special characters become underscores), and timestamp is in + YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the + dataset two tables will be created, ``predictions``, and + ``errors``. If the Model has both + [instance][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] + and + [prediction][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri] + schemata defined then the tables have columns as follows: + The ``predictions`` table contains instances for which the + prediction succeeded, it has columns as per a concatenation + of the Model's instance and prediction schemata. The + ``errors`` table contains rows for which the prediction has + failed, it has instance columns, as per the instance schema, + followed by a single "errors" column, which as values has + [google.rpc.Status][google.rpc.Status] represented as a + STRUCT, and containing only ``code`` and ``message``. + predictions_format (str): + Required. The format in which Vertex AI gives the + predictions, must be one of the + [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model] + [supported_output_storage_formats][google.cloud.aiplatform.v1.Model.supported_output_storage_formats]. + """ + + gcs_destination = proto.Field( + proto.MESSAGE, + number=2, + oneof='destination', + message=io.GcsDestination, + ) + bigquery_destination = proto.Field( + proto.MESSAGE, + number=3, + oneof='destination', + message=io.BigQueryDestination, + ) + predictions_format = proto.Field( + proto.STRING, + number=1, + ) + + class OutputInfo(proto.Message): + r"""Further describes this job's output. Supplements + [output_config][google.cloud.aiplatform.v1.BatchPredictionJob.output_config]. + + Attributes: + gcs_output_directory (str): + Output only. The full path of the Cloud + Storage directory created, into which the + prediction output is written. + bigquery_output_dataset (str): + Output only. The path of the BigQuery dataset created, in + ``bq://projectId.bqDatasetId`` format, into which the + prediction output is written. + bigquery_output_table (str): + Output only. The name of the BigQuery table created, in + ``predictions_`` format, into which the + prediction output is written. Can be used by UI to generate + the BigQuery output path, for example. 
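Review note: the InputConfig/OutputConfig docstrings above describe a plain source/destination oneof pair. A minimal construction sketch under the field names shown in this diff (bucket and model paths are placeholders):

```python
from google.cloud.aiplatform_v1.types import batch_prediction_job, io

job = batch_prediction_job.BatchPredictionJob(
    display_name="my-batch-job",  # placeholder
    model="projects/my-project/locations/us-central1/models/123",  # placeholder
    input_config=batch_prediction_job.BatchPredictionJob.InputConfig(
        instances_format="jsonl",
        # gcs_source and bigquery_source share the 'source' oneof.
        gcs_source=io.GcsSource(uris=["gs://my-bucket/instances.jsonl"]),
    ),
    output_config=batch_prediction_job.BatchPredictionJob.OutputConfig(
        predictions_format="jsonl",
        # gcs_destination and bigquery_destination share 'destination'.
        gcs_destination=io.GcsDestination(
            output_uri_prefix="gs://my-bucket/output/",
        ),
    ),
)
```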
+ """ + + gcs_output_directory = proto.Field( + proto.STRING, + number=1, + oneof='output_location', + ) + bigquery_output_dataset = proto.Field( + proto.STRING, + number=2, + oneof='output_location', + ) + bigquery_output_table = proto.Field( + proto.STRING, + number=4, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + model = proto.Field( + proto.STRING, + number=3, + ) + input_config = proto.Field( + proto.MESSAGE, + number=4, + message=InputConfig, + ) + model_parameters = proto.Field( + proto.MESSAGE, + number=5, + message=struct_pb2.Value, + ) + output_config = proto.Field( + proto.MESSAGE, + number=6, + message=OutputConfig, + ) + dedicated_resources = proto.Field( + proto.MESSAGE, + number=7, + message=machine_resources.BatchDedicatedResources, + ) + manual_batch_tuning_parameters = proto.Field( + proto.MESSAGE, + number=8, + message=gca_manual_batch_tuning_parameters.ManualBatchTuningParameters, + ) + output_info = proto.Field( + proto.MESSAGE, + number=9, + message=OutputInfo, + ) + state = proto.Field( + proto.ENUM, + number=10, + enum=job_state.JobState, + ) + error = proto.Field( + proto.MESSAGE, + number=11, + message=status_pb2.Status, + ) + partial_failures = proto.RepeatedField( + proto.MESSAGE, + number=12, + message=status_pb2.Status, + ) + resources_consumed = proto.Field( + proto.MESSAGE, + number=13, + message=machine_resources.ResourcesConsumed, + ) + completion_stats = proto.Field( + proto.MESSAGE, + number=14, + message=gca_completion_stats.CompletionStats, + ) + create_time = proto.Field( + proto.MESSAGE, + number=15, + message=timestamp_pb2.Timestamp, + ) + start_time = proto.Field( + proto.MESSAGE, + number=16, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=17, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=18, + message=timestamp_pb2.Timestamp, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=19, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=24, + message=gca_encryption_spec.EncryptionSpec, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/completion_stats.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/completion_stats.py new file mode 100644 index 0000000000..289efbc59b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/completion_stats.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'CompletionStats', + }, +) + + +class CompletionStats(proto.Message): + r"""Success and error statistics of processing multiple entities + (for example, DataItems or structured data rows) in batch. + + Attributes: + successful_count (int): + Output only. 
The number of entities that had + been processed successfully. + failed_count (int): + Output only. The number of entities for which + any error was encountered. + incomplete_count (int): + Output only. In cases when enough errors are + encountered, a job, pipeline, or operation may + fail as a whole. Below is the number of + entities for which the processing had not been + finished (either in successful or failed state). + Set to -1 if the number is unknown (for example, + the operation failed before the total entity + number could be collected). + """ + + successful_count = proto.Field( + proto.INT64, + number=1, + ) + failed_count = proto.Field( + proto.INT64, + number=2, + ) + incomplete_count = proto.Field( + proto.INT64, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/context.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/context.py new file mode 100644 index 0000000000..ab2830492b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/context.py @@ -0,0 +1,97 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'Context', + }, +) + + +class Context(proto.Message): + r"""Instance of a general context. + Attributes: + name (str): + Output only. The resource name of the + Context. + display_name (str): + User provided display name of the Context. + May be up to 128 Unicode characters. + etag (str): + An eTag used to perform consistent read-modify-write + updates. If not set, a blind + "overwrite" update happens. + labels (Sequence[google.cloud.aiplatform_v1.types.Context.LabelsEntry]): + The labels with user-defined metadata to + organize your Contexts. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. No more than 64 user labels can be + associated with one Context (System labels are + excluded). + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Context was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Context was + last updated. + parent_contexts (Sequence[str]): + Output only. A list of resource names of Contexts that are + parents of this Context. A Context may have at most 10 + parent_contexts.
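Review note: proto-plus messages such as Context accept plain dicts for map fields and behave like Python sequences for repeated fields; a small sketch (values are placeholders):

```python
from google.cloud.aiplatform_v1.types import Context

context = Context(
    display_name="experiment-run-1",          # placeholder
    labels={"team": "vision", "stage": "dev"},  # labels is a MapField
)

# Output-only fields such as parent_contexts are populated by the server,
# but read like ordinary sequences on the message object:
for parent in context.parent_contexts:
    print(parent)
```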
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + etag = proto.Field( + proto.STRING, + number=8, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=9, + ) + create_time = proto.Field( + proto.MESSAGE, + number=10, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=11, + message=timestamp_pb2.Timestamp, + ) + parent_contexts = proto.RepeatedField( + proto.STRING, + number=12, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/custom_job.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/custom_job.py new file mode 100644 index 0000000000..a7fa52d537 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/custom_job.py @@ -0,0 +1,391 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1.types import env_var +from google.cloud.aiplatform_v1.types import io +from google.cloud.aiplatform_v1.types import job_state +from google.cloud.aiplatform_v1.types import machine_resources +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'CustomJob', + 'CustomJobSpec', + 'WorkerPoolSpec', + 'ContainerSpec', + 'PythonPackageSpec', + 'Scheduling', + }, +) + + +class CustomJob(proto.Message): + r"""Represents a job that runs custom workloads such as a Docker + container or a Python package. A CustomJob can have multiple + worker pools and each worker pool can have its own machine and + input spec. A CustomJob will be cleaned up once the job enters + terminal state (failed or succeeded). + + Attributes: + name (str): + Output only. Resource name of a CustomJob. + display_name (str): + Required. The display name of the CustomJob. + The name can be up to 128 characters long and + can be consist of any UTF-8 characters. + job_spec (google.cloud.aiplatform_v1.types.CustomJobSpec): + Required. Job spec. + state (google.cloud.aiplatform_v1.types.JobState): + Output only. The detailed state of the job. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the CustomJob was + created. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the CustomJob for the first time + entered the ``JOB_STATE_RUNNING`` state. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the CustomJob entered any of the + following states: ``JOB_STATE_SUCCEEDED``, + ``JOB_STATE_FAILED``, ``JOB_STATE_CANCELLED``. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the CustomJob was most + recently updated. 
+ error (google.rpc.status_pb2.Status): + Output only. Only populated when job's state is + ``JOB_STATE_FAILED`` or ``JOB_STATE_CANCELLED``. + labels (Sequence[google.cloud.aiplatform_v1.types.CustomJob.LabelsEntry]): + The labels with user-defined metadata to + organize CustomJobs. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): + Customer-managed encryption key options for a + CustomJob. If this is set, then all resources + created by the CustomJob will be encrypted with + the provided encryption key. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + job_spec = proto.Field( + proto.MESSAGE, + number=4, + message='CustomJobSpec', + ) + state = proto.Field( + proto.ENUM, + number=5, + enum=job_state.JobState, + ) + create_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + start_time = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=8, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=9, + message=timestamp_pb2.Timestamp, + ) + error = proto.Field( + proto.MESSAGE, + number=10, + message=status_pb2.Status, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=11, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=12, + message=gca_encryption_spec.EncryptionSpec, + ) + + +class CustomJobSpec(proto.Message): + r"""Represents the spec of a CustomJob. + Attributes: + worker_pool_specs (Sequence[google.cloud.aiplatform_v1.types.WorkerPoolSpec]): + Required. The spec of the worker pools + including machine type and Docker image. All + worker pools except the first one are optional + and can be skipped by providing an empty value. + scheduling (google.cloud.aiplatform_v1.types.Scheduling): + Scheduling options for a CustomJob. + service_account (str): + Specifies the service account for workload run-as account. + Users submitting jobs must have act-as permission on this + run-as account. If unspecified, the `AI Platform Custom Code + Service + Agent `__ + for the CustomJob's project is used. + network (str): + The full name of the Compute Engine + `network `__ + to which the Job should be peered. For example, + ``projects/12345/global/networks/myVPC``. + `Format `__ + is of the form + ``projects/{project}/global/networks/{network}``. Where + {project} is a project number, as in ``12345``, and + {network} is a network name. + + Private services access must already be configured for the + network. If left unspecified, the job is not peered with any + network. + base_output_directory (google.cloud.aiplatform_v1.types.GcsDestination): + The Cloud Storage location to store the output of this + CustomJob or HyperparameterTuningJob. For + HyperparameterTuningJob, the baseOutputDirectory of each + child CustomJob backing a Trial is set to a subdirectory of + name [id][google.cloud.aiplatform.v1.Trial.id] under its + parent HyperparameterTuningJob's baseOutputDirectory. 
+ + The following Vertex AI environment variables will be passed + to containers or python modules when this field is set: + + For CustomJob: + + - AIP_MODEL_DIR = ``/model/`` + - AIP_CHECKPOINT_DIR = + ``/checkpoints/`` + - AIP_TENSORBOARD_LOG_DIR = + ``/logs/`` + + For CustomJob backing a Trial of HyperparameterTuningJob: + + - AIP_MODEL_DIR = + ``//model/`` + - AIP_CHECKPOINT_DIR = + ``//checkpoints/`` + - AIP_TENSORBOARD_LOG_DIR = + ``//logs/`` + """ + + worker_pool_specs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='WorkerPoolSpec', + ) + scheduling = proto.Field( + proto.MESSAGE, + number=3, + message='Scheduling', + ) + service_account = proto.Field( + proto.STRING, + number=4, + ) + network = proto.Field( + proto.STRING, + number=5, + ) + base_output_directory = proto.Field( + proto.MESSAGE, + number=6, + message=io.GcsDestination, + ) + + +class WorkerPoolSpec(proto.Message): + r"""Represents the spec of a worker pool in a job. + Attributes: + container_spec (google.cloud.aiplatform_v1.types.ContainerSpec): + The custom container task. + python_package_spec (google.cloud.aiplatform_v1.types.PythonPackageSpec): + The Python packaged task. + machine_spec (google.cloud.aiplatform_v1.types.MachineSpec): + Optional. Immutable. The specification of a + single machine. + replica_count (int): + Optional. The number of worker replicas to + use for this worker pool. + disk_spec (google.cloud.aiplatform_v1.types.DiskSpec): + Disk spec. + """ + + container_spec = proto.Field( + proto.MESSAGE, + number=6, + oneof='task', + message='ContainerSpec', + ) + python_package_spec = proto.Field( + proto.MESSAGE, + number=7, + oneof='task', + message='PythonPackageSpec', + ) + machine_spec = proto.Field( + proto.MESSAGE, + number=1, + message=machine_resources.MachineSpec, + ) + replica_count = proto.Field( + proto.INT64, + number=2, + ) + disk_spec = proto.Field( + proto.MESSAGE, + number=5, + message=machine_resources.DiskSpec, + ) + + +class ContainerSpec(proto.Message): + r"""The spec of a Container. + Attributes: + image_uri (str): + Required. The URI of a container image in the + Container Registry that is to be run on each + worker replica. + command (Sequence[str]): + The command to be invoked when the container + is started. It overrides the entrypoint + instruction in Dockerfile when provided. + args (Sequence[str]): + The arguments to be passed when starting the + container. + env (Sequence[google.cloud.aiplatform_v1.types.EnvVar]): + Environment variables to be passed to the + container. + """ + + image_uri = proto.Field( + proto.STRING, + number=1, + ) + command = proto.RepeatedField( + proto.STRING, + number=2, + ) + args = proto.RepeatedField( + proto.STRING, + number=3, + ) + env = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=env_var.EnvVar, + ) + + +class PythonPackageSpec(proto.Message): + r"""The spec of a Python packaged code. + Attributes: + executor_image_uri (str): + Required. The URI of a container image in Artifact Registry + that will run the provided Python package. Vertex AI + provides a wide range of executor images with pre-installed + packages to meet users' various use cases. See the list of + `pre-built containers for + training `__. + You must use an image from this list. + package_uris (Sequence[str]): + Required. The Google Cloud Storage location + of the Python package files which are the + training program and its dependent packages. The + maximum number of package URIs is 100. + python_module (str): + Required. 
The Python module name to run after + installing the packages. + args (Sequence[str]): + Command line arguments to be passed to the + Python task. + env (Sequence[google.cloud.aiplatform_v1.types.EnvVar]): + Environment variables to be passed to the + Python module. + """ + + executor_image_uri = proto.Field( + proto.STRING, + number=1, + ) + package_uris = proto.RepeatedField( + proto.STRING, + number=2, + ) + python_module = proto.Field( + proto.STRING, + number=3, + ) + args = proto.RepeatedField( + proto.STRING, + number=4, + ) + env = proto.RepeatedField( + proto.MESSAGE, + number=5, + message=env_var.EnvVar, + ) + + +class Scheduling(proto.Message): + r"""All parameters related to queuing and scheduling of custom + jobs. + + Attributes: + timeout (google.protobuf.duration_pb2.Duration): + The maximum job running time. The default is + 7 days. + restart_job_on_worker_restart (bool): + Restarts the entire CustomJob if a worker + gets restarted. This feature can be used by + distributed training jobs that are not resilient + to workers leaving and joining a job. + """ + + timeout = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + restart_job_on_worker_restart = proto.Field( + proto.BOOL, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_item.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_item.py new file mode 100644 index 0000000000..0ec4a5901e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_item.py @@ -0,0 +1,101 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'DataItem', + }, +) + + +class DataItem(proto.Message): + r"""A piece of data in a Dataset. Could be an image, a video, a + document or plain text. + + Attributes: + name (str): + Output only. The resource name of the + DataItem. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this DataItem was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this DataItem was + last updated. + labels (Sequence[google.cloud.aiplatform_v1.types.DataItem.LabelsEntry]): + Optional. The labels with user-defined + metadata to organize your DataItems. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. No more than 64 user labels can be + associated with one DataItem (System labels are + excluded). + + See https://goo.gl/xmQnxf for more information + and examples of labels. System reserved label + keys are prefixed with + "aiplatform.googleapis.com/" and are immutable.
+ payload (google.protobuf.struct_pb2.Value): + Required. The data that the DataItem represents (for + example, an image or a text snippet). The schema of the + payload is stored in the parent Dataset's [metadata + schema's][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] + dataItemSchemaUri field. + etag (str): + Optional. Used to perform consistent read-modify-write + updates. If not set, a blind + "overwrite" update happens. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + create_time = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=3, + ) + payload = proto.Field( + proto.MESSAGE, + number=4, + message=struct_pb2.Value, + ) + etag = proto.Field( + proto.STRING, + number=7, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_labeling_job.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_labeling_job.py new file mode 100644 index 0000000000..f072ecc502 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_labeling_job.py @@ -0,0 +1,332 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1.types import job_state +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from google.type import money_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'DataLabelingJob', + 'ActiveLearningConfig', + 'SampleConfig', + 'TrainingConfig', + }, +) + + +class DataLabelingJob(proto.Message): + r"""DataLabelingJob is used to trigger a human labeling job on + unlabeled data from the following Dataset: + + Attributes: + name (str): + Output only. Resource name of the + DataLabelingJob. + display_name (str): + Required. The user-defined name of the + DataLabelingJob. The name can be up to 128 + characters long and can consist of any UTF-8 + characters. + datasets (Sequence[str]): + Required. Dataset resource names. Right now we only support + labeling from a single Dataset. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + annotation_labels (Sequence[google.cloud.aiplatform_v1.types.DataLabelingJob.AnnotationLabelsEntry]): + Labels to assign to annotations generated by + this DataLabelingJob. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed.
See https://goo.gl/xmQnxf for more
+            information and examples of labels. System
+            reserved label keys are prefixed with
+            "aiplatform.googleapis.com/" and are immutable.
+        labeler_count (int):
+            Required. Number of labelers to work on each
+            DataItem.
+        instruction_uri (str):
+            Required. The Google Cloud Storage location
+            of the instruction PDF. This PDF is shared with
+            labelers, and provides a detailed description of
+            how to label DataItems in Datasets.
+        inputs_schema_uri (str):
+            Required. Points to a YAML file stored on
+            Google Cloud Storage describing the config for a
+            specific type of DataLabelingJob. The schema
+            files that can be used here are found in the
+            https://storage.googleapis.com/google-cloud-aiplatform
+            bucket in the /schema/datalabelingjob/inputs/
+            folder.
+        inputs (google.protobuf.struct_pb2.Value):
+            Required. Input config parameters for the
+            DataLabelingJob.
+        state (google.cloud.aiplatform_v1.types.JobState):
+            Output only. The detailed state of the job.
+        labeling_progress (int):
+            Output only. Current labeling job progress percentage scaled
+            in interval [0, 100], indicating the percentage of DataItems
+            that have been finished.
+        current_spend (google.type.money_pb2.Money):
+            Output only. Estimated cost (in US dollars)
+            that the DataLabelingJob has incurred to date.
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this
+            DataLabelingJob was created.
+        update_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this
+            DataLabelingJob was updated most recently.
+        error (google.rpc.status_pb2.Status):
+            Output only. DataLabelingJob errors. It is only populated
+            when job's state is ``JOB_STATE_FAILED`` or
+            ``JOB_STATE_CANCELLED``.
+        labels (Sequence[google.cloud.aiplatform_v1.types.DataLabelingJob.LabelsEntry]):
+            The labels with user-defined metadata to organize your
+            DataLabelingJobs.
+
+            Label keys and values can be no longer than 64 characters
+            (Unicode codepoints), can only contain lowercase letters,
+            numeric characters, underscores and dashes. International
+            characters are allowed.
+
+            See https://goo.gl/xmQnxf for more information and examples
+            of labels. System reserved label keys are prefixed with
+            "aiplatform.googleapis.com/" and are immutable. Following
+            system labels exist for each DataLabelingJob:
+
+            -  "aiplatform.googleapis.com/schema": output only, its
+               value is the
+               [inputs_schema][google.cloud.aiplatform.v1.DataLabelingJob.inputs_schema_uri]'s
+               title.
+        specialist_pools (Sequence[str]):
+            The SpecialistPools' resource names
+            associated with this job.
+        encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec):
+            Customer-managed encryption key spec for a
+            DataLabelingJob. If set, this DataLabelingJob
+            will be secured by this key.
+            Note: Annotations created in the DataLabelingJob
+            are associated with the EncryptionSpec of the
+            Dataset they are exported to.
+        active_learning_config (google.cloud.aiplatform_v1.types.ActiveLearningConfig):
+            Parameters that configure the active learning
+            pipeline. Active learning will label the data
+            incrementally via several iterations. For every
+            iteration, it will select a batch of data based
+            on the sampling strategy.
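+
+    A minimal construction sketch (not part of the generated reference
+    docs; the resource names and URIs are placeholders, and ``inputs``
+    would normally follow the schema named by ``inputs_schema_uri``)::
+
+        from google.cloud import aiplatform_v1
+        from google.protobuf import struct_pb2
+
+        job = aiplatform_v1.DataLabelingJob(
+            display_name="my-labeling-job",
+            datasets=["projects/my-project/locations/us-central1/datasets/123"],
+            labeler_count=1,
+            instruction_uri="gs://my-bucket/instruction.pdf",
+            inputs_schema_uri="gs://my-bucket/inputs_schema.yaml",
+            # Placeholder inputs; real values depend on the schema above.
+            inputs=struct_pb2.Value(struct_value=struct_pb2.Struct()),
+        )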
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + datasets = proto.RepeatedField( + proto.STRING, + number=3, + ) + annotation_labels = proto.MapField( + proto.STRING, + proto.STRING, + number=12, + ) + labeler_count = proto.Field( + proto.INT32, + number=4, + ) + instruction_uri = proto.Field( + proto.STRING, + number=5, + ) + inputs_schema_uri = proto.Field( + proto.STRING, + number=6, + ) + inputs = proto.Field( + proto.MESSAGE, + number=7, + message=struct_pb2.Value, + ) + state = proto.Field( + proto.ENUM, + number=8, + enum=job_state.JobState, + ) + labeling_progress = proto.Field( + proto.INT32, + number=13, + ) + current_spend = proto.Field( + proto.MESSAGE, + number=14, + message=money_pb2.Money, + ) + create_time = proto.Field( + proto.MESSAGE, + number=9, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=10, + message=timestamp_pb2.Timestamp, + ) + error = proto.Field( + proto.MESSAGE, + number=22, + message=status_pb2.Status, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=11, + ) + specialist_pools = proto.RepeatedField( + proto.STRING, + number=16, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=20, + message=gca_encryption_spec.EncryptionSpec, + ) + active_learning_config = proto.Field( + proto.MESSAGE, + number=21, + message='ActiveLearningConfig', + ) + + +class ActiveLearningConfig(proto.Message): + r"""Parameters that configure the active learning pipeline. + Active learning will label the data incrementally by several + iterations. For every iteration, it will select a batch of data + based on the sampling strategy. + + Attributes: + max_data_item_count (int): + Max number of human labeled DataItems. + max_data_item_percentage (int): + Max percent of total DataItems for human + labeling. + sample_config (google.cloud.aiplatform_v1.types.SampleConfig): + Active learning data sampling config. For + every active learning labeling iteration, it + will select a batch of data based on the + sampling strategy. + training_config (google.cloud.aiplatform_v1.types.TrainingConfig): + CMLE training config. For every active + learning labeling iteration, system will train a + machine learning model on CMLE. The trained + model will be used by data sampling algorithm to + select DataItems. + """ + + max_data_item_count = proto.Field( + proto.INT64, + number=1, + oneof='human_labeling_budget', + ) + max_data_item_percentage = proto.Field( + proto.INT32, + number=2, + oneof='human_labeling_budget', + ) + sample_config = proto.Field( + proto.MESSAGE, + number=3, + message='SampleConfig', + ) + training_config = proto.Field( + proto.MESSAGE, + number=4, + message='TrainingConfig', + ) + + +class SampleConfig(proto.Message): + r"""Active learning data sampling config. For every active + learning labeling iteration, it will select a batch of data + based on the sampling strategy. + + Attributes: + initial_batch_sample_percentage (int): + The percentage of data needed to be labeled + in the first batch. + following_batch_sample_percentage (int): + The percentage of data needed to be labeled + in each following batch (except the first + batch). + sample_strategy (google.cloud.aiplatform_v1.types.SampleConfig.SampleStrategy): + Field to choose sampling strategy. Sampling + strategy will decide which data should be + selected for human labeling in every batch. 
+ """ + class SampleStrategy(proto.Enum): + r"""Sample strategy decides which subset of DataItems should be + selected for human labeling in every batch. + """ + SAMPLE_STRATEGY_UNSPECIFIED = 0 + UNCERTAINTY = 1 + + initial_batch_sample_percentage = proto.Field( + proto.INT32, + number=1, + oneof='initial_batch_sample_size', + ) + following_batch_sample_percentage = proto.Field( + proto.INT32, + number=3, + oneof='following_batch_sample_size', + ) + sample_strategy = proto.Field( + proto.ENUM, + number=5, + enum=SampleStrategy, + ) + + +class TrainingConfig(proto.Message): + r"""CMLE training config. For every active learning labeling + iteration, system will train a machine learning model on CMLE. + The trained model will be used by data sampling algorithm to + select DataItems. + + Attributes: + timeout_training_milli_hours (int): + The timeout hours for the CMLE training job, + expressed in milli hours i.e. 1,000 value in + this field means 1 hour. + """ + + timeout_training_milli_hours = proto.Field( + proto.INT64, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset.py new file mode 100644 index 0000000000..11c8574659 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset.py @@ -0,0 +1,220 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1.types import io +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'Dataset', + 'ImportDataConfig', + 'ExportDataConfig', + }, +) + + +class Dataset(proto.Message): + r"""A collection of DataItems and Annotations on them. + Attributes: + name (str): + Output only. The resource name of the + Dataset. + display_name (str): + Required. The user-defined name of the + Dataset. The name can be up to 128 characters + long and can be consist of any UTF-8 characters. + metadata_schema_uri (str): + Required. Points to a YAML file stored on + Google Cloud Storage describing additional + information about the Dataset. The schema is + defined as an OpenAPI 3.0.2 Schema Object. The + schema files that can be used here are found in + gs://google-cloud- + aiplatform/schema/dataset/metadata/. + metadata (google.protobuf.struct_pb2.Value): + Required. Additional information about the + Dataset. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Dataset was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Dataset was + last updated. + etag (str): + Used to perform consistent read-modify-write + updates. If not set, a blind "overwrite" update + happens. 
+        labels (Sequence[google.cloud.aiplatform_v1.types.Dataset.LabelsEntry]):
+            The labels with user-defined metadata to organize your
+            Datasets.
+
+            Label keys and values can be no longer than 64 characters
+            (Unicode codepoints), can only contain lowercase letters,
+            numeric characters, underscores and dashes. International
+            characters are allowed. No more than 64 user labels can be
+            associated with one Dataset (System labels are excluded).
+
+            See https://goo.gl/xmQnxf for more information and examples
+            of labels. System reserved label keys are prefixed with
+            "aiplatform.googleapis.com/" and are immutable. Following
+            system labels exist for each Dataset:
+
+            -  "aiplatform.googleapis.com/dataset_metadata_schema":
+               output only, its value is the
+               [metadata_schema's][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri]
+               title.
+        encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec):
+            Customer-managed encryption key spec for a
+            Dataset. If set, this Dataset and all
+            sub-resources of this Dataset will be secured by
+            this key.
+    """
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    display_name = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    metadata_schema_uri = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    metadata = proto.Field(
+        proto.MESSAGE,
+        number=8,
+        message=struct_pb2.Value,
+    )
+    create_time = proto.Field(
+        proto.MESSAGE,
+        number=4,
+        message=timestamp_pb2.Timestamp,
+    )
+    update_time = proto.Field(
+        proto.MESSAGE,
+        number=5,
+        message=timestamp_pb2.Timestamp,
+    )
+    etag = proto.Field(
+        proto.STRING,
+        number=6,
+    )
+    labels = proto.MapField(
+        proto.STRING,
+        proto.STRING,
+        number=7,
+    )
+    encryption_spec = proto.Field(
+        proto.MESSAGE,
+        number=11,
+        message=gca_encryption_spec.EncryptionSpec,
+    )
+
+
+class ImportDataConfig(proto.Message):
+    r"""Describes the location from where we import data into a
+    Dataset, together with the labels that will be applied to the
+    DataItems and the Annotations.
+
+    Attributes:
+        gcs_source (google.cloud.aiplatform_v1.types.GcsSource):
+            The Google Cloud Storage location for the
+            input content.
+        data_item_labels (Sequence[google.cloud.aiplatform_v1.types.ImportDataConfig.DataItemLabelsEntry]):
+            Labels that will be applied to newly imported DataItems. If
+            an identical DataItem as one being imported already exists
+            in the Dataset, then these labels will be appended to those
+            of the already existing one, and if a label with an
+            identical key was imported before, the old label value will
+            be overwritten. If two DataItems are identical in the same
+            import data operation, the labels will be combined and if
+            key collision happens in this case, one of the values will
+            be picked randomly. Two DataItems are considered identical
+            if their content bytes are identical (e.g. image bytes or
+            pdf bytes). These labels will be overridden by Annotation
+            labels specified inside index file referenced by
+            [import_schema_uri][google.cloud.aiplatform.v1.ImportDataConfig.import_schema_uri],
+            e.g. jsonl file.
+        import_schema_uri (str):
+            Required. Points to a YAML file stored on Google Cloud
+            Storage describing the import format. Validation will be
+            done against the schema. The schema is defined as an
+            `OpenAPI 3.0.2 Schema
+            Object `__.
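+
+    A minimal construction sketch (illustrative only; the bucket paths
+    and schema URI are placeholders, not values defined by this API)::
+
+        from google.cloud import aiplatform_v1
+
+        import_config = aiplatform_v1.ImportDataConfig(
+            gcs_source=aiplatform_v1.GcsSource(
+                uris=["gs://my-bucket/import_file.jsonl"]
+            ),
+            import_schema_uri="gs://my-bucket/import_schema.yaml",
+            data_item_labels={"source": "batch-1"},
+        )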
+ """ + + gcs_source = proto.Field( + proto.MESSAGE, + number=1, + oneof='source', + message=io.GcsSource, + ) + data_item_labels = proto.MapField( + proto.STRING, + proto.STRING, + number=2, + ) + import_schema_uri = proto.Field( + proto.STRING, + number=4, + ) + + +class ExportDataConfig(proto.Message): + r"""Describes what part of the Dataset is to be exported, the + destination of the export and how to export. + + Attributes: + gcs_destination (google.cloud.aiplatform_v1.types.GcsDestination): + The Google Cloud Storage location where the output is to be + written to. In the given directory a new directory will be + created with name: + ``export-data--`` + where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 + format. All export output will be written into that + directory. Inside that directory, annotations with the same + schema will be grouped into sub directories which are named + with the corresponding annotations' schema title. Inside + these sub directories, a schema.yaml will be created to + describe the output format. + annotations_filter (str): + A filter on Annotations of the Dataset. Only Annotations on + to-be-exported DataItems(specified by [data_items_filter][]) + that match this filter will be exported. The filter syntax + is the same as in + [ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]. + """ + + gcs_destination = proto.Field( + proto.MESSAGE, + number=1, + oneof='destination', + message=io.GcsDestination, + ) + annotations_filter = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset_service.py new file mode 100644 index 0000000000..bcd7ca90bf --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset_service.py @@ -0,0 +1,542 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import annotation +from google.cloud.aiplatform_v1.types import data_item +from google.cloud.aiplatform_v1.types import dataset as gca_dataset +from google.cloud.aiplatform_v1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'CreateDatasetRequest', + 'CreateDatasetOperationMetadata', + 'GetDatasetRequest', + 'UpdateDatasetRequest', + 'ListDatasetsRequest', + 'ListDatasetsResponse', + 'DeleteDatasetRequest', + 'ImportDataRequest', + 'ImportDataResponse', + 'ImportDataOperationMetadata', + 'ExportDataRequest', + 'ExportDataResponse', + 'ExportDataOperationMetadata', + 'ListDataItemsRequest', + 'ListDataItemsResponse', + 'GetAnnotationSpecRequest', + 'ListAnnotationsRequest', + 'ListAnnotationsResponse', + }, +) + + +class CreateDatasetRequest(proto.Message): + r"""Request message for + [DatasetService.CreateDataset][google.cloud.aiplatform.v1.DatasetService.CreateDataset]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + Dataset in. Format: + ``projects/{project}/locations/{location}`` + dataset (google.cloud.aiplatform_v1.types.Dataset): + Required. The Dataset to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + dataset = proto.Field( + proto.MESSAGE, + number=2, + message=gca_dataset.Dataset, + ) + + +class CreateDatasetOperationMetadata(proto.Message): + r"""Runtime operation information for + [DatasetService.CreateDataset][google.cloud.aiplatform.v1.DatasetService.CreateDataset]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The operation generic information. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class GetDatasetRequest(proto.Message): + r"""Request message for + [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset]. + + Attributes: + name (str): + Required. The name of the Dataset resource. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class UpdateDatasetRequest(proto.Message): + r"""Request message for + [DatasetService.UpdateDataset][google.cloud.aiplatform.v1.DatasetService.UpdateDataset]. + + Attributes: + dataset (google.cloud.aiplatform_v1.types.Dataset): + Required. The Dataset which replaces the + resource on the server. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the resource. For the + ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + Updatable fields: + + - ``display_name`` + - ``description`` + - ``labels`` + """ + + dataset = proto.Field( + proto.MESSAGE, + number=1, + message=gca_dataset.Dataset, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class ListDatasetsRequest(proto.Message): + r"""Request message for + [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. + + Attributes: + parent (str): + Required. The name of the Dataset's parent resource. 
Format:
+            ``projects/{project}/locations/{location}``
+        filter (str):
+            An expression for filtering the results of the request. For
+            field names both snake_case and camelCase are supported.
+
+            -  ``display_name``: supports = and !=
+            -  ``metadata_schema_uri``: supports = and !=
+            -  ``labels`` supports general map functions, that is:
+
+               -  ``labels.key=value`` - key:value equality
+               -  ``labels.key:*`` or ``labels:key`` - key existence
+               -  A key including a space must be quoted.
+                  ``labels."a key"``.
+
+            Some examples:
+
+            -  ``displayName="myDisplayName"``
+            -  ``labels.myKey="myValue"``
+        page_size (int):
+            The standard list page size.
+        page_token (str):
+            The standard list page token.
+        read_mask (google.protobuf.field_mask_pb2.FieldMask):
+            Mask specifying which fields to read.
+        order_by (str):
+            A comma-separated list of fields to order by, sorted in
+            ascending order. Use "desc" after a field name for
+            descending. Supported fields:
+
+            -  ``display_name``
+            -  ``create_time``
+            -  ``update_time``
+    """
+
+    parent = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    filter = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    page_size = proto.Field(
+        proto.INT32,
+        number=3,
+    )
+    page_token = proto.Field(
+        proto.STRING,
+        number=4,
+    )
+    read_mask = proto.Field(
+        proto.MESSAGE,
+        number=5,
+        message=field_mask_pb2.FieldMask,
+    )
+    order_by = proto.Field(
+        proto.STRING,
+        number=6,
+    )
+
+
+class ListDatasetsResponse(proto.Message):
+    r"""Response message for
+    [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets].
+
+    Attributes:
+        datasets (Sequence[google.cloud.aiplatform_v1.types.Dataset]):
+            A list of Datasets that match the specified
+            filter in the request.
+        next_page_token (str):
+            The standard List next-page token.
+    """
+
+    @property
+    def raw_page(self):
+        return self
+
+    datasets = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message=gca_dataset.Dataset,
+    )
+    next_page_token = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+class DeleteDatasetRequest(proto.Message):
+    r"""Request message for
+    [DatasetService.DeleteDataset][google.cloud.aiplatform.v1.DatasetService.DeleteDataset].
+
+    Attributes:
+        name (str):
+            Required. The resource name of the Dataset to delete.
+            Format:
+            ``projects/{project}/locations/{location}/datasets/{dataset}``
+    """
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class ImportDataRequest(proto.Message):
+    r"""Request message for
+    [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData].
+
+    Attributes:
+        name (str):
+            Required. The name of the Dataset resource. Format:
+            ``projects/{project}/locations/{location}/datasets/{dataset}``
+        import_configs (Sequence[google.cloud.aiplatform_v1.types.ImportDataConfig]):
+            Required. The desired input locations. The
+            contents of all input locations will be imported
+            in one batch.
+    """
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    import_configs = proto.RepeatedField(
+        proto.MESSAGE,
+        number=2,
+        message=gca_dataset.ImportDataConfig,
+    )
+
+
+class ImportDataResponse(proto.Message):
+    r"""Response message for
+    [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData].
+    """
+
+
+class ImportDataOperationMetadata(proto.Message):
+    r"""Runtime operation information for
+    [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData].
+
+    Attributes:
+        generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
+            The common part of the operation metadata.
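+
+    A sketch of the long-running call that produces this metadata (the
+    resource names and URIs are placeholders; error handling is
+    omitted)::
+
+        from google.cloud import aiplatform_v1
+
+        client = aiplatform_v1.DatasetServiceClient()
+        operation = client.import_data(
+            name="projects/my-project/locations/us-central1/datasets/123",
+            import_configs=[
+                aiplatform_v1.ImportDataConfig(
+                    gcs_source=aiplatform_v1.GcsSource(
+                        uris=["gs://my-bucket/import_file.jsonl"]
+                    ),
+                    import_schema_uri="gs://my-bucket/import_schema.yaml",
+                )
+            ],
+        )
+        operation.result()  # blocks until the import LRO completes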
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class ExportDataRequest(proto.Message): + r"""Request message for + [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. + + Attributes: + name (str): + Required. The name of the Dataset resource. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + export_config (google.cloud.aiplatform_v1.types.ExportDataConfig): + Required. The desired output location. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + export_config = proto.Field( + proto.MESSAGE, + number=2, + message=gca_dataset.ExportDataConfig, + ) + + +class ExportDataResponse(proto.Message): + r"""Response message for + [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. + + Attributes: + exported_files (Sequence[str]): + All of the files that are exported in this + export operation. + """ + + exported_files = proto.RepeatedField( + proto.STRING, + number=1, + ) + + +class ExportDataOperationMetadata(proto.Message): + r"""Runtime operation information for + [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The common part of the operation metadata. + gcs_output_directory (str): + A Google Cloud Storage directory which path + ends with '/'. The exported data is stored in + the directory. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + gcs_output_directory = proto.Field( + proto.STRING, + number=2, + ) + + +class ListDataItemsRequest(proto.Message): + r"""Request message for + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. + + Attributes: + parent (str): + Required. The resource name of the Dataset to list DataItems + from. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + filter (str): + The standard list filter. + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + order_by (str): + A comma-separated list of fields to order by, + sorted in ascending order. Use "desc" after a + field name for descending. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + order_by = proto.Field( + proto.STRING, + number=6, + ) + + +class ListDataItemsResponse(proto.Message): + r"""Response message for + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. + + Attributes: + data_items (Sequence[google.cloud.aiplatform_v1.types.DataItem]): + A list of DataItems that matches the + specified filter in the request. + next_page_token (str): + The standard List next-page token. 
+ """ + + @property + def raw_page(self): + return self + + data_items = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=data_item.DataItem, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class GetAnnotationSpecRequest(proto.Message): + r"""Request message for + [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1.DatasetService.GetAnnotationSpec]. + + Attributes: + name (str): + Required. The name of the AnnotationSpec resource. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}`` + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class ListAnnotationsRequest(proto.Message): + r"""Request message for + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]. + + Attributes: + parent (str): + Required. The resource name of the DataItem to list + Annotations from. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}`` + filter (str): + The standard list filter. + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + order_by (str): + A comma-separated list of fields to order by, + sorted in ascending order. Use "desc" after a + field name for descending. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + order_by = proto.Field( + proto.STRING, + number=6, + ) + + +class ListAnnotationsResponse(proto.Message): + r"""Response message for + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]. + + Attributes: + annotations (Sequence[google.cloud.aiplatform_v1.types.Annotation]): + A list of Annotations that matches the + specified filter in the request. + next_page_token (str): + The standard List next-page token. + """ + + @property + def raw_page(self): + return self + + annotations = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=annotation.Annotation, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/deployed_model_ref.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/deployed_model_ref.py new file mode 100644 index 0000000000..b42f406e8c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/deployed_model_ref.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1',
+    manifest={
+        'DeployedModelRef',
+    },
+)
+
+
+class DeployedModelRef(proto.Message):
+    r"""Points to a DeployedModel.
+    Attributes:
+        endpoint (str):
+            Immutable. A resource name of an Endpoint.
+        deployed_model_id (str):
+            Immutable. An ID of a DeployedModel in the
+            above Endpoint.
+    """
+
+    endpoint = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    deployed_model_id = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/encryption_spec.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/encryption_spec.py
new file mode 100644
index 0000000000..3eda5aeb6d
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/encryption_spec.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1',
+    manifest={
+        'EncryptionSpec',
+    },
+)
+
+
+class EncryptionSpec(proto.Message):
+    r"""Represents a customer-managed encryption key spec that can be
+    applied to a top-level resource.
+
+    Attributes:
+        kms_key_name (str):
+            Required. The Cloud KMS resource identifier of the
+            customer-managed encryption key used to protect a resource.
+            Has the form:
+            ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
+            The key needs to be in the same region as where the compute
+            resource is created.
+    """
+
+    kms_key_name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint.py
new file mode 100644
index 0000000000..b8bbf96850
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint.py
@@ -0,0 +1,238 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec
+from google.cloud.aiplatform_v1.types import machine_resources
+from google.protobuf import timestamp_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1',
+    manifest={
+        'Endpoint',
+        'DeployedModel',
+    },
+)
+
+
+class Endpoint(proto.Message):
+    r"""Models are deployed into it, and afterwards the Endpoint is
+    called to obtain predictions and explanations.
+
+    Attributes:
+        name (str):
+            Output only. The resource name of the
+            Endpoint.
+        display_name (str):
+            Required. The display name of the Endpoint.
+            The name can be up to 128 characters long and
+            can consist of any UTF-8 characters.
+        description (str):
+            The description of the Endpoint.
+        deployed_models (Sequence[google.cloud.aiplatform_v1.types.DeployedModel]):
+            Output only. The models deployed in this Endpoint. To add or
+            remove DeployedModels use
+            [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel]
+            and
+            [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel]
+            respectively.
+        traffic_split (Sequence[google.cloud.aiplatform_v1.types.Endpoint.TrafficSplitEntry]):
+            A map from a DeployedModel's ID to the
+            percentage of this Endpoint's traffic that
+            should be forwarded to that DeployedModel.
+            If a DeployedModel's ID is not listed in this
+            map, then it receives no traffic.
+
+            The traffic percentage values must add up to
+            100, or the map must be empty if the Endpoint is
+            not to accept any traffic at the moment.
+        etag (str):
+            Used to perform consistent read-modify-write
+            updates. If not set, a blind "overwrite" update
+            happens.
+        labels (Sequence[google.cloud.aiplatform_v1.types.Endpoint.LabelsEntry]):
+            The labels with user-defined metadata to
+            organize your Endpoints.
+            Label keys and values can be no longer than 64
+            characters (Unicode codepoints), can only
+            contain lowercase letters, numeric characters,
+            underscores and dashes. International characters
+            are allowed.
+            See https://goo.gl/xmQnxf for more information
+            and examples of labels.
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this Endpoint was
+            created.
+        update_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this Endpoint was
+            last updated.
+        encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec):
+            Customer-managed encryption key spec for an
+            Endpoint. If set, this Endpoint and all
+            sub-resources of this Endpoint will be secured
+            by this key.
+    """
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    display_name = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    description = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    deployed_models = proto.RepeatedField(
+        proto.MESSAGE,
+        number=4,
+        message='DeployedModel',
+    )
+    traffic_split = proto.MapField(
+        proto.STRING,
+        proto.INT32,
+        number=5,
+    )
+    etag = proto.Field(
+        proto.STRING,
+        number=6,
+    )
+    labels = proto.MapField(
+        proto.STRING,
+        proto.STRING,
+        number=7,
+    )
+    create_time = proto.Field(
+        proto.MESSAGE,
+        number=8,
+        message=timestamp_pb2.Timestamp,
+    )
+    update_time = proto.Field(
+        proto.MESSAGE,
+        number=9,
+        message=timestamp_pb2.Timestamp,
+    )
+    encryption_spec = proto.Field(
+        proto.MESSAGE,
+        number=10,
+        message=gca_encryption_spec.EncryptionSpec,
+    )
+
+
+class DeployedModel(proto.Message):
+    r"""A deployment of a Model.
Endpoints contain one or more
+    DeployedModels.
+
+    Attributes:
+        dedicated_resources (google.cloud.aiplatform_v1.types.DedicatedResources):
+            A description of resources that are dedicated
+            to the DeployedModel, and that need a higher
+            degree of manual configuration.
+        automatic_resources (google.cloud.aiplatform_v1.types.AutomaticResources):
+            A description of resources that to a large
+            degree are decided by AI Platform, and require
+            only a modest additional configuration.
+        id (str):
+            Output only. The ID of the DeployedModel.
+        model (str):
+            Required. The name of the Model that this is
+            the deployment of. Note that the Model may be in
+            a different location than the DeployedModel's
+            Endpoint.
+        display_name (str):
+            The display name of the DeployedModel. If not provided upon
+            creation, the Model's display_name is used.
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when the DeployedModel
+            was created.
+        service_account (str):
+            The service account that the DeployedModel's container runs
+            as. Specify the email address of the service account. If
+            this service account is not specified, the container runs as
+            a service account that doesn't have access to the resource
+            project.
+
+            Users deploying the Model must have the
+            ``iam.serviceAccounts.actAs`` permission on this service
+            account.
+        disable_container_logging (bool):
+            For custom-trained Models and AutoML Tabular Models, the
+            container of the DeployedModel instances will send
+            ``stderr`` and ``stdout`` streams to Stackdriver Logging by
+            default. Please note that the logs incur costs, which are
+            subject to `Cloud Logging
+            pricing `__.
+
+            Users can disable container logging by setting this flag to
+            true.
+        enable_access_logging (bool):
+            These logs are like standard server access
+            logs, containing information like timestamp and
+            latency for each prediction request.
+            Note that Stackdriver logs may incur a cost,
+            especially if your project receives prediction
+            requests at a high queries per second rate
+            (QPS). Estimate your costs before enabling this
+            option.
+    """
+
+    dedicated_resources = proto.Field(
+        proto.MESSAGE,
+        number=7,
+        oneof='prediction_resources',
+        message=machine_resources.DedicatedResources,
+    )
+    automatic_resources = proto.Field(
+        proto.MESSAGE,
+        number=8,
+        oneof='prediction_resources',
+        message=machine_resources.AutomaticResources,
+    )
+    id = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    model = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    display_name = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    create_time = proto.Field(
+        proto.MESSAGE,
+        number=6,
+        message=timestamp_pb2.Timestamp,
+    )
+    service_account = proto.Field(
+        proto.STRING,
+        number=11,
+    )
+    disable_container_logging = proto.Field(
+        proto.BOOL,
+        number=15,
+    )
+    enable_access_logging = proto.Field(
+        proto.BOOL,
+        number=13,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint_service.py
new file mode 100644
index 0000000000..8a6b87d800
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint_service.py
@@ -0,0 +1,394 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint
+from google.cloud.aiplatform_v1.types import operation
+from google.protobuf import field_mask_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1',
+    manifest={
+        'CreateEndpointRequest',
+        'CreateEndpointOperationMetadata',
+        'GetEndpointRequest',
+        'ListEndpointsRequest',
+        'ListEndpointsResponse',
+        'UpdateEndpointRequest',
+        'DeleteEndpointRequest',
+        'DeployModelRequest',
+        'DeployModelResponse',
+        'DeployModelOperationMetadata',
+        'UndeployModelRequest',
+        'UndeployModelResponse',
+        'UndeployModelOperationMetadata',
+    },
+)
+
+
+class CreateEndpointRequest(proto.Message):
+    r"""Request message for
+    [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1.EndpointService.CreateEndpoint].
+
+    Attributes:
+        parent (str):
+            Required. The resource name of the Location to create the
+            Endpoint in. Format:
+            ``projects/{project}/locations/{location}``
+        endpoint (google.cloud.aiplatform_v1.types.Endpoint):
+            Required. The Endpoint to create.
+    """
+
+    parent = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    endpoint = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message=gca_endpoint.Endpoint,
+    )
+
+
+class CreateEndpointOperationMetadata(proto.Message):
+    r"""Runtime operation information for
+    [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1.EndpointService.CreateEndpoint].
+
+    Attributes:
+        generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
+            The operation generic information.
+    """
+
+    generic_metadata = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=operation.GenericOperationMetadata,
+    )
+
+
+class GetEndpointRequest(proto.Message):
+    r"""Request message for
+    [EndpointService.GetEndpoint][google.cloud.aiplatform.v1.EndpointService.GetEndpoint].
+
+    Attributes:
+        name (str):
+            Required. The name of the Endpoint resource. Format:
+            ``projects/{project}/locations/{location}/endpoints/{endpoint}``
+    """
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class ListEndpointsRequest(proto.Message):
+    r"""Request message for
+    [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints].
+
+    Attributes:
+        parent (str):
+            Required. The resource name of the Location from which to
+            list the Endpoints. Format:
+            ``projects/{project}/locations/{location}``
+        filter (str):
+            Optional. An expression for filtering the results of the
+            request. For field names both snake_case and camelCase are
+            supported.
+
+            -  ``endpoint`` supports = and !=. ``endpoint`` represents
+               the Endpoint ID, i.e. the last segment of the Endpoint's
+               [resource
+               name][google.cloud.aiplatform.v1.Endpoint.name].
+            -  ``display_name`` supports = and !=
+            -  ``labels`` supports general map functions, that is:
+
+               -  ``labels.key=value`` - key:value equality
+               -  ``labels.key:*`` or ``labels:key`` - key existence
+               -  A key including a space must be quoted.
+                  ``labels."a key"``.
+ + Some examples: + + - ``endpoint=1`` + - ``displayName="myDisplayName"`` + - ``labels.myKey="myValue"`` + page_size (int): + Optional. The standard list page size. + page_token (str): + Optional. The standard list page token. Typically obtained + via + [ListEndpointsResponse.next_page_token][google.cloud.aiplatform.v1.ListEndpointsResponse.next_page_token] + of the previous + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. Mask specifying which fields to + read. + order_by (str): + A comma-separated list of fields to order by, sorted in + ascending order. Use "desc" after a field name for + descending. Supported fields: + + - ``display_name`` + - ``create_time`` + - ``update_time`` + + Example: ``display_name, create_time desc``. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + order_by = proto.Field( + proto.STRING, + number=6, + ) + + +class ListEndpointsResponse(proto.Message): + r"""Response message for + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints]. + + Attributes: + endpoints (Sequence[google.cloud.aiplatform_v1.types.Endpoint]): + List of Endpoints in the requested page. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListEndpointsRequest.page_token][google.cloud.aiplatform.v1.ListEndpointsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + endpoints = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_endpoint.Endpoint, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateEndpointRequest(proto.Message): + r"""Request message for + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint]. + + Attributes: + endpoint (google.cloud.aiplatform_v1.types.Endpoint): + Required. The Endpoint which replaces the + resource on the server. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the resource. See + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + """ + + endpoint = proto.Field( + proto.MESSAGE, + number=1, + message=gca_endpoint.Endpoint, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class DeleteEndpointRequest(proto.Message): + r"""Request message for + [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1.EndpointService.DeleteEndpoint]. + + Attributes: + name (str): + Required. The name of the Endpoint resource to be deleted. + Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class DeployModelRequest(proto.Message): + r"""Request message for + [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel]. + + Attributes: + endpoint (str): + Required. The name of the Endpoint resource into which to + deploy a Model. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + deployed_model (google.cloud.aiplatform_v1.types.DeployedModel): + Required. The DeployedModel to be created within the + Endpoint. 
Note that
+            [Endpoint.traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]
+            must be updated for the DeployedModel to start receiving
+            traffic, either as part of this call, or via
+            [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint].
+        traffic_split (Sequence[google.cloud.aiplatform_v1.types.DeployModelRequest.TrafficSplitEntry]):
+            A map from a DeployedModel's ID to the percentage of this
+            Endpoint's traffic that should be forwarded to that
+            DeployedModel.
+
+            If this field is non-empty, then the Endpoint's
+            [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]
+            will be overwritten with it. To refer to the ID of the
+            Model that is being deployed with this request, a "0"
+            should be used, and the actual ID of the new DeployedModel
+            will be filled in its place by this method. The traffic
+            percentage values must add up to 100.
+
+            If this field is empty, then the Endpoint's
+            [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]
+            is not updated.
+    """
+
+    endpoint = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    deployed_model = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message=gca_endpoint.DeployedModel,
+    )
+    traffic_split = proto.MapField(
+        proto.STRING,
+        proto.INT32,
+        number=3,
+    )
+
+
+class DeployModelResponse(proto.Message):
+    r"""Response message for
+    [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel].
+
+    Attributes:
+        deployed_model (google.cloud.aiplatform_v1.types.DeployedModel):
+            The DeployedModel that had been deployed in
+            the Endpoint.
+    """
+
+    deployed_model = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=gca_endpoint.DeployedModel,
+    )
+
+
+class DeployModelOperationMetadata(proto.Message):
+    r"""Runtime operation information for
+    [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel].
+
+    Attributes:
+        generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
+            The operation generic information.
+    """
+
+    generic_metadata = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=operation.GenericOperationMetadata,
+    )
+
+
+class UndeployModelRequest(proto.Message):
+    r"""Request message for
+    [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel].
+
+    Attributes:
+        endpoint (str):
+            Required. The name of the Endpoint resource from which to
+            undeploy a Model. Format:
+            ``projects/{project}/locations/{location}/endpoints/{endpoint}``
+        deployed_model_id (str):
+            Required. The ID of the DeployedModel to be
+            undeployed from the Endpoint.
+        traffic_split (Sequence[google.cloud.aiplatform_v1.types.UndeployModelRequest.TrafficSplitEntry]):
+            If this field is provided, then the Endpoint's
+            [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]
+            will be overwritten with it. If the last DeployedModel is
+            being undeployed from the Endpoint, the
+            [Endpoint.traffic_split] will always end up empty when this
+            call returns. A DeployedModel will be successfully
+            undeployed only if it doesn't have any traffic assigned to
+            it when this method executes, or if this field unassigns all
+            traffic from it.
+    """
+
+    endpoint = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    deployed_model_id = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    traffic_split = proto.MapField(
+        proto.STRING,
+        proto.INT32,
+        number=3,
+    )
+
+
+class UndeployModelResponse(proto.Message):
+    r"""Response message for
+    [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel].
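+
+    This message has no fields; success is observed by the undeploy
+    long-running operation completing. An illustrative call (a sketch;
+    the resource names are placeholders)::
+
+        from google.cloud import aiplatform_v1
+
+        client = aiplatform_v1.EndpointServiceClient()
+        operation = client.undeploy_model(
+            endpoint="projects/my-project/locations/us-central1/endpoints/123",
+            deployed_model_id="456",
+        )
+        operation.result()  # returns this (empty) UndeployModelResponse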
+ """ + + +class UndeployModelOperationMetadata(proto.Message): + r"""Runtime operation information for + [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The operation generic information. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/env_var.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/env_var.py new file mode 100644 index 0000000000..956d93aff5 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/env_var.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'EnvVar', + }, +) + + +class EnvVar(proto.Message): + r"""Represents an environment variable present in a Container or + Python Module. + + Attributes: + name (str): + Required. Name of the environment variable. + Must be a valid C identifier. + value (str): + Required. Variables that reference a $(VAR_NAME) are + expanded using the previous defined environment variables in + the container and any service environment variables. If a + variable cannot be resolved, the reference in the input + string will be unchanged. The $(VAR_NAME) syntax can be + escaped with a double $$, ie: $$(VAR_NAME). Escaped + references will never be expanded, regardless of whether the + variable exists or not. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + value = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/execution.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/execution.py new file mode 100644 index 0000000000..528814fe52 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/execution.py @@ -0,0 +1,110 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import proto  # type: ignore
+
+from google.protobuf import timestamp_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1',
+    manifest={
+        'Execution',
+    },
+)
+
+
+class Execution(proto.Message):
+    r"""Instance of a general execution.
+    Attributes:
+        name (str):
+            Output only. The resource name of the
+            Execution.
+        display_name (str):
+            User provided display name of the Execution.
+            May be up to 128 Unicode characters.
+        state (google.cloud.aiplatform_v1.types.Execution.State):
+            The state of this Execution. This is a
+            property of the Execution, and does not imply or
+            capture any ongoing process. This property is
+            managed by clients (such as Vertex Pipelines)
+            and the system does not prescribe or check the
+            validity of state transitions.
+        etag (str):
+            An eTag used to perform consistent
+            read-modify-write updates. If not set, a blind
+            "overwrite" update happens.
+        labels (Sequence[google.cloud.aiplatform_v1.types.Execution.LabelsEntry]):
+            The labels with user-defined metadata to
+            organize your Executions.
+            Label keys and values can be no longer than 64
+            characters (Unicode codepoints), can only
+            contain lowercase letters, numeric characters,
+            underscores and dashes. International characters
+            are allowed. No more than 64 user labels can be
+            associated with one Execution (System labels are
+            excluded).
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this Execution
+            was created.
+        update_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this Execution
+            was last updated.
+    """
+    class State(proto.Enum):
+        r"""Describes the state of the Execution."""
+        STATE_UNSPECIFIED = 0
+        NEW = 1
+        RUNNING = 2
+        COMPLETE = 3
+        FAILED = 4
+        CACHED = 5
+        CANCELLED = 6
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    display_name = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    state = proto.Field(
+        proto.ENUM,
+        number=6,
+        enum=State,
+    )
+    etag = proto.Field(
+        proto.STRING,
+        number=9,
+    )
+    labels = proto.MapField(
+        proto.STRING,
+        proto.STRING,
+        number=10,
+    )
+    create_time = proto.Field(
+        proto.MESSAGE,
+        number=11,
+        message=timestamp_pb2.Timestamp,
+    )
+    update_time = proto.Field(
+        proto.MESSAGE,
+        number=12,
+        message=timestamp_pb2.Timestamp,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py
new file mode 100644
index 0000000000..c402d8b609
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py
@@ -0,0 +1,182 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.cloud.aiplatform_v1.types import custom_job
+from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec
+from google.cloud.aiplatform_v1.types import job_state
+from google.cloud.aiplatform_v1.types import study
+from google.protobuf import timestamp_pb2  # type: ignore
+from google.rpc import status_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1',
+    manifest={
+        'HyperparameterTuningJob',
+    },
+)
+
+
+class HyperparameterTuningJob(proto.Message):
+    r"""Represents a HyperparameterTuningJob. A
+    HyperparameterTuningJob has a Study specification and multiple
+    CustomJobs with identical CustomJob specification.
+
+    Attributes:
+        name (str):
+            Output only. Resource name of the
+            HyperparameterTuningJob.
+        display_name (str):
+            Required. The display name of the
+            HyperparameterTuningJob. The name can be up to
+            128 characters long and can consist of any
+            UTF-8 characters.
+        study_spec (google.cloud.aiplatform_v1.types.StudySpec):
+            Required. Study configuration of the
+            HyperparameterTuningJob.
+        max_trial_count (int):
+            Required. The desired total number of Trials.
+        parallel_trial_count (int):
+            Required. The desired number of Trials to run
+            in parallel.
+        max_failed_trial_count (int):
+            The number of failed Trials that need to be
+            seen before failing the HyperparameterTuningJob.
+            If set to 0, Vertex AI decides how many Trials
+            must fail before the whole job fails.
+        trial_job_spec (google.cloud.aiplatform_v1.types.CustomJobSpec):
+            Required. The spec of a trial job. The same
+            spec applies to the CustomJobs created in all
+            the trials.
+        trials (Sequence[google.cloud.aiplatform_v1.types.Trial]):
+            Output only. Trials of the
+            HyperparameterTuningJob.
+        state (google.cloud.aiplatform_v1.types.JobState):
+            Output only. The detailed state of the job.
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Time when the
+            HyperparameterTuningJob was created.
+        start_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Time when the HyperparameterTuningJob for the
+            first time entered the ``JOB_STATE_RUNNING`` state.
+        end_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Time when the HyperparameterTuningJob entered
+            any of the following states: ``JOB_STATE_SUCCEEDED``,
+            ``JOB_STATE_FAILED``, ``JOB_STATE_CANCELLED``.
+        update_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Time when the
+            HyperparameterTuningJob was most recently
+            updated.
+        error (google.rpc.status_pb2.Status):
+            Output only. Only populated when job's state is
+            JOB_STATE_FAILED or JOB_STATE_CANCELLED.
+        labels (Sequence[google.cloud.aiplatform_v1.types.HyperparameterTuningJob.LabelsEntry]):
+            The labels with user-defined metadata to
+            organize HyperparameterTuningJobs.
+            Label keys and values can be no longer than 64
+            characters (Unicode codepoints), can only
+            contain lowercase letters, numeric characters,
+            underscores and dashes. International characters
+            are allowed.
+            See https://goo.gl/xmQnxf for more information
+            and examples of labels.
+        encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec):
+            Customer-managed encryption key options for a
+            HyperparameterTuningJob. If this is set, then
+            all resources created by the
+            HyperparameterTuningJob will be encrypted with
+            the provided encryption key.
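+
+    A minimal construction sketch (not part of the generated API surface;
+    the display name, study parameters, and container image below are
+    illustrative assumptions)::
+
+        from google.cloud import aiplatform_v1
+
+        # Tune one double parameter on a log scale, maximizing one metric.
+        study_spec = aiplatform_v1.StudySpec(
+            metrics=[
+                aiplatform_v1.StudySpec.MetricSpec(
+                    metric_id="accuracy",
+                    goal=aiplatform_v1.StudySpec.MetricSpec.GoalType.MAXIMIZE,
+                ),
+            ],
+            parameters=[
+                aiplatform_v1.StudySpec.ParameterSpec(
+                    parameter_id="learning_rate",
+                    double_value_spec=aiplatform_v1.StudySpec.ParameterSpec.DoubleValueSpec(
+                        min_value=1e-4,
+                        max_value=1e-1,
+                    ),
+                    scale_type=aiplatform_v1.StudySpec.ParameterSpec.ScaleType.UNIT_LOG_SCALE,
+                ),
+            ],
+        )
+        # The same single-replica spec is used for every trial.
+        trial_job_spec = aiplatform_v1.CustomJobSpec(
+            worker_pool_specs=[
+                aiplatform_v1.WorkerPoolSpec(
+                    machine_spec=aiplatform_v1.MachineSpec(machine_type="n1-standard-4"),
+                    replica_count=1,
+                    container_spec=aiplatform_v1.ContainerSpec(
+                        image_uri="gcr.io/my-project/trainer:latest",
+                    ),
+                ),
+            ],
+        )
+        tuning_job = aiplatform_v1.HyperparameterTuningJob(
+            display_name="my-tuning-job",
+            study_spec=study_spec,
+            max_trial_count=16,
+            parallel_trial_count=4,
+            trial_job_spec=trial_job_spec,
+        )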
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + study_spec = proto.Field( + proto.MESSAGE, + number=4, + message=study.StudySpec, + ) + max_trial_count = proto.Field( + proto.INT32, + number=5, + ) + parallel_trial_count = proto.Field( + proto.INT32, + number=6, + ) + max_failed_trial_count = proto.Field( + proto.INT32, + number=7, + ) + trial_job_spec = proto.Field( + proto.MESSAGE, + number=8, + message=custom_job.CustomJobSpec, + ) + trials = proto.RepeatedField( + proto.MESSAGE, + number=9, + message=study.Trial, + ) + state = proto.Field( + proto.ENUM, + number=10, + enum=job_state.JobState, + ) + create_time = proto.Field( + proto.MESSAGE, + number=11, + message=timestamp_pb2.Timestamp, + ) + start_time = proto.Field( + proto.MESSAGE, + number=12, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=13, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=14, + message=timestamp_pb2.Timestamp, + ) + error = proto.Field( + proto.MESSAGE, + number=15, + message=status_pb2.Status, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=16, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=17, + message=gca_encryption_spec.EncryptionSpec, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/io.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/io.py new file mode 100644 index 0000000000..4cb224ae58 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/io.py @@ -0,0 +1,130 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'GcsSource', + 'GcsDestination', + 'BigQuerySource', + 'BigQueryDestination', + 'ContainerRegistryDestination', + }, +) + + +class GcsSource(proto.Message): + r"""The Google Cloud Storage location for the input content. + Attributes: + uris (Sequence[str]): + Required. Google Cloud Storage URI(-s) to the + input file(s). May contain wildcards. For more + information on wildcards, see + https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. + """ + + uris = proto.RepeatedField( + proto.STRING, + number=1, + ) + + +class GcsDestination(proto.Message): + r"""The Google Cloud Storage location where the output is to be + written to. + + Attributes: + output_uri_prefix (str): + Required. Google Cloud Storage URI to output + directory. If the uri doesn't end with '/', a + '/' will be automatically appended. The + directory is created if it doesn't exist. + """ + + output_uri_prefix = proto.Field( + proto.STRING, + number=1, + ) + + +class BigQuerySource(proto.Message): + r"""The BigQuery location for the input content. + Attributes: + input_uri (str): + Required. BigQuery URI to a table, up to 2000 characters + long. 
Accepted forms:
+
+            -  BigQuery path. For example:
+               ``bq://projectId.bqDatasetId.bqTableId``.
+    """
+
+    input_uri = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class BigQueryDestination(proto.Message):
+    r"""The BigQuery location for the output content.
+    Attributes:
+        output_uri (str):
+            Required. BigQuery URI to a project or table, up to 2000
+            characters long.
+
+            When only the project is specified, the Dataset and Table
+            are created. When the full table reference is specified,
+            the Dataset must exist and the table must not exist.
+
+            Accepted forms:
+
+            -  BigQuery path. For example: ``bq://projectId`` or
+               ``bq://projectId.bqDatasetId`` or
+               ``bq://projectId.bqDatasetId.bqTableId``.
+    """
+
+    output_uri = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class ContainerRegistryDestination(proto.Message):
+    r"""The Container Registry location for the container image.
+    Attributes:
+        output_uri (str):
+            Required. Container Registry URI of a container image. Only
+            Google Container Registry and Artifact Registry are
+            supported now. Accepted forms:
+
+            -  Google Container Registry path. For example:
+               ``gcr.io/projectId/imageName:tag``.
+
+            -  Artifact Registry path. For example:
+               ``us-central1-docker.pkg.dev/projectId/repoName/imageName:tag``.
+
+            If a tag is not specified, "latest" will be used as the
+            default tag.
+    """
+
+    output_uri = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_service.py
new file mode 100644
index 0000000000..8d39c4727b
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_service.py
@@ -0,0 +1,723 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import batch_prediction_job as gca_batch_prediction_job +from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job +from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job +from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'CreateCustomJobRequest', + 'GetCustomJobRequest', + 'ListCustomJobsRequest', + 'ListCustomJobsResponse', + 'DeleteCustomJobRequest', + 'CancelCustomJobRequest', + 'CreateDataLabelingJobRequest', + 'GetDataLabelingJobRequest', + 'ListDataLabelingJobsRequest', + 'ListDataLabelingJobsResponse', + 'DeleteDataLabelingJobRequest', + 'CancelDataLabelingJobRequest', + 'CreateHyperparameterTuningJobRequest', + 'GetHyperparameterTuningJobRequest', + 'ListHyperparameterTuningJobsRequest', + 'ListHyperparameterTuningJobsResponse', + 'DeleteHyperparameterTuningJobRequest', + 'CancelHyperparameterTuningJobRequest', + 'CreateBatchPredictionJobRequest', + 'GetBatchPredictionJobRequest', + 'ListBatchPredictionJobsRequest', + 'ListBatchPredictionJobsResponse', + 'DeleteBatchPredictionJobRequest', + 'CancelBatchPredictionJobRequest', + }, +) + + +class CreateCustomJobRequest(proto.Message): + r"""Request message for + [JobService.CreateCustomJob][google.cloud.aiplatform.v1.JobService.CreateCustomJob]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + CustomJob in. Format: + ``projects/{project}/locations/{location}`` + custom_job (google.cloud.aiplatform_v1.types.CustomJob): + Required. The CustomJob to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + custom_job = proto.Field( + proto.MESSAGE, + number=2, + message=gca_custom_job.CustomJob, + ) + + +class GetCustomJobRequest(proto.Message): + r"""Request message for + [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob]. + + Attributes: + name (str): + Required. The name of the CustomJob resource. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListCustomJobsRequest(proto.Message): + r"""Request message for + [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + CustomJobs from. Format: + ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. + + Supported fields: + + - ``display_name`` supports = and !=. + + - ``state`` supports = and !=. + + Some examples of using the filter are: + + - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` + + - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` + + - ``NOT display_name="my_job"`` + + - ``state="JOB_STATE_FAILED"`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListCustomJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListCustomJobsResponse.next_page_token] + of the previous + [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. 
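+
+    A hedged usage sketch (the project and location below are
+    placeholders; the filter is one of the supported forms listed
+    above)::
+
+        from google.cloud import aiplatform_v1
+
+        client = aiplatform_v1.JobServiceClient()
+        request = aiplatform_v1.ListCustomJobsRequest(
+            parent="projects/my-project/locations/us-central1",
+            filter='state="JOB_STATE_RUNNING"',
+        )
+        # The returned pager lazily fetches further pages as you iterate.
+        for custom_job in client.list_custom_jobs(request=request):
+            print(custom_job.display_name, custom_job.state)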
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListCustomJobsResponse(proto.Message): + r"""Response message for + [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs] + + Attributes: + custom_jobs (Sequence[google.cloud.aiplatform_v1.types.CustomJob]): + List of CustomJobs in the requested page. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListCustomJobsRequest.page_token][google.cloud.aiplatform.v1.ListCustomJobsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + custom_jobs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_custom_job.CustomJob, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteCustomJobRequest(proto.Message): + r"""Request message for + [JobService.DeleteCustomJob][google.cloud.aiplatform.v1.JobService.DeleteCustomJob]. + + Attributes: + name (str): + Required. The name of the CustomJob resource to be deleted. + Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CancelCustomJobRequest(proto.Message): + r"""Request message for + [JobService.CancelCustomJob][google.cloud.aiplatform.v1.JobService.CancelCustomJob]. + + Attributes: + name (str): + Required. The name of the CustomJob to cancel. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateDataLabelingJobRequest(proto.Message): + r"""Request message for + [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1.JobService.CreateDataLabelingJob]. + + Attributes: + parent (str): + Required. The parent of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}`` + data_labeling_job (google.cloud.aiplatform_v1.types.DataLabelingJob): + Required. The DataLabelingJob to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + data_labeling_job = proto.Field( + proto.MESSAGE, + number=2, + message=gca_data_labeling_job.DataLabelingJob, + ) + + +class GetDataLabelingJobRequest(proto.Message): + r"""Request message for + [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1.JobService.GetDataLabelingJob]. + + Attributes: + name (str): + Required. The name of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListDataLabelingJobsRequest(proto.Message): + r"""Request message for + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. + + Attributes: + parent (str): + Required. The parent of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. + + Supported fields: + + - ``display_name`` supports = and !=. + + - ``state`` supports = and !=. 
+ + Some examples of using the filter are: + + - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` + + - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` + + - ``NOT display_name="my_job"`` + + - ``state="JOB_STATE_FAILED"`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. FieldMask represents a + set of symbolic field paths. For example, the mask can be + ``paths: "name"``. The "name" here is a field in + DataLabelingJob. If this field is not set, all fields of the + DataLabelingJob are returned. + order_by (str): + A comma-separated list of fields to order by, sorted in + ascending order by default. Use ``desc`` after a field name + for descending. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + order_by = proto.Field( + proto.STRING, + number=6, + ) + + +class ListDataLabelingJobsResponse(proto.Message): + r"""Response message for + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. + + Attributes: + data_labeling_jobs (Sequence[google.cloud.aiplatform_v1.types.DataLabelingJob]): + A list of DataLabelingJobs that matches the + specified filter in the request. + next_page_token (str): + The standard List next-page token. + """ + + @property + def raw_page(self): + return self + + data_labeling_jobs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_data_labeling_job.DataLabelingJob, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteDataLabelingJobRequest(proto.Message): + r"""Request message for + [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob]. + + Attributes: + name (str): + Required. The name of the DataLabelingJob to be deleted. + Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CancelDataLabelingJobRequest(proto.Message): + r"""Request message for + [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1.JobService.CancelDataLabelingJob]. + + Attributes: + name (str): + Required. The name of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateHyperparameterTuningJobRequest(proto.Message): + r"""Request message for + [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CreateHyperparameterTuningJob]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + HyperparameterTuningJob in. Format: + ``projects/{project}/locations/{location}`` + hyperparameter_tuning_job (google.cloud.aiplatform_v1.types.HyperparameterTuningJob): + Required. The HyperparameterTuningJob to + create. 
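+
+    A minimal call sketch (assumes ``tuning_job`` was assembled as in the
+    HyperparameterTuningJob example; the parent path is a placeholder)::
+
+        from google.cloud import aiplatform_v1
+
+        client = aiplatform_v1.JobServiceClient()
+        created = client.create_hyperparameter_tuning_job(
+            parent="projects/my-project/locations/us-central1",
+            hyperparameter_tuning_job=tuning_job,
+        )
+        print(created.name)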
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + hyperparameter_tuning_job = proto.Field( + proto.MESSAGE, + number=2, + message=gca_hyperparameter_tuning_job.HyperparameterTuningJob, + ) + + +class GetHyperparameterTuningJobRequest(proto.Message): + r"""Request message for + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob]. + + Attributes: + name (str): + Required. The name of the HyperparameterTuningJob resource. + Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListHyperparameterTuningJobsRequest(proto.Message): + r"""Request message for + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + HyperparameterTuningJobs from. Format: + ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. + + Supported fields: + + - ``display_name`` supports = and !=. + + - ``state`` supports = and !=. + + Some examples of using the filter are: + + - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` + + - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` + + - ``NOT display_name="my_job"`` + + - ``state="JOB_STATE_FAILED"`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListHyperparameterTuningJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListHyperparameterTuningJobsResponse.next_page_token] + of the previous + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListHyperparameterTuningJobsResponse(proto.Message): + r"""Response message for + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs] + + Attributes: + hyperparameter_tuning_jobs (Sequence[google.cloud.aiplatform_v1.types.HyperparameterTuningJob]): + List of HyperparameterTuningJobs in the requested page. + [HyperparameterTuningJob.trials][google.cloud.aiplatform.v1.HyperparameterTuningJob.trials] + of the jobs will be not be returned. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListHyperparameterTuningJobsRequest.page_token][google.cloud.aiplatform.v1.ListHyperparameterTuningJobsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + hyperparameter_tuning_jobs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_hyperparameter_tuning_job.HyperparameterTuningJob, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteHyperparameterTuningJobRequest(proto.Message): + r"""Request message for + [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob]. + + Attributes: + name (str): + Required. 
The name of the HyperparameterTuningJob resource + to be deleted. Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CancelHyperparameterTuningJobRequest(proto.Message): + r"""Request message for + [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CancelHyperparameterTuningJob]. + + Attributes: + name (str): + Required. The name of the HyperparameterTuningJob to cancel. + Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateBatchPredictionJobRequest(proto.Message): + r"""Request message for + [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CreateBatchPredictionJob]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + BatchPredictionJob in. Format: + ``projects/{project}/locations/{location}`` + batch_prediction_job (google.cloud.aiplatform_v1.types.BatchPredictionJob): + Required. The BatchPredictionJob to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + batch_prediction_job = proto.Field( + proto.MESSAGE, + number=2, + message=gca_batch_prediction_job.BatchPredictionJob, + ) + + +class GetBatchPredictionJobRequest(proto.Message): + r"""Request message for + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob]. + + Attributes: + name (str): + Required. The name of the BatchPredictionJob resource. + Format: + ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListBatchPredictionJobsRequest(proto.Message): + r"""Request message for + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + BatchPredictionJobs from. Format: + ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. + + Supported fields: + + - ``display_name`` supports = and !=. + + - ``state`` supports = and !=. + + - ``model_display_name`` supports = and != + + Some examples of using the filter are: + + - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` + + - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` + + - ``NOT display_name="my_job"`` + + - ``state="JOB_STATE_FAILED"`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListBatchPredictionJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListBatchPredictionJobsResponse.next_page_token] + of the previous + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. 
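+
+    A hedged usage sketch (identifiers are placeholders; the filter
+    combines two of the supported fields listed above)::
+
+        from google.cloud import aiplatform_v1
+
+        client = aiplatform_v1.JobServiceClient()
+        request = aiplatform_v1.ListBatchPredictionJobsRequest(
+            parent="projects/my-project/locations/us-central1",
+            filter='state="JOB_STATE_SUCCEEDED" AND model_display_name="my_model"',
+        )
+        for job in client.list_batch_prediction_jobs(request=request):
+            print(job.name)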
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListBatchPredictionJobsResponse(proto.Message): + r"""Response message for + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs] + + Attributes: + batch_prediction_jobs (Sequence[google.cloud.aiplatform_v1.types.BatchPredictionJob]): + List of BatchPredictionJobs in the requested + page. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListBatchPredictionJobsRequest.page_token][google.cloud.aiplatform.v1.ListBatchPredictionJobsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + batch_prediction_jobs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_batch_prediction_job.BatchPredictionJob, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteBatchPredictionJobRequest(proto.Message): + r"""Request message for + [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1.JobService.DeleteBatchPredictionJob]. + + Attributes: + name (str): + Required. The name of the BatchPredictionJob resource to be + deleted. Format: + ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CancelBatchPredictionJobRequest(proto.Message): + r"""Request message for + [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CancelBatchPredictionJob]. + + Attributes: + name (str): + Required. The name of the BatchPredictionJob to cancel. + Format: + ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_state.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_state.py new file mode 100644 index 0000000000..e28545fb22 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_state.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'JobState', + }, +) + + +class JobState(proto.Enum): + r"""Describes the state of a job.""" + JOB_STATE_UNSPECIFIED = 0 + JOB_STATE_QUEUED = 1 + JOB_STATE_PENDING = 2 + JOB_STATE_RUNNING = 3 + JOB_STATE_SUCCEEDED = 4 + JOB_STATE_FAILED = 5 + JOB_STATE_CANCELLING = 6 + JOB_STATE_CANCELLED = 7 + JOB_STATE_PAUSED = 8 + JOB_STATE_EXPIRED = 9 + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/machine_resources.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/machine_resources.py new file mode 100644 index 0000000000..5bd4d6d5ad --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/machine_resources.py @@ -0,0 +1,307 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import accelerator_type as gca_accelerator_type + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'MachineSpec', + 'DedicatedResources', + 'AutomaticResources', + 'BatchDedicatedResources', + 'ResourcesConsumed', + 'DiskSpec', + 'AutoscalingMetricSpec', + }, +) + + +class MachineSpec(proto.Message): + r"""Specification of a single machine. + Attributes: + machine_type (str): + Immutable. The type of the machine. + + See the `list of machine types supported for + prediction `__ + + See the `list of machine types supported for custom + training `__. + + For + [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] + this field is optional, and the default value is + ``n1-standard-2``. For + [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob] + or as part of + [WorkerPoolSpec][google.cloud.aiplatform.v1.WorkerPoolSpec] + this field is required. + accelerator_type (google.cloud.aiplatform_v1.types.AcceleratorType): + Immutable. The type of accelerator(s) that may be attached + to the machine as per + [accelerator_count][google.cloud.aiplatform.v1.MachineSpec.accelerator_count]. + accelerator_count (int): + The number of accelerators to attach to the + machine. + """ + + machine_type = proto.Field( + proto.STRING, + number=1, + ) + accelerator_type = proto.Field( + proto.ENUM, + number=2, + enum=gca_accelerator_type.AcceleratorType, + ) + accelerator_count = proto.Field( + proto.INT32, + number=3, + ) + + +class DedicatedResources(proto.Message): + r"""A description of resources that are dedicated to a + DeployedModel, and that need a higher degree of manual + configuration. + + Attributes: + machine_spec (google.cloud.aiplatform_v1.types.MachineSpec): + Required. Immutable. The specification of a + single machine used by the prediction. + min_replica_count (int): + Required. Immutable. The minimum number of + machine replicas this DeployedModel will be + always deployed on. This value must be greater + than or equal to 1. 
+            If traffic against the DeployedModel increases,
+            it may dynamically be deployed onto more
+            replicas, and as traffic decreases, some of
+            these extra replicas may be freed.
+        max_replica_count (int):
+            Immutable. The maximum number of replicas this DeployedModel
+            may be deployed on when the traffic against it increases. If
+            the requested value is too large, the deployment will error,
+            but if deployment succeeds then the ability to scale the
+            model to that many replicas is guaranteed (barring service
+            outages). If traffic against the DeployedModel increases
+            beyond what its replicas at maximum may handle, a portion of
+            the traffic will be dropped. If this value is not provided,
+            [min_replica_count][google.cloud.aiplatform.v1.DedicatedResources.min_replica_count]
+            will be used as the default value.
+        autoscaling_metric_specs (Sequence[google.cloud.aiplatform_v1.types.AutoscalingMetricSpec]):
+            Immutable. The metric specifications that override a
+            resource utilization metric (CPU utilization, accelerator's
+            duty cycle, and so on) target value (the default is 60 if
+            not set). At most one entry is allowed per metric.
+
+            If
+            [machine_spec.accelerator_count][google.cloud.aiplatform.v1.MachineSpec.accelerator_count]
+            is above 0, the autoscaling will be based on both CPU
+            utilization and accelerator's duty cycle metrics, and will
+            scale up when either metric exceeds its target value, and
+            scale down when both metrics are under their target values.
+            The default target value is 60 for both metrics.
+
+            If
+            [machine_spec.accelerator_count][google.cloud.aiplatform.v1.MachineSpec.accelerator_count]
+            is 0, the autoscaling will be based on the CPU utilization
+            metric only, with a default target value of 60 if not
+            explicitly set.
+
+            For example, in the case of Online Prediction, if you want
+            to override target CPU utilization to 80, you should set
+            [autoscaling_metric_specs.metric_name][google.cloud.aiplatform.v1.AutoscalingMetricSpec.metric_name]
+            to
+            ``aiplatform.googleapis.com/prediction/online/cpu/utilization``
+            and
+            [autoscaling_metric_specs.target][google.cloud.aiplatform.v1.AutoscalingMetricSpec.target]
+            to ``80``.
+    """
+
+    machine_spec = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message='MachineSpec',
+    )
+    min_replica_count = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+    max_replica_count = proto.Field(
+        proto.INT32,
+        number=3,
+    )
+    autoscaling_metric_specs = proto.RepeatedField(
+        proto.MESSAGE,
+        number=4,
+        message='AutoscalingMetricSpec',
+    )
+
+
+class AutomaticResources(proto.Message):
+    r"""A description of resources that are, to a large degree,
+    decided by Vertex AI, and that require only a modest additional
+    configuration. Each Model supporting these resources documents
+    its specific guidelines.
+
+    Attributes:
+        min_replica_count (int):
+            Immutable. The minimum number of replicas this DeployedModel
+            will always be deployed on. If traffic against it increases,
+            it may dynamically be deployed onto more replicas up to
+            [max_replica_count][google.cloud.aiplatform.v1.AutomaticResources.max_replica_count],
+            and as traffic decreases, some of these extra replicas may
+            be freed. If the requested value is too large, the
+            deployment will error.
+        max_replica_count (int):
+            Immutable. The maximum number of replicas
+            this DeployedModel may be deployed on when the
+            traffic against it increases.
If the requested
+            value is too large, the deployment will error,
+            but if deployment succeeds then the ability to
+            scale the model to that many replicas is
+            guaranteed (barring service outages). If traffic
+            against the DeployedModel increases beyond what
+            its replicas at maximum may handle, a portion of
+            the traffic will be dropped. If this value is
+            not provided, no upper bound for scaling under
+            heavy traffic will be assumed, though Vertex AI
+            may be unable to scale beyond a certain replica
+            count.
+    """
+
+    min_replica_count = proto.Field(
+        proto.INT32,
+        number=1,
+    )
+    max_replica_count = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+
+
+class BatchDedicatedResources(proto.Message):
+    r"""A description of resources that are used for performing batch
+    operations, are dedicated to a Model, and need manual
+    configuration.
+
+    Attributes:
+        machine_spec (google.cloud.aiplatform_v1.types.MachineSpec):
+            Required. Immutable. The specification of a
+            single machine.
+        starting_replica_count (int):
+            Immutable. The number of machine replicas used at the start
+            of the batch operation. If not set, Vertex AI decides the
+            starting number, not greater than
+            [max_replica_count][google.cloud.aiplatform.v1.BatchDedicatedResources.max_replica_count]
+        max_replica_count (int):
+            Immutable. The maximum number of machine
+            replicas the batch operation may be scaled to.
+            The default value is 10.
+    """
+
+    machine_spec = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message='MachineSpec',
+    )
+    starting_replica_count = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+    max_replica_count = proto.Field(
+        proto.INT32,
+        number=3,
+    )
+
+
+class ResourcesConsumed(proto.Message):
+    r"""Statistics information about resource consumption.
+    Attributes:
+        replica_hours (float):
+            Output only. The number of replica hours
+            used. Note that many replicas may run in
+            parallel, and additionally any given work may be
+            queued for some time. Therefore this value is
+            not strictly related to wall time.
+    """
+
+    replica_hours = proto.Field(
+        proto.DOUBLE,
+        number=1,
+    )
+
+
+class DiskSpec(proto.Message):
+    r"""Represents the spec of disk options.
+    Attributes:
+        boot_disk_type (str):
+            Type of the boot disk (default is "pd-ssd").
+            Valid values: "pd-ssd" (Persistent Disk Solid
+            State Drive) or "pd-standard" (Persistent Disk
+            Hard Disk Drive).
+        boot_disk_size_gb (int):
+            Size in GB of the boot disk (default is
+            100GB).
+    """
+
+    boot_disk_type = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    boot_disk_size_gb = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+
+
+class AutoscalingMetricSpec(proto.Message):
+    r"""The metric specification that defines the target resource
+    utilization (CPU utilization, accelerator's duty cycle, and so
+    on) for calculating the desired replica count.
+
+    Attributes:
+        metric_name (str):
+            Required. The resource metric name. Supported metrics:
+
+            -  For Online Prediction:
+            -  ``aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle``
+            -  ``aiplatform.googleapis.com/prediction/online/cpu/utilization``
+        target (int):
+            The target resource utilization in percentage
+            (1% - 100%) for the given metric; once the real
+            usage deviates from the target by a certain
+            percentage, the machine replicas change. The
+            default value is 60 (representing 60%) if not
+            provided.
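+
+    A construction sketch mirroring the CPU-target example in
+    DedicatedResources (the machine type and replica counts are
+    illustrative assumptions)::
+
+        from google.cloud import aiplatform_v1
+
+        dedicated = aiplatform_v1.DedicatedResources(
+            machine_spec=aiplatform_v1.MachineSpec(machine_type="n1-standard-2"),
+            min_replica_count=1,
+            max_replica_count=4,
+            autoscaling_metric_specs=[
+                # Scale on CPU utilization with a target of 80% instead
+                # of the default 60%.
+                aiplatform_v1.AutoscalingMetricSpec(
+                    metric_name="aiplatform.googleapis.com/prediction/online/cpu/utilization",
+                    target=80,
+                ),
+            ],
+        )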
+ """ + + metric_name = proto.Field( + proto.STRING, + number=1, + ) + target = proto.Field( + proto.INT32, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/manual_batch_tuning_parameters.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/manual_batch_tuning_parameters.py new file mode 100644 index 0000000000..9257b29e74 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/manual_batch_tuning_parameters.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'ManualBatchTuningParameters', + }, +) + + +class ManualBatchTuningParameters(proto.Message): + r"""Manual batch tuning parameters. + Attributes: + batch_size (int): + Immutable. The number of the records (e.g. + instances) of the operation given in each batch + to a machine replica. Machine type, and size of + a single record should be considered when + setting this parameter, higher value speeds up + the batch operation's execution, but too high + value will result in a whole batch not fitting + in a machine's memory, and the whole operation + will fail. + The default value is 4. + """ + + batch_size = proto.Field( + proto.INT32, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/migratable_resource.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/migratable_resource.py new file mode 100644 index 0000000000..2ac7b98679 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/migratable_resource.py @@ -0,0 +1,209 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'MigratableResource', + }, +) + + +class MigratableResource(proto.Message): + r"""Represents one resource that exists in automl.googleapis.com, + datalabeling.googleapis.com or ml.googleapis.com. + + Attributes: + ml_engine_model_version (google.cloud.aiplatform_v1.types.MigratableResource.MlEngineModelVersion): + Output only. Represents one Version in + ml.googleapis.com. 
+        automl_model (google.cloud.aiplatform_v1.types.MigratableResource.AutomlModel):
+            Output only. Represents one Model in
+            automl.googleapis.com.
+        automl_dataset (google.cloud.aiplatform_v1.types.MigratableResource.AutomlDataset):
+            Output only. Represents one Dataset in
+            automl.googleapis.com.
+        data_labeling_dataset (google.cloud.aiplatform_v1.types.MigratableResource.DataLabelingDataset):
+            Output only. Represents one Dataset in
+            datalabeling.googleapis.com.
+        last_migrate_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when the last
+            migration attempt on this MigratableResource
+            started. Will not be set if there's no migration
+            attempt on this MigratableResource.
+        last_update_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this
+            MigratableResource was last updated.
+    """
+
+    class MlEngineModelVersion(proto.Message):
+        r"""Represents one model Version in ml.googleapis.com.
+        Attributes:
+            endpoint (str):
+                The ml.googleapis.com endpoint that this model Version
+                currently lives in. Example values:
+
+                -  ml.googleapis.com
+                -  us-central1-ml.googleapis.com
+                -  europe-west4-ml.googleapis.com
+                -  asia-east1-ml.googleapis.com
+            version (str):
+                Full resource name of ml engine model Version. Format:
+                ``projects/{project}/models/{model}/versions/{version}``.
+        """
+
+        endpoint = proto.Field(
+            proto.STRING,
+            number=1,
+        )
+        version = proto.Field(
+            proto.STRING,
+            number=2,
+        )
+
+    class AutomlModel(proto.Message):
+        r"""Represents one Model in automl.googleapis.com.
+        Attributes:
+            model (str):
+                Full resource name of automl Model. Format:
+                ``projects/{project}/locations/{location}/models/{model}``.
+            model_display_name (str):
+                The Model's display name in
+                automl.googleapis.com.
+        """
+
+        model = proto.Field(
+            proto.STRING,
+            number=1,
+        )
+        model_display_name = proto.Field(
+            proto.STRING,
+            number=3,
+        )
+
+    class AutomlDataset(proto.Message):
+        r"""Represents one Dataset in automl.googleapis.com.
+        Attributes:
+            dataset (str):
+                Full resource name of automl Dataset. Format:
+                ``projects/{project}/locations/{location}/datasets/{dataset}``.
+            dataset_display_name (str):
+                The Dataset's display name in
+                automl.googleapis.com.
+        """
+
+        dataset = proto.Field(
+            proto.STRING,
+            number=1,
+        )
+        dataset_display_name = proto.Field(
+            proto.STRING,
+            number=4,
+        )
+
+    class DataLabelingDataset(proto.Message):
+        r"""Represents one Dataset in datalabeling.googleapis.com.
+        Attributes:
+            dataset (str):
+                Full resource name of data labeling Dataset. Format:
+                ``projects/{project}/datasets/{dataset}``.
+            dataset_display_name (str):
+                The Dataset's display name in
+                datalabeling.googleapis.com.
+            data_labeling_annotated_datasets (Sequence[google.cloud.aiplatform_v1.types.MigratableResource.DataLabelingDataset.DataLabelingAnnotatedDataset]):
+                The migratable AnnotatedDatasets in
+                datalabeling.googleapis.com that belong to the
+                data labeling Dataset.
+        """
+
+        class DataLabelingAnnotatedDataset(proto.Message):
+            r"""Represents one AnnotatedDataset in
+            datalabeling.googleapis.com.
+
+            Attributes:
+                annotated_dataset (str):
+                    Full resource name of data labeling AnnotatedDataset.
+                    Format:
+                    ``projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}``.
+                annotated_dataset_display_name (str):
+                    The AnnotatedDataset's display name in
+                    datalabeling.googleapis.com.
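+
+            A hedged reading sketch (assumes ``resource`` is a
+            MigratableResource returned by
+            MigrationServiceClient.search_migratable_resources)::
+
+                # Only one of the oneof resource fields is populated.
+                if "data_labeling_dataset" in resource:
+                    dlds = resource.data_labeling_dataset
+                    for ads in dlds.data_labeling_annotated_datasets:
+                        print(ads.annotated_dataset_display_name)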
+ """ + + annotated_dataset = proto.Field( + proto.STRING, + number=1, + ) + annotated_dataset_display_name = proto.Field( + proto.STRING, + number=3, + ) + + dataset = proto.Field( + proto.STRING, + number=1, + ) + dataset_display_name = proto.Field( + proto.STRING, + number=4, + ) + data_labeling_annotated_datasets = proto.RepeatedField( + proto.MESSAGE, + number=3, + message='MigratableResource.DataLabelingDataset.DataLabelingAnnotatedDataset', + ) + + ml_engine_model_version = proto.Field( + proto.MESSAGE, + number=1, + oneof='resource', + message=MlEngineModelVersion, + ) + automl_model = proto.Field( + proto.MESSAGE, + number=2, + oneof='resource', + message=AutomlModel, + ) + automl_dataset = proto.Field( + proto.MESSAGE, + number=3, + oneof='resource', + message=AutomlDataset, + ) + data_labeling_dataset = proto.Field( + proto.MESSAGE, + number=4, + oneof='resource', + message=DataLabelingDataset, + ) + last_migrate_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + last_update_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/migration_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/migration_service.py new file mode 100644 index 0000000000..80192fc6e7 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/migration_service.py @@ -0,0 +1,439 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import migratable_resource as gca_migratable_resource +from google.cloud.aiplatform_v1.types import operation +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'SearchMigratableResourcesRequest', + 'SearchMigratableResourcesResponse', + 'BatchMigrateResourcesRequest', + 'MigrateResourceRequest', + 'BatchMigrateResourcesResponse', + 'MigrateResourceResponse', + 'BatchMigrateResourcesOperationMetadata', + }, +) + + +class SearchMigratableResourcesRequest(proto.Message): + r"""Request message for + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources]. + + Attributes: + parent (str): + Required. The location that the migratable resources should + be searched from. It's the Vertex AI location that the + resources can be migrated to, not the resources' original + location. Format: + ``projects/{project}/locations/{location}`` + page_size (int): + The standard page size. + The default and maximum value is 100. + page_token (str): + The standard page token. + filter (str): + A filter for your search. You can use the following types of + filters: + + - Resource type filters. 
The following strings filter for a
+               specific type of
+               [MigratableResource][google.cloud.aiplatform.v1.MigratableResource]:
+
+               -  ``ml_engine_model_version:*``
+               -  ``automl_model:*``
+               -  ``automl_dataset:*``
+               -  ``data_labeling_dataset:*``
+
+            -  "Migrated or not" filters. The following strings filter
+               for resources that either have or have not already been
+               migrated:
+
+               -  ``last_migrate_time:*`` filters for migrated
+                  resources.
+               -  ``NOT last_migrate_time:*`` filters for not yet
+                  migrated resources.
+    """
+
+    parent = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    page_size = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+    page_token = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    filter = proto.Field(
+        proto.STRING,
+        number=4,
+    )
+
+
+class SearchMigratableResourcesResponse(proto.Message):
+    r"""Response message for
+    [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources].
+
+    Attributes:
+        migratable_resources (Sequence[google.cloud.aiplatform_v1.types.MigratableResource]):
+            All migratable resources that can be migrated
+            to the location specified in the request.
+        next_page_token (str):
+            The standard next-page token. The migratable_resources may
+            not fill page_size in SearchMigratableResourcesRequest even
+            when there are subsequent pages.
+    """
+
+    @property
+    def raw_page(self):
+        return self
+
+    migratable_resources = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message=gca_migratable_resource.MigratableResource,
+    )
+    next_page_token = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+class BatchMigrateResourcesRequest(proto.Message):
+    r"""Request message for
+    [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources].
+
+    Attributes:
+        parent (str):
+            Required. The location where the migrated resource will
+            live. Format: ``projects/{project}/locations/{location}``
+        migrate_resource_requests (Sequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest]):
+            Required. The request messages specifying the
+            resources to migrate. They must be in the same
+            location as the destination. Up to 50 resources
+            can be migrated in one batch.
+    """
+
+    parent = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    migrate_resource_requests = proto.RepeatedField(
+        proto.MESSAGE,
+        number=2,
+        message='MigrateResourceRequest',
+    )
+
+
+class MigrateResourceRequest(proto.Message):
+    r"""Config for migrating one resource from automl.googleapis.com,
+    datalabeling.googleapis.com and ml.googleapis.com to Vertex AI.
+
+    Attributes:
+        migrate_ml_engine_model_version_config (google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateMlEngineModelVersionConfig):
+            Config for migrating Version in
+            ml.googleapis.com to Vertex AI's Model.
+        migrate_automl_model_config (google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateAutomlModelConfig):
+            Config for migrating Model in
+            automl.googleapis.com to Vertex AI's Model.
+        migrate_automl_dataset_config (google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateAutomlDatasetConfig):
+            Config for migrating Dataset in
+            automl.googleapis.com to Vertex AI's Dataset.
+        migrate_data_labeling_dataset_config (google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateDataLabelingDatasetConfig):
+            Config for migrating Dataset in
+            datalabeling.googleapis.com to Vertex AI's
+            Dataset.
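+
+    A construction sketch (resource names are placeholders). The four
+    config fields share a oneof, so exactly one of them may be set::
+
+        from google.cloud import aiplatform_v1
+
+        request = aiplatform_v1.MigrateResourceRequest(
+            migrate_automl_model_config=aiplatform_v1.MigrateResourceRequest.MigrateAutomlModelConfig(
+                model="projects/my-project/locations/us-central1/models/123",
+                model_display_name="my-migrated-model",
+            ),
+        )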
+ """ + + class MigrateMlEngineModelVersionConfig(proto.Message): + r"""Config for migrating version in ml.googleapis.com to Vertex + AI's Model. + + Attributes: + endpoint (str): + Required. The ml.googleapis.com endpoint that this model + version should be migrated from. Example values: + + - ml.googleapis.com + + - us-centrall-ml.googleapis.com + + - europe-west4-ml.googleapis.com + + - asia-east1-ml.googleapis.com + model_version (str): + Required. Full resource name of ml engine model version. + Format: + ``projects/{project}/models/{model}/versions/{version}``. + model_display_name (str): + Required. Display name of the model in Vertex + AI. System will pick a display name if + unspecified. + """ + + endpoint = proto.Field( + proto.STRING, + number=1, + ) + model_version = proto.Field( + proto.STRING, + number=2, + ) + model_display_name = proto.Field( + proto.STRING, + number=3, + ) + + class MigrateAutomlModelConfig(proto.Message): + r"""Config for migrating Model in automl.googleapis.com to Vertex + AI's Model. + + Attributes: + model (str): + Required. Full resource name of automl Model. Format: + ``projects/{project}/locations/{location}/models/{model}``. + model_display_name (str): + Optional. Display name of the model in Vertex + AI. System will pick a display name if + unspecified. + """ + + model = proto.Field( + proto.STRING, + number=1, + ) + model_display_name = proto.Field( + proto.STRING, + number=2, + ) + + class MigrateAutomlDatasetConfig(proto.Message): + r"""Config for migrating Dataset in automl.googleapis.com to + Vertex AI's Dataset. + + Attributes: + dataset (str): + Required. Full resource name of automl Dataset. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}``. + dataset_display_name (str): + Required. Display name of the Dataset in + Vertex AI. System will pick a display name if + unspecified. + """ + + dataset = proto.Field( + proto.STRING, + number=1, + ) + dataset_display_name = proto.Field( + proto.STRING, + number=2, + ) + + class MigrateDataLabelingDatasetConfig(proto.Message): + r"""Config for migrating Dataset in datalabeling.googleapis.com + to AI Platform's Dataset. + + Attributes: + dataset (str): + Required. Full resource name of data labeling Dataset. + Format: ``projects/{project}/datasets/{dataset}``. + dataset_display_name (str): + Optional. Display name of the Dataset in + Vertex AI. System will pick a display name if + unspecified. + migrate_data_labeling_annotated_dataset_configs (Sequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateDataLabelingDatasetConfig.MigrateDataLabelingAnnotatedDatasetConfig]): + Optional. Configs for migrating + AnnotatedDataset in datalabeling.googleapis.com + to Vertex AI's SavedQuery. The specified + AnnotatedDatasets have to belong to the + datalabeling Dataset. + """ + + class MigrateDataLabelingAnnotatedDatasetConfig(proto.Message): + r"""Config for migrating AnnotatedDataset in + datalabeling.googleapis.com to Vertex AI's SavedQuery. + + Attributes: + annotated_dataset (str): + Required. Full resource name of data labeling + AnnotatedDataset. Format: + ``projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}``. 
+ """ + + annotated_dataset = proto.Field( + proto.STRING, + number=1, + ) + + dataset = proto.Field( + proto.STRING, + number=1, + ) + dataset_display_name = proto.Field( + proto.STRING, + number=2, + ) + migrate_data_labeling_annotated_dataset_configs = proto.RepeatedField( + proto.MESSAGE, + number=3, + message='MigrateResourceRequest.MigrateDataLabelingDatasetConfig.MigrateDataLabelingAnnotatedDatasetConfig', + ) + + migrate_ml_engine_model_version_config = proto.Field( + proto.MESSAGE, + number=1, + oneof='request', + message=MigrateMlEngineModelVersionConfig, + ) + migrate_automl_model_config = proto.Field( + proto.MESSAGE, + number=2, + oneof='request', + message=MigrateAutomlModelConfig, + ) + migrate_automl_dataset_config = proto.Field( + proto.MESSAGE, + number=3, + oneof='request', + message=MigrateAutomlDatasetConfig, + ) + migrate_data_labeling_dataset_config = proto.Field( + proto.MESSAGE, + number=4, + oneof='request', + message=MigrateDataLabelingDatasetConfig, + ) + + +class BatchMigrateResourcesResponse(proto.Message): + r"""Response message for + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources]. + + Attributes: + migrate_resource_responses (Sequence[google.cloud.aiplatform_v1.types.MigrateResourceResponse]): + Successfully migrated resources. + """ + + migrate_resource_responses = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='MigrateResourceResponse', + ) + + +class MigrateResourceResponse(proto.Message): + r"""Describes a successfully migrated resource. + Attributes: + dataset (str): + Migrated Dataset's resource name. + model (str): + Migrated Model's resource name. + migratable_resource (google.cloud.aiplatform_v1.types.MigratableResource): + Before migration, the identifier in + ml.googleapis.com, automl.googleapis.com or + datalabeling.googleapis.com. + """ + + dataset = proto.Field( + proto.STRING, + number=1, + oneof='migrated_resource', + ) + model = proto.Field( + proto.STRING, + number=2, + oneof='migrated_resource', + ) + migratable_resource = proto.Field( + proto.MESSAGE, + number=3, + message=gca_migratable_resource.MigratableResource, + ) + + +class BatchMigrateResourcesOperationMetadata(proto.Message): + r"""Runtime operation information for + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The common part of the operation metadata. + partial_results (Sequence[google.cloud.aiplatform_v1.types.BatchMigrateResourcesOperationMetadata.PartialResult]): + Partial results that reflect the latest + migration operation progress. + """ + + class PartialResult(proto.Message): + r"""Represents a partial result in batch migration operation for one + [MigrateResourceRequest][google.cloud.aiplatform.v1.MigrateResourceRequest]. + + Attributes: + error (google.rpc.status_pb2.Status): + The error result of the migration request in + case of failure. + model (str): + Migrated model resource name. + dataset (str): + Migrated dataset resource name. + request (google.cloud.aiplatform_v1.types.MigrateResourceRequest): + It's the same as the value in + [MigrateResourceRequest.migrate_resource_requests][]. 
+ """ + + error = proto.Field( + proto.MESSAGE, + number=2, + oneof='result', + message=status_pb2.Status, + ) + model = proto.Field( + proto.STRING, + number=3, + oneof='result', + ) + dataset = proto.Field( + proto.STRING, + number=4, + oneof='result', + ) + request = proto.Field( + proto.MESSAGE, + number=1, + message='MigrateResourceRequest', + ) + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + partial_results = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=PartialResult, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model.py new file mode 100644 index 0000000000..5a2c50c153 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model.py @@ -0,0 +1,707 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import deployed_model_ref +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1.types import env_var +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'Model', + 'PredictSchemata', + 'ModelContainerSpec', + 'Port', + }, +) + + +class Model(proto.Message): + r"""A trained machine learning Model. + Attributes: + name (str): + The resource name of the Model. + display_name (str): + Required. The display name of the Model. + The name can be up to 128 characters long and + can be consist of any UTF-8 characters. + description (str): + The description of the Model. + predict_schemata (google.cloud.aiplatform_v1.types.PredictSchemata): + The schemata that describe formats of the Model's + predictions and explanations as given and returned via + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict] + and [PredictionService.Explain][]. + metadata_schema_uri (str): + Immutable. Points to a YAML file stored on Google Cloud + Storage describing additional information about the Model, + that is specific to it. Unset if the Model does not have any + additional information. The schema is defined as an OpenAPI + 3.0.2 `Schema + Object `__. + AutoML Models always have this field populated by Vertex AI, + if no additional metadata is needed, this field is set to an + empty string. Note: The URI given on output will be + immutable and probably different, including the URI scheme, + than the one given on input. The output URI will point to a + location where the user only has a read access. + metadata (google.protobuf.struct_pb2.Value): + Immutable. An additional information about the Model; the + schema of the metadata can be found in + [metadata_schema][google.cloud.aiplatform.v1.Model.metadata_schema_uri]. 
+ Unset if the Model does not have any additional information. + supported_export_formats (Sequence[google.cloud.aiplatform_v1.types.Model.ExportFormat]): + Output only. The formats in which this Model + may be exported. If empty, this Model is not + available for export. + training_pipeline (str): + Output only. The resource name of the + TrainingPipeline that uploaded this Model, if + any. + container_spec (google.cloud.aiplatform_v1.types.ModelContainerSpec): + Input only. The specification of the container that is to be + used when deploying this Model. The specification is + ingested upon + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel], + and all binaries it contains are copied and stored + internally by Vertex AI. Not present for AutoML Models. + artifact_uri (str): + Immutable. The path to the directory + containing the Model artifact and any of its + supporting files. Not present for AutoML Models. + supported_deployment_resources_types (Sequence[google.cloud.aiplatform_v1.types.Model.DeploymentResourcesType]): + Output only. When this Model is deployed, its prediction + resources are described by the ``prediction_resources`` + field of the + [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] + object. Because not all Models support all resource + configuration types, the configuration types this Model + supports are listed here. If no configuration types are + listed, the Model cannot be deployed to an + [Endpoint][google.cloud.aiplatform.v1.Endpoint] and does not + support online predictions + ([PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict] + or [PredictionService.Explain][]). Such a Model can serve + predictions by using a + [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob], + if it has at least one entry each in + [supported_input_storage_formats][google.cloud.aiplatform.v1.Model.supported_input_storage_formats] + and + [supported_output_storage_formats][google.cloud.aiplatform.v1.Model.supported_output_storage_formats]. + supported_input_storage_formats (Sequence[str]): + Output only. The formats this Model supports in + [BatchPredictionJob.input_config][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. + If + [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] + exists, the instances should be given as per that schema. + + The possible formats are: + + - ``jsonl`` The JSON Lines format, where each instance is a + single line. Uses + [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source]. + + - ``csv`` The CSV format, where each instance is a single + comma-separated line. The first line in the file is the + header, containing comma-separated field names. Uses + [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source]. + + - ``tf-record`` The TFRecord format, where each instance is + a single record in tfrecord syntax. Uses + [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source]. + + - ``tf-record-gzip`` Similar to ``tf-record``, but the file + is gzipped. Uses + [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source]. + + - ``bigquery`` Each instance is a single row in BigQuery. + Uses + [BigQuerySource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.bigquery_source]. 
+ + - ``file-list`` Each line of the file is the location of an + instance to process, uses ``gcs_source`` field of the + [InputConfig][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig] + object. + + If this Model doesn't support any of these formats it means + it cannot be used with a + [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. + However, if it has + [supported_deployment_resources_types][google.cloud.aiplatform.v1.Model.supported_deployment_resources_types], + it could serve online predictions by using + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict] + or [PredictionService.Explain][]. + supported_output_storage_formats (Sequence[str]): + Output only. The formats this Model supports in + [BatchPredictionJob.output_config][google.cloud.aiplatform.v1.BatchPredictionJob.output_config]. + If both + [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] + and + [PredictSchemata.prediction_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.prediction_schema_uri] + exist, the predictions are returned together with their + instances. In other words, the prediction has the original + instance data first, followed by the actual prediction + content (as per the schema). + + The possible formats are: + + - ``jsonl`` The JSON Lines format, where each prediction is + a single line. Uses + [GcsDestination][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.gcs_destination]. + + - ``csv`` The CSV format, where each prediction is a single + comma-separated line. The first line in the file is the + header, containing comma-separated field names. Uses + [GcsDestination][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.gcs_destination]. + + - ``bigquery`` Each prediction is a single row in a + BigQuery table, uses + [BigQueryDestination][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.bigquery_destination] + . + + If this Model doesn't support any of these formats it means + it cannot be used with a + [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. + However, if it has + [supported_deployment_resources_types][google.cloud.aiplatform.v1.Model.supported_deployment_resources_types], + it could serve online predictions by using + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict] + or [PredictionService.Explain][]. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Model was + uploaded into Vertex AI. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Model was + most recently updated. + deployed_models (Sequence[google.cloud.aiplatform_v1.types.DeployedModelRef]): + Output only. The pointers to DeployedModels + created from this Model. Note that Model could + have been deployed to Endpoints in different + Locations. + etag (str): + Used to perform consistent read-modify-write + updates. If not set, a blind "overwrite" update + happens. + labels (Sequence[google.cloud.aiplatform_v1.types.Model.LabelsEntry]): + The labels with user-defined metadata to + organize your Models. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. 
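+ + For example (a sketch; the keys and values are illustrative): + + .. code:: python + + model = Model( + display_name="my-model", + labels={"team": "vision", "env": "prod"}, + )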
+ encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): + Customer-managed encryption key spec for a + Model. If set, this Model and all sub-resources + of this Model will be secured by this key. + """ + class DeploymentResourcesType(proto.Enum): + r"""Identifies a type of Model's prediction resources.""" + DEPLOYMENT_RESOURCES_TYPE_UNSPECIFIED = 0 + DEDICATED_RESOURCES = 1 + AUTOMATIC_RESOURCES = 2 + + class ExportFormat(proto.Message): + r"""Represents export format supported by the Model. + All formats export to Google Cloud Storage. + + Attributes: + id (str): + Output only. The ID of the export format. The possible + format IDs are: + + - ``tflite`` Used for Android mobile devices. + + - ``edgetpu-tflite`` Used for `Edge + TPU `__ devices. + + - ``tf-saved-model`` A tensorflow model in SavedModel + format. + + - ``tf-js`` A + `TensorFlow.js `__ model + that can be used in the browser and in Node.js using + JavaScript. + + - ``core-ml`` Used for iOS mobile devices. + + - ``custom-trained`` A Model that was uploaded or trained + by custom code. + exportable_contents (Sequence[google.cloud.aiplatform_v1.types.Model.ExportFormat.ExportableContent]): + Output only. The content of this Model that + may be exported. + """ + class ExportableContent(proto.Enum): + r"""The Model content that can be exported.""" + EXPORTABLE_CONTENT_UNSPECIFIED = 0 + ARTIFACT = 1 + IMAGE = 2 + + id = proto.Field( + proto.STRING, + number=1, + ) + exportable_contents = proto.RepeatedField( + proto.ENUM, + number=2, + enum='Model.ExportFormat.ExportableContent', + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + description = proto.Field( + proto.STRING, + number=3, + ) + predict_schemata = proto.Field( + proto.MESSAGE, + number=4, + message='PredictSchemata', + ) + metadata_schema_uri = proto.Field( + proto.STRING, + number=5, + ) + metadata = proto.Field( + proto.MESSAGE, + number=6, + message=struct_pb2.Value, + ) + supported_export_formats = proto.RepeatedField( + proto.MESSAGE, + number=20, + message=ExportFormat, + ) + training_pipeline = proto.Field( + proto.STRING, + number=7, + ) + container_spec = proto.Field( + proto.MESSAGE, + number=9, + message='ModelContainerSpec', + ) + artifact_uri = proto.Field( + proto.STRING, + number=26, + ) + supported_deployment_resources_types = proto.RepeatedField( + proto.ENUM, + number=10, + enum=DeploymentResourcesType, + ) + supported_input_storage_formats = proto.RepeatedField( + proto.STRING, + number=11, + ) + supported_output_storage_formats = proto.RepeatedField( + proto.STRING, + number=12, + ) + create_time = proto.Field( + proto.MESSAGE, + number=13, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=14, + message=timestamp_pb2.Timestamp, + ) + deployed_models = proto.RepeatedField( + proto.MESSAGE, + number=15, + message=deployed_model_ref.DeployedModelRef, + ) + etag = proto.Field( + proto.STRING, + number=16, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=17, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=24, + message=gca_encryption_spec.EncryptionSpec, + ) + + +class PredictSchemata(proto.Message): + r"""Contains the schemata used in Model's predictions and explanations + via + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict], + [PredictionService.Explain][] and + [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. 
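+ + As a sketch (the ``gs://`` paths are illustrative, not from the source), a fully populated message might look like: + + .. code:: python + + schemata = PredictSchemata( + instance_schema_uri="gs://my-bucket/instance_schema.yaml", + parameters_schema_uri="gs://my-bucket/parameters_schema.yaml", + prediction_schema_uri="gs://my-bucket/prediction_schema.yaml", + )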
+ + Attributes: + instance_schema_uri (str): + Immutable. Points to a YAML file stored on Google Cloud + Storage describing the format of a single instance, as + used in + [PredictRequest.instances][google.cloud.aiplatform.v1.PredictRequest.instances], + [ExplainRequest.instances][] and + [BatchPredictionJob.input_config][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. + The schema is defined as an OpenAPI 3.0.2 `Schema + Object `__. + AutoML Models always have this field populated by Vertex AI. + Note: The URI given on output will be immutable and probably + different, including the URI scheme, than the one given on + input. The output URI will point to a location where the + user only has read access. + parameters_schema_uri (str): + Immutable. Points to a YAML file stored on Google Cloud + Storage describing the parameters of prediction and + explanation via + [PredictRequest.parameters][google.cloud.aiplatform.v1.PredictRequest.parameters], + [ExplainRequest.parameters][] and + [BatchPredictionJob.model_parameters][google.cloud.aiplatform.v1.BatchPredictionJob.model_parameters]. + The schema is defined as an OpenAPI 3.0.2 `Schema + Object `__. + AutoML Models always have this field populated by Vertex AI; + if no parameters are supported, then it is set to an empty + string. Note: The URI given on output will be immutable and + probably different, including the URI scheme, than the one + given on input. The output URI will point to a location + where the user only has read access. + prediction_schema_uri (str): + Immutable. Points to a YAML file stored on Google Cloud + Storage describing the format of a single prediction + produced by this Model, as returned via + [PredictResponse.predictions][google.cloud.aiplatform.v1.PredictResponse.predictions], + [ExplainResponse.explanations][], and + [BatchPredictionJob.output_config][google.cloud.aiplatform.v1.BatchPredictionJob.output_config]. + The schema is defined as an OpenAPI 3.0.2 `Schema + Object `__. + AutoML Models always have this field populated by Vertex AI. + Note: The URI given on output will be immutable and probably + different, including the URI scheme, than the one given on + input. The output URI will point to a location where the + user only has read access. + """ + + instance_schema_uri = proto.Field( + proto.STRING, + number=1, + ) + parameters_schema_uri = proto.Field( + proto.STRING, + number=2, + ) + prediction_schema_uri = proto.Field( + proto.STRING, + number=3, + ) + + +class ModelContainerSpec(proto.Message): + r"""Specification of a container for serving predictions. Some fields in + this message correspond to fields in the `Kubernetes Container v1 + core + specification `__. + + Attributes: + image_uri (str): + Required. Immutable. URI of the Docker image to be used as + the custom container for serving predictions. This URI must + identify an image in Artifact Registry or Container + Registry. Learn more about the `container publishing + requirements `__, + including permissions requirements for the AI Platform + Service Agent. + + The container image is ingested upon + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel], + stored internally, and this original path is afterwards not + used. + + To learn about the requirements for the Docker image itself, + see `Custom container + requirements `__. + + You can set this field to the URI of one of Vertex AI's + `pre-built container images for + prediction `__. + command (Sequence[str]): + Immutable.
Specifies the command that runs when the + container starts. This overrides the container's + `ENTRYPOINT `__. + Specify this field as an array of executable and arguments, + similar to a Docker ``ENTRYPOINT``'s "exec" form, not its + "shell" form. + + If you do not specify this field, then the container's + ``ENTRYPOINT`` runs, in conjunction with the + [args][google.cloud.aiplatform.v1.ModelContainerSpec.args] + field or the container's + ```CMD`` `__, + if either exists. If this field is not specified and the + container does not have an ``ENTRYPOINT``, then refer to the + Docker documentation about `how ``CMD`` and ``ENTRYPOINT`` + interact `__. + + If you specify this field, then you can also specify the + ``args`` field to provide additional arguments for this + command. However, if you specify this field, then the + container's ``CMD`` is ignored. See the `Kubernetes + documentation about how the ``command`` and ``args`` fields + interact with a container's ``ENTRYPOINT`` and + ``CMD`` `__. + + In this field, you can reference `environment variables set + by Vertex + AI `__ + and environment variables set in the + [env][google.cloud.aiplatform.v1.ModelContainerSpec.env] + field. You cannot reference environment variables set in the + Docker image. In order for environment variables to be + expanded, reference them by using the following syntax: + $(VARIABLE_NAME) Note that this differs from Bash variable + expansion, which does not use parentheses. If a variable + cannot be resolved, the reference in the input string is + used unchanged. To avoid variable expansion, you can escape + this syntax with ``$$``; for example: $$(VARIABLE_NAME) This + field corresponds to the ``command`` field of the Kubernetes + Containers `v1 core + API `__. + args (Sequence[str]): + Immutable. Specifies arguments for the command that runs + when the container starts. This overrides the container's + ```CMD`` `__. + Specify this field as an array of executable and arguments, + similar to a Docker ``CMD``'s "default parameters" form. + + If you don't specify this field but do specify the + [command][google.cloud.aiplatform.v1.ModelContainerSpec.command] + field, then the command from the ``command`` field runs + without any additional arguments. See the `Kubernetes + documentation about how the ``command`` and ``args`` fields + interact with a container's ``ENTRYPOINT`` and + ``CMD`` `__. + + If you don't specify this field and don't specify the + ``command`` field, then the container's + ```ENTRYPOINT`` `__ + and ``CMD`` determine what runs based on their default + behavior. See the Docker documentation about `how ``CMD`` + and ``ENTRYPOINT`` + interact `__. + + In this field, you can reference `environment variables set + by Vertex + AI `__ + and environment variables set in the + [env][google.cloud.aiplatform.v1.ModelContainerSpec.env] + field. You cannot reference environment variables set in the + Docker image. In order for environment variables to be + expanded, reference them by using the following syntax: + $(VARIABLE_NAME) Note that this differs from Bash variable + expansion, which does not use parentheses. If a variable + cannot be resolved, the reference in the input string is + used unchanged. To avoid variable expansion, you can escape + this syntax with ``$$``; for example: $$(VARIABLE_NAME) This + field corresponds to the ``args`` field of the Kubernetes + Containers `v1 core + API `__. + env (Sequence[google.cloud.aiplatform_v1.types.EnvVar]): + Immutable. 
List of environment variables to set in the + container. After the container starts running, code running + in the container can read these environment variables. + + Additionally, the + [command][google.cloud.aiplatform.v1.ModelContainerSpec.command] + and + [args][google.cloud.aiplatform.v1.ModelContainerSpec.args] + fields can reference these variables. Later entries in this + list can also reference earlier entries. For example, the + following sets the variable ``VAR_2`` to have the + value ``foo bar``: + + .. code:: json + + [ + { + "name": "VAR_1", + "value": "foo" + }, + { + "name": "VAR_2", + "value": "$(VAR_1) bar" + } + ] + + If you switch the order of the variables in the example, + then the expansion does not occur. + + This field corresponds to the ``env`` field of the + Kubernetes Containers `v1 core + API `__. + ports (Sequence[google.cloud.aiplatform_v1.types.Port]): + Immutable. List of ports to expose from the container. + Vertex AI sends any prediction requests that it receives to + the first port on this list. Vertex AI also sends + `liveness and health + checks `__ + to this port. + + If you do not specify this field, it defaults to the + following value: + + .. code:: json + + [ + { + "containerPort": 8080 + } + ] + + Vertex AI does not use ports other than the first one + listed. This field corresponds to the ``ports`` field of the + Kubernetes Containers `v1 core + API `__. + predict_route (str): + Immutable. HTTP path on the container to send prediction + requests to. Vertex AI forwards requests sent using + [projects.locations.endpoints.predict][google.cloud.aiplatform.v1.PredictionService.Predict] + to this path on the container's IP address and port. Vertex + AI then returns the container's response in the API + response. + + For example, if you set this field to ``/foo``, then when + Vertex AI receives a prediction request, it forwards the + request body in a POST request to the ``/foo`` path on the + port of your container specified by the first value of this + ``ModelContainerSpec``'s + [ports][google.cloud.aiplatform.v1.ModelContainerSpec.ports] + field. + + If you don't specify this field, it defaults to the + following value when you [deploy this Model to an + Endpoint][google.cloud.aiplatform.v1.EndpointService.DeployModel]: + /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict + The placeholders in this value are replaced as follows: + + - ENDPOINT: The last segment (following ``endpoints/``) of + the [Endpoint.name][] field of the Endpoint where this + Model has been deployed. (Vertex AI makes this value + available to your container code as the + ```AIP_ENDPOINT_ID`` environment + variable `__.) + + - DEPLOYED_MODEL: + [DeployedModel.id][google.cloud.aiplatform.v1.DeployedModel.id] + of the ``DeployedModel``. (Vertex AI makes this value + available to your container code as the + ```AIP_DEPLOYED_MODEL_ID`` environment + variable `__.) + health_route (str): + Immutable. HTTP path on the container to send health checks + to. Vertex AI intermittently sends GET requests to this path + on the container's IP address and port to check that the + container is healthy. Read more about `health + checks `__. + + For example, if you set this field to ``/bar``, then Vertex + AI intermittently sends a GET request to the ``/bar`` path + on the port of your container specified by the first value + of this ``ModelContainerSpec``'s + [ports][google.cloud.aiplatform.v1.ModelContainerSpec.ports] + field.
+ + If you don't specify this field, it defaults to the + following value when you [deploy this Model to an + Endpoint][google.cloud.aiplatform.v1.EndpointService.DeployModel]: + /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict + The placeholders in this value are replaced as follows: + + - ENDPOINT: The last segment (following ``endpoints/``) of + the [Endpoint.name][] field of the Endpoint where this + Model has been deployed. (Vertex AI makes this value + available to your container code as the + ```AIP_ENDPOINT_ID`` environment + variable `__.) + + - DEPLOYED_MODEL: + [DeployedModel.id][google.cloud.aiplatform.v1.DeployedModel.id] + of the ``DeployedModel``. (Vertex AI makes this value + available to your container code as the + ```AIP_DEPLOYED_MODEL_ID`` environment + variable `__.) + """ + + image_uri = proto.Field( + proto.STRING, + number=1, + ) + command = proto.RepeatedField( + proto.STRING, + number=2, + ) + args = proto.RepeatedField( + proto.STRING, + number=3, + ) + env = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=env_var.EnvVar, + ) + ports = proto.RepeatedField( + proto.MESSAGE, + number=5, + message='Port', + ) + predict_route = proto.Field( + proto.STRING, + number=6, + ) + health_route = proto.Field( + proto.STRING, + number=7, + ) + + +class Port(proto.Message): + r"""Represents a network port in a container. + Attributes: + container_port (int): + The number of the port to expose on the pod's + IP address. Must be a valid port number, between + 1 and 65535 inclusive. + """ + + container_port = proto.Field( + proto.INT32, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation.py new file mode 100644 index 0000000000..190ec683ca --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation.py @@ -0,0 +1,86 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'ModelEvaluation', + }, +) + + +class ModelEvaluation(proto.Message): + r"""A collection of metrics calculated by comparing Model's + predictions on all of the test data against annotations from the + test data. + + Attributes: + name (str): + Output only. The resource name of the + ModelEvaluation. + metrics_schema_uri (str): + Output only. Points to a YAML file stored on Google Cloud + Storage describing the + [metrics][google.cloud.aiplatform.v1.ModelEvaluation.metrics] + of this ModelEvaluation. The schema is defined as an OpenAPI + 3.0.2 `Schema + Object `__. + metrics (google.protobuf.struct_pb2.Value): + Output only. Evaluation metrics of the Model.
The schema of + the metrics is stored in + [metrics_schema_uri][google.cloud.aiplatform.v1.ModelEvaluation.metrics_schema_uri]. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + ModelEvaluation was created. + slice_dimensions (Sequence[str]): + Output only. All possible + [dimensions][ModelEvaluationSlice.slice.dimension] of + ModelEvaluationSlices. The dimensions can be used as the + filter of the + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices] + request, in the form of ``slice.dimension = <dimension>``. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + metrics_schema_uri = proto.Field( + proto.STRING, + number=2, + ) + metrics = proto.Field( + proto.MESSAGE, + number=3, + message=struct_pb2.Value, + ) + create_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + slice_dimensions = proto.RepeatedField( + proto.STRING, + number=5, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation_slice.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation_slice.py new file mode 100644 index 0000000000..9a553e320a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation_slice.py @@ -0,0 +1,109 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'ModelEvaluationSlice', + }, +) + + +class ModelEvaluationSlice(proto.Message): + r"""A collection of metrics calculated by comparing Model's + predictions on a slice of the test data against ground truth + annotations. + + Attributes: + name (str): + Output only. The resource name of the + ModelEvaluationSlice. + slice_ (google.cloud.aiplatform_v1.types.ModelEvaluationSlice.Slice): + Output only. The slice of the test data that + is used to evaluate the Model. + metrics_schema_uri (str): + Output only. Points to a YAML file stored on Google Cloud + Storage describing the + [metrics][google.cloud.aiplatform.v1.ModelEvaluationSlice.metrics] + of this ModelEvaluationSlice. The schema is defined as an + OpenAPI 3.0.2 `Schema + Object `__. + metrics (google.protobuf.struct_pb2.Value): + Output only. Sliced evaluation metrics of the Model. The + schema of the metrics is stored in + [metrics_schema_uri][google.cloud.aiplatform.v1.ModelEvaluationSlice.metrics_schema_uri]. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + ModelEvaluationSlice was created. + """ + + class Slice(proto.Message): + r"""Definition of a slice. + Attributes: + dimension (str): + Output only. The dimension of the slice.
Well-known + dimensions are: + + - ``annotationSpec``: This slice is on the test data that + has either ground truth or prediction with + [AnnotationSpec.display_name][google.cloud.aiplatform.v1.AnnotationSpec.display_name] + equals to + [value][google.cloud.aiplatform.v1.ModelEvaluationSlice.Slice.value]. + value (str): + Output only. The value of the dimension in + this slice. + """ + + dimension = proto.Field( + proto.STRING, + number=1, + ) + value = proto.Field( + proto.STRING, + number=2, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + slice_ = proto.Field( + proto.MESSAGE, + number=2, + message=Slice, + ) + metrics_schema_uri = proto.Field( + proto.STRING, + number=3, + ) + metrics = proto.Field( + proto.MESSAGE, + number=4, + message=struct_pb2.Value, + ) + create_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_service.py new file mode 100644 index 0000000000..088fd612c3 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_service.py @@ -0,0 +1,583 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import io +from google.cloud.aiplatform_v1.types import model as gca_model +from google.cloud.aiplatform_v1.types import model_evaluation +from google.cloud.aiplatform_v1.types import model_evaluation_slice +from google.cloud.aiplatform_v1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'UploadModelRequest', + 'UploadModelOperationMetadata', + 'UploadModelResponse', + 'GetModelRequest', + 'ListModelsRequest', + 'ListModelsResponse', + 'UpdateModelRequest', + 'DeleteModelRequest', + 'ExportModelRequest', + 'ExportModelOperationMetadata', + 'ExportModelResponse', + 'GetModelEvaluationRequest', + 'ListModelEvaluationsRequest', + 'ListModelEvaluationsResponse', + 'GetModelEvaluationSliceRequest', + 'ListModelEvaluationSlicesRequest', + 'ListModelEvaluationSlicesResponse', + }, +) + + +class UploadModelRequest(proto.Message): + r"""Request message for + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel]. + + Attributes: + parent (str): + Required. The resource name of the Location into which to + upload the Model. Format: + ``projects/{project}/locations/{location}`` + model (google.cloud.aiplatform_v1.types.Model): + Required. The Model to create. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + model = proto.Field( + proto.MESSAGE, + number=2, + message=gca_model.Model, + ) + + +class UploadModelOperationMetadata(proto.Message): + r"""Details of + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel] + operation. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The common part of the operation metadata. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class UploadModelResponse(proto.Message): + r"""Response message of + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel] + operation. + + Attributes: + model (str): + The name of the uploaded Model resource. Format: + ``projects/{project}/locations/{location}/models/{model}`` + """ + + model = proto.Field( + proto.STRING, + number=1, + ) + + +class GetModelRequest(proto.Message): + r"""Request message for + [ModelService.GetModel][google.cloud.aiplatform.v1.ModelService.GetModel]. + + Attributes: + name (str): + Required. The name of the Model resource. Format: + ``projects/{project}/locations/{location}/models/{model}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListModelsRequest(proto.Message): + r"""Request message for + [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + Models from. Format: + ``projects/{project}/locations/{location}`` + filter (str): + An expression for filtering the results of the request. For + field names both snake_case and camelCase are supported. + + - ``model`` supports = and !=. ``model`` represents the + Model ID, i.e. the last segment of the Model's [resource + name][google.cloud.aiplatform.v1.Model.name]. + - ``display_name`` supports = and != + - ``labels`` supports general map functions that is: + + - ``labels.key=value`` - key:value equality + - \`labels.key:\* or labels:key - key existence + - A key including a space must be quoted. + ``labels."a key"``. + + Some examples: + + - ``model=1234`` + - ``displayName="myDisplayName"`` + - ``labels.myKey="myValue"`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListModelsResponse.next_page_token][google.cloud.aiplatform.v1.ListModelsResponse.next_page_token] + of the previous + [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + order_by (str): + A comma-separated list of fields to order by, sorted in + ascending order. Use "desc" after a field name for + descending. Supported fields: + + - ``display_name`` + - ``create_time`` + - ``update_time`` + + Example: ``display_name, create_time desc``. 
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + order_by = proto.Field( + proto.STRING, + number=6, + ) + + +class ListModelsResponse(proto.Message): + r"""Response message for + [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels] + + Attributes: + models (Sequence[google.cloud.aiplatform_v1.types.Model]): + List of Models in the requested page. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListModelsRequest.page_token][google.cloud.aiplatform.v1.ListModelsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + models = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_model.Model, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateModelRequest(proto.Message): + r"""Request message for + [ModelService.UpdateModel][google.cloud.aiplatform.v1.ModelService.UpdateModel]. + + Attributes: + model (google.cloud.aiplatform_v1.types.Model): + Required. The Model which replaces the + resource on the server. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the resource. For the + ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + """ + + model = proto.Field( + proto.MESSAGE, + number=1, + message=gca_model.Model, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class DeleteModelRequest(proto.Message): + r"""Request message for + [ModelService.DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel]. + + Attributes: + name (str): + Required. The name of the Model resource to be deleted. + Format: + ``projects/{project}/locations/{location}/models/{model}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ExportModelRequest(proto.Message): + r"""Request message for + [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel]. + + Attributes: + name (str): + Required. The resource name of the Model to export. Format: + ``projects/{project}/locations/{location}/models/{model}`` + output_config (google.cloud.aiplatform_v1.types.ExportModelRequest.OutputConfig): + Required. The desired output location and + configuration. + """ + + class OutputConfig(proto.Message): + r"""Output configuration for the Model export. + Attributes: + export_format_id (str): + The ID of the format in which the Model must be exported. + Each Model lists the [export formats it + supports][google.cloud.aiplatform.v1.Model.supported_export_formats]. + If no value is provided here, then the first from the list + of the Model's supported formats is used by default. + artifact_destination (google.cloud.aiplatform_v1.types.GcsDestination): + The Cloud Storage location where the Model artifact is to be + written to. Under the directory given as the destination a + new one with name + "``model-export--``", + where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 + format, will be created. Inside, the Model and any of its + supporting files will be written. This field should only be + set when the ``exportableContent`` field of the + [Model.supported_export_formats] object contains + ``ARTIFACT``. 
+ image_destination (google.cloud.aiplatform_v1.types.ContainerRegistryDestination): + The Google Container Registry or Artifact Registry URI to + which the Model container image will be copied. This field + should only be set when the ``exportableContent`` field of + the [Model.supported_export_formats] object contains + ``IMAGE``. + """ + + export_format_id = proto.Field( + proto.STRING, + number=1, + ) + artifact_destination = proto.Field( + proto.MESSAGE, + number=3, + message=io.GcsDestination, + ) + image_destination = proto.Field( + proto.MESSAGE, + number=4, + message=io.ContainerRegistryDestination, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + output_config = proto.Field( + proto.MESSAGE, + number=2, + message=OutputConfig, + ) + + +class ExportModelOperationMetadata(proto.Message): + r"""Details of the + [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel] + operation. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The common part of the operation metadata. + output_info (google.cloud.aiplatform_v1.types.ExportModelOperationMetadata.OutputInfo): + Output only. Information further describing + the output of this Model export. + """ + + class OutputInfo(proto.Message): + r"""Further describes the output of the ExportModel. Supplements + [ExportModelRequest.OutputConfig][google.cloud.aiplatform.v1.ExportModelRequest.OutputConfig]. + + Attributes: + artifact_output_uri (str): + Output only. If the Model artifact is being + exported to Google Cloud Storage, this is the + full path of the directory created, into which + the Model files are written. + image_output_uri (str): + Output only. If the Model image is being + exported to Google Container Registry or + Artifact Registry, this is the full path of the + image created. + """ + + artifact_output_uri = proto.Field( + proto.STRING, + number=2, + ) + image_output_uri = proto.Field( + proto.STRING, + number=3, + ) + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + output_info = proto.Field( + proto.MESSAGE, + number=2, + message=OutputInfo, + ) + + +class ExportModelResponse(proto.Message): + r"""Response message of the + [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel] + operation. + """ + + +class GetModelEvaluationRequest(proto.Message): + r"""Request message for + [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1.ModelService.GetModelEvaluation]. + + Attributes: + name (str): + Required. The name of the ModelEvaluation resource. Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListModelEvaluationsRequest(proto.Message): + r"""Request message for + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]. + + Attributes: + parent (str): + Required. The resource name of the Model to list the + ModelEvaluations from. Format: + ``projects/{project}/locations/{location}/models/{model}`` + filter (str): + The standard list filter. + page_size (int): + The standard list page size. + page_token (str): + The standard list page token.
Typically obtained via + [ListModelEvaluationsResponse.next_page_token][google.cloud.aiplatform.v1.ListModelEvaluationsResponse.next_page_token] + of the previous + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListModelEvaluationsResponse(proto.Message): + r"""Response message for + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]. + + Attributes: + model_evaluations (Sequence[google.cloud.aiplatform_v1.types.ModelEvaluation]): + List of ModelEvaluations in the requested + page. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListModelEvaluationsRequest.page_token][google.cloud.aiplatform.v1.ListModelEvaluationsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + model_evaluations = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=model_evaluation.ModelEvaluation, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class GetModelEvaluationSliceRequest(proto.Message): + r"""Request message for + [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1.ModelService.GetModelEvaluationSlice]. + + Attributes: + name (str): + Required. The name of the ModelEvaluationSlice resource. + Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListModelEvaluationSlicesRequest(proto.Message): + r"""Request message for + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]. + + Attributes: + parent (str): + Required. The resource name of the ModelEvaluation to list + the ModelEvaluationSlices from. Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` + filter (str): + The standard list filter. + + - ``slice.dimension`` - for =. + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListModelEvaluationSlicesResponse.next_page_token][google.cloud.aiplatform.v1.ListModelEvaluationSlicesResponse.next_page_token] + of the previous + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListModelEvaluationSlicesResponse(proto.Message): + r"""Response message for + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]. 
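+ + With the generated client, the pages are usually consumed through the returned pager rather than by handling this message directly (a sketch; ``client`` is assumed to be a ModelServiceClient and ``evaluation_name`` an existing ModelEvaluation resource name): + + .. code:: python + + request = ListModelEvaluationSlicesRequest(parent=evaluation_name) + for evaluation_slice in client.list_model_evaluation_slices(request=request): + print(evaluation_slice.slice_.dimension, evaluation_slice.slice_.value)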
+ + Attributes: + model_evaluation_slices (Sequence[google.cloud.aiplatform_v1.types.ModelEvaluationSlice]): + List of ModelEvaluationSlices in the requested + page. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListModelEvaluationSlicesRequest.page_token][google.cloud.aiplatform.v1.ListModelEvaluationSlicesRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + model_evaluation_slices = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=model_evaluation_slice.ModelEvaluationSlice, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/operation.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/operation.py new file mode 100644 index 0000000000..b5d0e5b613 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/operation.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'GenericOperationMetadata', + 'DeleteOperationMetadata', + }, +) + + +class GenericOperationMetadata(proto.Message): + r"""Generic Metadata shared by all operations. + Attributes: + partial_failures (Sequence[google.rpc.status_pb2.Status]): + Output only. Partial failures encountered, + e.g. single files that couldn't be read. + This field should never exceed 20 entries. + The status details field will contain standard + GCP error details. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the operation was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the operation was + updated for the last time. If the operation has + finished (successfully or not), this is the + finish time. + """ + + partial_failures = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=status_pb2.Status, + ) + create_time = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +class DeleteOperationMetadata(proto.Message): + r"""Details of operations that perform deletes of any entities. + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The common part of the operation metadata.
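+ + When a delete call returns a long-running operation, this metadata can be read from it roughly like this (a sketch; ``operation`` is assumed to be the google.api_core.operation.Operation returned by the client): + + .. code:: python + + metadata = operation.metadata # a DeleteOperationMetadata message + print(metadata.generic_metadata.create_time)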
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message='GenericOperationMetadata', + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_job.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_job.py new file mode 100644 index 0000000000..280d02bc17 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_job.py @@ -0,0 +1,436 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import artifact +from google.cloud.aiplatform_v1.types import context +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1.types import execution as gca_execution +from google.cloud.aiplatform_v1.types import pipeline_state +from google.cloud.aiplatform_v1.types import value as gca_value +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'PipelineJob', + 'PipelineJobDetail', + 'PipelineTaskDetail', + 'PipelineTaskExecutorDetail', + }, +) + + +class PipelineJob(proto.Message): + r"""An instance of a machine learning PipelineJob. + Attributes: + name (str): + Output only. The resource name of the + PipelineJob. + display_name (str): + The display name of the Pipeline. + The name can be up to 128 characters long and + can be consist of any UTF-8 characters. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Pipeline creation time. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Pipeline start time. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Pipeline end time. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this PipelineJob + was most recently updated. + pipeline_spec (google.protobuf.struct_pb2.Struct): + Required. The spec of the pipeline. + state (google.cloud.aiplatform_v1.types.PipelineState): + Output only. The detailed state of the job. + job_detail (google.cloud.aiplatform_v1.types.PipelineJobDetail): + Output only. The details of pipeline run. Not + available in the list view. + error (google.rpc.status_pb2.Status): + Output only. The error that occurred during + pipeline execution. Only populated when the + pipeline's state is FAILED or CANCELLED. + labels (Sequence[google.cloud.aiplatform_v1.types.PipelineJob.LabelsEntry]): + The labels with user-defined metadata to + organize PipelineJob. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. 
+ runtime_config (google.cloud.aiplatform_v1.types.PipelineJob.RuntimeConfig): + Runtime config of the pipeline. + encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): + Customer-managed encryption key spec for a + pipelineJob. If set, this PipelineJob and all of + its sub-resources will be secured by this key. + service_account (str): + The service account that the pipeline workload runs as. If + not specified, the Compute Engine default service account in + the project will be used. See + https://cloud.google.com/compute/docs/access/service-accounts#default_service_account + + Users starting the pipeline must have the + ``iam.serviceAccounts.actAs`` permission on this service + account. + network (str): + The full name of the Compute Engine + `network `__ + to which the Pipeline Job's workload should be peered. For + example, ``projects/12345/global/networks/myVPC``. + `Format `__ + is of the form + ``projects/{project}/global/networks/{network}``, where + {project} is a project number, as in ``12345``, and + {network} is a network name. + + Private services access must already be configured for the + network. The pipeline job will apply the network + configuration to any GCP resources it launches, such as + Vertex AI Training or Dataflow jobs. If left unspecified, + the workload is not peered with any network. + """ + + class RuntimeConfig(proto.Message): + r"""The runtime config of a PipelineJob. + Attributes: + parameters (Sequence[google.cloud.aiplatform_v1.types.PipelineJob.RuntimeConfig.ParametersEntry]): + The runtime parameters of the PipelineJob. The parameters + will be passed into + [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1.PipelineJob.pipeline_spec] + to replace the placeholders at runtime. + gcs_output_directory (str): + Required. A path in a Cloud Storage bucket, which will be + treated as the root output directory of the pipeline. It is + used by the system to generate the paths of output + artifacts. The artifact paths are generated with a sub-path + pattern ``{job_id}/{task_id}/{output_key}`` under the + specified output directory. The service account specified in + this pipeline must have the ``storage.objects.get`` and + ``storage.objects.create`` permissions for this bucket.
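+ + A minimal runtime config might look like this (a sketch; the bucket and parameter name are illustrative, and ``Value`` is this package's parameter value type): + + .. code:: python + + runtime_config = PipelineJob.RuntimeConfig( + gcs_output_directory="gs://my-bucket/pipeline-root", + parameters={"learning_rate": Value(double_value=0.01)}, + )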
+ """ + + parameters = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=1, + message=gca_value.Value, + ) + gcs_output_directory = proto.Field( + proto.STRING, + number=2, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + create_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + start_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + pipeline_spec = proto.Field( + proto.MESSAGE, + number=7, + message=struct_pb2.Struct, + ) + state = proto.Field( + proto.ENUM, + number=8, + enum=pipeline_state.PipelineState, + ) + job_detail = proto.Field( + proto.MESSAGE, + number=9, + message='PipelineJobDetail', + ) + error = proto.Field( + proto.MESSAGE, + number=10, + message=status_pb2.Status, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=11, + ) + runtime_config = proto.Field( + proto.MESSAGE, + number=12, + message=RuntimeConfig, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=16, + message=gca_encryption_spec.EncryptionSpec, + ) + service_account = proto.Field( + proto.STRING, + number=17, + ) + network = proto.Field( + proto.STRING, + number=18, + ) + + +class PipelineJobDetail(proto.Message): + r"""The runtime detail of PipelineJob. + Attributes: + pipeline_context (google.cloud.aiplatform_v1.types.Context): + Output only. The context of the pipeline. + pipeline_run_context (google.cloud.aiplatform_v1.types.Context): + Output only. The context of the current + pipeline run. + task_details (Sequence[google.cloud.aiplatform_v1.types.PipelineTaskDetail]): + Output only. The runtime details of the tasks + under the pipeline. + """ + + pipeline_context = proto.Field( + proto.MESSAGE, + number=1, + message=context.Context, + ) + pipeline_run_context = proto.Field( + proto.MESSAGE, + number=2, + message=context.Context, + ) + task_details = proto.RepeatedField( + proto.MESSAGE, + number=3, + message='PipelineTaskDetail', + ) + + +class PipelineTaskDetail(proto.Message): + r"""The runtime detail of a task execution. + Attributes: + task_id (int): + Output only. The system generated ID of the + task. + parent_task_id (int): + Output only. The id of the parent task if the + task is within a component scope. Empty if the + task is at the root level. + task_name (str): + Output only. The user specified name of the task that is + defined in [PipelineJob.spec][]. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Task create time. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Task start time. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Task end time. + executor_detail (google.cloud.aiplatform_v1.types.PipelineTaskExecutorDetail): + Output only. The detailed execution info. + state (google.cloud.aiplatform_v1.types.PipelineTaskDetail.State): + Output only. State of the task. + execution (google.cloud.aiplatform_v1.types.Execution): + Output only. The execution metadata of the + task. + error (google.rpc.status_pb2.Status): + Output only. The error that occurred during + task execution. Only populated when the task's + state is FAILED or CANCELLED. + inputs (Sequence[google.cloud.aiplatform_v1.types.PipelineTaskDetail.InputsEntry]): + Output only. 
The runtime input artifacts of + the task. + outputs (Sequence[google.cloud.aiplatform_v1.types.PipelineTaskDetail.OutputsEntry]): + Output only. The runtime output artifacts of + the task. + """ + class State(proto.Enum): + r"""Specifies state of TaskExecution""" + STATE_UNSPECIFIED = 0 + PENDING = 1 + RUNNING = 2 + SUCCEEDED = 3 + CANCEL_PENDING = 4 + CANCELLING = 5 + CANCELLED = 6 + FAILED = 7 + SKIPPED = 8 + NOT_TRIGGERED = 9 + + class ArtifactList(proto.Message): + r"""A list of artifact metadata. + Attributes: + artifacts (Sequence[google.cloud.aiplatform_v1.types.Artifact]): + Output only. A list of artifact metadata. + """ + + artifacts = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=artifact.Artifact, + ) + + task_id = proto.Field( + proto.INT64, + number=1, + ) + parent_task_id = proto.Field( + proto.INT64, + number=12, + ) + task_name = proto.Field( + proto.STRING, + number=2, + ) + create_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + start_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + executor_detail = proto.Field( + proto.MESSAGE, + number=6, + message='PipelineTaskExecutorDetail', + ) + state = proto.Field( + proto.ENUM, + number=7, + enum=State, + ) + execution = proto.Field( + proto.MESSAGE, + number=8, + message=gca_execution.Execution, + ) + error = proto.Field( + proto.MESSAGE, + number=9, + message=status_pb2.Status, + ) + inputs = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=10, + message=ArtifactList, + ) + outputs = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=11, + message=ArtifactList, + ) + + +class PipelineTaskExecutorDetail(proto.Message): + r"""The runtime detail of a pipeline executor. + Attributes: + container_detail (google.cloud.aiplatform_v1.types.PipelineTaskExecutorDetail.ContainerDetail): + Output only. The detailed info for a + container executor. + custom_job_detail (google.cloud.aiplatform_v1.types.PipelineTaskExecutorDetail.CustomJobDetail): + Output only. The detailed info for a custom + job executor. + """ + + class ContainerDetail(proto.Message): + r"""The detail of a container execution. It contains the job + names of the lifecycle of a container execution. + + Attributes: + main_job (str): + Output only. The name of the + [CustomJob][google.cloud.aiplatform.v1.CustomJob] for the + main container execution. + pre_caching_check_job (str): + Output only. The name of the + [CustomJob][google.cloud.aiplatform.v1.CustomJob] for the + pre-caching-check container execution. This job will be + available if the + [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1.PipelineJob.pipeline_spec] + specifies the ``pre_caching_check`` hook in the lifecycle + events. + """ + + main_job = proto.Field( + proto.STRING, + number=1, + ) + pre_caching_check_job = proto.Field( + proto.STRING, + number=2, + ) + + class CustomJobDetail(proto.Message): + r"""The detailed info for a custom job executor. + Attributes: + job (str): + Output only. The name of the + [CustomJob][google.cloud.aiplatform.v1.CustomJob]. 
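Taken together, these detail messages are what a caller traverses when inspecting a finished run. A small sketch, assuming the PipelineServiceClient added elsewhere in this PR and a hypothetical job name::

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.PipelineServiceClient()
    job = client.get_pipeline_job(
        name='projects/my-project/locations/us-central1/pipelineJobs/my-job',  # hypothetical
    )
    for task in job.job_detail.task_details:
        print(task.task_name, task.state.name)
        # inputs/outputs map output keys to ArtifactList messages.
        for key, artifact_list in task.outputs.items():
            for art in artifact_list.artifacts:
                print('  output', key, art.uri)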
+ """ + + job = proto.Field( + proto.STRING, + number=1, + ) + + container_detail = proto.Field( + proto.MESSAGE, + number=1, + oneof='details', + message=ContainerDetail, + ) + custom_job_detail = proto.Field( + proto.MESSAGE, + number=2, + oneof='details', + message=CustomJobDetail, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_service.py new file mode 100644 index 0000000000..28007208b5 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_service.py @@ -0,0 +1,381 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job +from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'CreateTrainingPipelineRequest', + 'GetTrainingPipelineRequest', + 'ListTrainingPipelinesRequest', + 'ListTrainingPipelinesResponse', + 'DeleteTrainingPipelineRequest', + 'CancelTrainingPipelineRequest', + 'CreatePipelineJobRequest', + 'GetPipelineJobRequest', + 'ListPipelineJobsRequest', + 'ListPipelineJobsResponse', + 'DeletePipelineJobRequest', + 'CancelPipelineJobRequest', + }, +) + + +class CreateTrainingPipelineRequest(proto.Message): + r"""Request message for + [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CreateTrainingPipeline]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + TrainingPipeline in. Format: + ``projects/{project}/locations/{location}`` + training_pipeline (google.cloud.aiplatform_v1.types.TrainingPipeline): + Required. The TrainingPipeline to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + training_pipeline = proto.Field( + proto.MESSAGE, + number=2, + message=gca_training_pipeline.TrainingPipeline, + ) + + +class GetTrainingPipelineRequest(proto.Message): + r"""Request message for + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline]. + + Attributes: + name (str): + Required. The name of the TrainingPipeline resource. Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListTrainingPipelinesRequest(proto.Message): + r"""Request message for + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + TrainingPipelines from. Format: + ``projects/{project}/locations/{location}`` + filter (str): + Lists the PipelineJobs that match the filter expression. 
The + following fields are supported: + + - ``pipeline_name``: Supports ``=`` and ``!=`` comparisons. + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC + 3339 format. + - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC + 3339 format. + - ``end_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC + 3339 format. + - ``labels``: Supports key-value equality and key presence. + + Filter expressions can be combined together using logical + operators (``AND`` & ``OR``). For example: + ``pipeline_name="test" AND create_time>"2020-05-18T13:30:00Z"``. + + The syntax to define filter expression is based on + https://google.aip.dev/160. + + Examples: + + - ``create_time>"2021-05-18T00:00:00Z" OR update_time>"2020-05-18T00:00:00Z"`` + PipelineJobs created or updated after 2020-05-18 00:00:00 + UTC. + - ``labels.env = "prod"`` PipelineJobs with label "env" set + to "prod". + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListTrainingPipelinesResponse.next_page_token][google.cloud.aiplatform.v1.ListTrainingPipelinesResponse.next_page_token] + of the previous + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListTrainingPipelinesResponse(proto.Message): + r"""Response message for + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines] + + Attributes: + training_pipelines (Sequence[google.cloud.aiplatform_v1.types.TrainingPipeline]): + List of TrainingPipelines in the requested + page. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListTrainingPipelinesRequest.page_token][google.cloud.aiplatform.v1.ListTrainingPipelinesRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + training_pipelines = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_training_pipeline.TrainingPipeline, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteTrainingPipelineRequest(proto.Message): + r"""Request message for + [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.DeleteTrainingPipeline]. + + Attributes: + name (str): + Required. The name of the TrainingPipeline resource to be + deleted. Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CancelTrainingPipelineRequest(proto.Message): + r"""Request message for + [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CancelTrainingPipeline]. + + Attributes: + name (str): + Required. The name of the TrainingPipeline to cancel. 
+ Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CreatePipelineJobRequest(proto.Message): + r"""Request message for + [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1.PipelineService.CreatePipelineJob]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + PipelineJob in. Format: + ``projects/{project}/locations/{location}`` + pipeline_job (google.cloud.aiplatform_v1.types.PipelineJob): + Required. The PipelineJob to create. + pipeline_job_id (str): + The ID to use for the PipelineJob, which will become the + final component of the PipelineJob name. If not provided, an + ID will be automatically generated. + + This value should be less than 128 characters, and valid + characters are /[a-z][0-9]-/. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + pipeline_job = proto.Field( + proto.MESSAGE, + number=2, + message=gca_pipeline_job.PipelineJob, + ) + pipeline_job_id = proto.Field( + proto.STRING, + number=3, + ) + + +class GetPipelineJobRequest(proto.Message): + r"""Request message for + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob]. + + Attributes: + name (str): + Required. The name of the PipelineJob resource. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListPipelineJobsRequest(proto.Message): + r"""Request message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + PipelineJobs from. Format: + ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. Supported fields: + + - ``display_name`` supports ``=`` and ``!=``. + - ``state`` supports ``=`` and ``!=``. + + The following examples demonstrate how to filter the list of + PipelineJobs: + + - ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"`` + - ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"`` + - ``NOT display_name="my_pipeline"`` + - ``state="PIPELINE_STATE_FAILED"`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListPipelineJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListPipelineJobsResponse.next_page_token] + of the previous + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs] + call. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + + +class ListPipelineJobsResponse(proto.Message): + r"""Response message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs] + + Attributes: + pipeline_jobs (Sequence[google.cloud.aiplatform_v1.types.PipelineJob]): + List of PipelineJobs in the requested page. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListPipelineJobsRequest.page_token][google.cloud.aiplatform.v1.ListPipelineJobsRequest.page_token] + to obtain that page. 
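In practice the ``page_token`` plumbing documented above is handled by the generated pager, so callers rarely touch it directly. A minimal sketch, with a hypothetical project and location::

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.PipelineServiceClient()
    request = aiplatform_v1.ListPipelineJobsRequest(
        parent='projects/my-project/locations/us-central1',  # hypothetical
        filter='state="PIPELINE_STATE_RUNNING"',
        page_size=50,
    )
    # Iterating the pager fetches follow-up pages via next_page_token.
    for pipeline_job in client.list_pipeline_jobs(request=request):
        print(pipeline_job.name, pipeline_job.state.name)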
+ """ + + @property + def raw_page(self): + return self + + pipeline_jobs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_pipeline_job.PipelineJob, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeletePipelineJobRequest(proto.Message): + r"""Request message for + [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1.PipelineService.DeletePipelineJob]. + + Attributes: + name (str): + Required. The name of the PipelineJob resource to be + deleted. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CancelPipelineJobRequest(proto.Message): + r"""Request message for + [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1.PipelineService.CancelPipelineJob]. + + Attributes: + name (str): + Required. The name of the PipelineJob to cancel. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_state.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_state.py new file mode 100644 index 0000000000..0b41968239 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_state.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'PipelineState', + }, +) + + +class PipelineState(proto.Enum): + r"""Describes the state of a pipeline.""" + PIPELINE_STATE_UNSPECIFIED = 0 + PIPELINE_STATE_QUEUED = 1 + PIPELINE_STATE_PENDING = 2 + PIPELINE_STATE_RUNNING = 3 + PIPELINE_STATE_SUCCEEDED = 4 + PIPELINE_STATE_FAILED = 5 + PIPELINE_STATE_CANCELLING = 6 + PIPELINE_STATE_CANCELLED = 7 + PIPELINE_STATE_PAUSED = 8 + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/prediction_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/prediction_service.py new file mode 100644 index 0000000000..50f2f7baa9 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/prediction_service.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'PredictRequest', + 'PredictResponse', + }, +) + + +class PredictRequest(proto.Message): + r"""Request message for + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. + + Attributes: + endpoint (str): + Required. The name of the Endpoint requested to serve the + prediction. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + instances (Sequence[google.protobuf.struct_pb2.Value]): + Required. The instances that are the input to the prediction + call. A DeployedModel may have an upper limit on the number + of instances it supports per request, and when it is + exceeded the prediction call errors in case of AutoML + Models, or, in case of customer created Models, the + behaviour is as documented by that Model. The schema of any + single instance may be specified via Endpoint's + DeployedModels' + [Model's][google.cloud.aiplatform.v1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri]. + parameters (google.protobuf.struct_pb2.Value): + The parameters that govern the prediction. The schema of the + parameters may be specified via Endpoint's DeployedModels' + [Model's ][google.cloud.aiplatform.v1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. + """ + + endpoint = proto.Field( + proto.STRING, + number=1, + ) + instances = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=struct_pb2.Value, + ) + parameters = proto.Field( + proto.MESSAGE, + number=3, + message=struct_pb2.Value, + ) + + +class PredictResponse(proto.Message): + r"""Response message for + [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. + + Attributes: + predictions (Sequence[google.protobuf.struct_pb2.Value]): + The predictions that are the output of the predictions call. + The schema of any single prediction may be specified via + Endpoint's DeployedModels' [Model's + ][google.cloud.aiplatform.v1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [prediction_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.prediction_schema_uri]. + deployed_model_id (str): + ID of the Endpoint's DeployedModel that + served this prediction. + """ + + predictions = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=struct_pb2.Value, + ) + deployed_model_id = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool.py new file mode 100644 index 0000000000..15ef6b0616 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool.py @@ -0,0 +1,80 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
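To make the PredictRequest/PredictResponse pair above concrete, here is a hedged sketch of a single online prediction call; the endpoint ID and instance payload are hypothetical, and the real instance schema comes from the deployed Model's ``instance_schema_uri``::

    from google.cloud import aiplatform_v1
    from google.protobuf import struct_pb2

    client = aiplatform_v1.PredictionServiceClient()
    instance = struct_pb2.Value()
    instance.struct_value.update({'feature_a': 1.0, 'feature_b': 'x'})  # hypothetical features
    response = client.predict(
        endpoint='projects/my-project/locations/us-central1/endpoints/123',  # hypothetical
        instances=[instance],
    )
    print(response.deployed_model_id)
    for prediction in response.predictions:
        print(prediction)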
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1',
+    manifest={
+        'SpecialistPool',
+    },
+)
+
+
+class SpecialistPool(proto.Message):
+    r"""SpecialistPool represents customers' own workforce to work on
+    their data labeling jobs. It includes a group of specialist
+    managers who are responsible for managing the labelers in this
+    pool as well as customers' data labeling jobs associated with
+    this pool.
+    Customers create a specialist pool as well as start data
+    labeling jobs on Cloud; managers and labelers work with the jobs
+    using the CrowdCompute console.
+
+    Attributes:
+        name (str):
+            Required. The resource name of the
+            SpecialistPool.
+        display_name (str):
+            Required. The user-defined name of the
+            SpecialistPool. The name can be up to 128
+            characters long and can consist of any UTF-8
+            characters.
+            This field should be unique at the project
+            level.
+        specialist_managers_count (int):
+            Output only. The number of Specialists in
+            this SpecialistPool.
+        specialist_manager_emails (Sequence[str]):
+            The email addresses of the specialists in the
+            SpecialistPool.
+        pending_data_labeling_jobs (Sequence[str]):
+            Output only. The resource name of the pending
+            data labeling jobs.
+    """
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    display_name = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    specialist_managers_count = proto.Field(
+        proto.INT32,
+        number=3,
+    )
+    specialist_manager_emails = proto.RepeatedField(
+        proto.STRING,
+        number=4,
+    )
+    pending_data_labeling_jobs = proto.RepeatedField(
+        proto.STRING,
+        number=5,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool_service.py
new file mode 100644
index 0000000000..b853dc8ece
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool_service.py
@@ -0,0 +1,237 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
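As a sketch of how the SpecialistPool message above is used (all names hypothetical), creation goes through the service's long-running operation::

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.SpecialistPoolServiceClient()
    operation = client.create_specialist_pool(
        parent='projects/my-project/locations/us-central1',  # hypothetical
        specialist_pool=aiplatform_v1.SpecialistPool(
            name='projects/my-project/locations/us-central1/specialistPools/my-pool',  # hypothetical
            display_name='labeling-team',
        ),
    )
    pool = operation.result()  # blocks until the LRO finishes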
+#
+import proto  # type: ignore
+
+from google.cloud.aiplatform_v1.types import operation
+from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool
+from google.protobuf import field_mask_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1',
+    manifest={
+        'CreateSpecialistPoolRequest',
+        'CreateSpecialistPoolOperationMetadata',
+        'GetSpecialistPoolRequest',
+        'ListSpecialistPoolsRequest',
+        'ListSpecialistPoolsResponse',
+        'DeleteSpecialistPoolRequest',
+        'UpdateSpecialistPoolRequest',
+        'UpdateSpecialistPoolOperationMetadata',
+    },
+)
+
+
+class CreateSpecialistPoolRequest(proto.Message):
+    r"""Request message for
+    [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool].
+
+    Attributes:
+        parent (str):
+            Required. The parent Project name for the new
+            SpecialistPool. The form is
+            ``projects/{project}/locations/{location}``.
+        specialist_pool (google.cloud.aiplatform_v1.types.SpecialistPool):
+            Required. The SpecialistPool to create.
+    """
+
+    parent = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    specialist_pool = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message=gca_specialist_pool.SpecialistPool,
+    )
+
+
+class CreateSpecialistPoolOperationMetadata(proto.Message):
+    r"""Runtime operation information for
+    [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool].
+
+    Attributes:
+        generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
+            The operation generic information.
+    """
+
+    generic_metadata = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=operation.GenericOperationMetadata,
+    )
+
+
+class GetSpecialistPoolRequest(proto.Message):
+    r"""Request message for
+    [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.GetSpecialistPool].
+
+    Attributes:
+        name (str):
+            Required. The name of the SpecialistPool resource. The form
+            is
+            ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``.
+    """
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class ListSpecialistPoolsRequest(proto.Message):
+    r"""Request message for
+    [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools].
+
+    Attributes:
+        parent (str):
+            Required. The name of the SpecialistPool's parent resource.
+            Format: ``projects/{project}/locations/{location}``
+        page_size (int):
+            The standard list page size.
+        page_token (str):
+            The standard list page token. Typically obtained by
+            [ListSpecialistPoolsResponse.next_page_token][google.cloud.aiplatform.v1.ListSpecialistPoolsResponse.next_page_token]
+            of the previous
+            [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools]
+            call. Returns the first page if empty.
+        read_mask (google.protobuf.field_mask_pb2.FieldMask):
+            Mask specifying which fields to read. FieldMask represents
+            a set of symbolic field paths.
+    """
+
+    parent = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    page_size = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+    page_token = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    read_mask = proto.Field(
+        proto.MESSAGE,
+        number=4,
+        message=field_mask_pb2.FieldMask,
+    )
+
+
+class ListSpecialistPoolsResponse(proto.Message):
+    r"""Response message for
+    [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools].
+ + Attributes: + specialist_pools (Sequence[google.cloud.aiplatform_v1.types.SpecialistPool]): + A list of SpecialistPools that matches the + specified filter in the request. + next_page_token (str): + The standard List next-page token. + """ + + @property + def raw_page(self): + return self + + specialist_pools = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_specialist_pool.SpecialistPool, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteSpecialistPoolRequest(proto.Message): + r"""Request message for + [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.DeleteSpecialistPool]. + + Attributes: + name (str): + Required. The resource name of the SpecialistPool to delete. + Format: + ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}`` + force (bool): + If set to true, any specialist managers in + this SpecialistPool will also be deleted. + (Otherwise, the request will only work if the + SpecialistPool has no specialist managers.) + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + force = proto.Field( + proto.BOOL, + number=2, + ) + + +class UpdateSpecialistPoolRequest(proto.Message): + r"""Request message for + [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool]. + + Attributes: + specialist_pool (google.cloud.aiplatform_v1.types.SpecialistPool): + Required. The SpecialistPool which replaces + the resource on the server. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the + resource. + """ + + specialist_pool = proto.Field( + proto.MESSAGE, + number=1, + message=gca_specialist_pool.SpecialistPool, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class UpdateSpecialistPoolOperationMetadata(proto.Message): + r"""Runtime operation metadata for + [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool]. + + Attributes: + specialist_pool (str): + Output only. The name of the SpecialistPool to which the + specialists are being added. Format: + ``projects/{project_id}/locations/{location_id}/specialistPools/{specialist_pool}`` + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The operation generic information. + """ + + specialist_pool = proto.Field( + proto.STRING, + number=1, + ) + generic_metadata = proto.Field( + proto.MESSAGE, + number=2, + message=operation.GenericOperationMetadata, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/study.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/study.py new file mode 100644 index 0000000000..6c22549254 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/study.py @@ -0,0 +1,609 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
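Because UpdateSpecialistPool takes both the replacement resource and a FieldMask, only the masked paths are overwritten on the server. A hedged sketch, with a hypothetical pool name::

    from google.cloud import aiplatform_v1
    from google.protobuf import field_mask_pb2

    pool = aiplatform_v1.SpecialistPool(
        name='projects/my-project/locations/us-central1/specialistPools/123',  # hypothetical
        display_name='labeling-team-v2',
    )
    client = aiplatform_v1.SpecialistPoolServiceClient()
    operation = client.update_specialist_pool(
        specialist_pool=pool,
        # Only display_name is replaced; other fields keep their server values.
        update_mask=field_mask_pb2.FieldMask(paths=['display_name']),
    )
    updated = operation.result()  # blocks until the LRO completes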
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'Trial', + 'StudySpec', + 'Measurement', + }, +) + + +class Trial(proto.Message): + r"""A message representing a Trial. A Trial contains a unique set + of Parameters that has been or will be evaluated, along with the + objective metrics got by running the Trial. + + Attributes: + name (str): + Output only. Resource name of the Trial + assigned by the service. + id (str): + Output only. The identifier of the Trial + assigned by the service. + state (google.cloud.aiplatform_v1.types.Trial.State): + Output only. The detailed state of the Trial. + parameters (Sequence[google.cloud.aiplatform_v1.types.Trial.Parameter]): + Output only. The parameters of the Trial. + final_measurement (google.cloud.aiplatform_v1.types.Measurement): + Output only. The final measurement containing + the objective value. + measurements (Sequence[google.cloud.aiplatform_v1.types.Measurement]): + Output only. A list of measurements that are strictly + lexicographically ordered by their induced tuples (steps, + elapsed_duration). These are used for early stopping + computations. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the Trial was started. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the Trial's status changed to + ``SUCCEEDED`` or ``INFEASIBLE``. + client_id (str): + Output only. The identifier of the client that originally + requested this Trial. Each client is identified by a unique + client_id. When a client asks for a suggestion, Vizier will + assign it a Trial. The client should evaluate the Trial, + complete it, and report back to Vizier. If suggestion is + asked again by same client_id before the Trial is completed, + the same Trial will be returned. Multiple clients with + different client_ids can ask for suggestions simultaneously, + each of them will get their own Trial. + infeasible_reason (str): + Output only. A human readable string describing why the + Trial is infeasible. This is set only if Trial state is + ``INFEASIBLE``. + custom_job (str): + Output only. The CustomJob name linked to the + Trial. It's set for a HyperparameterTuningJob's + Trial. + """ + class State(proto.Enum): + r"""Describes a Trial state.""" + STATE_UNSPECIFIED = 0 + REQUESTED = 1 + ACTIVE = 2 + STOPPING = 3 + SUCCEEDED = 4 + INFEASIBLE = 5 + + class Parameter(proto.Message): + r"""A message representing a parameter to be tuned. + Attributes: + parameter_id (str): + Output only. The ID of the parameter. The parameter should + be defined in [StudySpec's + Parameters][google.cloud.aiplatform.v1.StudySpec.parameters]. + value (google.protobuf.struct_pb2.Value): + Output only. The value of the parameter. ``number_value`` + will be set if a parameter defined in StudySpec is in type + 'INTEGER', 'DOUBLE' or 'DISCRETE'. ``string_value`` will be + set if a parameter defined in StudySpec is in type + 'CATEGORICAL'. 
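In this v1 surface, Trial messages typically arrive on a HyperparameterTuningJob rather than being created directly. A sketch of reading back tuning results, assuming the JobService client from this PR and a hypothetical job name::

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.JobServiceClient()
    hp_job = client.get_hyperparameter_tuning_job(
        name='projects/my-project/locations/us-central1/hyperparameterTuningJobs/456',  # hypothetical
    )
    for trial in hp_job.trials:
        if trial.state == aiplatform_v1.Trial.State.SUCCEEDED:
            # Assumes a succeeded trial carries at least one final metric.
            metric = trial.final_measurement.metrics[0]
            params = {p.parameter_id: p.value for p in trial.parameters}
            print(trial.id, metric.metric_id, metric.value, params)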
+ """ + + parameter_id = proto.Field( + proto.STRING, + number=1, + ) + value = proto.Field( + proto.MESSAGE, + number=2, + message=struct_pb2.Value, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + id = proto.Field( + proto.STRING, + number=2, + ) + state = proto.Field( + proto.ENUM, + number=3, + enum=State, + ) + parameters = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=Parameter, + ) + final_measurement = proto.Field( + proto.MESSAGE, + number=5, + message='Measurement', + ) + measurements = proto.RepeatedField( + proto.MESSAGE, + number=6, + message='Measurement', + ) + start_time = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=8, + message=timestamp_pb2.Timestamp, + ) + client_id = proto.Field( + proto.STRING, + number=9, + ) + infeasible_reason = proto.Field( + proto.STRING, + number=10, + ) + custom_job = proto.Field( + proto.STRING, + number=11, + ) + + +class StudySpec(proto.Message): + r"""Represents specification of a Study. + Attributes: + metrics (Sequence[google.cloud.aiplatform_v1.types.StudySpec.MetricSpec]): + Required. Metric specs for the Study. + parameters (Sequence[google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec]): + Required. The set of parameters to tune. + algorithm (google.cloud.aiplatform_v1.types.StudySpec.Algorithm): + The search algorithm specified for the Study. + observation_noise (google.cloud.aiplatform_v1.types.StudySpec.ObservationNoise): + The observation noise level of the study. + Currently only supported by the Vizier service. + Not supported by HyperparamterTuningJob or + TrainingPipeline. + measurement_selection_type (google.cloud.aiplatform_v1.types.StudySpec.MeasurementSelectionType): + Describe which measurement selection type + will be used + """ + class Algorithm(proto.Enum): + r"""The available search algorithms for the Study.""" + ALGORITHM_UNSPECIFIED = 0 + GRID_SEARCH = 2 + RANDOM_SEARCH = 3 + + class ObservationNoise(proto.Enum): + r"""Describes the noise level of the repeated observations. + "Noisy" means that the repeated observations with the same Trial + parameters may lead to different metric evaluations. + """ + OBSERVATION_NOISE_UNSPECIFIED = 0 + LOW = 1 + HIGH = 2 + + class MeasurementSelectionType(proto.Enum): + r"""This indicates which measurement to use if/when the service + automatically selects the final measurement from previously reported + intermediate measurements. Choose this based on two considerations: + A) Do you expect your measurements to monotonically improve? If so, + choose LAST_MEASUREMENT. On the other hand, if you're in a situation + where your system can "over-train" and you expect the performance to + get better for a while but then start declining, choose + BEST_MEASUREMENT. B) Are your measurements significantly noisy + and/or irreproducible? If so, BEST_MEASUREMENT will tend to be + over-optimistic, and it may be better to choose LAST_MEASUREMENT. If + both or neither of (A) and (B) apply, it doesn't matter which + selection type is chosen. + """ + MEASUREMENT_SELECTION_TYPE_UNSPECIFIED = 0 + LAST_MEASUREMENT = 1 + BEST_MEASUREMENT = 2 + + class MetricSpec(proto.Message): + r"""Represents a metric to optimize. + Attributes: + metric_id (str): + Required. The ID of the metric. Must not + contain whitespaces and must be unique amongst + all MetricSpecs. + goal (google.cloud.aiplatform_v1.types.StudySpec.MetricSpec.GoalType): + Required. The optimization goal of the + metric. 
+ """ + class GoalType(proto.Enum): + r"""The available types of optimization goals.""" + GOAL_TYPE_UNSPECIFIED = 0 + MAXIMIZE = 1 + MINIMIZE = 2 + + metric_id = proto.Field( + proto.STRING, + number=1, + ) + goal = proto.Field( + proto.ENUM, + number=2, + enum='StudySpec.MetricSpec.GoalType', + ) + + class ParameterSpec(proto.Message): + r"""Represents a single parameter to optimize. + Attributes: + double_value_spec (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.DoubleValueSpec): + The value spec for a 'DOUBLE' parameter. + integer_value_spec (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.IntegerValueSpec): + The value spec for an 'INTEGER' parameter. + categorical_value_spec (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.CategoricalValueSpec): + The value spec for a 'CATEGORICAL' parameter. + discrete_value_spec (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.DiscreteValueSpec): + The value spec for a 'DISCRETE' parameter. + parameter_id (str): + Required. The ID of the parameter. Must not + contain whitespaces and must be unique amongst + all ParameterSpecs. + scale_type (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.ScaleType): + How the parameter should be scaled. Leave unset for + ``CATEGORICAL`` parameters. + conditional_parameter_specs (Sequence[google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.ConditionalParameterSpec]): + A conditional parameter node is active if the parameter's + value matches the conditional node's parent_value_condition. + + If two items in conditional_parameter_specs have the same + name, they must have disjoint parent_value_condition. + """ + class ScaleType(proto.Enum): + r"""The type of scaling that should be applied to this parameter.""" + SCALE_TYPE_UNSPECIFIED = 0 + UNIT_LINEAR_SCALE = 1 + UNIT_LOG_SCALE = 2 + UNIT_REVERSE_LOG_SCALE = 3 + + class DoubleValueSpec(proto.Message): + r"""Value specification for a parameter in ``DOUBLE`` type. + Attributes: + min_value (float): + Required. Inclusive minimum value of the + parameter. + max_value (float): + Required. Inclusive maximum value of the + parameter. + default_value (float): + A default value for a ``DOUBLE`` parameter that is assumed + to be a relatively good starting point. Unset value signals + that there is no offered starting point. + + Currently only supported by the Vizier service. Not + supported by HyperparamterTuningJob or TrainingPipeline. + """ + + min_value = proto.Field( + proto.DOUBLE, + number=1, + ) + max_value = proto.Field( + proto.DOUBLE, + number=2, + ) + default_value = proto.Field( + proto.DOUBLE, + number=4, + optional=True, + ) + + class IntegerValueSpec(proto.Message): + r"""Value specification for a parameter in ``INTEGER`` type. + Attributes: + min_value (int): + Required. Inclusive minimum value of the + parameter. + max_value (int): + Required. Inclusive maximum value of the + parameter. + default_value (int): + A default value for an ``INTEGER`` parameter that is assumed + to be a relatively good starting point. Unset value signals + that there is no offered starting point. + + Currently only supported by the Vizier service. Not + supported by HyperparamterTuningJob or TrainingPipeline. + """ + + min_value = proto.Field( + proto.INT64, + number=1, + ) + max_value = proto.Field( + proto.INT64, + number=2, + ) + default_value = proto.Field( + proto.INT64, + number=4, + optional=True, + ) + + class CategoricalValueSpec(proto.Message): + r"""Value specification for a parameter in ``CATEGORICAL`` type. 
+ Attributes: + values (Sequence[str]): + Required. The list of possible categories. + default_value (str): + A default value for a ``CATEGORICAL`` parameter that is + assumed to be a relatively good starting point. Unset value + signals that there is no offered starting point. + + Currently only supported by the Vizier service. Not + supported by HyperparamterTuningJob or TrainingPipeline. + """ + + values = proto.RepeatedField( + proto.STRING, + number=1, + ) + default_value = proto.Field( + proto.STRING, + number=3, + optional=True, + ) + + class DiscreteValueSpec(proto.Message): + r"""Value specification for a parameter in ``DISCRETE`` type. + Attributes: + values (Sequence[float]): + Required. A list of possible values. + The list should be in increasing order and at + least 1e-10 apart. For instance, this parameter + might have possible settings of 1.5, 2.5, and + 4.0. This list should not contain more than + 1,000 values. + default_value (float): + A default value for a ``DISCRETE`` parameter that is assumed + to be a relatively good starting point. Unset value signals + that there is no offered starting point. It automatically + rounds to the nearest feasible discrete point. + + Currently only supported by the Vizier service. Not + supported by HyperparamterTuningJob or TrainingPipeline. + """ + + values = proto.RepeatedField( + proto.DOUBLE, + number=1, + ) + default_value = proto.Field( + proto.DOUBLE, + number=3, + optional=True, + ) + + class ConditionalParameterSpec(proto.Message): + r"""Represents a parameter spec with condition from its parent + parameter. + + Attributes: + parent_discrete_values (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.ConditionalParameterSpec.DiscreteValueCondition): + The spec for matching values from a parent parameter of + ``DISCRETE`` type. + parent_int_values (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.ConditionalParameterSpec.IntValueCondition): + The spec for matching values from a parent parameter of + ``INTEGER`` type. + parent_categorical_values (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.ConditionalParameterSpec.CategoricalValueCondition): + The spec for matching values from a parent parameter of + ``CATEGORICAL`` type. + parameter_spec (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec): + Required. The spec for a conditional + parameter. + """ + + class DiscreteValueCondition(proto.Message): + r"""Represents the spec to match discrete values from parent + parameter. + + Attributes: + values (Sequence[float]): + Required. Matches values of the parent parameter of + 'DISCRETE' type. All values must exist in + ``discrete_value_spec`` of parent parameter. + + The Epsilon of the value matching is 1e-10. + """ + + values = proto.RepeatedField( + proto.DOUBLE, + number=1, + ) + + class IntValueCondition(proto.Message): + r"""Represents the spec to match integer values from parent + parameter. + + Attributes: + values (Sequence[int]): + Required. Matches values of the parent parameter of + 'INTEGER' type. All values must lie in + ``integer_value_spec`` of parent parameter. + """ + + values = proto.RepeatedField( + proto.INT64, + number=1, + ) + + class CategoricalValueCondition(proto.Message): + r"""Represents the spec to match categorical values from parent + parameter. + + Attributes: + values (Sequence[str]): + Required. Matches values of the parent parameter of + 'CATEGORICAL' type. All values must exist in + ``categorical_value_spec`` of parent parameter. 
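Pulling the pieces above together, a StudySpec pairs MetricSpecs with ParameterSpecs. A minimal sketch tuning one log-scaled double parameter; the metric and parameter IDs are hypothetical::

    from google.cloud import aiplatform_v1

    study_spec = aiplatform_v1.StudySpec(
        metrics=[
            aiplatform_v1.StudySpec.MetricSpec(
                metric_id='accuracy',  # hypothetical
                goal=aiplatform_v1.StudySpec.MetricSpec.GoalType.MAXIMIZE,
            ),
        ],
        parameters=[
            aiplatform_v1.StudySpec.ParameterSpec(
                parameter_id='learning_rate',  # hypothetical
                double_value_spec=aiplatform_v1.StudySpec.ParameterSpec.DoubleValueSpec(
                    min_value=1e-4,
                    max_value=1e-1,
                ),
                # Log scaling suits a rate that varies over orders of magnitude.
                scale_type=aiplatform_v1.StudySpec.ParameterSpec.ScaleType.UNIT_LOG_SCALE,
            ),
        ],
        algorithm=aiplatform_v1.StudySpec.Algorithm.RANDOM_SEARCH,
    )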
+ """ + + values = proto.RepeatedField( + proto.STRING, + number=1, + ) + + parent_discrete_values = proto.Field( + proto.MESSAGE, + number=2, + oneof='parent_value_condition', + message='StudySpec.ParameterSpec.ConditionalParameterSpec.DiscreteValueCondition', + ) + parent_int_values = proto.Field( + proto.MESSAGE, + number=3, + oneof='parent_value_condition', + message='StudySpec.ParameterSpec.ConditionalParameterSpec.IntValueCondition', + ) + parent_categorical_values = proto.Field( + proto.MESSAGE, + number=4, + oneof='parent_value_condition', + message='StudySpec.ParameterSpec.ConditionalParameterSpec.CategoricalValueCondition', + ) + parameter_spec = proto.Field( + proto.MESSAGE, + number=1, + message='StudySpec.ParameterSpec', + ) + + double_value_spec = proto.Field( + proto.MESSAGE, + number=2, + oneof='parameter_value_spec', + message='StudySpec.ParameterSpec.DoubleValueSpec', + ) + integer_value_spec = proto.Field( + proto.MESSAGE, + number=3, + oneof='parameter_value_spec', + message='StudySpec.ParameterSpec.IntegerValueSpec', + ) + categorical_value_spec = proto.Field( + proto.MESSAGE, + number=4, + oneof='parameter_value_spec', + message='StudySpec.ParameterSpec.CategoricalValueSpec', + ) + discrete_value_spec = proto.Field( + proto.MESSAGE, + number=5, + oneof='parameter_value_spec', + message='StudySpec.ParameterSpec.DiscreteValueSpec', + ) + parameter_id = proto.Field( + proto.STRING, + number=1, + ) + scale_type = proto.Field( + proto.ENUM, + number=6, + enum='StudySpec.ParameterSpec.ScaleType', + ) + conditional_parameter_specs = proto.RepeatedField( + proto.MESSAGE, + number=10, + message='StudySpec.ParameterSpec.ConditionalParameterSpec', + ) + + metrics = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=MetricSpec, + ) + parameters = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=ParameterSpec, + ) + algorithm = proto.Field( + proto.ENUM, + number=3, + enum=Algorithm, + ) + observation_noise = proto.Field( + proto.ENUM, + number=6, + enum=ObservationNoise, + ) + measurement_selection_type = proto.Field( + proto.ENUM, + number=7, + enum=MeasurementSelectionType, + ) + + +class Measurement(proto.Message): + r"""A message representing a Measurement of a Trial. A + Measurement contains the Metrics got by executing a Trial using + suggested hyperparameter values. + + Attributes: + elapsed_duration (google.protobuf.duration_pb2.Duration): + Output only. Time that the Trial has been + running at the point of this Measurement. + step_count (int): + Output only. The number of steps the machine + learning model has been trained for. Must be + non-negative. + metrics (Sequence[google.cloud.aiplatform_v1.types.Measurement.Metric]): + Output only. A list of metrics got by + evaluating the objective functions using + suggested Parameter values. + """ + + class Metric(proto.Message): + r"""A message representing a metric in the measurement. + Attributes: + metric_id (str): + Output only. The ID of the Metric. The Metric should be + defined in [StudySpec's + Metrics][google.cloud.aiplatform.v1.StudySpec.metrics]. + value (float): + Output only. The value for this metric. 
+ """ + + metric_id = proto.Field( + proto.STRING, + number=1, + ) + value = proto.Field( + proto.DOUBLE, + number=2, + ) + + elapsed_duration = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + step_count = proto.Field( + proto.INT64, + number=2, + ) + metrics = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=Metric, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/training_pipeline.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/training_pipeline.py new file mode 100644 index 0000000000..a48ebdbea6 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/training_pipeline.py @@ -0,0 +1,547 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1.types import io +from google.cloud.aiplatform_v1.types import model +from google.cloud.aiplatform_v1.types import pipeline_state +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'TrainingPipeline', + 'InputDataConfig', + 'FractionSplit', + 'FilterSplit', + 'PredefinedSplit', + 'TimestampSplit', + }, +) + + +class TrainingPipeline(proto.Message): + r"""The TrainingPipeline orchestrates tasks associated with training a + Model. It always executes the training task, and optionally may also + export data from Vertex AI's Dataset which becomes the training + input, [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] + the Model to Vertex AI, and evaluate the Model. + + Attributes: + name (str): + Output only. Resource name of the + TrainingPipeline. + display_name (str): + Required. The user-defined name of this + TrainingPipeline. + input_data_config (google.cloud.aiplatform_v1.types.InputDataConfig): + Specifies Vertex AI owned input data that may be used for + training the Model. The TrainingPipeline's + [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition] + should make clear whether this config is used and if there + are any special requirements on how it should be filled. If + nothing about this config is mentioned in the + [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition], + then it should be assumed that the TrainingPipeline does not + depend on this configuration. + training_task_definition (str): + Required. A Google Cloud Storage path to the + YAML file that defines the training task which + is responsible for producing the model artifact, + and may also include additional auxiliary work. + The definition files that can be used here are + found in gs://google-cloud- + aiplatform/schema/trainingjob/definition/. 
Note: + The URI given on output will be immutable and + probably different, including the URI scheme, + than the one given on input. The output URI will + point to a location where the user only has a + read access. + training_task_inputs (google.protobuf.struct_pb2.Value): + Required. The training task's parameter(s), as specified in + the + [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition]'s + ``inputs``. + training_task_metadata (google.protobuf.struct_pb2.Value): + Output only. The metadata information as specified in the + [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition]'s + ``metadata``. This metadata is an auxiliary runtime and + final information about the training task. While the + pipeline is running this information is populated only at a + best effort basis. Only present if the pipeline's + [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition] + contains ``metadata`` object. + model_to_upload (google.cloud.aiplatform_v1.types.Model): + Describes the Model that may be uploaded (via + [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel]) + by this TrainingPipeline. The TrainingPipeline's + [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition] + should make clear whether this Model description should be + populated, and if there are any special requirements + regarding how it should be filled. If nothing is mentioned + in the + [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition], + then it should be assumed that this field should not be + filled and the training task either uploads the Model + without a need of this information, or that training task + does not support uploading a Model as part of the pipeline. + When the Pipeline's state becomes + ``PIPELINE_STATE_SUCCEEDED`` and the trained Model had been + uploaded into Vertex AI, then the model_to_upload's resource + [name][google.cloud.aiplatform.v1.Model.name] is populated. + The Model is always uploaded into the Project and Location + in which this pipeline is. + state (google.cloud.aiplatform_v1.types.PipelineState): + Output only. The detailed state of the + pipeline. + error (google.rpc.status_pb2.Status): + Output only. Only populated when the pipeline's state is + ``PIPELINE_STATE_FAILED`` or ``PIPELINE_STATE_CANCELLED``. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the TrainingPipeline + was created. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the TrainingPipeline for the first + time entered the ``PIPELINE_STATE_RUNNING`` state. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the TrainingPipeline entered any of + the following states: ``PIPELINE_STATE_SUCCEEDED``, + ``PIPELINE_STATE_FAILED``, ``PIPELINE_STATE_CANCELLED``. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the TrainingPipeline + was most recently updated. + labels (Sequence[google.cloud.aiplatform_v1.types.TrainingPipeline.LabelsEntry]): + The labels with user-defined metadata to + organize TrainingPipelines. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. 
+ See https://goo.gl/xmQnxf for more information + and examples of labels. + encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): + Customer-managed encryption key spec for a TrainingPipeline. + If set, this TrainingPipeline will be secured by this key. + + Note: Model trained by this TrainingPipeline is also secured + by this key if + [model_to_upload][google.cloud.aiplatform.v1.TrainingPipeline.encryption_spec] + is not set separately. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + input_data_config = proto.Field( + proto.MESSAGE, + number=3, + message='InputDataConfig', + ) + training_task_definition = proto.Field( + proto.STRING, + number=4, + ) + training_task_inputs = proto.Field( + proto.MESSAGE, + number=5, + message=struct_pb2.Value, + ) + training_task_metadata = proto.Field( + proto.MESSAGE, + number=6, + message=struct_pb2.Value, + ) + model_to_upload = proto.Field( + proto.MESSAGE, + number=7, + message=model.Model, + ) + state = proto.Field( + proto.ENUM, + number=9, + enum=pipeline_state.PipelineState, + ) + error = proto.Field( + proto.MESSAGE, + number=10, + message=status_pb2.Status, + ) + create_time = proto.Field( + proto.MESSAGE, + number=11, + message=timestamp_pb2.Timestamp, + ) + start_time = proto.Field( + proto.MESSAGE, + number=12, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=13, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=14, + message=timestamp_pb2.Timestamp, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=15, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=18, + message=gca_encryption_spec.EncryptionSpec, + ) + + +class InputDataConfig(proto.Message): + r"""Specifies Vertex AI owned input data to be used for training, + and possibly evaluating, the Model. + + Attributes: + fraction_split (google.cloud.aiplatform_v1.types.FractionSplit): + Split based on fractions defining the size of + each set. + filter_split (google.cloud.aiplatform_v1.types.FilterSplit): + Split based on the provided filters for each + set. + predefined_split (google.cloud.aiplatform_v1.types.PredefinedSplit): + Supported only for tabular Datasets. + Split based on a predefined key. + timestamp_split (google.cloud.aiplatform_v1.types.TimestampSplit): + Supported only for tabular Datasets. + Split based on the timestamp of the input data + pieces. + gcs_destination (google.cloud.aiplatform_v1.types.GcsDestination): + The Cloud Storage location where the training data is to be + written to. In the given directory a new directory is + created with name: + ``dataset-<dataset-id>-<annotation-type>-<timestamp-of-training-call>`` + where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 + format. All training input data is written into that + directory. + + The Vertex AI environment variables representing Cloud + Storage data URIs are represented in the Cloud Storage + wildcard format to support sharded data. e.g.: + "gs://.../training-*.jsonl" + + - AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for + tabular data + + - AIP_TRAINING_DATA_URI = + "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/training-*.${AIP_DATA_FORMAT}" + + - AIP_VALIDATION_DATA_URI = + "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/validation-*.${AIP_DATA_FORMAT}" + + - AIP_TEST_DATA_URI = + "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/test-*.${AIP_DATA_FORMAT}".
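The AIP_* variables listed above are what a custom training container actually sees at runtime; a minimal hedged sketch of reading them, assuming the wildcard URI format shown:

# Hedged sketch: inside a custom training container the sharded data
# URIs described above arrive as environment variables.
import os

data_format = os.environ["AIP_DATA_FORMAT"]            # "jsonl" or "csv"
training_uri = os.environ["AIP_TRAINING_DATA_URI"]     # e.g. "gs://.../training-*.jsonl"
validation_uri = os.environ["AIP_VALIDATION_DATA_URI"]
test_uri = os.environ["AIP_TEST_DATA_URI"]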
+ bigquery_destination (google.cloud.aiplatform_v1.types.BigQueryDestination): + Only applicable to custom training with tabular Dataset with + BigQuery source. + + The BigQuery project location where the training data is to + be written to. In the given project a new dataset is created + with name + ``dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>`` + where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All + training input data is written into that dataset. In the + dataset three tables are created, ``training``, + ``validation`` and ``test``. + + - AIP_DATA_FORMAT = "bigquery". + + - AIP_TRAINING_DATA_URI = + "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.training" + + - AIP_VALIDATION_DATA_URI = + "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.validation" + + - AIP_TEST_DATA_URI = + "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.test". + dataset_id (str): + Required. The ID of the Dataset in the same Project and + Location which data will be used to train the Model. The + Dataset must use schema compatible with Model being trained, + and what is compatible should be described in the used + TrainingPipeline's [training_task_definition] + [google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition]. + For tabular Datasets, all their data is exported to + training, to pick and choose from. + annotations_filter (str): + Applicable only to Datasets that have DataItems and + Annotations. + + A filter on Annotations of the Dataset. Only Annotations + that both match this filter and belong to DataItems not + ignored by the split method are used in respectively + training, validation or test role, depending on the role of + the DataItem they are on (for the auto-assigned that role is + decided by Vertex AI). A filter with same syntax as the one + used in + [ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations] + may be used, but note here it filters across all Annotations + of the Dataset, and not just within a single DataItem. + annotation_schema_uri (str): + Applicable only to custom training with Datasets that have + DataItems and Annotations. + + Cloud Storage URI that points to a YAML file describing the + annotation schema. The schema is defined as an OpenAPI 3.0.2 + `Schema Object <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#schema-object>`__. + The schema files that can be used here are found in + gs://google-cloud-aiplatform/schema/dataset/annotation/ , + note that the chosen schema must be consistent with + [metadata][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] + of the Dataset specified by + [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id]. + + Only Annotations that both match this schema and belong to + DataItems not ignored by the split method are used in + respectively training, validation or test role, depending on + the role of the DataItem they are on. + + When used in conjunction with + [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter], + the Annotations used for training are filtered by both + [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter] + and + [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri].
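Tying the destination fields above together: a hedged sketch, not part of the generated file, of an InputDataConfig for custom tabular training with a BigQuery destination. The project ID is a placeholder and the bq:// URI form is an assumption based on the BigQueryDestination type.

# Hedged sketch only; IDs below are placeholders.
from google.cloud import aiplatform_v1

input_config = aiplatform_v1.InputDataConfig(
    dataset_id="1234567890",
    bigquery_destination=aiplatform_v1.BigQueryDestination(
        output_uri="bq://my-project",  # assumed "bq://<project>" form
    ),
)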
+ """ + + fraction_split = proto.Field( + proto.MESSAGE, + number=2, + oneof='split', + message='FractionSplit', + ) + filter_split = proto.Field( + proto.MESSAGE, + number=3, + oneof='split', + message='FilterSplit', + ) + predefined_split = proto.Field( + proto.MESSAGE, + number=4, + oneof='split', + message='PredefinedSplit', + ) + timestamp_split = proto.Field( + proto.MESSAGE, + number=5, + oneof='split', + message='TimestampSplit', + ) + gcs_destination = proto.Field( + proto.MESSAGE, + number=8, + oneof='destination', + message=io.GcsDestination, + ) + bigquery_destination = proto.Field( + proto.MESSAGE, + number=10, + oneof='destination', + message=io.BigQueryDestination, + ) + dataset_id = proto.Field( + proto.STRING, + number=1, + ) + annotations_filter = proto.Field( + proto.STRING, + number=6, + ) + annotation_schema_uri = proto.Field( + proto.STRING, + number=9, + ) + + +class FractionSplit(proto.Message): + r"""Assigns the input data to training, validation, and test sets as per + the given fractions. Any of ``training_fraction``, + ``validation_fraction`` and ``test_fraction`` may optionally be + provided, they must sum to up to 1. If the provided ones sum to less + than 1, the remainder is assigned to sets as decided by Vertex AI. + If none of the fractions are set, by default roughly 80% of data is + used for training, 10% for validation, and 10% for test. + + Attributes: + training_fraction (float): + The fraction of the input data that is to be + used to train the Model. + validation_fraction (float): + The fraction of the input data that is to be + used to validate the Model. + test_fraction (float): + The fraction of the input data that is to be + used to evaluate the Model. + """ + + training_fraction = proto.Field( + proto.DOUBLE, + number=1, + ) + validation_fraction = proto.Field( + proto.DOUBLE, + number=2, + ) + test_fraction = proto.Field( + proto.DOUBLE, + number=3, + ) + + +class FilterSplit(proto.Message): + r"""Assigns input data to training, validation, and test sets + based on the given filters, data pieces not matched by any + filter are ignored. Currently only supported for Datasets + containing DataItems. + If any of the filters in this message are to match nothing, then + they can be set as '-' (the minus sign). + + Supported only for unstructured Datasets. + + Attributes: + training_filter (str): + Required. A filter on DataItems of the Dataset. DataItems + that match this filter are used to train the Model. A filter + with same syntax as the one used in + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems] + may be used. If a single DataItem is matched by more than + one of the FilterSplit filters, then it is assigned to the + first set that applies to it in the training, validation, + test order. + validation_filter (str): + Required. A filter on DataItems of the Dataset. DataItems + that match this filter are used to validate the Model. A + filter with same syntax as the one used in + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems] + may be used. If a single DataItem is matched by more than + one of the FilterSplit filters, then it is assigned to the + first set that applies to it in the training, validation, + test order. + test_filter (str): + Required. A filter on DataItems of the Dataset. DataItems + that match this filter are used to test the Model. 
A filter + with same syntax as the one used in + [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems] + may be used. If a single DataItem is matched by more than + one of the FilterSplit filters, then it is assigned to the + first set that applies to it in the training, validation, + test order. + """ + + training_filter = proto.Field( + proto.STRING, + number=1, + ) + validation_filter = proto.Field( + proto.STRING, + number=2, + ) + test_filter = proto.Field( + proto.STRING, + number=3, + ) + + +class PredefinedSplit(proto.Message): + r"""Assigns input data to training, validation, and test sets + based on the value of a provided key. + + Supported only for tabular Datasets. + + Attributes: + key (str): + Required. The key is a name of one of the Dataset's data + columns. The value of the key (either the label's value or + value in the column) must be one of {``training``, + ``validation``, ``test``}, and it defines to which set the + given piece of data is assigned. If for a piece of data the + key is not present or has an invalid value, that piece is + ignored by the pipeline. + """ + + key = proto.Field( + proto.STRING, + number=1, + ) + + +class TimestampSplit(proto.Message): + r"""Assigns input data to training, validation, and test sets + based on a provided timestamps. The youngest data pieces are + assigned to training set, next to validation set, and the oldest + to the test set. + Supported only for tabular Datasets. + + Attributes: + training_fraction (float): + The fraction of the input data that is to be + used to train the Model. + validation_fraction (float): + The fraction of the input data that is to be + used to validate the Model. + test_fraction (float): + The fraction of the input data that is to be + used to evaluate the Model. + key (str): + Required. The key is a name of one of the Dataset's data + columns. The values of the key (the values in the column) + must be in RFC 3339 ``date-time`` format, where + ``time-offset`` = ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z). If + for a piece of data the key is not present or has an invalid + value, that piece is ignored by the pipeline. + """ + + training_fraction = proto.Field( + proto.DOUBLE, + number=1, + ) + validation_fraction = proto.Field( + proto.DOUBLE, + number=2, + ) + test_fraction = proto.Field( + proto.DOUBLE, + number=3, + ) + key = proto.Field( + proto.STRING, + number=4, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/user_action_reference.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/user_action_reference.py new file mode 100644 index 0000000000..ad8683bb9b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/user_action_reference.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'UserActionReference', + }, +) + + +class UserActionReference(proto.Message): + r"""References an API call. It contains more information about + long running operation and Jobs that are triggered by the API + call. + + Attributes: + operation (str): + For API calls that return a long running + operation. Resource name of the long running + operation. Format: + 'projects/{project}/locations/{location}/operations/{operation}' + data_labeling_job (str): + For API calls that start a LabelingJob. Resource name of the + LabelingJob. Format: + 'projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}' + method (str): + The method name of the API RPC call. For + example, + "/google.cloud.aiplatform.{apiVersion}.DatasetService.CreateDataset". + """ + + operation = proto.Field( + proto.STRING, + number=1, + oneof='reference', + ) + data_labeling_job = proto.Field( + proto.STRING, + number=2, + oneof='reference', + ) + method = proto.Field( + proto.STRING, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/value.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/value.py new file mode 100644 index 0000000000..48c1092c1e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/value.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1', + manifest={ + 'Value', + }, +) + + +class Value(proto.Message): + r"""Value is the value of the field. + Attributes: + int_value (int): + An integer value. + double_value (float): + A double value. + string_value (str): + A string value. + """ + + int_value = proto.Field( + proto.INT64, + number=1, + oneof='value', + ) + double_value = proto.Field( + proto.DOUBLE, + number=2, + oneof='value', + ) + string_value = proto.Field( + proto.STRING, + number=3, + oneof='value', + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/mypy.ini b/owl-bot-staging/v1/mypy.ini new file mode 100644 index 0000000000..4505b48543 --- /dev/null +++ b/owl-bot-staging/v1/mypy.ini @@ -0,0 +1,3 @@ +[mypy] +python_version = 3.6 +namespace_packages = True diff --git a/owl-bot-staging/v1/noxfile.py b/owl-bot-staging/v1/noxfile.py new file mode 100644 index 0000000000..d325e6cfda --- /dev/null +++ b/owl-bot-staging/v1/noxfile.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import pathlib +import shutil +import subprocess +import sys + + +import nox # type: ignore + +CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() + +LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" +PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") + + +nox.options.sessions = [ + "unit", + "cover", + "mypy", + "check_lower_bounds", + # exclude update_lower_bounds from default + "docs", +] + +@nox.session(python=['3.6', '3.7', '3.8', '3.9']) +def unit(session): + """Run the unit test suite.""" + + session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio') + session.install('-e', '.') + + session.run( + 'py.test', + '--quiet', + '--cov=google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/', + '--cov-config=.coveragerc', + '--cov-report=term', + '--cov-report=html', + os.path.join('tests', 'unit', ''.join(session.posargs)) + ) + + +@nox.session(python='3.7') +def cover(session): + """Run the final coverage report. + This outputs the coverage report aggregating coverage from the unit + test runs (not system test runs), and then erases coverage data. + """ + session.install("coverage", "pytest-cov") + session.run("coverage", "report", "--show-missing", "--fail-under=100") + + session.run("coverage", "erase") + + +@nox.session(python=['3.6', '3.7']) +def mypy(session): + """Run the type checker.""" + session.install('mypy', 'types-pkg_resources') + session.install('.') + session.run( + 'mypy', + '--explicit-package-bases', + 'google', + ) + + +@nox.session +def update_lower_bounds(session): + """Update lower bounds in constraints.txt to match setup.py""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'update', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + + +@nox.session +def check_lower_bounds(session): + """Check lower bounds in setup.py are reflected in constraints file""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'check', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + +@nox.session(python='3.6') +def docs(session): + """Build the docs for this library.""" + + session.install("-e", ".") + session.install("sphinx<3.0.0", "alabaster", "recommonmark") + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-W", # warnings as errors + "-T", # show full traceback on exception + "-N", # no colors + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) diff --git a/owl-bot-staging/v1/schema/predict/instance/.coveragerc b/owl-bot-staging/v1/schema/predict/instance/.coveragerc new file mode 100644 index 0000000000..8bd7ec75f6 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/.coveragerc @@ -0,0 +1,17 @@ +[run] +branch = True + +[report] +show_missing = True +omit =
google/cloud/aiplatform/v1/schema/predict/instance/__init__.py +exclude_lines = + # Re-enable the standard pragma + pragma: NO COVER + # Ignore debug-only repr + def __repr__ + # Ignore pkg_resources exceptions. + # This is added at the module level as a safeguard for if someone + # generates the code and tries to run it without pip installing. This + # makes it virtually impossible to test properly. + except pkg_resources.DistributionNotFound diff --git a/owl-bot-staging/v1/schema/predict/instance/MANIFEST.in b/owl-bot-staging/v1/schema/predict/instance/MANIFEST.in new file mode 100644 index 0000000000..05ea9a66cb --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/MANIFEST.in @@ -0,0 +1,2 @@ +recursive-include google/cloud/aiplatform/v1/schema/predict/instance *.py +recursive-include google/cloud/aiplatform/v1/schema/predict/instance_v1 *.py diff --git a/owl-bot-staging/v1/schema/predict/instance/README.rst b/owl-bot-staging/v1/schema/predict/instance/README.rst new file mode 100644 index 0000000000..5e0175e9e4 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/README.rst @@ -0,0 +1,49 @@ +Python Client for Google Cloud Aiplatform V1 Schema Predict Instance API +================================================= + +Quick Start +----------- + +In order to use this library, you first need to go through the following steps: + +1. `Select or create a Cloud Platform project.`_ +2. `Enable billing for your project.`_ +3. Enable the Google Cloud Aiplatform V1 Schema Predict Instance API. +4. `Setup Authentication.`_ + +.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project +.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project +.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html + +Installation +~~~~~~~~~~~~ + +Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to +create isolated Python environments. The basic problem it addresses is one of +dependencies and versions, and indirectly permissions. + +With `virtualenv`_, it's possible to install this library without needing system +install permissions, and without clashing with the installed system +dependencies. + +.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ + + +Mac/Linux +^^^^^^^^^ + +.. code-block:: console + + python3 -m venv <your-env> + source <your-env>/bin/activate + <your-env>/bin/pip install /path/to/library + + +Windows +^^^^^^^ + +.. code-block:: console + + python3 -m venv <your-env> + <your-env>\Scripts\activate + <your-env>\Scripts\pip.exe install \path\to\library diff --git a/owl-bot-staging/v1/schema/predict/instance/docs/conf.py b/owl-bot-staging/v1/schema/predict/instance/docs/conf.py new file mode 100644 index 0000000000..a12fb006b9 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/docs/conf.py @@ -0,0 +1,376 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+# +# +# google-cloud-aiplatform-v1-schema-predict-instance documentation build configuration file +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys +import os +import shlex + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.insert(0, os.path.abspath("..")) + +__version__ = "0.1.0" + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +needs_sphinx = "1.6.3" + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.intersphinx", + "sphinx.ext.coverage", + "sphinx.ext.napoleon", + "sphinx.ext.todo", + "sphinx.ext.viewcode", +] + +# autodoc/autosummary flags +autoclass_content = "both" +autodoc_default_flags = ["members"] +autosummary_generate = True + + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# Allow markdown includes (so releases.md can include CHANGLEOG.md) +# http://www.sphinx-doc.org/en/master/markdown.html +source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +source_suffix = [".rst", ".md"] + +# The encoding of source files. +# source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = "index" + +# General information about the project. +project = u"google-cloud-aiplatform-v1-schema-predict-instance" +copyright = u"2020, Google, LLC" +author = u"Google APIs" # TODO: autogenerate this bit + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The full version, including alpha/beta/rc tags. +release = __version__ +# The short X.Y version. +version = ".".join(release.split(".")[0:2]) + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ["_build"] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). 
+# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = "sphinx" + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = "alabaster" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +html_theme_options = { + "description": "Google Cloud Aiplatform V1 Schema Predict Client Libraries for Python", + "github_user": "googleapis", + "github_repo": "google-cloud-python", + "github_banner": True, + "font_family": "'Roboto', Georgia, sans", + "head_font_family": "'Roboto', Georgia, serif", + "code_font_family": "'Roboto Mono', 'Consolas', monospace", +} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +# html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 
+# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' +# html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# Now only 'ja' uses this config value +# html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +# html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. +htmlhelp_basename = "google-cloud-aiplatform-v1-schema-predict-instance-doc" + +# -- Options for warnings ------------------------------------------------------ + + +suppress_warnings = [ + # Temporarily suppress this to avoid "more than one target found for + # cross-reference" warning, which are intractable for us to avoid while in + # a mono-repo. + # See https://github.com/sphinx-doc/sphinx/blob + # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 + "ref.python" +] + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + # 'preamble': '', + # Latex figure (float) alignment + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ( + master_doc, + "google-cloud-aiplatform-v1-schema-predict-instance.tex", + u"google-cloud-aiplatform-v1-schema-predict-instance Documentation", + author, + "manual", + ) +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ( + master_doc, + "google-cloud-aiplatform-v1-schema-predict-instance", + u"Google Cloud Aiplatform V1 Schema Predict Instance Documentation", + [author], + 1, + ) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. 
List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + master_doc, + "google-cloud-aiplatform-v1-schema-predict-instance", + u"google-cloud-aiplatform-v1-schema-predict-instance Documentation", + author, + "google-cloud-aiplatform-v1-schema-predict-instance", + "GAPIC library for Google Cloud Aiplatform V1 Schema Predict Instance API", + "APIs", + ) +] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# texinfo_no_detailmenu = False + + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = { + "python": ("http://python.readthedocs.org/en/latest/", None), + "gax": ("https://gax-python.readthedocs.org/en/latest/", None), + "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), + "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), + "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), + "grpc": ("https://grpc.io/grpc/python/", None), + "requests": ("http://requests.kennethreitz.org/en/stable/", None), + "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), + "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), +} + + +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = True +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = True diff --git a/owl-bot-staging/v1/schema/predict/instance/docs/index.rst b/owl-bot-staging/v1/schema/predict/instance/docs/index.rst new file mode 100644 index 0000000000..e6c2d156ca --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/docs/index.rst @@ -0,0 +1,7 @@ +API Reference +------------- +.. toctree:: + :maxdepth: 2 + + instance_v1/services + instance_v1/types diff --git a/owl-bot-staging/v1/schema/predict/instance/docs/instance_v1/services.rst b/owl-bot-staging/v1/schema/predict/instance/docs/instance_v1/services.rst new file mode 100644 index 0000000000..50c011c69a --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/docs/instance_v1/services.rst @@ -0,0 +1,4 @@ +Services for Google Cloud Aiplatform V1 Schema Predict Instance v1 API +====================================================================== +.. toctree:: + :maxdepth: 2 diff --git a/owl-bot-staging/v1/schema/predict/instance/docs/instance_v1/types.rst b/owl-bot-staging/v1/schema/predict/instance/docs/instance_v1/types.rst new file mode 100644 index 0000000000..564ab013ee --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/docs/instance_v1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Aiplatform V1 Schema Predict Instance v1 API +=================================================================== + +.. 
automodule:: google.cloud.aiplatform.v1.schema.predict.instance_v1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py new file mode 100644 index 0000000000..41d6704c1f --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.image_classification import ImageClassificationPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.image_object_detection import ImageObjectDetectionPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.image_segmentation import ImageSegmentationPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.text_classification import TextClassificationPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.text_extraction import TextExtractionPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.text_sentiment import TextSentimentPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.video_action_recognition import VideoActionRecognitionPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.video_classification import VideoClassificationPredictionInstance +from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.video_object_tracking import VideoObjectTrackingPredictionInstance + +__all__ = ('ImageClassificationPredictionInstance', + 'ImageObjectDetectionPredictionInstance', + 'ImageSegmentationPredictionInstance', + 'TextClassificationPredictionInstance', + 'TextExtractionPredictionInstance', + 'TextSentimentPredictionInstance', + 'VideoActionRecognitionPredictionInstance', + 'VideoClassificationPredictionInstance', + 'VideoObjectTrackingPredictionInstance', +) diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance/py.typed b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance/py.typed new file mode 100644 index 0000000000..f70e7f605a --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1-schema-predict-instance package uses inline types. 
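As a quick orientation for the version-agnostic package above, a hedged usage sketch; it assumes inline image bytes are base64-encoded (rather than passing a Cloud Storage URI in content), and the file path is a placeholder.

import base64

from google.cloud.aiplatform.v1.schema.predict.instance import (
    ImageClassificationPredictionInstance,
)

# Hedged sketch: content may carry base64-encoded bytes or a GCS URI.
with open("image.jpg", "rb") as f:
    instance = ImageClassificationPredictionInstance(
        content=base64.b64encode(f.read()).decode("utf-8"),
        mime_type="image/jpeg",
    )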
diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py new file mode 100644 index 0000000000..41ab5407a7 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from .types.image_classification import ImageClassificationPredictionInstance +from .types.image_object_detection import ImageObjectDetectionPredictionInstance +from .types.image_segmentation import ImageSegmentationPredictionInstance +from .types.text_classification import TextClassificationPredictionInstance +from .types.text_extraction import TextExtractionPredictionInstance +from .types.text_sentiment import TextSentimentPredictionInstance +from .types.video_action_recognition import VideoActionRecognitionPredictionInstance +from .types.video_classification import VideoClassificationPredictionInstance +from .types.video_object_tracking import VideoObjectTrackingPredictionInstance + +__all__ = ( +'ImageClassificationPredictionInstance', +'ImageObjectDetectionPredictionInstance', +'ImageSegmentationPredictionInstance', +'TextClassificationPredictionInstance', +'TextExtractionPredictionInstance', +'TextSentimentPredictionInstance', +'VideoActionRecognitionPredictionInstance', +'VideoClassificationPredictionInstance', +'VideoObjectTrackingPredictionInstance', +) diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_metadata.json b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_metadata.json new file mode 100644 index 0000000000..0ae909d6ea --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_metadata.json @@ -0,0 +1,7 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform.v1.schema.predict.instance_v1", + "protoPackage": "google.cloud.aiplatform.v1.schema.predict.instance", + "schema": "1.0" +} diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/py.typed b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/py.typed new file mode 100644 index 0000000000..f70e7f605a --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1-schema-predict-instance package uses inline types. 
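The versioned instance_v1 package above exposes the same types; a hedged sketch of how such a typed instance is commonly serialized for PredictionService.predict, which accepts untyped google.protobuf.Value payloads. Endpoint setup is omitted and the example text is a placeholder.

from google.cloud.aiplatform.v1.schema.predict import instance_v1
from google.protobuf import json_format, struct_pb2

typed = instance_v1.TextClassificationPredictionInstance(
    content="I love this product!",
    mime_type="text/plain",
)

# proto-plus messages round-trip through JSON, which parses cleanly
# into the struct_pb2.Value that the predict RPC expects.
value = json_format.Parse(
    instance_v1.TextClassificationPredictionInstance.to_json(typed),
    struct_pb2.Value(),
)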
diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/services/__init__.py b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/services/__init__.py new file mode 100644 index 0000000000..4de65971c2 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py new file mode 100644 index 0000000000..80a5332604 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .image_classification import ( + ImageClassificationPredictionInstance, +) +from .image_object_detection import ( + ImageObjectDetectionPredictionInstance, +) +from .image_segmentation import ( + ImageSegmentationPredictionInstance, +) +from .text_classification import ( + TextClassificationPredictionInstance, +) +from .text_extraction import ( + TextExtractionPredictionInstance, +) +from .text_sentiment import ( + TextSentimentPredictionInstance, +) +from .video_action_recognition import ( + VideoActionRecognitionPredictionInstance, +) +from .video_classification import ( + VideoClassificationPredictionInstance, +) +from .video_object_tracking import ( + VideoObjectTrackingPredictionInstance, +) + +__all__ = ( + 'ImageClassificationPredictionInstance', + 'ImageObjectDetectionPredictionInstance', + 'ImageSegmentationPredictionInstance', + 'TextClassificationPredictionInstance', + 'TextExtractionPredictionInstance', + 'TextSentimentPredictionInstance', + 'VideoActionRecognitionPredictionInstance', + 'VideoClassificationPredictionInstance', + 'VideoObjectTrackingPredictionInstance', +) diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py new file mode 100644 index 0000000000..02e95ba9c7 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'ImageClassificationPredictionInstance', + }, +) + + +class ImageClassificationPredictionInstance(proto.Message): + r"""Prediction input format for Image Classification. + Attributes: + content (str): + The image bytes or Cloud Storage URI to make + the prediction on. + mime_type (str): + The MIME type of the content of the image. + Only the images in below listed MIME types are + supported. 
- image/jpeg + - image/gif + - image/png + - image/webp + - image/bmp + - image/tiff + - image/vnd.microsoft.icon + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py new file mode 100644 index 0000000000..0b9aadc101 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'ImageObjectDetectionPredictionInstance', + }, +) + + +class ImageObjectDetectionPredictionInstance(proto.Message): + r"""Prediction input format for Image Object Detection. + Attributes: + content (str): + The image bytes or Cloud Storage URI to make + the prediction on. + mime_type (str): + The MIME type of the content of the image. + Only the images in below listed MIME types are + supported. - image/jpeg + - image/gif + - image/png + - image/webp + - image/bmp + - image/tiff + - image/vnd.microsoft.icon + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py new file mode 100644 index 0000000000..f967807e6c --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'ImageSegmentationPredictionInstance', + }, +) + + +class ImageSegmentationPredictionInstance(proto.Message): + r"""Prediction input format for Image Segmentation. + Attributes: + content (str): + The image bytes to make the predictions on. + mime_type (str): + The MIME type of the content of the image. + Only the images in below listed MIME types are + supported. - image/jpeg + - image/png + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py new file mode 100644 index 0000000000..4eec13516c --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'TextClassificationPredictionInstance', + }, +) + + +class TextClassificationPredictionInstance(proto.Message): + r"""Prediction input format for Text Classification. + Attributes: + content (str): + The text snippet to make the predictions on. + mime_type (str): + The MIME type of the text snippet. The + supported MIME types are listed below. + - text/plain + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py new file mode 100644 index 0000000000..ffecb3de51 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'TextExtractionPredictionInstance', + }, +) + + +class TextExtractionPredictionInstance(proto.Message): + r"""Prediction input format for Text Extraction. + Attributes: + content (str): + The text snippet to make the predictions on. + mime_type (str): + The MIME type of the text snippet. The + supported MIME types are listed below. + - text/plain + key (str): + This field is only used for batch prediction. + If a key is provided, the batch prediction + result will by mapped to this key. If omitted, + then the batch prediction result will contain + the entire input instance. Vertex AI will not + check if keys in the request are duplicates, so + it is up to the caller to ensure the keys are + unique. + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + key = proto.Field( + proto.STRING, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py new file mode 100644 index 0000000000..5bdfe5d5ba --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'TextSentimentPredictionInstance', + }, +) + + +class TextSentimentPredictionInstance(proto.Message): + r"""Prediction input format for Text Sentiment. + Attributes: + content (str): + The text snippet to make the predictions on. + mime_type (str): + The MIME type of the text snippet. The + supported MIME types are listed below. 
+ - text/plain + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py new file mode 100644 index 0000000000..d53782868f --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'VideoActionRecognitionPredictionInstance', + }, +) + + +class VideoActionRecognitionPredictionInstance(proto.Message): + r"""Prediction input format for Video Action Recognition. + Attributes: + content (str): + The Google Cloud Storage location of the + video on which to perform the prediction. + mime_type (str): + The MIME type of the content of the video. + Only the following are supported: video/mp4 + video/avi video/quicktime + time_segment_start (str): + The beginning, inclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision. + time_segment_end (str): + The end, exclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision, and "inf" or "Infinity" + is allowed, which means the end of the video. + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.STRING, + number=3, + ) + time_segment_end = proto.Field( + proto.STRING, + number=4, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py new file mode 100644 index 0000000000..b51ab464a4 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'VideoClassificationPredictionInstance', + }, +) + + +class VideoClassificationPredictionInstance(proto.Message): + r"""Prediction input format for Video Classification. + Attributes: + content (str): + The Google Cloud Storage location of the + video on which to perform the prediction. + mime_type (str): + The MIME type of the content of the video. + Only the following are supported: video/mp4 + video/avi video/quicktime + time_segment_start (str): + The beginning, inclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision. + time_segment_end (str): + The end, exclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision, and "inf" or "Infinity" + is allowed, which means the end of the video. + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.STRING, + number=3, + ) + time_segment_end = proto.Field( + proto.STRING, + number=4, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py new file mode 100644 index 0000000000..8b96f75fd2 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.instance', + manifest={ + 'VideoObjectTrackingPredictionInstance', + }, +) + + +class VideoObjectTrackingPredictionInstance(proto.Message): + r"""Prediction input format for Video Object Tracking. + Attributes: + content (str): + The Google Cloud Storage location of the + video on which to perform the prediction. + mime_type (str): + The MIME type of the content of the video. 
+ Only the following are supported: video/mp4
+ video/avi video/quicktime
+ time_segment_start (str):
+ The beginning, inclusive, of the video's time
+ segment on which to perform the prediction.
+ Expressed as a number of seconds as measured
+ from the start of the video, with "s" appended
+ at the end. Fractions are allowed, up to a
+ microsecond precision.
+ time_segment_end (str):
+ The end, exclusive, of the video's time
+ segment on which to perform the prediction.
+ Expressed as a number of seconds as measured
+ from the start of the video, with "s" appended
+ at the end. Fractions are allowed, up to a
+ microsecond precision, and "inf" or "Infinity"
+ is allowed, which means the end of the video.
+ """
+
+ content = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ mime_type = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ time_segment_start = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+ time_segment_end = proto.Field(
+ proto.STRING,
+ number=4,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/instance/mypy.ini b/owl-bot-staging/v1/schema/predict/instance/mypy.ini new file mode 100644 index 0000000000..4505b48543 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/mypy.ini @@ -0,0 +1,3 @@
+[mypy]
+python_version = 3.6
+namespace_packages = True diff --git a/owl-bot-staging/v1/schema/predict/instance/noxfile.py b/owl-bot-staging/v1/schema/predict/instance/noxfile.py new file mode 100644 index 0000000000..c8d234927c --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/noxfile.py @@ -0,0 +1,132 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+import pathlib
+import shutil
+import subprocess
+import sys
+
+
+import nox # type: ignore
+
+CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
+
+LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt"
+PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8")
+
+
+nox.sessions = [
+ "unit",
+ "cover",
+ "mypy",
+ "check_lower_bounds",
+ # exclude update_lower_bounds from default
+ "docs",
+]
+
+@nox.session(python=['3.6', '3.7', '3.8', '3.9'])
+def unit(session):
+ """Run the unit test suite."""
+
+ session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio')
+ session.install('-e', '.')
+
+ session.run(
+ 'py.test',
+ '--quiet',
+ '--cov=google/cloud/aiplatform/v1/schema/predict/instance_v1/',
+ '--cov-config=.coveragerc',
+ '--cov-report=term',
+ '--cov-report=html',
+ os.path.join('tests', 'unit', ''.join(session.posargs))
+ )
+
+
+@nox.session(python='3.7')
+def cover(session):
+ """Run the final coverage report.
+ This outputs the coverage report aggregating coverage from the unit
+ test runs (not system test runs), and then erases coverage data.
+ """ + session.install("coverage", "pytest-cov") + session.run("coverage", "report", "--show-missing", "--fail-under=100") + + session.run("coverage", "erase") + + +@nox.session(python=['3.6', '3.7']) +def mypy(session): + """Run the type checker.""" + session.install('mypy', 'types-pkg_resources') + session.install('.') + session.run( + 'mypy', + '--explicit-package-bases', + 'google', + ) + + +@nox.session +def update_lower_bounds(session): + """Update lower bounds in constraints.txt to match setup.py""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'update', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + + +@nox.session +def check_lower_bounds(session): + """Check lower bounds in setup.py are reflected in constraints file""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'check', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + +@nox.session(python='3.6') +def docs(session): + """Build the docs for this library.""" + + session.install("-e", ".") + session.install("sphinx<3.0.0", "alabaster", "recommonmark") + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-W", # warnings as errors + "-T", # show full traceback on exception + "-N", # no colors + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) diff --git a/owl-bot-staging/v1/schema/predict/instance/scripts/fixup_instance_v1_keywords.py b/owl-bot-staging/v1/schema/predict/instance/scripts/fixup_instance_v1_keywords.py new file mode 100644 index 0000000000..d9e8bd0b17 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/scripts/fixup_instance_v1_keywords.py @@ -0,0 +1,175 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class instanceCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. 
+ return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=instanceCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the instance client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/schema/predict/instance/setup.py b/owl-bot-staging/v1/schema/predict/instance/setup.py new file mode 100644 index 0000000000..9449a0a922 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/setup.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import io +import os +import setuptools # type: ignore + +version = '0.1.0' + +package_root = os.path.abspath(os.path.dirname(__file__)) + +readme_filename = os.path.join(package_root, 'README.rst') +with io.open(readme_filename, encoding='utf-8') as readme_file: + readme = readme_file.read() + +setuptools.setup( + name='google-cloud-aiplatform-v1-schema-predict-instance', + version=version, + long_description=readme, + packages=setuptools.PEP420PackageFinder.find(), + namespace_packages=('google', 'google.cloud', 'google.cloud.aiplatform', 'google.cloud.aiplatform.v1', 'google.cloud.aiplatform.v1.schema', 'google.cloud.aiplatform.v1.schema.predict'), + platforms='Posix; MacOS X; Windows', + include_package_data=True, + install_requires=( + 'google-api-core[grpc] >= 1.27.0, < 2.0.0dev', + 'libcst >= 0.2.5', + 'proto-plus >= 1.15.0', + 'packaging >= 14.3', ), + python_requires='>=3.6', + classifiers=[ + 'Development Status :: 3 - Alpha', + 'Intended Audience :: Developers', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Topic :: Internet', + 'Topic :: Software Development :: Libraries :: Python Modules', + ], + zip_safe=False, +) diff --git a/owl-bot-staging/v1/schema/predict/instance/tests/__init__.py b/owl-bot-staging/v1/schema/predict/instance/tests/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/tests/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/schema/predict/instance/tests/unit/__init__.py b/owl-bot-staging/v1/schema/predict/instance/tests/unit/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/tests/unit/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/schema/predict/instance/tests/unit/gapic/__init__.py b/owl-bot-staging/v1/schema/predict/instance/tests/unit/gapic/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/tests/unit/gapic/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/schema/predict/instance/tests/unit/gapic/instance_v1/__init__.py b/owl-bot-staging/v1/schema/predict/instance/tests/unit/gapic/instance_v1/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/instance/tests/unit/gapic/instance_v1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
diff --git a/owl-bot-staging/v1/schema/predict/params/.coveragerc b/owl-bot-staging/v1/schema/predict/params/.coveragerc new file mode 100644 index 0000000000..06aa51376d --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/params/.coveragerc @@ -0,0 +1,17 @@
+[run]
+branch = True
+
+[report]
+show_missing = True
+omit =
+ google/cloud/aiplatform/v1/schema/predict/params/__init__.py
+exclude_lines =
+ # Re-enable the standard pragma
+ pragma: NO COVER
+ # Ignore debug-only repr
+ def __repr__
+ # Ignore pkg_resources exceptions.
+ # This is added at the module level as a safeguard for if someone
+ # generates the code and tries to run it without pip installing. This
+ # makes it virtually impossible to test properly.
+ except pkg_resources.DistributionNotFound diff --git a/owl-bot-staging/v1/schema/predict/params/MANIFEST.in b/owl-bot-staging/v1/schema/predict/params/MANIFEST.in new file mode 100644 index 0000000000..b990fba651 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/params/MANIFEST.in @@ -0,0 +1,2 @@
+recursive-include google/cloud/aiplatform/v1/schema/predict/params *.py
+recursive-include google/cloud/aiplatform/v1/schema/predict/params_v1 *.py diff --git a/owl-bot-staging/v1/schema/predict/params/README.rst b/owl-bot-staging/v1/schema/predict/params/README.rst new file mode 100644 index 0000000000..60be3b2705 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/params/README.rst @@ -0,0 +1,49 @@
+Python Client for Google Cloud Aiplatform V1 Schema Predict Params API
+=======================================================================
+
+Quick Start
+-----------
+
+In order to use this library, you first need to go through the following steps:
+
+1. `Select or create a Cloud Platform project.`_
+2. `Enable billing for your project.`_
+3. Enable the Google Cloud Aiplatform V1 Schema Predict Params API.
+4. `Setup Authentication.`_
+
+.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project
+.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project
+.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html
+
+Installation
+~~~~~~~~~~~~
+
+Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to
+create isolated Python environments. The basic problem it addresses is one of
+dependencies and versions, and indirectly permissions.
+
+With `virtualenv`_, it's possible to install this library without needing system
+install permissions, and without clashing with the installed system
+dependencies.
+
+.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/
+
+
+Mac/Linux
+^^^^^^^^^
+
+.. code-block:: console
+
+ python3 -m venv <your-env>
+ source <your-env>/bin/activate
+ <your-env>/bin/pip install /path/to/library
+
+
+Windows
+^^^^^^^
+
+.. code-block:: console
+
+ python3 -m venv <your-env>
+ <your-env>\Scripts\activate
+ <your-env>\Scripts\pip.exe install \path\to\library diff --git a/owl-bot-staging/v1/schema/predict/params/docs/conf.py b/owl-bot-staging/v1/schema/predict/params/docs/conf.py new file mode 100644 index 0000000000..6917071403 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/params/docs/conf.py @@ -0,0 +1,376 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# +# google-cloud-aiplatform-v1-schema-predict-params documentation build configuration file +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys +import os +import shlex + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.insert(0, os.path.abspath("..")) + +__version__ = "0.1.0" + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +needs_sphinx = "1.6.3" + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.intersphinx", + "sphinx.ext.coverage", + "sphinx.ext.napoleon", + "sphinx.ext.todo", + "sphinx.ext.viewcode", +] + +# autodoc/autosummary flags +autoclass_content = "both" +autodoc_default_flags = ["members"] +autosummary_generate = True + + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# Allow markdown includes (so releases.md can include CHANGLEOG.md) +# http://www.sphinx-doc.org/en/master/markdown.html +source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +source_suffix = [".rst", ".md"] + +# The encoding of source files. +# source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = "index" + +# General information about the project. +project = u"google-cloud-aiplatform-v1-schema-predict-params" +copyright = u"2020, Google, LLC" +author = u"Google APIs" # TODO: autogenerate this bit + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The full version, including alpha/beta/rc tags. +release = __version__ +# The short X.Y version. +version = ".".join(release.split(".")[0:2]) + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. 
+exclude_patterns = ["_build"] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = "sphinx" + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = "alabaster" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +html_theme_options = { + "description": "Google Cloud Aiplatform V1 Schema Predict Client Libraries for Python", + "github_user": "googleapis", + "github_repo": "google-cloud-python", + "github_banner": True, + "font_family": "'Roboto', Georgia, sans", + "head_font_family": "'Roboto', Georgia, serif", + "code_font_family": "'Roboto Mono', 'Consolas', monospace", +} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +# html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. 
+# html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+# html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+# html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+# html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+# html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+# html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
+# html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# Now only 'ja' uses this config value
+# html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+# html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = "google-cloud-aiplatform-v1-schema-predict-params-doc"
+
+# -- Options for warnings ------------------------------------------------------
+
+
+suppress_warnings = [
+ # Temporarily suppress this to avoid "more than one target found for
+ # cross-reference" warnings, which are intractable for us to avoid while in
+ # a mono-repo.
+ # See https://github.com/sphinx-doc/sphinx/blob
+ # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
+ "ref.python"
+]
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+ # The paper size ('letterpaper' or 'a4paper').
+ # 'papersize': 'letterpaper',
+ # The font size ('10pt', '11pt' or '12pt').
+ # 'pointsize': '10pt',
+ # Additional stuff for the LaTeX preamble.
+ # 'preamble': '',
+ # Latex figure (float) alignment
+ # 'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ (
+ master_doc,
+ "google-cloud-aiplatform-v1-schema-predict-params.tex",
+ u"google-cloud-aiplatform-v1-schema-predict-params Documentation",
+ author,
+ "manual",
+ )
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+# latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+# latex_use_parts = False
+
+# If true, show page references after internal links.
+# latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+# latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+# latex_appendices = []
+
+# If false, no module index is generated.
+# latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [ + ( + master_doc, + "google-cloud-aiplatform-v1-schema-predict-params", + u"Google Cloud Aiplatform V1 Schema Predict Params Documentation", + [author], + 1, + ) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + master_doc, + "google-cloud-aiplatform-v1-schema-predict-params", + u"google-cloud-aiplatform-v1-schema-predict-params Documentation", + author, + "google-cloud-aiplatform-v1-schema-predict-params", + "GAPIC library for Google Cloud Aiplatform V1 Schema Predict Params API", + "APIs", + ) +] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# texinfo_no_detailmenu = False + + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = { + "python": ("http://python.readthedocs.org/en/latest/", None), + "gax": ("https://gax-python.readthedocs.org/en/latest/", None), + "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), + "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), + "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), + "grpc": ("https://grpc.io/grpc/python/", None), + "requests": ("http://requests.kennethreitz.org/en/stable/", None), + "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), + "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), +} + + +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = True +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = True diff --git a/owl-bot-staging/v1/schema/predict/params/docs/index.rst b/owl-bot-staging/v1/schema/predict/params/docs/index.rst new file mode 100644 index 0000000000..e90a1a5814 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/params/docs/index.rst @@ -0,0 +1,7 @@ +API Reference +------------- +.. toctree:: + :maxdepth: 2 + + params_v1/services + params_v1/types diff --git a/owl-bot-staging/v1/schema/predict/params/docs/params_v1/services.rst b/owl-bot-staging/v1/schema/predict/params/docs/params_v1/services.rst new file mode 100644 index 0000000000..bf08ea6e98 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/params/docs/params_v1/services.rst @@ -0,0 +1,4 @@ +Services for Google Cloud Aiplatform V1 Schema Predict Params v1 API +==================================================================== +.. 
toctree:: + :maxdepth: 2 diff --git a/owl-bot-staging/v1/schema/predict/params/docs/params_v1/types.rst b/owl-bot-staging/v1/schema/predict/params/docs/params_v1/types.rst new file mode 100644 index 0000000000..956ef5224d --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/params/docs/params_v1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Aiplatform V1 Schema Predict Params v1 API +================================================================= + +.. automodule:: google.cloud.aiplatform.v1.schema.predict.params_v1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params/__init__.py b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params/__init__.py new file mode 100644 index 0000000000..91ae7f0d5c --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params/__init__.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from google.cloud.aiplatform.v1.schema.predict.params_v1.types.image_classification import ImageClassificationPredictionParams +from google.cloud.aiplatform.v1.schema.predict.params_v1.types.image_object_detection import ImageObjectDetectionPredictionParams +from google.cloud.aiplatform.v1.schema.predict.params_v1.types.image_segmentation import ImageSegmentationPredictionParams +from google.cloud.aiplatform.v1.schema.predict.params_v1.types.video_action_recognition import VideoActionRecognitionPredictionParams +from google.cloud.aiplatform.v1.schema.predict.params_v1.types.video_classification import VideoClassificationPredictionParams +from google.cloud.aiplatform.v1.schema.predict.params_v1.types.video_object_tracking import VideoObjectTrackingPredictionParams + +__all__ = ('ImageClassificationPredictionParams', + 'ImageObjectDetectionPredictionParams', + 'ImageSegmentationPredictionParams', + 'VideoActionRecognitionPredictionParams', + 'VideoClassificationPredictionParams', + 'VideoObjectTrackingPredictionParams', +) diff --git a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params/py.typed b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params/py.typed new file mode 100644 index 0000000000..df96e61590 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1-schema-predict-params package uses inline types. 
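
The `params` package above is a thin namespace layer that simply re-exports the versioned `params_v1` message types. As a minimal usage sketch of the proto-plus messages this PR generates, assuming the staging package is installed under its setup.py name (google-cloud-aiplatform-v1-schema-predict-params) and using made-up field values:

    from google.cloud.aiplatform.v1.schema.predict.params import (
        VideoClassificationPredictionParams,
    )

    # Construct a params message; unset fields keep their proto3 defaults.
    params = VideoClassificationPredictionParams(
        confidence_threshold=0.3,     # only return labels scoring at least 0.3
        max_predictions=100,          # cap the number of returned labels
        segment_classification=True,  # classify the whole requested segment
    )

    # proto-plus messages expose class-level JSON helpers, handy for debugging.
    as_json = VideoClassificationPredictionParams.to_json(params)
    restored = VideoClassificationPredictionParams.from_json(as_json)
    assert restored.confidence_threshold == params.confidence_threshold

Because every instance and params type in this PR is a plain proto-plus wrapper, the same construction and round-trip pattern applies to all of them.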
diff --git a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py new file mode 100644 index 0000000000..91b718b437 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from .types.image_classification import ImageClassificationPredictionParams +from .types.image_object_detection import ImageObjectDetectionPredictionParams +from .types.image_segmentation import ImageSegmentationPredictionParams +from .types.video_action_recognition import VideoActionRecognitionPredictionParams +from .types.video_classification import VideoClassificationPredictionParams +from .types.video_object_tracking import VideoObjectTrackingPredictionParams + +__all__ = ( +'ImageClassificationPredictionParams', +'ImageObjectDetectionPredictionParams', +'ImageSegmentationPredictionParams', +'VideoActionRecognitionPredictionParams', +'VideoClassificationPredictionParams', +'VideoObjectTrackingPredictionParams', +) diff --git a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_metadata.json b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_metadata.json new file mode 100644 index 0000000000..edfffb441b --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_metadata.json @@ -0,0 +1,7 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform.v1.schema.predict.params_v1", + "protoPackage": "google.cloud.aiplatform.v1.schema.predict.params", + "schema": "1.0" +} diff --git a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/py.typed b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/py.typed new file mode 100644 index 0000000000..df96e61590 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1-schema-predict-params package uses inline types. 
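
The versioned `params_v1` package is what callers import directly. These schema messages are not sent over the wire as-is: the v1 PredictionService accepts instances and parameters as google.protobuf.Value, so one common way to convert is through each message's JSON form. A hedged sketch, assuming the aiplatform_v1 client from this PR is installed, default credentials are available, and the endpoint and file names are placeholders:

    import base64

    from google.cloud import aiplatform_v1
    from google.protobuf import json_format
    from google.protobuf.struct_pb2 import Value

    from google.cloud.aiplatform.v1.schema.predict.instance_v1 import (
        ImageClassificationPredictionInstance,
    )
    from google.cloud.aiplatform.v1.schema.predict.params_v1 import (
        ImageClassificationPredictionParams,
    )


    def to_value(message) -> Value:
        # Convert a proto-plus schema message into the google.protobuf.Value
        # that PredictionService.predict() expects.
        return json_format.Parse(type(message).to_json(message), Value())


    with open("image.jpg", "rb") as f:  # hypothetical local file
        encoded = base64.b64encode(f.read()).decode("utf-8")

    instance = ImageClassificationPredictionInstance(
        content=encoded, mime_type="image/jpeg",
    )
    params = ImageClassificationPredictionParams(
        confidence_threshold=0.5, max_predictions=5,
    )

    client = aiplatform_v1.PredictionServiceClient()
    response = client.predict(
        endpoint="projects/PROJECT/locations/us-central1/endpoints/ENDPOINT_ID",
        instances=[to_value(instance)],
        parameters=to_value(params),
    )

The point of these generated schema types is exactly this: typed constructors for the request payloads, instead of hand-written JSON dictionaries.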
diff --git a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/services/__init__.py b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/services/__init__.py new file mode 100644 index 0000000000..4de65971c2 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py new file mode 100644 index 0000000000..70a92bb59c --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .image_classification import ( + ImageClassificationPredictionParams, +) +from .image_object_detection import ( + ImageObjectDetectionPredictionParams, +) +from .image_segmentation import ( + ImageSegmentationPredictionParams, +) +from .video_action_recognition import ( + VideoActionRecognitionPredictionParams, +) +from .video_classification import ( + VideoClassificationPredictionParams, +) +from .video_object_tracking import ( + VideoObjectTrackingPredictionParams, +) + +__all__ = ( + 'ImageClassificationPredictionParams', + 'ImageObjectDetectionPredictionParams', + 'ImageSegmentationPredictionParams', + 'VideoActionRecognitionPredictionParams', + 'VideoClassificationPredictionParams', + 'VideoObjectTrackingPredictionParams', +) diff --git a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py new file mode 100644 index 0000000000..1668600544 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.params', + manifest={ + 'ImageClassificationPredictionParams', + }, +) + + +class ImageClassificationPredictionParams(proto.Message): + r"""Prediction model parameters for Image Classification. + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The Model only returns up to that many top, + by confidence score, predictions per instance. + If this number is very high, the Model may + return fewer predictions. Default value is 10. + """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py new file mode 100644 index 0000000000..43c7814607 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.params', + manifest={ + 'ImageObjectDetectionPredictionParams', + }, +) + + +class ImageObjectDetectionPredictionParams(proto.Message): + r"""Prediction model parameters for Image Object Detection. + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The Model only returns up to that many top, + by confidence score, predictions per instance. + Note that number of returned predictions is also + limited by metadata's predictionsLimit. Default + value is 10. 
+ """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py new file mode 100644 index 0000000000..695a3a7745 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.params', + manifest={ + 'ImageSegmentationPredictionParams', + }, +) + + +class ImageSegmentationPredictionParams(proto.Message): + r"""Prediction model parameters for Image Segmentation. + Attributes: + confidence_threshold (float): + When the model predicts category of pixels of + the image, it will only provide predictions for + pixels that it is at least this much confident + about. All other pixels will be classified as + background. Default value is 0.5. + """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py new file mode 100644 index 0000000000..88e714e9cf --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.params', + manifest={ + 'VideoActionRecognitionPredictionParams', + }, +) + + +class VideoActionRecognitionPredictionParams(proto.Message): + r"""Prediction model parameters for Video Action Recognition. + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. 
Default value is + 0.0 + max_predictions (int): + The model only returns up to that many top, + by confidence score, predictions per frame of + the video. If this number is very high, the + Model may return fewer predictions per frame. + Default value is 50. + """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py new file mode 100644 index 0000000000..06b2347eb6 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py @@ -0,0 +1,94 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.params', + manifest={ + 'VideoClassificationPredictionParams', + }, +) + + +class VideoClassificationPredictionParams(proto.Message): + r"""Prediction model parameters for Video Classification. + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The Model only returns up to that many top, + by confidence score, predictions per instance. + If this number is very high, the Model may + return fewer predictions. Default value is + 10,000. + segment_classification (bool): + Set to true to request segment-level + classification. Vertex AI returns labels and + their confidence scores for the entire time + segment of the video that user specified in the + input instance. Default value is true + shot_classification (bool): + Set to true to request shot-level + classification. Vertex AI determines the + boundaries for each camera shot in the entire + time segment of the video that user specified in + the input instance. Vertex AI then returns + labels and their confidence scores for each + detected shot, along with the start and end time + of the shot. + WARNING: Model evaluation is not done for this + classification type, the quality of it depends + on the training data, but there are no metrics + provided to describe that quality. + Default value is false + one_sec_interval_classification (bool): + Set to true to request classification for a + video at one-second intervals. Vertex AI returns + labels and their confidence scores for each + second of the entire time segment of the video + that user specified in the input WARNING: Model + evaluation is not done for this classification + type, the quality of it depends on the training + data, but there are no metrics provided to + describe that quality. 
+            Default value is false.
+    """
+
+    confidence_threshold = proto.Field(
+        proto.FLOAT,
+        number=1,
+    )
+    max_predictions = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+    segment_classification = proto.Field(
+        proto.BOOL,
+        number=3,
+    )
+    shot_classification = proto.Field(
+        proto.BOOL,
+        number=4,
+    )
+    one_sec_interval_classification = proto.Field(
+        proto.BOOL,
+        number=5,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py
new file mode 100644
index 0000000000..820a73e3c6
--- /dev/null
+++ b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1.schema.predict.params',
+    manifest={
+        'VideoObjectTrackingPredictionParams',
+    },
+)
+
+
+class VideoObjectTrackingPredictionParams(proto.Message):
+    r"""Prediction model parameters for Video Object Tracking.
+    Attributes:
+        confidence_threshold (float):
+            The Model only returns predictions with at
+            least this confidence score. Default value is
+            0.0.
+        max_predictions (int):
+            The Model returns up to this many of its top
+            predictions, ranked by confidence score, per
+            frame of the video. If this number is very high,
+            the Model may return fewer predictions per
+            frame. Default value is 50.
+        min_bounding_box_size (float):
+            Only bounding boxes whose shortest edge is at
+            least this long, as a relative value of the
+            video frame size, are returned. Default value is
+            0.0.
+    """
+
+    confidence_threshold = proto.Field(
+        proto.FLOAT,
+        number=1,
+    )
+    max_predictions = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+    min_bounding_box_size = proto.Field(
+        proto.FLOAT,
+        number=3,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/schema/predict/params/mypy.ini b/owl-bot-staging/v1/schema/predict/params/mypy.ini
new file mode 100644
index 0000000000..4505b48543
--- /dev/null
+++ b/owl-bot-staging/v1/schema/predict/params/mypy.ini
@@ -0,0 +1,3 @@
+[mypy]
+python_version = 3.6
+namespace_packages = True
diff --git a/owl-bot-staging/v1/schema/predict/params/noxfile.py b/owl-bot-staging/v1/schema/predict/params/noxfile.py
new file mode 100644
index 0000000000..e886ab4759
--- /dev/null
+++ b/owl-bot-staging/v1/schema/predict/params/noxfile.py
@@ -0,0 +1,132 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+import pathlib
+import shutil
+import subprocess
+import sys
+
+
+import nox  # type: ignore
+
+CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
+
+LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt"
+PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8")
+
+
+nox.sessions = [
+    "unit",
+    "cover",
+    "mypy",
+    "check_lower_bounds",
+    # exclude update_lower_bounds from default
+    "docs",
+]
+
+@nox.session(python=['3.6', '3.7', '3.8', '3.9'])
+def unit(session):
+    """Run the unit test suite."""
+
+    session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio')
+    session.install('-e', '.')
+
+    session.run(
+        'py.test',
+        '--quiet',
+        '--cov=google/cloud/aiplatform/v1/schema/predict/params_v1/',
+        '--cov-config=.coveragerc',
+        '--cov-report=term',
+        '--cov-report=html',
+        os.path.join('tests', 'unit', ''.join(session.posargs))
+    )
+
+
+@nox.session(python='3.7')
+def cover(session):
+    """Run the final coverage report.
+    This outputs the coverage report aggregating coverage from the unit
+    test runs (not system test runs), and then erases coverage data.
+    """
+    session.install("coverage", "pytest-cov")
+    session.run("coverage", "report", "--show-missing", "--fail-under=100")
+
+    session.run("coverage", "erase")
+
+
+@nox.session(python=['3.6', '3.7'])
+def mypy(session):
+    """Run the type checker."""
+    session.install('mypy', 'types-pkg_resources')
+    session.install('.')
+    session.run(
+        'mypy',
+        '--explicit-package-bases',
+        'google',
+    )
+
+
+@nox.session
+def update_lower_bounds(session):
+    """Update lower bounds in constraints.txt to match setup.py"""
+    session.install('google-cloud-testutils')
+    session.install('.')
+
+    session.run(
+        'lower-bound-checker',
+        'update',
+        '--package-name',
+        PACKAGE_NAME,
+        '--constraints-file',
+        str(LOWER_BOUND_CONSTRAINTS_FILE),
+    )
+
+
+@nox.session
+def check_lower_bounds(session):
+    """Check lower bounds in setup.py are reflected in constraints file"""
+    session.install('google-cloud-testutils')
+    session.install('.')
+
+    session.run(
+        'lower-bound-checker',
+        'check',
+        '--package-name',
+        PACKAGE_NAME,
+        '--constraints-file',
+        str(LOWER_BOUND_CONSTRAINTS_FILE),
+    )
+
+@nox.session(python='3.6')
+def docs(session):
+    """Build the docs for this library."""
+
+    session.install("-e", ".")
+    session.install("sphinx<3.0.0", "alabaster", "recommonmark")
+
+    shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
+    session.run(
+        "sphinx-build",
+        "-W",  # warnings as errors
+        "-T",  # show full traceback on exception
+        "-N",  # no colors
+        "-b",
+        "html",
+        "-d",
+        os.path.join("docs", "_build", "doctrees", ""),
+        os.path.join("docs", ""),
+        os.path.join("docs", "_build", "html", ""),
+    )
diff --git a/owl-bot-staging/v1/schema/predict/params/scripts/fixup_params_v1_keywords.py b/owl-bot-staging/v1/schema/predict/params/scripts/fixup_params_v1_keywords.py
new file mode 100644
index 0000000000..17915abed8
--- /dev/null
+++ b/owl-bot-staging/v1/schema/predict/params/scripts/fixup_params_v1_keywords.py
@@ -0,0 +1,175 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import argparse
+import os
+import libcst as cst
+import pathlib
+import sys
+from typing import (Any, Callable, Dict, List, Sequence, Tuple)
+
+
+def partition(
+    predicate: Callable[[Any], bool],
+    iterator: Sequence[Any]
+) -> Tuple[List[Any], List[Any]]:
+    """A stable, out-of-place partition."""
+    results = ([], [])
+
+    for i in iterator:
+        results[int(predicate(i))].append(i)
+
+    # Returns trueList, falseList
+    return results[1], results[0]
+
+
+class paramsCallTransformer(cst.CSTTransformer):
+    CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
+    METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
+    }
+
+    def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
+        try:
+            key = original.func.attr.value
+            kword_params = self.METHOD_TO_PARAMS[key]
+        except (AttributeError, KeyError):
+            # Either not a method from the API or too convoluted to be sure.
+            return updated
+
+        # If the existing code is valid, keyword args come after positional args.
+        # Therefore, all positional args must map to the first parameters.
+        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
+        if any(k.keyword.value == "request" for k in kwargs):
+            # We've already fixed this file, don't fix it again.
+            return updated
+
+        kwargs, ctrl_kwargs = partition(
+            lambda a: a.keyword.value not in self.CTRL_PARAMS,
+            kwargs
+        )
+
+        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
+        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
+                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
+
+        request_arg = cst.Arg(
+            value=cst.Dict([
+                cst.DictElement(
+                    cst.SimpleString("'{}'".format(name)),
+                    cst.Element(value=arg.value)
+                )
+                # Note: the args + kwargs looks silly, but keep in mind that
+                # the control parameters had to be stripped out, and that
+                # those could have been passed positionally or by keyword.
+                for name, arg in zip(kword_params, args + kwargs)]),
+            keyword=cst.Name("request")
+        )
+
+        return updated.with_changes(
+            args=[request_arg] + ctrl_kwargs
+        )
+
+
+def fix_files(
+    in_dir: pathlib.Path,
+    out_dir: pathlib.Path,
+    *,
+    transformer=paramsCallTransformer(),
+):
+    """Duplicate the input dir to the output dir, fixing file method calls.
+
+    Preconditions:
+    * in_dir is a real directory
+    * out_dir is a real, empty directory
+    """
+    pyfile_gen = (
+        pathlib.Path(os.path.join(root, f))
+        for root, _, files in os.walk(in_dir)
+        for f in files if os.path.splitext(f)[1] == ".py"
+    )
+
+    for fpath in pyfile_gen:
+        with open(fpath, 'r') as f:
+            src = f.read()
+
+        # Parse the code and insert method call fixes.
+        tree = cst.parse_module(src)
+        updated = tree.visit(transformer)
+
+        # Create the path and directory structure for the new file.
+        updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
+        updated_path.parent.mkdir(parents=True, exist_ok=True)
+
+        # Generate the updated source file at the corresponding path.
+ with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the params client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. +""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/schema/predict/params/setup.py b/owl-bot-staging/v1/schema/predict/params/setup.py new file mode 100644 index 0000000000..94507f278c --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/params/setup.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import io +import os +import setuptools # type: ignore + +version = '0.1.0' + +package_root = os.path.abspath(os.path.dirname(__file__)) + +readme_filename = os.path.join(package_root, 'README.rst') +with io.open(readme_filename, encoding='utf-8') as readme_file: + readme = readme_file.read() + +setuptools.setup( + name='google-cloud-aiplatform-v1-schema-predict-params', + version=version, + long_description=readme, + packages=setuptools.PEP420PackageFinder.find(), + namespace_packages=('google', 'google.cloud', 'google.cloud.aiplatform', 'google.cloud.aiplatform.v1', 'google.cloud.aiplatform.v1.schema', 'google.cloud.aiplatform.v1.schema.predict'), + platforms='Posix; MacOS X; Windows', + include_package_data=True, + install_requires=( + 'google-api-core[grpc] >= 1.27.0, < 2.0.0dev', + 'libcst >= 0.2.5', + 'proto-plus >= 1.15.0', + 'packaging >= 14.3', ), + python_requires='>=3.6', + classifiers=[ + 'Development Status :: 3 - Alpha', + 'Intended Audience :: Developers', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Topic :: Internet', + 'Topic :: Software Development :: Libraries :: Python Modules', + ], + zip_safe=False, +) diff --git a/owl-bot-staging/v1/schema/predict/params/tests/__init__.py b/owl-bot-staging/v1/schema/predict/params/tests/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/params/tests/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/schema/predict/params/tests/unit/__init__.py b/owl-bot-staging/v1/schema/predict/params/tests/unit/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/params/tests/unit/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/owl-bot-staging/v1/schema/predict/params/tests/unit/gapic/__init__.py b/owl-bot-staging/v1/schema/predict/params/tests/unit/gapic/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/params/tests/unit/gapic/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/schema/predict/params/tests/unit/gapic/params_v1/__init__.py b/owl-bot-staging/v1/schema/predict/params/tests/unit/gapic/params_v1/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/params/tests/unit/gapic/params_v1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/schema/predict/prediction/.coveragerc b/owl-bot-staging/v1/schema/predict/prediction/.coveragerc new file mode 100644 index 0000000000..a49b3b6dd8 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/prediction/.coveragerc @@ -0,0 +1,17 @@ +[run] +branch = True + +[report] +show_missing = True +omit = + google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py +exclude_lines = + # Re-enable the standard pragma + pragma: NO COVER + # Ignore debug-only repr + def __repr__ + # Ignore pkg_resources exceptions. + # This is added at the module level as a safeguard for if someone + # generates the code and tries to run it without pip installing. This + # makes it virtually impossible to test properly. 
+    except pkg_resources.DistributionNotFound
diff --git a/owl-bot-staging/v1/schema/predict/prediction/MANIFEST.in b/owl-bot-staging/v1/schema/predict/prediction/MANIFEST.in
new file mode 100644
index 0000000000..4cca31bd6b
--- /dev/null
+++ b/owl-bot-staging/v1/schema/predict/prediction/MANIFEST.in
@@ -0,0 +1,2 @@
+recursive-include google/cloud/aiplatform/v1/schema/predict/prediction *.py
+recursive-include google/cloud/aiplatform/v1/schema/predict/prediction_v1 *.py
diff --git a/owl-bot-staging/v1/schema/predict/prediction/README.rst b/owl-bot-staging/v1/schema/predict/prediction/README.rst
new file mode 100644
index 0000000000..acb7f67d13
--- /dev/null
+++ b/owl-bot-staging/v1/schema/predict/prediction/README.rst
@@ -0,0 +1,49 @@
+Python Client for Google Cloud Aiplatform V1 Schema Predict Prediction API
+===========================================================================
+
+Quick Start
+-----------
+
+In order to use this library, you first need to go through the following steps:
+
+1. `Select or create a Cloud Platform project.`_
+2. `Enable billing for your project.`_
+3. Enable the Google Cloud Aiplatform V1 Schema Predict Prediction API.
+4. `Setup Authentication.`_
+
+.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project
+.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project
+.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html
+
+Installation
+~~~~~~~~~~~~
+
+Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to
+create isolated Python environments. The basic problem it addresses is one of
+dependencies and versions, and indirectly permissions.
+
+With `virtualenv`_, it's possible to install this library without needing system
+install permissions, and without clashing with the installed system
+dependencies.
+
+.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/
+
+
+Mac/Linux
+^^^^^^^^^
+
+.. code-block:: console
+
+    python3 -m venv <your-env>
+    source <your-env>/bin/activate
+    <your-env>/bin/pip install /path/to/library
+
+
+Windows
+^^^^^^^
+
+.. code-block:: console
+
+    python3 -m venv <your-env>
+    <your-env>\Scripts\activate
+    <your-env>\Scripts\pip.exe install \path\to\library
diff --git a/owl-bot-staging/v1/schema/predict/prediction/docs/conf.py b/owl-bot-staging/v1/schema/predict/prediction/docs/conf.py
new file mode 100644
index 0000000000..c0f73900a9
--- /dev/null
+++ b/owl-bot-staging/v1/schema/predict/prediction/docs/conf.py
@@ -0,0 +1,376 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+# google-cloud-aiplatform-v1-schema-predict-prediction documentation build configuration file
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+import shlex
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath(".."))
+
+__version__ = "0.1.0"
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+needs_sphinx = "1.6.3"
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    "sphinx.ext.autodoc",
+    "sphinx.ext.autosummary",
+    "sphinx.ext.intersphinx",
+    "sphinx.ext.coverage",
+    "sphinx.ext.napoleon",
+    "sphinx.ext.todo",
+    "sphinx.ext.viewcode",
+]
+
+# autodoc/autosummary flags
+autoclass_content = "both"
+autodoc_default_flags = ["members"]
+autosummary_generate = True
+
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ["_templates"]
+
+# Allow markdown includes (so releases.md can include CHANGELOG.md)
+# http://www.sphinx-doc.org/en/master/markdown.html
+source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffixes as a list of strings:
+source_suffix = [".rst", ".md"]
+
+# The encoding of source files.
+# source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = "index"
+
+# General information about the project.
+project = u"google-cloud-aiplatform-v1-schema-predict-prediction"
+copyright = u"2020, Google, LLC"
+author = u"Google APIs"  # TODO: autogenerate this bit
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The full version, including alpha/beta/rc tags.
+release = __version__
+# The short X.Y version.
+version = ".".join(release.split(".")[0:2])
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+# today = ''
+# Else, today_fmt is used as the format for a strftime call.
+# today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ["_build"]
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+# default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+# add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+# add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+# show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = "sphinx"
+
+# A list of ignored prefixes for module index sorting.
+# modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+# keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = "alabaster" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +html_theme_options = { + "description": "Google Cloud Aiplatform V1 Schema Predict Client Libraries for Python", + "github_user": "googleapis", + "github_repo": "google-cloud-python", + "github_banner": True, + "font_family": "'Roboto', Georgia, sans", + "head_font_family": "'Roboto', Georgia, serif", + "code_font_family": "'Roboto Mono', 'Consolas', monospace", +} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +# html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. 
+# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' +# html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# Now only 'ja' uses this config value +# html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +# html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. +htmlhelp_basename = "google-cloud-aiplatform-v1-schema-predict-prediction-doc" + +# -- Options for warnings ------------------------------------------------------ + + +suppress_warnings = [ + # Temporarily suppress this to avoid "more than one target found for + # cross-reference" warning, which are intractable for us to avoid while in + # a mono-repo. + # See https://github.com/sphinx-doc/sphinx/blob + # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 + "ref.python" +] + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + # 'preamble': '', + # Latex figure (float) alignment + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ( + master_doc, + "google-cloud-aiplatform-v1-schema-predict-prediction.tex", + u"google-cloud-aiplatform-v1-schema-predict-prediction Documentation", + author, + "manual", + ) +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ( + master_doc, + "google-cloud-aiplatform-v1-schema-predict-prediction", + u"Google Cloud Aiplatform V1 Schema Predict Prediction Documentation", + [author], + 1, + ) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + master_doc, + "google-cloud-aiplatform-v1-schema-predict-prediction", + u"google-cloud-aiplatform-v1-schema-predict-prediction Documentation", + author, + "google-cloud-aiplatform-v1-schema-predict-prediction", + "GAPIC library for Google Cloud Aiplatform V1 Schema Predict Prediction API", + "APIs", + ) +] + +# Documents to append as an appendix to all manuals. 
+# texinfo_appendices = []
+
+# If false, no module index is generated.
+# texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+# texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+# texinfo_no_detailmenu = False
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {
+    "python": ("http://python.readthedocs.org/en/latest/", None),
+    "gax": ("https://gax-python.readthedocs.org/en/latest/", None),
+    "google-auth": ("https://google-auth.readthedocs.io/en/stable", None),
+    "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None),
+    "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None),
+    "grpc": ("https://grpc.io/grpc/python/", None),
+    "requests": ("http://requests.kennethreitz.org/en/stable/", None),
+    "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None),
+    "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
+}
+
+
+# Napoleon settings
+napoleon_google_docstring = True
+napoleon_numpy_docstring = True
+napoleon_include_private_with_doc = False
+napoleon_include_special_with_doc = True
+napoleon_use_admonition_for_examples = False
+napoleon_use_admonition_for_notes = False
+napoleon_use_admonition_for_references = False
+napoleon_use_ivar = False
+napoleon_use_param = True
+napoleon_use_rtype = True
diff --git a/owl-bot-staging/v1/schema/predict/prediction/docs/index.rst b/owl-bot-staging/v1/schema/predict/prediction/docs/index.rst
new file mode 100644
index 0000000000..f28df12991
--- /dev/null
+++ b/owl-bot-staging/v1/schema/predict/prediction/docs/index.rst
@@ -0,0 +1,7 @@
+API Reference
+-------------
+.. toctree::
+    :maxdepth: 2
+
+    prediction_v1/services
+    prediction_v1/types
diff --git a/owl-bot-staging/v1/schema/predict/prediction/docs/prediction_v1/services.rst b/owl-bot-staging/v1/schema/predict/prediction/docs/prediction_v1/services.rst
new file mode 100644
index 0000000000..ad6f034387
--- /dev/null
+++ b/owl-bot-staging/v1/schema/predict/prediction/docs/prediction_v1/services.rst
@@ -0,0 +1,4 @@
+Services for Google Cloud Aiplatform V1 Schema Predict Prediction v1 API
+========================================================================
+.. toctree::
+    :maxdepth: 2
diff --git a/owl-bot-staging/v1/schema/predict/prediction/docs/prediction_v1/types.rst b/owl-bot-staging/v1/schema/predict/prediction/docs/prediction_v1/types.rst
new file mode 100644
index 0000000000..a97faf34de
--- /dev/null
+++ b/owl-bot-staging/v1/schema/predict/prediction/docs/prediction_v1/types.rst
@@ -0,0 +1,7 @@
+Types for Google Cloud Aiplatform V1 Schema Predict Prediction v1 API
+=====================================================================
+
+.. automodule:: google.cloud.aiplatform.v1.schema.predict.prediction_v1.types
+    :members:
+    :undoc-members:
+    :show-inheritance:
diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py
new file mode 100644
index 0000000000..27d9f97862
--- /dev/null
+++ b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.classification import ClassificationPredictionResult
+from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.image_object_detection import ImageObjectDetectionPredictionResult
+from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.image_segmentation import ImageSegmentationPredictionResult
+from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.tabular_classification import TabularClassificationPredictionResult
+from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.tabular_regression import TabularRegressionPredictionResult
+from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.text_extraction import TextExtractionPredictionResult
+from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.text_sentiment import TextSentimentPredictionResult
+from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.video_action_recognition import VideoActionRecognitionPredictionResult
+from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.video_classification import VideoClassificationPredictionResult
+from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.video_object_tracking import VideoObjectTrackingPredictionResult
+
+__all__ = ('ClassificationPredictionResult',
+    'ImageObjectDetectionPredictionResult',
+    'ImageSegmentationPredictionResult',
+    'TabularClassificationPredictionResult',
+    'TabularRegressionPredictionResult',
+    'TextExtractionPredictionResult',
+    'TextSentimentPredictionResult',
+    'VideoActionRecognitionPredictionResult',
+    'VideoClassificationPredictionResult',
+    'VideoObjectTrackingPredictionResult',
+)
diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction/py.typed b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction/py.typed
new file mode 100644
index 0000000000..472fa4d8cc
--- /dev/null
+++ b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction/py.typed
@@ -0,0 +1,2 @@
+# Marker file for PEP 561.
+# The google-cloud-aiplatform-v1-schema-predict-prediction package uses inline types.
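
The `prediction` namespace package above simply re-exports the typed result messages defined in `prediction_v1`. As a usage sketch (illustrative only; the IDs, display names, and confidence scores below are invented sample values), these are ordinary proto-plus messages whose repeated fields form parallel arrays:

    # Construct a typed result directly; the three repeated fields are
    # parallel arrays, so entries at the same index describe one prediction.
    from google.cloud.aiplatform.v1.schema.predict.prediction import (
        ClassificationPredictionResult,
    )

    result = ClassificationPredictionResult(
        ids=[101, 102],                    # AnnotationSpec resource IDs (sample values)
        display_names=["daisy", "tulip"],  # order matches `ids`
        confidences=[0.92, 0.08],          # order matches `ids`
    )

    # Iterate the parallel fields together.
    for spec_id, name, score in zip(result.ids, result.display_names, result.confidences):
        print(f"{spec_id} {name}: {score:.2f}")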
diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py
new file mode 100644
index 0000000000..3cf9304526
--- /dev/null
+++ b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from .types.classification import ClassificationPredictionResult
+from .types.image_object_detection import ImageObjectDetectionPredictionResult
+from .types.image_segmentation import ImageSegmentationPredictionResult
+from .types.tabular_classification import TabularClassificationPredictionResult
+from .types.tabular_regression import TabularRegressionPredictionResult
+from .types.text_extraction import TextExtractionPredictionResult
+from .types.text_sentiment import TextSentimentPredictionResult
+from .types.video_action_recognition import VideoActionRecognitionPredictionResult
+from .types.video_classification import VideoClassificationPredictionResult
+from .types.video_object_tracking import VideoObjectTrackingPredictionResult
+
+__all__ = (
+    'ClassificationPredictionResult',
+    'ImageObjectDetectionPredictionResult',
+    'ImageSegmentationPredictionResult',
+    'TabularClassificationPredictionResult',
+    'TabularRegressionPredictionResult',
+    'TextExtractionPredictionResult',
+    'TextSentimentPredictionResult',
+    'VideoActionRecognitionPredictionResult',
+    'VideoClassificationPredictionResult',
+    'VideoObjectTrackingPredictionResult',
+)
diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_metadata.json b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_metadata.json
new file mode 100644
index 0000000000..ba1d67a00c
--- /dev/null
+++ b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_metadata.json
@@ -0,0 +1,7 @@
+{
+  "comment": "This file maps proto services/RPCs to the corresponding library clients/methods",
+  "language": "python",
+  "libraryPackage": "google.cloud.aiplatform.v1.schema.predict.prediction_v1",
+  "protoPackage": "google.cloud.aiplatform.v1.schema.predict.prediction",
+  "schema": "1.0"
+}
diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/py.typed b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/py.typed
new file mode 100644
index 0000000000..472fa4d8cc
--- /dev/null
+++ b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/py.typed
@@ -0,0 +1,2 @@
+# Marker file for PEP 561.
+# The google-cloud-aiplatform-v1-schema-predict-prediction package uses inline types.
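
At runtime these schema messages are typically populated from the untyped `predictions` entries of a `PredictResponse` returned by the v1 `PredictionService`. A minimal sketch of that conversion, assuming `prediction` is one such `google.protobuf.struct_pb2.Value` entry; the `parse_classification` helper is hypothetical, and it relies on the `from_json` classmethod that proto-plus (>= 1.15.0, per this package's setup.py) provides on every message:

    from google.protobuf import json_format

    from google.cloud.aiplatform.v1.schema.predict import prediction_v1


    def parse_classification(prediction) -> prediction_v1.ClassificationPredictionResult:
        """Coerce one PredictResponse.predictions entry into a typed message."""
        # Each entry arrives as a struct_pb2.Value; round-trip it through its
        # JSON form and let proto-plus parse the schema message from that.
        return prediction_v1.ClassificationPredictionResult.from_json(
            json_format.MessageToJson(prediction)
        )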
diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/services/__init__.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/services/__init__.py new file mode 100644 index 0000000000..4de65971c2 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py new file mode 100644 index 0000000000..b7b7c056aa --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .classification import ( + ClassificationPredictionResult, +) +from .image_object_detection import ( + ImageObjectDetectionPredictionResult, +) +from .image_segmentation import ( + ImageSegmentationPredictionResult, +) +from .tabular_classification import ( + TabularClassificationPredictionResult, +) +from .tabular_regression import ( + TabularRegressionPredictionResult, +) +from .text_extraction import ( + TextExtractionPredictionResult, +) +from .text_sentiment import ( + TextSentimentPredictionResult, +) +from .video_action_recognition import ( + VideoActionRecognitionPredictionResult, +) +from .video_classification import ( + VideoClassificationPredictionResult, +) +from .video_object_tracking import ( + VideoObjectTrackingPredictionResult, +) + +__all__ = ( + 'ClassificationPredictionResult', + 'ImageObjectDetectionPredictionResult', + 'ImageSegmentationPredictionResult', + 'TabularClassificationPredictionResult', + 'TabularRegressionPredictionResult', + 'TextExtractionPredictionResult', + 'TextSentimentPredictionResult', + 'VideoActionRecognitionPredictionResult', + 'VideoClassificationPredictionResult', + 'VideoObjectTrackingPredictionResult', +) diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py new file mode 100644 index 0000000000..f14b084951 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.prediction', + manifest={ + 'ClassificationPredictionResult', + }, +) + + +class ClassificationPredictionResult(proto.Message): + r"""Prediction output format for Image and Text Classification. + Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. + confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the Ids. 
+ """ + + ids = proto.RepeatedField( + proto.INT64, + number=1, + ) + display_names = proto.RepeatedField( + proto.STRING, + number=2, + ) + confidences = proto.RepeatedField( + proto.FLOAT, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py new file mode 100644 index 0000000000..74178c5502 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.prediction', + manifest={ + 'ImageObjectDetectionPredictionResult', + }, +) + + +class ImageObjectDetectionPredictionResult(proto.Message): + r"""Prediction output format for Image Object Detection. + Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified, ordered by the confidence + score descendingly. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. + confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the Ids. + bboxes (Sequence[google.protobuf.struct_pb2.ListValue]): + Bounding boxes, i.e. the rectangles over the image, that + pinpoint the found AnnotationSpecs. Given in order that + matches the IDs. Each bounding box is an array of 4 numbers + ``xMin``, ``xMax``, ``yMin``, and ``yMax``, which represent + the extremal coordinates of the box. They are relative to + the image size, and the point 0,0 is in the top left of the + image. 
+    """
+
+    ids = proto.RepeatedField(
+        proto.INT64,
+        number=1,
+    )
+    display_names = proto.RepeatedField(
+        proto.STRING,
+        number=2,
+    )
+    confidences = proto.RepeatedField(
+        proto.FLOAT,
+        number=3,
+    )
+    bboxes = proto.RepeatedField(
+        proto.MESSAGE,
+        number=4,
+        message=struct_pb2.ListValue,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py
new file mode 100644
index 0000000000..e93991222a
--- /dev/null
+++ b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1.schema.predict.prediction',
+    manifest={
+        'ImageSegmentationPredictionResult',
+    },
+)
+
+
+class ImageSegmentationPredictionResult(proto.Message):
+    r"""Prediction output format for Image Segmentation.
+    Attributes:
+        category_mask (str):
+            A PNG image where each pixel in the mask
+            represents the category to which the pixel in
+            the original image was predicted to belong. The
+            size of this image will be the same as the
+            original image. The mapping between the
+            AnnotationSpec and the color can be found in the
+            model's metadata. The model will choose the most
+            likely category, and if none of the categories
+            reach the confidence threshold, the pixel will
+            be marked as background.
+        confidence_mask (str):
+            A one channel image which is encoded as an
+            8bit lossless PNG. The size of the image will be
+            the same as the original image. For a specific
+            pixel, a darker color means less confidence in
+            the correctness of the category in the
+            categoryMask for the corresponding pixel. Black
+            means no confidence and white means complete
+            confidence.
+    """
+
+    category_mask = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    confidence_mask = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py
new file mode 100644
index 0000000000..a36bf8f991
--- /dev/null
+++ b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1.schema.predict.prediction',
+    manifest={
+        'TabularClassificationPredictionResult',
+    },
+)
+
+
+class TabularClassificationPredictionResult(proto.Message):
+    r"""Prediction output format for Tabular Classification.
+    Attributes:
+        classes (Sequence[str]):
+            The names of the classes being classified;
+            contains all possible values of the target
+            column.
+        scores (Sequence[float]):
+            The model's confidence in each class being
+            correct; a higher value means higher confidence.
+            The N-th score corresponds to the N-th class in
+            classes.
+    """
+
+    classes = proto.RepeatedField(
+        proto.STRING,
+        number=1,
+    )
+    scores = proto.RepeatedField(
+        proto.FLOAT,
+        number=2,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py
new file mode 100644
index 0000000000..56af2af196
--- /dev/null
+++ b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1.schema.predict.prediction',
+    manifest={
+        'TabularRegressionPredictionResult',
+    },
+)
+
+
+class TabularRegressionPredictionResult(proto.Message):
+    r"""Prediction output format for Tabular Regression.
+    Attributes:
+        value (float):
+            The regression value.
+        lower_bound (float):
+            The lower bound of the prediction interval.
+        upper_bound (float):
+            The upper bound of the prediction interval.
+ """ + + value = proto.Field( + proto.FLOAT, + number=1, + ) + lower_bound = proto.Field( + proto.FLOAT, + number=2, + ) + upper_bound = proto.Field( + proto.FLOAT, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py new file mode 100644 index 0000000000..3e7398f165 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.prediction', + manifest={ + 'TextExtractionPredictionResult', + }, +) + + +class TextExtractionPredictionResult(proto.Message): + r"""Prediction output format for Text Extraction. + Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified, ordered by the confidence + score descendingly. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. + text_segment_start_offsets (Sequence[int]): + The start offsets, inclusive, of the text + segment in which the AnnotationSpec has been + identified. Expressed as a zero-based number of + characters as measured from the start of the + text snippet. + text_segment_end_offsets (Sequence[int]): + The end offsets, inclusive, of the text + segment in which the AnnotationSpec has been + identified. Expressed as a zero-based number of + characters as measured from the start of the + text snippet. + confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the Ids. 
+ """ + + ids = proto.RepeatedField( + proto.INT64, + number=1, + ) + display_names = proto.RepeatedField( + proto.STRING, + number=2, + ) + text_segment_start_offsets = proto.RepeatedField( + proto.INT64, + number=3, + ) + text_segment_end_offsets = proto.RepeatedField( + proto.INT64, + number=4, + ) + confidences = proto.RepeatedField( + proto.FLOAT, + number=5, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py new file mode 100644 index 0000000000..135db45729 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1.schema.predict.prediction', + manifest={ + 'TextSentimentPredictionResult', + }, +) + + +class TextSentimentPredictionResult(proto.Message): + r"""Prediction output format for Text Sentiment + Attributes: + sentiment (int): + The integer sentiment labels between 0 + (inclusive) and sentimentMax label (inclusive), + while 0 maps to the least positive sentiment and + sentimentMax maps to the most positive one. The + higher the score is, the more positive the + sentiment in the text snippet is. Note: + sentimentMax is an integer value between 1 + (inclusive) and 10 (inclusive). + """ + + sentiment = proto.Field( + proto.INT32, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py new file mode 100644 index 0000000000..5a853655ae --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py @@ -0,0 +1,84 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py
new file mode 100644
index 0000000000..5a853655ae
--- /dev/null
+++ b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.protobuf import duration_pb2  # type: ignore
+from google.protobuf import wrappers_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1.schema.predict.prediction',
+    manifest={
+        'VideoActionRecognitionPredictionResult',
+    },
+)
+
+
+class VideoActionRecognitionPredictionResult(proto.Message):
+    r"""Prediction output format for Video Action Recognition.
+    Attributes:
+        id (str):
+            The resource ID of the AnnotationSpec that
+            had been identified.
+        display_name (str):
+            The display name of the AnnotationSpec that
+            had been identified.
+        time_segment_start (google.protobuf.duration_pb2.Duration):
+            The beginning, inclusive, of the video's time
+            segment in which the AnnotationSpec has been
+            identified. Expressed as a number of seconds as
+            measured from the start of the video, with
+            fractions up to a microsecond precision, and
+            with "s" appended at the end.
+        time_segment_end (google.protobuf.duration_pb2.Duration):
+            The end, exclusive, of the video's time
+            segment in which the AnnotationSpec has been
+            identified. Expressed as a number of seconds as
+            measured from the start of the video, with
+            fractions up to a microsecond precision, and
+            with "s" appended at the end.
+        confidence (google.protobuf.wrappers_pb2.FloatValue):
+            The Model's confidence in the correctness of
+            this prediction, higher value means higher
+            confidence.
+    """
+
+    id = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    display_name = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    time_segment_start = proto.Field(
+        proto.MESSAGE,
+        number=4,
+        message=duration_pb2.Duration,
+    )
+    time_segment_end = proto.Field(
+        proto.MESSAGE,
+        number=5,
+        message=duration_pb2.Duration,
+    )
+    confidence = proto.Field(
+        proto.MESSAGE,
+        number=6,
+        message=wrappers_pb2.FloatValue,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py
new file mode 100644
index 0000000000..da14b3253e
--- /dev/null
+++ b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py
@@ -0,0 +1,102 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.protobuf import duration_pb2  # type: ignore
+from google.protobuf import wrappers_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1.schema.predict.prediction',
+    manifest={
+        'VideoClassificationPredictionResult',
+    },
+)
+
+
+class VideoClassificationPredictionResult(proto.Message):
+    r"""Prediction output format for Video Classification.
+    Attributes:
+        id (str):
+            The resource ID of the AnnotationSpec that
+            had been identified.
+        display_name (str):
+            The display name of the AnnotationSpec that
+            had been identified.
+        type_ (str):
+            The type of the prediction. The requested
+            types can be configured via parameters. This
+            will be one of - segment-classification
+            - shot-classification
+            - one-sec-interval-classification
+        time_segment_start (google.protobuf.duration_pb2.Duration):
+            The beginning, inclusive, of the video's time
+            segment in which the AnnotationSpec has been
+            identified. Expressed as a number of seconds as
+            measured from the start of the video, with
+            fractions up to a microsecond precision, and
+            with "s" appended at the end. Note that for
+            'segment-classification' prediction type, this
+            equals the original 'timeSegmentStart' from the
+            input instance, for other types it is the start
+            of a shot or a 1 second interval respectively.
+        time_segment_end (google.protobuf.duration_pb2.Duration):
+            The end, exclusive, of the video's time
+            segment in which the AnnotationSpec has been
+            identified. Expressed as a number of seconds as
+            measured from the start of the video, with
+            fractions up to a microsecond precision, and
+            with "s" appended at the end. Note that for
+            'segment-classification' prediction type, this
+            equals the original 'timeSegmentEnd' from the
+            input instance, for other types it is the end of
+            a shot or a 1 second interval respectively.
+        confidence (google.protobuf.wrappers_pb2.FloatValue):
+            The Model's confidence in the correctness of
+            this prediction, higher value means higher
+            confidence.
+    """
+
+    id = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    display_name = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    type_ = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    time_segment_start = proto.Field(
+        proto.MESSAGE,
+        number=4,
+        message=duration_pb2.Duration,
+    )
+    time_segment_end = proto.Field(
+        proto.MESSAGE,
+        number=5,
+        message=duration_pb2.Duration,
+    )
+    confidence = proto.Field(
+        proto.MESSAGE,
+        number=6,
+        message=wrappers_pb2.FloatValue,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
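Editorial note, not part of the patch: the `Duration` and `FloatValue` fields above are the protobuf well-known types, so they are built and read through `duration_pb2` and `wrappers_pb2`. A minimal sketch with invented values follows; the import path is assumed from the staged layout.

    from google.protobuf import duration_pb2, wrappers_pb2
    from google.cloud.aiplatform.v1.schema.predict.prediction_v1 import types

    result = types.VideoClassificationPredictionResult(
        id='123',
        display_name='cat',  # hypothetical AnnotationSpec display name
        type_='segment-classification',
        time_segment_start=duration_pb2.Duration(seconds=0),
        time_segment_end=duration_pb2.Duration(seconds=12, nanos=500000000),
        confidence=wrappers_pb2.FloatValue(value=0.97),
    )
    print(result.time_segment_end.seconds)  # 12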
diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py
new file mode 100644
index 0000000000..9b70e913cd
--- /dev/null
+++ b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py
@@ -0,0 +1,144 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.protobuf import duration_pb2  # type: ignore
+from google.protobuf import wrappers_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1.schema.predict.prediction',
+    manifest={
+        'VideoObjectTrackingPredictionResult',
+    },
+)
+
+
+class VideoObjectTrackingPredictionResult(proto.Message):
+    r"""Prediction output format for Video Object Tracking.
+    Attributes:
+        id (str):
+            The resource ID of the AnnotationSpec that
+            had been identified.
+        display_name (str):
+            The display name of the AnnotationSpec that
+            had been identified.
+        time_segment_start (google.protobuf.duration_pb2.Duration):
+            The beginning, inclusive, of the video's time
+            segment in which the object instance has been
+            detected. Expressed as a number of seconds as
+            measured from the start of the video, with
+            fractions up to a microsecond precision, and
+            with "s" appended at the end.
+        time_segment_end (google.protobuf.duration_pb2.Duration):
+            The end, inclusive, of the video's time
+            segment in which the object instance has been
+            detected. Expressed as a number of seconds as
+            measured from the start of the video, with
+            fractions up to a microsecond precision, and
+            with "s" appended at the end.
+        confidence (google.protobuf.wrappers_pb2.FloatValue):
+            The Model's confidence in the correctness of
+            this prediction, higher value means higher
+            confidence.
+        frames (Sequence[google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.VideoObjectTrackingPredictionResult.Frame]):
+            All of the frames of the video in which a
+            single object instance has been detected. The
+            bounding boxes in the frames identify the same
+            object.
+    """
+
+    class Frame(proto.Message):
+        r"""The fields ``xMin``, ``xMax``, ``yMin``, and ``yMax`` refer to a
+        bounding box, i.e. the rectangle over the video frame pinpointing
+        the found AnnotationSpec. The coordinates are relative to the frame
+        size, and the point 0,0 is in the top left of the frame.
+
+        Attributes:
+            time_offset (google.protobuf.duration_pb2.Duration):
+                A time (frame) of a video in which the object
+                has been detected. Expressed as a number of
+                seconds as measured from the start of the video,
+                with fractions up to a microsecond precision,
+                and with "s" appended at the end.
+            x_min (google.protobuf.wrappers_pb2.FloatValue):
+                The leftmost coordinate of the bounding box.
+            x_max (google.protobuf.wrappers_pb2.FloatValue):
+                The rightmost coordinate of the bounding box.
+            y_min (google.protobuf.wrappers_pb2.FloatValue):
+                The topmost coordinate of the bounding box.
+            y_max (google.protobuf.wrappers_pb2.FloatValue):
+                The bottommost coordinate of the bounding
+                box.
+ """ + + time_offset = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + x_min = proto.Field( + proto.MESSAGE, + number=2, + message=wrappers_pb2.FloatValue, + ) + x_max = proto.Field( + proto.MESSAGE, + number=3, + message=wrappers_pb2.FloatValue, + ) + y_min = proto.Field( + proto.MESSAGE, + number=4, + message=wrappers_pb2.FloatValue, + ) + y_max = proto.Field( + proto.MESSAGE, + number=5, + message=wrappers_pb2.FloatValue, + ) + + id = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.MESSAGE, + number=3, + message=duration_pb2.Duration, + ) + time_segment_end = proto.Field( + proto.MESSAGE, + number=4, + message=duration_pb2.Duration, + ) + confidence = proto.Field( + proto.MESSAGE, + number=5, + message=wrappers_pb2.FloatValue, + ) + frames = proto.RepeatedField( + proto.MESSAGE, + number=6, + message=Frame, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/prediction/mypy.ini b/owl-bot-staging/v1/schema/predict/prediction/mypy.ini new file mode 100644 index 0000000000..4505b48543 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/prediction/mypy.ini @@ -0,0 +1,3 @@ +[mypy] +python_version = 3.6 +namespace_packages = True diff --git a/owl-bot-staging/v1/schema/predict/prediction/noxfile.py b/owl-bot-staging/v1/schema/predict/prediction/noxfile.py new file mode 100644 index 0000000000..8ea8ef006a --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/prediction/noxfile.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import pathlib +import shutil +import subprocess +import sys + + +import nox # type: ignore + +CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() + +LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" +PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") + + +nox.sessions = [ + "unit", + "cover", + "mypy", + "check_lower_bounds" + # exclude update_lower_bounds from default + "docs", +] + +@nox.session(python=['3.6', '3.7', '3.8', '3.9']) +def unit(session): + """Run the unit test suite.""" + + session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio') + session.install('-e', '.') + + session.run( + 'py.test', + '--quiet', + '--cov=google/cloud/aiplatform/v1/schema/predict/prediction_v1/', + '--cov-config=.coveragerc', + '--cov-report=term', + '--cov-report=html', + os.path.join('tests', 'unit', ''.join(session.posargs)) + ) + + +@nox.session(python='3.7') +def cover(session): + """Run the final coverage report. + This outputs the coverage report aggregating coverage from the unit + test runs (not system test runs), and then erases coverage data. 
+ """ + session.install("coverage", "pytest-cov") + session.run("coverage", "report", "--show-missing", "--fail-under=100") + + session.run("coverage", "erase") + + +@nox.session(python=['3.6', '3.7']) +def mypy(session): + """Run the type checker.""" + session.install('mypy', 'types-pkg_resources') + session.install('.') + session.run( + 'mypy', + '--explicit-package-bases', + 'google', + ) + + +@nox.session +def update_lower_bounds(session): + """Update lower bounds in constraints.txt to match setup.py""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'update', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + + +@nox.session +def check_lower_bounds(session): + """Check lower bounds in setup.py are reflected in constraints file""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'check', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + +@nox.session(python='3.6') +def docs(session): + """Build the docs for this library.""" + + session.install("-e", ".") + session.install("sphinx<3.0.0", "alabaster", "recommonmark") + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-W", # warnings as errors + "-T", # show full traceback on exception + "-N", # no colors + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) diff --git a/owl-bot-staging/v1/schema/predict/prediction/scripts/fixup_prediction_v1_keywords.py b/owl-bot-staging/v1/schema/predict/prediction/scripts/fixup_prediction_v1_keywords.py new file mode 100644 index 0000000000..7860bb63cf --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/prediction/scripts/fixup_prediction_v1_keywords.py @@ -0,0 +1,175 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class predictionCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. 
+ return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=predictionCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the prediction client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/schema/predict/prediction/setup.py b/owl-bot-staging/v1/schema/predict/prediction/setup.py new file mode 100644 index 0000000000..3456e20d11 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/prediction/setup.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import io +import os +import setuptools # type: ignore + +version = '0.1.0' + +package_root = os.path.abspath(os.path.dirname(__file__)) + +readme_filename = os.path.join(package_root, 'README.rst') +with io.open(readme_filename, encoding='utf-8') as readme_file: + readme = readme_file.read() + +setuptools.setup( + name='google-cloud-aiplatform-v1-schema-predict-prediction', + version=version, + long_description=readme, + packages=setuptools.PEP420PackageFinder.find(), + namespace_packages=('google', 'google.cloud', 'google.cloud.aiplatform', 'google.cloud.aiplatform.v1', 'google.cloud.aiplatform.v1.schema', 'google.cloud.aiplatform.v1.schema.predict'), + platforms='Posix; MacOS X; Windows', + include_package_data=True, + install_requires=( + 'google-api-core[grpc] >= 1.27.0, < 2.0.0dev', + 'libcst >= 0.2.5', + 'proto-plus >= 1.15.0', + 'packaging >= 14.3', ), + python_requires='>=3.6', + classifiers=[ + 'Development Status :: 3 - Alpha', + 'Intended Audience :: Developers', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Topic :: Internet', + 'Topic :: Software Development :: Libraries :: Python Modules', + ], + zip_safe=False, +) diff --git a/owl-bot-staging/v1/schema/predict/prediction/tests/__init__.py b/owl-bot-staging/v1/schema/predict/prediction/tests/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/prediction/tests/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/schema/predict/prediction/tests/unit/__init__.py b/owl-bot-staging/v1/schema/predict/prediction/tests/unit/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/prediction/tests/unit/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/schema/predict/prediction/tests/unit/gapic/__init__.py b/owl-bot-staging/v1/schema/predict/prediction/tests/unit/gapic/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/prediction/tests/unit/gapic/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/schema/predict/prediction/tests/unit/gapic/prediction_v1/__init__.py b/owl-bot-staging/v1/schema/predict/prediction/tests/unit/gapic/prediction_v1/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1/schema/predict/prediction/tests/unit/gapic/prediction_v1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/scripts/fixup_aiplatform_v1_keywords.py b/owl-bot-staging/v1/scripts/fixup_aiplatform_v1_keywords.py new file mode 100644 index 0000000000..01e80ac919 --- /dev/null +++ b/owl-bot-staging/v1/scripts/fixup_aiplatform_v1_keywords.py @@ -0,0 +1,240 @@ +#! 
/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class aiplatformCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'batch_migrate_resources': ('parent', 'migrate_resource_requests', ), + 'cancel_batch_prediction_job': ('name', ), + 'cancel_custom_job': ('name', ), + 'cancel_data_labeling_job': ('name', ), + 'cancel_hyperparameter_tuning_job': ('name', ), + 'cancel_pipeline_job': ('name', ), + 'cancel_training_pipeline': ('name', ), + 'create_batch_prediction_job': ('parent', 'batch_prediction_job', ), + 'create_custom_job': ('parent', 'custom_job', ), + 'create_data_labeling_job': ('parent', 'data_labeling_job', ), + 'create_dataset': ('parent', 'dataset', ), + 'create_endpoint': ('parent', 'endpoint', ), + 'create_hyperparameter_tuning_job': ('parent', 'hyperparameter_tuning_job', ), + 'create_pipeline_job': ('parent', 'pipeline_job', 'pipeline_job_id', ), + 'create_specialist_pool': ('parent', 'specialist_pool', ), + 'create_training_pipeline': ('parent', 'training_pipeline', ), + 'delete_batch_prediction_job': ('name', ), + 'delete_custom_job': ('name', ), + 'delete_data_labeling_job': ('name', ), + 'delete_dataset': ('name', ), + 'delete_endpoint': ('name', ), + 'delete_hyperparameter_tuning_job': ('name', ), + 'delete_model': ('name', ), + 'delete_pipeline_job': ('name', ), + 'delete_specialist_pool': ('name', 'force', ), + 'delete_training_pipeline': ('name', ), + 'deploy_model': ('endpoint', 'deployed_model', 'traffic_split', ), + 'export_data': ('name', 'export_config', ), + 'export_model': ('name', 'output_config', ), + 'get_annotation_spec': ('name', 'read_mask', ), + 'get_batch_prediction_job': ('name', ), + 'get_custom_job': ('name', ), + 'get_data_labeling_job': ('name', ), + 'get_dataset': ('name', 'read_mask', ), + 'get_endpoint': ('name', ), + 'get_hyperparameter_tuning_job': ('name', ), + 'get_model': ('name', ), + 'get_model_evaluation': ('name', ), + 'get_model_evaluation_slice': ('name', ), + 'get_pipeline_job': ('name', ), + 'get_specialist_pool': ('name', ), + 'get_training_pipeline': ('name', ), + 'import_data': ('name', 'import_configs', ), + 'list_annotations': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), + 'list_batch_prediction_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), + 'list_custom_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), + 'list_data_items': ('parent', 
'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), + 'list_data_labeling_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), + 'list_datasets': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), + 'list_endpoints': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), + 'list_hyperparameter_tuning_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), + 'list_model_evaluations': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), + 'list_model_evaluation_slices': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), + 'list_models': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), + 'list_pipeline_jobs': ('parent', 'filter', 'page_size', 'page_token', ), + 'list_specialist_pools': ('parent', 'page_size', 'page_token', 'read_mask', ), + 'list_training_pipelines': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), + 'predict': ('endpoint', 'instances', 'parameters', ), + 'search_migratable_resources': ('parent', 'page_size', 'page_token', 'filter', ), + 'undeploy_model': ('endpoint', 'deployed_model_id', 'traffic_split', ), + 'update_dataset': ('dataset', 'update_mask', ), + 'update_endpoint': ('endpoint', 'update_mask', ), + 'update_model': ('model', 'update_mask', ), + 'update_specialist_pool': ('specialist_pool', 'update_mask', ), + 'upload_model': ('parent', 'model', ), + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=aiplatformCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. 
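+        # For example, a (hypothetical) positional call such as
+        #     client.get_dataset('projects/p/locations/l/datasets/d')
+        # is rewritten into the request-object form
+        #     client.get_dataset(request={'name': 'projects/p/locations/l/datasets/d'})
+        # per the METHOD_TO_PARAMS mapping above.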
+ tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the aiplatform client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. +""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/scripts/fixup_definition_v1_keywords.py b/owl-bot-staging/v1/scripts/fixup_definition_v1_keywords.py new file mode 100644 index 0000000000..4759d2731c --- /dev/null +++ b/owl-bot-staging/v1/scripts/fixup_definition_v1_keywords.py @@ -0,0 +1,175 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
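Editorial note, not part of the patch: the definition, instance, and params fixup scripts that follow repeat this same transformer template, differing only in their METHOD_TO_PARAMS maps (which are empty for the schema packages). A minimal sketch of the end-to-end behavior, using the aiplatform transformer above; the import assumes the scripts directory is on sys.path, and the client call is hypothetical.

    import libcst as cst
    from fixup_aiplatform_v1_keywords import aiplatformCallTransformer

    src = "client.get_dataset('projects/p/locations/l/datasets/d', retry=None)\n"
    fixed = cst.parse_module(src).visit(aiplatformCallTransformer())
    print(fixed.code)
    # Expected, per the METHOD_TO_PARAMS entry for 'get_dataset' (roughly):
    # client.get_dataset(request={'name': 'projects/p/locations/l/datasets/d'}, retry=None)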
+# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class definitionCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=definitionCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the definition client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. 
the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. +""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/scripts/fixup_instance_v1_keywords.py b/owl-bot-staging/v1/scripts/fixup_instance_v1_keywords.py new file mode 100644 index 0000000000..d9e8bd0b17 --- /dev/null +++ b/owl-bot-staging/v1/scripts/fixup_instance_v1_keywords.py @@ -0,0 +1,175 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class instanceCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. 
+ return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=instanceCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the instance client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/scripts/fixup_params_v1_keywords.py b/owl-bot-staging/v1/scripts/fixup_params_v1_keywords.py new file mode 100644 index 0000000000..17915abed8 --- /dev/null +++ b/owl-bot-staging/v1/scripts/fixup_params_v1_keywords.py @@ -0,0 +1,175 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class paramsCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. 
+ return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=paramsCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the params client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/scripts/fixup_prediction_v1_keywords.py b/owl-bot-staging/v1/scripts/fixup_prediction_v1_keywords.py new file mode 100644 index 0000000000..7860bb63cf --- /dev/null +++ b/owl-bot-staging/v1/scripts/fixup_prediction_v1_keywords.py @@ -0,0 +1,175 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class predictionCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. 
+            return updated
+
+        kwargs, ctrl_kwargs = partition(
+            lambda a: a.keyword.value not in self.CTRL_PARAMS,
+            kwargs
+        )
+
+        # Positional arguments beyond the method's declared parameters can only
+        # be control parameters (retry, timeout, metadata), in that order.
+        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
+        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
+                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
+
+        request_arg = cst.Arg(
+            value=cst.Dict([
+                cst.DictElement(
+                    cst.SimpleString("'{}'".format(name)),
+                    cst.Element(value=arg.value)
+                )
+                # Note: the args + kwargs looks silly, but keep in mind that
+                # the control parameters had to be stripped out, and that
+                # those could have been passed positionally or by keyword.
+                for name, arg in zip(kword_params, args + kwargs)]),
+            keyword=cst.Name("request")
+        )
+
+        return updated.with_changes(
+            args=[request_arg] + ctrl_kwargs
+        )
+
+
+def fix_files(
+    in_dir: pathlib.Path,
+    out_dir: pathlib.Path,
+    *,
+    transformer=predictionCallTransformer(),
+):
+    """Duplicate the input dir to the output dir, fixing method calls in each file.
+
+    Preconditions:
+    * in_dir is a real directory
+    * out_dir is a real, empty directory
+    """
+    pyfile_gen = (
+        pathlib.Path(os.path.join(root, f))
+        for root, _, files in os.walk(in_dir)
+        for f in files if os.path.splitext(f)[1] == ".py"
+    )
+
+    for fpath in pyfile_gen:
+        with open(fpath, 'r') as f:
+            src = f.read()
+
+        # Parse the code and insert method call fixes.
+        tree = cst.parse_module(src)
+        updated = tree.visit(transformer)
+
+        # Create the path and directory structure for the new file.
+        updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
+        updated_path.parent.mkdir(parents=True, exist_ok=True)
+
+        # Generate the updated source file at the corresponding path.
+        with open(updated_path, 'w') as f:
+            f.write(updated.code)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description="""Fix up source that uses the prediction client library.
+
+The existing sources are NOT overwritten but are copied to output_dir with changes made.
+
+Note: This tool makes a best-effort attempt at converting positional
+      parameters in client method calls to keyword-based parameters.
+      Cases where it WILL FAIL include
+      A) * or ** expansion in a method call.
+      B) Calls via a function or method alias (including free function calls)
+      C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
+
+      These all constitute false negatives. The tool will also produce false
+      positives when an API method shares a name with another method.
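+
+      As an illustration (the method below is hypothetical, since this package
+      maps no API methods of its own), a call such as
+          client.predict('endpoint_value', instances)
+      would become
+          client.predict(request={'endpoint': 'endpoint_value',
+                                  'instances': instances})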
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/setup.py b/owl-bot-staging/v1/setup.py new file mode 100644 index 0000000000..cf6098bbed --- /dev/null +++ b/owl-bot-staging/v1/setup.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import io +import os +import setuptools # type: ignore + +version = '0.1.0' + +package_root = os.path.abspath(os.path.dirname(__file__)) + +readme_filename = os.path.join(package_root, 'README.rst') +with io.open(readme_filename, encoding='utf-8') as readme_file: + readme = readme_file.read() + +setuptools.setup( + name='google-cloud-aiplatform-v1-schema-trainingjob-definition', + version=version, + long_description=readme, + packages=setuptools.PEP420PackageFinder.find(), + namespace_packages=('google', 'google.cloud', 'google.cloud.aiplatform', 'google.cloud.aiplatform.v1', 'google.cloud.aiplatform.v1.schema', 'google.cloud.aiplatform.v1.schema.trainingjob'), + platforms='Posix; MacOS X; Windows', + include_package_data=True, + install_requires=( + 'google-api-core[grpc] >= 1.27.0, < 2.0.0dev', + 'libcst >= 0.2.5', + 'proto-plus >= 1.15.0', + 'packaging >= 14.3', ), + python_requires='>=3.6', + classifiers=[ + 'Development Status :: 3 - Alpha', + 'Intended Audience :: Developers', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Topic :: Internet', + 'Topic :: Software Development :: Libraries :: Python Modules', + ], + zip_safe=False, +) diff --git a/owl-bot-staging/v1/tests/__init__.py b/owl-bot-staging/v1/tests/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1/tests/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/tests/unit/__init__.py b/owl-bot-staging/v1/tests/unit/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/tests/unit/gapic/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_dataset_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_dataset_service.py new file mode 100644 index 0000000000..83ede00e4c --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_dataset_service.py @@ -0,0 +1,3983 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1.services.dataset_service import DatasetServiceAsyncClient +from google.cloud.aiplatform_v1.services.dataset_service import DatasetServiceClient +from google.cloud.aiplatform_v1.services.dataset_service import pagers +from google.cloud.aiplatform_v1.services.dataset_service import transports +from google.cloud.aiplatform_v1.services.dataset_service.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.aiplatform_v1.types import annotation +from google.cloud.aiplatform_v1.types import annotation_spec +from google.cloud.aiplatform_v1.types import data_item +from google.cloud.aiplatform_v1.types import dataset +from google.cloud.aiplatform_v1.types import dataset as gca_dataset +from google.cloud.aiplatform_v1.types import dataset_service +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import io +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
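+# For example, with the default pinned to "foo.googleapis.com", the derived
+# mTLS endpoint is "foo.mtls.googleapis.com", which is easy to assert against.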
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert DatasetServiceClient._get_default_mtls_endpoint(None) is None + assert DatasetServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert DatasetServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert DatasetServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert DatasetServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert DatasetServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + DatasetServiceClient, + DatasetServiceAsyncClient, +]) +def test_dataset_service_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +@pytest.mark.parametrize("client_class", [ + DatasetServiceClient, + DatasetServiceAsyncClient, +]) +def test_dataset_service_client_service_account_always_use_jwt(client_class): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.DatasetServiceGrpcTransport, "grpc"), + (transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_dataset_service_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + DatasetServiceClient, + DatasetServiceAsyncClient, +]) +def test_dataset_service_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +def test_dataset_service_client_get_transport_class(): + transport = DatasetServiceClient.get_transport_class() + available_transports = [ + transports.DatasetServiceGrpcTransport, + ] + assert transport in 
available_transports + + transport = DatasetServiceClient.get_transport_class("grpc") + assert transport == transports.DatasetServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc"), + (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(DatasetServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceClient)) +@mock.patch.object(DatasetServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceAsyncClient)) +def test_dataset_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(DatasetServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(DatasetServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
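+    # (only "true" and "false" are accepted for that variable; anything else
+    # raises ValueError at client construction time.)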
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc", "true"), + (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc", "false"), + (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(DatasetServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceClient)) +@mock.patch.object(DatasetServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_dataset_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
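+    # (here the cert is discovered via google.auth's default client cert
+    # source rather than passed explicitly in client options.)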
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc"), + (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_dataset_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc"), + (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_dataset_service_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_dataset_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1.services.dataset_service.transports.DatasetServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = DatasetServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_dataset(transport: str = 'grpc', request_type=dataset_service.CreateDatasetRequest): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.CreateDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_dataset_from_dict(): + test_create_dataset(request_type=dict) + + +def test_create_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + client.create_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.CreateDatasetRequest() + + +@pytest.mark.asyncio +async def test_create_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.CreateDatasetRequest): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. 
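+        # (grpc_helpers_async.FakeUnaryUnaryCall wraps the value in an
+        # awaitable, so the async client can await the mocked RPC.)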
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.CreateDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_dataset_async_from_dict(): + await test_create_dataset_async(request_type=dict) + + +def test_create_dataset_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.CreateDatasetRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_dataset_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.CreateDatasetRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_create_dataset_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_dataset( + parent='parent_value', + dataset=gca_dataset.Dataset(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
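+    # (the flattened parent/dataset keywords should have been merged into a
+    # single CreateDatasetRequest before the RPC was issued.)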
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].dataset == gca_dataset.Dataset(name='name_value') + + +def test_create_dataset_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_dataset( + dataset_service.CreateDatasetRequest(), + parent='parent_value', + dataset=gca_dataset.Dataset(name='name_value'), + ) + + +@pytest.mark.asyncio +async def test_create_dataset_flattened_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_dataset( + parent='parent_value', + dataset=gca_dataset.Dataset(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].dataset == gca_dataset.Dataset(name='name_value') + + +@pytest.mark.asyncio +async def test_create_dataset_flattened_error_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_dataset( + dataset_service.CreateDatasetRequest(), + parent='parent_value', + dataset=gca_dataset.Dataset(name='name_value'), + ) + + +def test_get_dataset(transport: str = 'grpc', request_type=dataset_service.GetDatasetRequest): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset.Dataset( + name='name_value', + display_name='display_name_value', + metadata_schema_uri='metadata_schema_uri_value', + etag='etag_value', + ) + response = client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.GetDatasetRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.etag == 'etag_value' + + +def test_get_dataset_from_dict(): + test_get_dataset(request_type=dict) + + +def test_get_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + client.get_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.GetDatasetRequest() + + +@pytest.mark.asyncio +async def test_get_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.GetDatasetRequest): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset( + name='name_value', + display_name='display_name_value', + metadata_schema_uri='metadata_schema_uri_value', + etag='etag_value', + )) + response = await client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.GetDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_get_dataset_async_from_dict(): + await test_get_dataset_async(request_type=dict) + + +def test_get_dataset_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.GetDatasetRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + call.return_value = dataset.Dataset() + client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_dataset_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.GetDatasetRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset()) + await client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_get_dataset_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset.Dataset() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_dataset( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_get_dataset_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_dataset( + dataset_service.GetDatasetRequest(), + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_get_dataset_flattened_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset.Dataset() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_dataset( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_get_dataset_flattened_error_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
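+    # (mixing the two styles is ambiguous, so the client refuses the call
+    # rather than guessing which values should win.)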
+ with pytest.raises(ValueError): + await client.get_dataset( + dataset_service.GetDatasetRequest(), + name='name_value', + ) + + +def test_update_dataset(transport: str = 'grpc', request_type=dataset_service.UpdateDatasetRequest): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_dataset.Dataset( + name='name_value', + display_name='display_name_value', + metadata_schema_uri='metadata_schema_uri_value', + etag='etag_value', + ) + response = client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.UpdateDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.etag == 'etag_value' + + +def test_update_dataset_from_dict(): + test_update_dataset(request_type=dict) + + +def test_update_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + client.update_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.UpdateDatasetRequest() + + +@pytest.mark.asyncio +async def test_update_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.UpdateDatasetRequest): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset( + name='name_value', + display_name='display_name_value', + metadata_schema_uri='metadata_schema_uri_value', + etag='etag_value', + )) + response = await client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.UpdateDatasetRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_update_dataset_async_from_dict(): + await test_update_dataset_async(request_type=dict) + + +def test_update_dataset_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.UpdateDatasetRequest() + + request.dataset.name = 'dataset.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + call.return_value = gca_dataset.Dataset() + client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'dataset.name=dataset.name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_dataset_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.UpdateDatasetRequest() + + request.dataset.name = 'dataset.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) + await client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'dataset.name=dataset.name/value', + ) in kw['metadata'] + + +def test_update_dataset_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_dataset.Dataset() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_dataset( + dataset=gca_dataset.Dataset(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].dataset == gca_dataset.Dataset(name='name_value') + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) + + +def test_update_dataset_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_dataset( + dataset_service.UpdateDatasetRequest(), + dataset=gca_dataset.Dataset(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.asyncio +async def test_update_dataset_flattened_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_dataset.Dataset() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_dataset( + dataset=gca_dataset.Dataset(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].dataset == gca_dataset.Dataset(name='name_value') + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) + + +@pytest.mark.asyncio +async def test_update_dataset_flattened_error_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_dataset( + dataset_service.UpdateDatasetRequest(), + dataset=gca_dataset.Dataset(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +def test_list_datasets(transport: str = 'grpc', request_type=dataset_service.ListDatasetsRequest): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset_service.ListDatasetsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_datasets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ListDatasetsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDatasetsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_datasets_from_dict(): + test_list_datasets(request_type=dict) + + +def test_list_datasets_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + client.list_datasets() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ListDatasetsRequest() + + +@pytest.mark.asyncio +async def test_list_datasets_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ListDatasetsRequest): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDatasetsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_datasets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ListDatasetsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListDatasetsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_datasets_async_from_dict(): + await test_list_datasets_async(request_type=dict) + + +def test_list_datasets_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ListDatasetsRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + call.return_value = dataset_service.ListDatasetsResponse() + client.list_datasets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_datasets_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ListDatasetsRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDatasetsResponse()) + await client.list_datasets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_list_datasets_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset_service.ListDatasetsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_datasets( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +def test_list_datasets_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_datasets( + dataset_service.ListDatasetsRequest(), + parent='parent_value', + ) + + +@pytest.mark.asyncio +async def test_list_datasets_flattened_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset_service.ListDatasetsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDatasetsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_datasets( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +@pytest.mark.asyncio +async def test_list_datasets_flattened_error_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_datasets( + dataset_service.ListDatasetsRequest(), + parent='parent_value', + ) + + +def test_list_datasets_pager(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_datasets), + '__call__') as call: + # Set the response to a series of pages. 
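+        # (mock consumes one side_effect element per page fetch; the trailing
+        # RuntimeError fails the test if the pager requests an extra page.)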
+ call.side_effect = (
+ dataset_service.ListDatasetsResponse(
+ datasets=[
+ dataset.Dataset(),
+ dataset.Dataset(),
+ dataset.Dataset(),
+ ],
+ next_page_token='abc',
+ ),
+ dataset_service.ListDatasetsResponse(
+ datasets=[],
+ next_page_token='def',
+ ),
+ dataset_service.ListDatasetsResponse(
+ datasets=[
+ dataset.Dataset(),
+ ],
+ next_page_token='ghi',
+ ),
+ dataset_service.ListDatasetsResponse(
+ datasets=[
+ dataset.Dataset(),
+ dataset.Dataset(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ('parent', ''),
+ )),
+ )
+ pager = client.list_datasets(request={})
+
+ assert pager._metadata == metadata
+
+ results = [i for i in pager]
+ assert len(results) == 6
+ assert all(isinstance(i, dataset.Dataset)
+ for i in results)
+
+def test_list_datasets_pages():
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_datasets),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ dataset_service.ListDatasetsResponse(
+ datasets=[
+ dataset.Dataset(),
+ dataset.Dataset(),
+ dataset.Dataset(),
+ ],
+ next_page_token='abc',
+ ),
+ dataset_service.ListDatasetsResponse(
+ datasets=[],
+ next_page_token='def',
+ ),
+ dataset_service.ListDatasetsResponse(
+ datasets=[
+ dataset.Dataset(),
+ ],
+ next_page_token='ghi',
+ ),
+ dataset_service.ListDatasetsResponse(
+ datasets=[
+ dataset.Dataset(),
+ dataset.Dataset(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_datasets(request={}).pages)
+ for page_, token in zip(pages, ['abc','def','ghi', '']):
+ assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_datasets_async_pager():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_datasets),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ dataset_service.ListDatasetsResponse(
+ datasets=[
+ dataset.Dataset(),
+ dataset.Dataset(),
+ dataset.Dataset(),
+ ],
+ next_page_token='abc',
+ ),
+ dataset_service.ListDatasetsResponse(
+ datasets=[],
+ next_page_token='def',
+ ),
+ dataset_service.ListDatasetsResponse(
+ datasets=[
+ dataset.Dataset(),
+ ],
+ next_page_token='ghi',
+ ),
+ dataset_service.ListDatasetsResponse(
+ datasets=[
+ dataset.Dataset(),
+ dataset.Dataset(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_datasets(request={},)
+ assert async_pager.next_page_token == 'abc'
+ responses = []
+ async for response in async_pager:
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, dataset.Dataset)
+ for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_datasets_async_pages():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_datasets),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = ( + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + dataset.Dataset(), + ], + next_page_token='abc', + ), + dataset_service.ListDatasetsResponse( + datasets=[], + next_page_token='def', + ), + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + ], + next_page_token='ghi', + ), + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_datasets(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +def test_delete_dataset(transport: str = 'grpc', request_type=dataset_service.DeleteDatasetRequest): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.DeleteDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_dataset_from_dict(): + test_delete_dataset(request_type=dict) + + +def test_delete_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + client.delete_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.DeleteDatasetRequest() + + +@pytest.mark.asyncio +async def test_delete_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.DeleteDatasetRequest): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.DeleteDatasetRequest() + + # Establish that the response is the type that we expect. 
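+ # DeleteDataset is a long-running operation, surfaced to callers as a future.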
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_dataset_async_from_dict(): + await test_delete_dataset_async(request_type=dict) + + +def test_delete_dataset_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.DeleteDatasetRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_dataset_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.DeleteDatasetRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_delete_dataset_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_dataset( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_delete_dataset_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_dataset( + dataset_service.DeleteDatasetRequest(), + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_delete_dataset_flattened_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(
+ type(client.transport.delete_dataset),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name='operations/spam')
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_dataset(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_dataset_flattened_error_async():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.delete_dataset(
+ dataset_service.DeleteDatasetRequest(),
+ name='name_value',
+ )
+
+
+def test_import_data(transport: str = 'grpc', request_type=dataset_service.ImportDataRequest):
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.import_data),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name='operations/spam')
+ response = client.import_data(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == dataset_service.ImportDataRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_import_data_from_dict():
+ test_import_data(request_type=dict)
+
+
+def test_import_data_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.import_data),
+ '__call__') as call:
+ client.import_data()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == dataset_service.ImportDataRequest()
+
+
+@pytest.mark.asyncio
+async def test_import_data_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ImportDataRequest):
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.import_data),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
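+ # FakeUnaryUnaryCall wraps the response so the mocked stub is awaitable,
+ # mirroring a real grpc.aio unary-unary call.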
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ImportDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_import_data_async_from_dict(): + await test_import_data_async(request_type=dict) + + +def test_import_data_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ImportDataRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_import_data_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ImportDataRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_import_data_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.import_data( + name='name_value', + import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+ assert args[0].import_configs == [dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))]
+
+
+def test_import_data_flattened_error():
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.import_data(
+ dataset_service.ImportDataRequest(),
+ name='name_value',
+ import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))],
+ )
+
+
+@pytest.mark.asyncio
+async def test_import_data_flattened_async():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.import_data),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name='operations/spam')
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.import_data(
+ name='name_value',
+ import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))],
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+ assert args[0].import_configs == [dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))]
+
+
+@pytest.mark.asyncio
+async def test_import_data_flattened_error_async():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.import_data(
+ dataset_service.ImportDataRequest(),
+ name='name_value',
+ import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))],
+ )
+
+
+def test_export_data(transport: str = 'grpc', request_type=dataset_service.ExportDataRequest):
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.export_data),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name='operations/spam')
+ response = client.export_data(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == dataset_service.ExportDataRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_export_data_from_dict():
+ test_export_data(request_type=dict)
+
+
+def test_export_data_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.export_data),
+ '__call__') as call:
+ client.export_data()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == dataset_service.ExportDataRequest()
+
+
+@pytest.mark.asyncio
+async def test_export_data_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ExportDataRequest):
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.export_data),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name='operations/spam')
+ )
+ response = await client.export_data(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == dataset_service.ExportDataRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_export_data_async_from_dict():
+ await test_export_data_async(request_type=dict)
+
+
+def test_export_data_field_headers():
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = dataset_service.ExportDataRequest()
+
+ request.name = 'name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.export_data),
+ '__call__') as call:
+ call.return_value = operations_pb2.Operation(name='operations/op')
+ client.export_data(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_export_data_field_headers_async():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = dataset_service.ExportDataRequest()
+
+ request.name = 'name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.export_data),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
+ await client.export_data(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+def test_export_data_flattened():
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.export_data),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name='operations/op')
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.export_data(
+ name='name_value',
+ export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+ assert args[0].export_config == dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value'))
+
+
+def test_export_data_flattened_error():
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.export_data(
+ dataset_service.ExportDataRequest(),
+ name='name_value',
+ export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')),
+ )
+
+
+@pytest.mark.asyncio
+async def test_export_data_flattened_async():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.export_data),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name='operations/spam')
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.export_data(
+ name='name_value',
+ export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+ assert args[0].export_config == dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value'))
+
+
+@pytest.mark.asyncio
+async def test_export_data_flattened_error_async():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
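+ # (The ValueError is raised client-side, before any RPC is attempted.)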
+ with pytest.raises(ValueError):
+ await client.export_data(
+ dataset_service.ExportDataRequest(),
+ name='name_value',
+ export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')),
+ )
+
+
+def test_list_data_items(transport: str = 'grpc', request_type=dataset_service.ListDataItemsRequest):
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_items),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = dataset_service.ListDataItemsResponse(
+ next_page_token='next_page_token_value',
+ )
+ response = client.list_data_items(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == dataset_service.ListDataItemsRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListDataItemsPager)
+ assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_data_items_from_dict():
+ test_list_data_items(request_type=dict)
+
+
+def test_list_data_items_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_items),
+ '__call__') as call:
+ client.list_data_items()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == dataset_service.ListDataItemsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_data_items_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ListDataItemsRequest):
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_items),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDataItemsResponse(
+ next_page_token='next_page_token_value',
+ ))
+ response = await client.list_data_items(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == dataset_service.ListDataItemsRequest()
+
+ # Establish that the response is the type that we expect.
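+ # The awaited result is wrapped in an AsyncPager, consumable with async for.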
+ assert isinstance(response, pagers.ListDataItemsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_data_items_async_from_dict(): + await test_list_data_items_async(request_type=dict) + + +def test_list_data_items_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ListDataItemsRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_items), + '__call__') as call: + call.return_value = dataset_service.ListDataItemsResponse() + client.list_data_items(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_data_items_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ListDataItemsRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_items), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDataItemsResponse()) + await client.list_data_items(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_list_data_items_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_items), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset_service.ListDataItemsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_data_items( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +def test_list_data_items_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError):
+ client.list_data_items(
+ dataset_service.ListDataItemsRequest(),
+ parent='parent_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_data_items_flattened_async():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_items),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDataItemsResponse())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_data_items(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_data_items_flattened_error_async():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_data_items(
+ dataset_service.ListDataItemsRequest(),
+ parent='parent_value',
+ )
+
+
+def test_list_data_items_pager():
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_items),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ dataset_service.ListDataItemsResponse(
+ data_items=[
+ data_item.DataItem(),
+ data_item.DataItem(),
+ data_item.DataItem(),
+ ],
+ next_page_token='abc',
+ ),
+ dataset_service.ListDataItemsResponse(
+ data_items=[],
+ next_page_token='def',
+ ),
+ dataset_service.ListDataItemsResponse(
+ data_items=[
+ data_item.DataItem(),
+ ],
+ next_page_token='ghi',
+ ),
+ dataset_service.ListDataItemsResponse(
+ data_items=[
+ data_item.DataItem(),
+ data_item.DataItem(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ('parent', ''),
+ )),
+ )
+ pager = client.list_data_items(request={})
+
+ assert pager._metadata == metadata
+
+ results = [i for i in pager]
+ assert len(results) == 6
+ assert all(isinstance(i, data_item.DataItem)
+ for i in results)
+
+def test_list_data_items_pages():
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_items),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ dataset_service.ListDataItemsResponse(
+ data_items=[
+ data_item.DataItem(),
+ data_item.DataItem(),
+ data_item.DataItem(),
+ ],
+ next_page_token='abc',
+ ),
+ dataset_service.ListDataItemsResponse(
+ data_items=[],
+ next_page_token='def',
+ ),
+ dataset_service.ListDataItemsResponse(
+ data_items=[
+ data_item.DataItem(),
+ ],
+ next_page_token='ghi',
+ ),
+ dataset_service.ListDataItemsResponse(
+ data_items=[
+ data_item.DataItem(),
+ data_item.DataItem(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_data_items(request={}).pages)
+ for page_, token in zip(pages, ['abc','def','ghi', '']):
+ assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_data_items_async_pager():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_items),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ dataset_service.ListDataItemsResponse(
+ data_items=[
+ data_item.DataItem(),
+ data_item.DataItem(),
+ data_item.DataItem(),
+ ],
+ next_page_token='abc',
+ ),
+ dataset_service.ListDataItemsResponse(
+ data_items=[],
+ next_page_token='def',
+ ),
+ dataset_service.ListDataItemsResponse(
+ data_items=[
+ data_item.DataItem(),
+ ],
+ next_page_token='ghi',
+ ),
+ dataset_service.ListDataItemsResponse(
+ data_items=[
+ data_item.DataItem(),
+ data_item.DataItem(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_data_items(request={},)
+ assert async_pager.next_page_token == 'abc'
+ responses = []
+ async for response in async_pager:
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, data_item.DataItem)
+ for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_data_items_async_pages():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_items),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ dataset_service.ListDataItemsResponse(
+ data_items=[
+ data_item.DataItem(),
+ data_item.DataItem(),
+ data_item.DataItem(),
+ ],
+ next_page_token='abc',
+ ),
+ dataset_service.ListDataItemsResponse(
+ data_items=[],
+ next_page_token='def',
+ ),
+ dataset_service.ListDataItemsResponse(
+ data_items=[
+ data_item.DataItem(),
+ ],
+ next_page_token='ghi',
+ ),
+ dataset_service.ListDataItemsResponse(
+ data_items=[
+ data_item.DataItem(),
+ data_item.DataItem(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = []
+ async for page_ in (await client.list_data_items(request={})).pages:
+ pages.append(page_)
+ for page_, token in zip(pages, ['abc','def','ghi', '']):
+ assert page_.raw_page.next_page_token == token
+
+def test_get_annotation_spec(transport: str = 'grpc', request_type=dataset_service.GetAnnotationSpecRequest):
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_annotation_spec),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = annotation_spec.AnnotationSpec(
+ name='name_value',
+ display_name='display_name_value',
+ etag='etag_value',
+ )
+ response = client.get_annotation_spec(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == dataset_service.GetAnnotationSpecRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, annotation_spec.AnnotationSpec)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.etag == 'etag_value'
+
+
+def test_get_annotation_spec_from_dict():
+ test_get_annotation_spec(request_type=dict)
+
+
+def test_get_annotation_spec_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_annotation_spec),
+ '__call__') as call:
+ client.get_annotation_spec()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == dataset_service.GetAnnotationSpecRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_annotation_spec_async(transport: str = 'grpc_asyncio', request_type=dataset_service.GetAnnotationSpecRequest):
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_annotation_spec),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec(
+ name='name_value',
+ display_name='display_name_value',
+ etag='etag_value',
+ ))
+ response = await client.get_annotation_spec(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == dataset_service.GetAnnotationSpecRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, annotation_spec.AnnotationSpec)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.etag == 'etag_value'
+
+
+@pytest.mark.asyncio
+async def test_get_annotation_spec_async_from_dict():
+ await test_get_annotation_spec_async(request_type=dict)
+
+
+def test_get_annotation_spec_field_headers():
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = dataset_service.GetAnnotationSpecRequest()
+
+ request.name = 'name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_annotation_spec),
+ '__call__') as call:
+ call.return_value = annotation_spec.AnnotationSpec()
+ client.get_annotation_spec(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_annotation_spec_field_headers_async():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = dataset_service.GetAnnotationSpecRequest()
+
+ request.name = 'name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_annotation_spec),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec())
+ await client.get_annotation_spec(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+def test_get_annotation_spec_flattened():
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_annotation_spec),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = annotation_spec.AnnotationSpec()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_annotation_spec(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+def test_get_annotation_spec_flattened_error():
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_annotation_spec(
+ dataset_service.GetAnnotationSpecRequest(),
+ name='name_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_annotation_spec_flattened_async():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_annotation_spec),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_annotation_spec(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_annotation_spec_flattened_error_async():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_annotation_spec(
+ dataset_service.GetAnnotationSpecRequest(),
+ name='name_value',
+ )
+
+
+def test_list_annotations(transport: str = 'grpc', request_type=dataset_service.ListAnnotationsRequest):
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_annotations),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = dataset_service.ListAnnotationsResponse(
+ next_page_token='next_page_token_value',
+ )
+ response = client.list_annotations(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == dataset_service.ListAnnotationsRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListAnnotationsPager)
+ assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_annotations_from_dict():
+ test_list_annotations(request_type=dict)
+
+
+def test_list_annotations_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_annotations),
+ '__call__') as call:
+ client.list_annotations()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == dataset_service.ListAnnotationsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_annotations_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ListAnnotationsRequest):
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_annotations),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListAnnotationsResponse(
+ next_page_token='next_page_token_value',
+ ))
+ response = await client.list_annotations(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == dataset_service.ListAnnotationsRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListAnnotationsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_annotations_async_from_dict(): + await test_list_annotations_async(request_type=dict) + + +def test_list_annotations_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ListAnnotationsRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_annotations), + '__call__') as call: + call.return_value = dataset_service.ListAnnotationsResponse() + client.list_annotations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_annotations_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ListAnnotationsRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_annotations), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListAnnotationsResponse()) + await client.list_annotations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_list_annotations_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_annotations), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset_service.ListAnnotationsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_annotations( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +def test_list_annotations_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError):
+ client.list_annotations(
+ dataset_service.ListAnnotationsRequest(),
+ parent='parent_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_annotations_flattened_async():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_annotations),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListAnnotationsResponse())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_annotations(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_annotations_flattened_error_async():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_annotations(
+ dataset_service.ListAnnotationsRequest(),
+ parent='parent_value',
+ )
+
+
+def test_list_annotations_pager():
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_annotations),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ dataset_service.ListAnnotationsResponse(
+ annotations=[
+ annotation.Annotation(),
+ annotation.Annotation(),
+ annotation.Annotation(),
+ ],
+ next_page_token='abc',
+ ),
+ dataset_service.ListAnnotationsResponse(
+ annotations=[],
+ next_page_token='def',
+ ),
+ dataset_service.ListAnnotationsResponse(
+ annotations=[
+ annotation.Annotation(),
+ ],
+ next_page_token='ghi',
+ ),
+ dataset_service.ListAnnotationsResponse(
+ annotations=[
+ annotation.Annotation(),
+ annotation.Annotation(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ('parent', ''),
+ )),
+ )
+ pager = client.list_annotations(request={})
+
+ assert pager._metadata == metadata
+
+ results = [i for i in pager]
+ assert len(results) == 6
+ assert all(isinstance(i, annotation.Annotation)
+ for i in results)
+
+def test_list_annotations_pages():
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_annotations),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ dataset_service.ListAnnotationsResponse(
+ annotations=[
+ annotation.Annotation(),
+ annotation.Annotation(),
+ annotation.Annotation(),
+ ],
+ next_page_token='abc',
+ ),
+ dataset_service.ListAnnotationsResponse(
+ annotations=[],
+ next_page_token='def',
+ ),
+ dataset_service.ListAnnotationsResponse(
+ annotations=[
+ annotation.Annotation(),
+ ],
+ next_page_token='ghi',
+ ),
+ dataset_service.ListAnnotationsResponse(
+ annotations=[
+ annotation.Annotation(),
+ annotation.Annotation(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_annotations(request={}).pages)
+ for page_, token in zip(pages, ['abc','def','ghi', '']):
+ assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_annotations_async_pager():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_annotations),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ dataset_service.ListAnnotationsResponse(
+ annotations=[
+ annotation.Annotation(),
+ annotation.Annotation(),
+ annotation.Annotation(),
+ ],
+ next_page_token='abc',
+ ),
+ dataset_service.ListAnnotationsResponse(
+ annotations=[],
+ next_page_token='def',
+ ),
+ dataset_service.ListAnnotationsResponse(
+ annotations=[
+ annotation.Annotation(),
+ ],
+ next_page_token='ghi',
+ ),
+ dataset_service.ListAnnotationsResponse(
+ annotations=[
+ annotation.Annotation(),
+ annotation.Annotation(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_annotations(request={},)
+ assert async_pager.next_page_token == 'abc'
+ responses = []
+ async for response in async_pager:
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, annotation.Annotation)
+ for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_annotations_async_pages():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_annotations),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ dataset_service.ListAnnotationsResponse(
+ annotations=[
+ annotation.Annotation(),
+ annotation.Annotation(),
+ annotation.Annotation(),
+ ],
+ next_page_token='abc',
+ ),
+ dataset_service.ListAnnotationsResponse(
+ annotations=[],
+ next_page_token='def',
+ ),
+ dataset_service.ListAnnotationsResponse(
+ annotations=[
+ annotation.Annotation(),
+ ],
+ next_page_token='ghi',
+ ),
+ dataset_service.ListAnnotationsResponse(
+ annotations=[
+ annotation.Annotation(),
+ annotation.Annotation(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = []
+ async for page_ in (await client.list_annotations(request={})).pages:
+ pages.append(page_)
+ for page_, token in zip(pages, ['abc','def','ghi', '']):
+ assert page_.raw_page.next_page_token == token
+
+
+def test_credentials_transport_error():
+ # It is an error to provide credentials and a transport instance.
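+ # (A transport instance is already bound to its own credentials.)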
+ transport = transports.DatasetServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.DatasetServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = DatasetServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.DatasetServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = DatasetServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.DatasetServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = DatasetServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.DatasetServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.DatasetServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.DatasetServiceGrpcTransport, + transports.DatasetServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.DatasetServiceGrpcTransport, + ) + +def test_dataset_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.DatasetServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_dataset_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1.services.dataset_service.transports.DatasetServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.DatasetServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
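+ # (The base transport is only an abstract interface; the concrete gRPC
+ # transports exercised elsewhere in this file override each method.)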
+ methods = ( + 'create_dataset', + 'get_dataset', + 'update_dataset', + 'list_datasets', + 'delete_dataset', + 'import_data', + 'export_data', + 'list_data_items', + 'get_annotation_spec', + 'list_annotations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +@requires_google_auth_gte_1_25_0 +def test_dataset_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.dataset_service.transports.DatasetServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.DatasetServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_dataset_service_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.dataset_service.transports.DatasetServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.DatasetServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + ), + quota_project_id="octopus", + ) + + +def test_dataset_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.dataset_service.transports.DatasetServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.DatasetServiceTransport() + adc.assert_called_once() + + +@requires_google_auth_gte_1_25_0 +def test_dataset_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + DatasetServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_dataset_service_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. 
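+ # (Editor's note: on google-auth < 1.25.0 there is no default_scopes
+ # parameter, so the cloud-platform scope is expected via scopes directly.)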
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + DatasetServiceClient() + adc.assert_called_once_with( + scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.DatasetServiceGrpcTransport, + transports.DatasetServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_dataset_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.DatasetServiceGrpcTransport, + transports.DatasetServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_dataset_service_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") + adc.assert_called_once_with(scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.DatasetServiceGrpcTransport, grpc_helpers), + (transports.DatasetServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_dataset_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.DatasetServiceGrpcTransport, transports.DatasetServiceGrpcAsyncIOTransport]) +def test_dataset_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
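+ # (Explicitly supplied channel credentials must be forwarded to
+ # create_channel as-is; the client-cert callback is never consulted.)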
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+ mock_ssl_channel_creds = mock.Mock()
+ transport_class(
+ host="squid.clam.whelk",
+ credentials=cred,
+ ssl_channel_credentials=mock_ssl_channel_creds
+ )
+ mock_create_channel.assert_called_once_with(
+ "squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_channel_creds,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+ # is used.
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+ with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+ transport_class(
+ credentials=cred,
+ client_cert_source_for_mtls=client_cert_source_callback
+ )
+ expected_cert, expected_key = client_cert_source_callback()
+ mock_ssl_cred.assert_called_once_with(
+ certificate_chain=expected_cert,
+ private_key=expected_key
+ )
+
+
+def test_dataset_service_host_no_port():
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
+ )
+ assert client.transport._host == 'aiplatform.googleapis.com:443'
+
+
+def test_dataset_service_host_with_port():
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
+ )
+ assert client.transport._host == 'aiplatform.googleapis.com:8000'
+
+def test_dataset_service_grpc_transport_channel():
+ channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.DatasetServiceGrpcTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials is None
+
+
+def test_dataset_service_grpc_asyncio_transport_channel():
+ channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.DatasetServiceGrpcAsyncIOTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.DatasetServiceGrpcTransport, transports.DatasetServiceGrpcAsyncIOTransport]) +def test_dataset_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.DatasetServiceGrpcTransport, transports.DatasetServiceGrpcAsyncIOTransport]) +def test_dataset_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_dataset_service_grpc_lro_client(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. 
+ assert transport.operations_client is transport.operations_client + + +def test_dataset_service_grpc_lro_async_client(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_annotation_path(): + project = "squid" + location = "clam" + dataset = "whelk" + data_item = "octopus" + annotation = "oyster" + expected = "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}/annotations/{annotation}".format(project=project, location=location, dataset=dataset, data_item=data_item, annotation=annotation, ) + actual = DatasetServiceClient.annotation_path(project, location, dataset, data_item, annotation) + assert expected == actual + + +def test_parse_annotation_path(): + expected = { + "project": "nudibranch", + "location": "cuttlefish", + "dataset": "mussel", + "data_item": "winkle", + "annotation": "nautilus", + } + path = DatasetServiceClient.annotation_path(**expected) + + # Check that the path construction is reversible. + actual = DatasetServiceClient.parse_annotation_path(path) + assert expected == actual + +def test_annotation_spec_path(): + project = "scallop" + location = "abalone" + dataset = "squid" + annotation_spec = "clam" + expected = "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(project=project, location=location, dataset=dataset, annotation_spec=annotation_spec, ) + actual = DatasetServiceClient.annotation_spec_path(project, location, dataset, annotation_spec) + assert expected == actual + + +def test_parse_annotation_spec_path(): + expected = { + "project": "whelk", + "location": "octopus", + "dataset": "oyster", + "annotation_spec": "nudibranch", + } + path = DatasetServiceClient.annotation_spec_path(**expected) + + # Check that the path construction is reversible. + actual = DatasetServiceClient.parse_annotation_spec_path(path) + assert expected == actual + +def test_data_item_path(): + project = "cuttlefish" + location = "mussel" + dataset = "winkle" + data_item = "nautilus" + expected = "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}".format(project=project, location=location, dataset=dataset, data_item=data_item, ) + actual = DatasetServiceClient.data_item_path(project, location, dataset, data_item) + assert expected == actual + + +def test_parse_data_item_path(): + expected = { + "project": "scallop", + "location": "abalone", + "dataset": "squid", + "data_item": "clam", + } + path = DatasetServiceClient.data_item_path(**expected) + + # Check that the path construction is reversible. 
+ actual = DatasetServiceClient.parse_data_item_path(path) + assert expected == actual + +def test_dataset_path(): + project = "whelk" + location = "octopus" + dataset = "oyster" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) + actual = DatasetServiceClient.dataset_path(project, location, dataset) + assert expected == actual + + +def test_parse_dataset_path(): + expected = { + "project": "nudibranch", + "location": "cuttlefish", + "dataset": "mussel", + } + path = DatasetServiceClient.dataset_path(**expected) + + # Check that the path construction is reversible. + actual = DatasetServiceClient.parse_dataset_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "winkle" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = DatasetServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nautilus", + } + path = DatasetServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = DatasetServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "scallop" + expected = "folders/{folder}".format(folder=folder, ) + actual = DatasetServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "abalone", + } + path = DatasetServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = DatasetServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "squid" + expected = "organizations/{organization}".format(organization=organization, ) + actual = DatasetServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "clam", + } + path = DatasetServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = DatasetServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "whelk" + expected = "projects/{project}".format(project=project, ) + actual = DatasetServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "octopus", + } + path = DatasetServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = DatasetServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "oyster" + location = "nudibranch" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = DatasetServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "cuttlefish", + "location": "mussel", + } + path = DatasetServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = DatasetServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.DatasetServiceTransport, '_prep_wrapped_messages') as prep: + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.DatasetServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = DatasetServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py new file mode 100644 index 0000000000..f8a6e964b4 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py @@ -0,0 +1,2865 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1.services.endpoint_service import EndpointServiceAsyncClient +from google.cloud.aiplatform_v1.services.endpoint_service import EndpointServiceClient +from google.cloud.aiplatform_v1.services.endpoint_service import pagers +from google.cloud.aiplatform_v1.services.endpoint_service import transports +from google.cloud.aiplatform_v1.services.endpoint_service.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.aiplatform_v1.types import accelerator_type +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import endpoint +from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint +from google.cloud.aiplatform_v1.types import endpoint_service +from google.cloud.aiplatform_v1.types import machine_resources +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 
is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert EndpointServiceClient._get_default_mtls_endpoint(None) is None + assert EndpointServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert EndpointServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert EndpointServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert EndpointServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert EndpointServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + EndpointServiceClient, + EndpointServiceAsyncClient, +]) +def test_endpoint_service_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +@pytest.mark.parametrize("client_class", [ + EndpointServiceClient, + EndpointServiceAsyncClient, +]) +def test_endpoint_service_client_service_account_always_use_jwt(client_class): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.EndpointServiceGrpcTransport, "grpc"), + (transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_endpoint_service_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + EndpointServiceClient, + 
EndpointServiceAsyncClient, +]) +def test_endpoint_service_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +def test_endpoint_service_client_get_transport_class(): + transport = EndpointServiceClient.get_transport_class() + available_transports = [ + transports.EndpointServiceGrpcTransport, + ] + assert transport in available_transports + + transport = EndpointServiceClient.get_transport_class("grpc") + assert transport == transports.EndpointServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"), + (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(EndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceClient)) +@mock.patch.object(EndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceAsyncClient)) +def test_endpoint_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(EndpointServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(EndpointServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
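+ # (With "always", the client must target DEFAULT_MTLS_ENDPOINT even though
+ # no client certificate has been configured.)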
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc", "true"), + (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc", "false"), + (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(EndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceClient)) +@mock.patch.object(EndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_endpoint_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. 
Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"), + (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_endpoint_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"), + (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_endpoint_service_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
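+ # (The path is forwarded to the transport untouched; the transport, not
+ # the client, is responsible for loading credentials from the file.)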
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_endpoint_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = EndpointServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_endpoint(transport: str = 'grpc', request_type=endpoint_service.CreateEndpointRequest): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.CreateEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_endpoint_from_dict(): + test_create_endpoint(request_type=dict) + + +def test_create_endpoint_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_endpoint), + '__call__') as call: + client.create_endpoint() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.CreateEndpointRequest() + + +@pytest.mark.asyncio +async def test_create_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.CreateEndpointRequest): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. 
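+ # (Async stubs must return an awaitable, so the operation is wrapped in
+ # FakeUnaryUnaryCall instead of being assigned directly.)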
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.CreateEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_endpoint_async_from_dict(): + await test_create_endpoint_async(request_type=dict) + + +def test_create_endpoint_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.CreateEndpointRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_endpoint), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_endpoint_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.CreateEndpointRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_endpoint), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_create_endpoint_flattened(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_endpoint( + parent='parent_value', + endpoint=gca_endpoint.Endpoint(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+ assert args[0].endpoint == gca_endpoint.Endpoint(name='name_value')
+
+
+def test_create_endpoint_flattened_error():
+ client = EndpointServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_endpoint(
+ endpoint_service.CreateEndpointRequest(),
+ parent='parent_value',
+ endpoint=gca_endpoint.Endpoint(name='name_value'),
+ )
+
+
+@pytest.mark.asyncio
+async def test_create_endpoint_flattened_async():
+ client = EndpointServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_endpoint),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name='operations/spam')
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.create_endpoint(
+ parent='parent_value',
+ endpoint=gca_endpoint.Endpoint(name='name_value'),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+ assert args[0].endpoint == gca_endpoint.Endpoint(name='name_value')
+
+
+@pytest.mark.asyncio
+async def test_create_endpoint_flattened_error_async():
+ client = EndpointServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.create_endpoint(
+ endpoint_service.CreateEndpointRequest(),
+ parent='parent_value',
+ endpoint=gca_endpoint.Endpoint(name='name_value'),
+ )
+
+
+def test_get_endpoint(transport: str = 'grpc', request_type=endpoint_service.GetEndpointRequest):
+ client = EndpointServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_endpoint),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = endpoint.Endpoint(
+ name='name_value',
+ display_name='display_name_value',
+ description='description_value',
+ etag='etag_value',
+ )
+ response = client.get_endpoint(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == endpoint_service.GetEndpointRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, endpoint.Endpoint) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + + +def test_get_endpoint_from_dict(): + test_get_endpoint(request_type=dict) + + +def test_get_endpoint_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_endpoint), + '__call__') as call: + client.get_endpoint() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.GetEndpointRequest() + + +@pytest.mark.asyncio +async def test_get_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.GetEndpointRequest): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + )) + response = await client.get_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.GetEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, endpoint.Endpoint) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_get_endpoint_async_from_dict(): + await test_get_endpoint_async(request_type=dict) + + +def test_get_endpoint_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.GetEndpointRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_endpoint), + '__call__') as call: + call.return_value = endpoint.Endpoint() + client.get_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
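+ # (Routing information travels as 'x-goog-request-params' metadata rather
+ # than inside the request message itself.)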
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_endpoint_field_headers_async():
+ client = EndpointServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = endpoint_service.GetEndpointRequest()
+
+ request.name = 'name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_endpoint),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint())
+ await client.get_endpoint(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+def test_get_endpoint_flattened():
+ client = EndpointServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_endpoint),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = endpoint.Endpoint()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_endpoint(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+def test_get_endpoint_flattened_error():
+ client = EndpointServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_endpoint(
+ endpoint_service.GetEndpointRequest(),
+ name='name_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_endpoint_flattened_async():
+ client = EndpointServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_endpoint),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_endpoint(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_endpoint_flattened_error_async():
+ client = EndpointServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
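+ # (Flattened arguments are a convenience layer over the request object;
+ # supplying both would make the effective request ambiguous.)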
+    with pytest.raises(ValueError):
+        await client.get_endpoint(
+            endpoint_service.GetEndpointRequest(),
+            name='name_value',
+        )
+
+
+def test_list_endpoints(transport: str = 'grpc', request_type=endpoint_service.ListEndpointsRequest):
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_endpoints),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = endpoint_service.ListEndpointsResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_endpoints(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == endpoint_service.ListEndpointsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListEndpointsPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_endpoints_from_dict():
+    test_list_endpoints(request_type=dict)
+
+
+def test_list_endpoints_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_endpoints),
+            '__call__') as call:
+        client.list_endpoints()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == endpoint_service.ListEndpointsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_endpoints_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.ListEndpointsRequest):
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_endpoints),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint_service.ListEndpointsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_endpoints(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == endpoint_service.ListEndpointsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListEndpointsAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_endpoints_async_from_dict():
+    await test_list_endpoints_async(request_type=dict)
+
+
+def test_list_endpoints_field_headers():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = endpoint_service.ListEndpointsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_endpoints),
+            '__call__') as call:
+        call.return_value = endpoint_service.ListEndpointsResponse()
+        client.list_endpoints(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_endpoints_field_headers_async():
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = endpoint_service.ListEndpointsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_endpoints),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint_service.ListEndpointsResponse())
+        await client.list_endpoints(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_list_endpoints_flattened():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_endpoints),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = endpoint_service.ListEndpointsResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_endpoints(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+def test_list_endpoints_flattened_error():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_endpoints(
+            endpoint_service.ListEndpointsRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_endpoints_flattened_async():
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_endpoints),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = endpoint_service.ListEndpointsResponse()
+
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint_service.ListEndpointsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_endpoints(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_endpoints_flattened_error_async():
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_endpoints(
+            endpoint_service.ListEndpointsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_endpoints_pager():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_endpoints),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[
+                    endpoint.Endpoint(),
+                    endpoint.Endpoint(),
+                    endpoint.Endpoint(),
+                ],
+                next_page_token='abc',
+            ),
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[],
+                next_page_token='def',
+            ),
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[
+                    endpoint.Endpoint(),
+                ],
+                next_page_token='ghi',
+            ),
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[
+                    endpoint.Endpoint(),
+                    endpoint.Endpoint(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_endpoints(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, endpoint.Endpoint)
+                   for i in results)
+
+def test_list_endpoints_pages():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_endpoints),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[
+                    endpoint.Endpoint(),
+                    endpoint.Endpoint(),
+                    endpoint.Endpoint(),
+                ],
+                next_page_token='abc',
+            ),
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[],
+                next_page_token='def',
+            ),
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[
+                    endpoint.Endpoint(),
+                ],
+                next_page_token='ghi',
+            ),
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[
+                    endpoint.Endpoint(),
+                    endpoint.Endpoint(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_endpoints(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_endpoints_async_pager():
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_endpoints),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[
+                    endpoint.Endpoint(),
+                    endpoint.Endpoint(),
+                    endpoint.Endpoint(),
+                ],
+                next_page_token='abc',
+            ),
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[],
+                next_page_token='def',
+            ),
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[
+                    endpoint.Endpoint(),
+                ],
+                next_page_token='ghi',
+            ),
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[
+                    endpoint.Endpoint(),
+                    endpoint.Endpoint(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_endpoints(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, endpoint.Endpoint)
+                   for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_endpoints_async_pages():
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_endpoints),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[
+                    endpoint.Endpoint(),
+                    endpoint.Endpoint(),
+                    endpoint.Endpoint(),
+                ],
+                next_page_token='abc',
+            ),
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[],
+                next_page_token='def',
+            ),
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[
+                    endpoint.Endpoint(),
+                ],
+                next_page_token='ghi',
+            ),
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[
+                    endpoint.Endpoint(),
+                    endpoint.Endpoint(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_endpoints(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+def test_update_endpoint(transport: str = 'grpc', request_type=endpoint_service.UpdateEndpointRequest):
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_endpoint),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gca_endpoint.Endpoint(
+            name='name_value',
+            display_name='display_name_value',
+            description='description_value',
+            etag='etag_value',
+        )
+        response = client.update_endpoint(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == endpoint_service.UpdateEndpointRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, gca_endpoint.Endpoint)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.description == 'description_value'
+    assert response.etag == 'etag_value'
+
+
+def test_update_endpoint_from_dict():
+    test_update_endpoint(request_type=dict)
+
+
+def test_update_endpoint_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_endpoint),
+            '__call__') as call:
+        client.update_endpoint()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == endpoint_service.UpdateEndpointRequest()
+
+
+@pytest.mark.asyncio
+async def test_update_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.UpdateEndpointRequest):
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_endpoint),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_endpoint.Endpoint(
+            name='name_value',
+            display_name='display_name_value',
+            description='description_value',
+            etag='etag_value',
+        ))
+        response = await client.update_endpoint(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == endpoint_service.UpdateEndpointRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, gca_endpoint.Endpoint)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.description == 'description_value'
+    assert response.etag == 'etag_value'
+
+
+@pytest.mark.asyncio
+async def test_update_endpoint_async_from_dict():
+    await test_update_endpoint_async(request_type=dict)
+
+
+def test_update_endpoint_field_headers():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = endpoint_service.UpdateEndpointRequest()
+
+    request.endpoint.name = 'endpoint.name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_endpoint),
+            '__call__') as call:
+        call.return_value = gca_endpoint.Endpoint()
+        client.update_endpoint(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'endpoint.name=endpoint.name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_update_endpoint_field_headers_async():
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = endpoint_service.UpdateEndpointRequest()
+
+    request.endpoint.name = 'endpoint.name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.update_endpoint), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_endpoint.Endpoint()) + await client.update_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint.name=endpoint.name/value', + ) in kw['metadata'] + + +def test_update_endpoint_flattened(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_endpoint.Endpoint() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_endpoint( + endpoint=gca_endpoint.Endpoint(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].endpoint == gca_endpoint.Endpoint(name='name_value') + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) + + +def test_update_endpoint_flattened_error(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_endpoint( + endpoint_service.UpdateEndpointRequest(), + endpoint=gca_endpoint.Endpoint(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.asyncio +async def test_update_endpoint_flattened_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_endpoint.Endpoint() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_endpoint.Endpoint()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_endpoint( + endpoint=gca_endpoint.Endpoint(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].endpoint == gca_endpoint.Endpoint(name='name_value') + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) + + +@pytest.mark.asyncio +async def test_update_endpoint_flattened_error_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_endpoint( + endpoint_service.UpdateEndpointRequest(), + endpoint=gca_endpoint.Endpoint(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +def test_delete_endpoint(transport: str = 'grpc', request_type=endpoint_service.DeleteEndpointRequest): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.DeleteEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_endpoint_from_dict(): + test_delete_endpoint(request_type=dict) + + +def test_delete_endpoint_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_endpoint), + '__call__') as call: + client.delete_endpoint() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.DeleteEndpointRequest() + + +@pytest.mark.asyncio +async def test_delete_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.DeleteEndpointRequest): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.DeleteEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_endpoint_async_from_dict(): + await test_delete_endpoint_async(request_type=dict) + + +def test_delete_endpoint_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = endpoint_service.DeleteEndpointRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_endpoint), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_endpoint_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.DeleteEndpointRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_endpoint), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_delete_endpoint_flattened(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_endpoint( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_delete_endpoint_flattened_error(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_endpoint( + endpoint_service.DeleteEndpointRequest(), + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_delete_endpoint_flattened_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.delete_endpoint( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_delete_endpoint_flattened_error_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_endpoint( + endpoint_service.DeleteEndpointRequest(), + name='name_value', + ) + + +def test_deploy_model(transport: str = 'grpc', request_type=endpoint_service.DeployModelRequest): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.deploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.DeployModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_deploy_model_from_dict(): + test_deploy_model(request_type=dict) + + +def test_deploy_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + client.deploy_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.DeployModelRequest() + + +@pytest.mark.asyncio +async def test_deploy_model_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.DeployModelRequest): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.deploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.DeployModelRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_deploy_model_async_from_dict(): + await test_deploy_model_async(request_type=dict) + + +def test_deploy_model_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.DeployModelRequest() + + request.endpoint = 'endpoint/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.deploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_deploy_model_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.DeployModelRequest() + + request.endpoint = 'endpoint/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.deploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint/value', + ) in kw['metadata'] + + +def test_deploy_model_flattened(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.deploy_model( + endpoint='endpoint_value', + deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))), + traffic_split={'key_value': 541}, + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].endpoint == 'endpoint_value' + assert args[0].deployed_model == gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))) + assert args[0].traffic_split == {'key_value': 541} + + +def test_deploy_model_flattened_error(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.deploy_model( + endpoint_service.DeployModelRequest(), + endpoint='endpoint_value', + deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))), + traffic_split={'key_value': 541}, + ) + + +@pytest.mark.asyncio +async def test_deploy_model_flattened_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.deploy_model( + endpoint='endpoint_value', + deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))), + traffic_split={'key_value': 541}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].endpoint == 'endpoint_value' + assert args[0].deployed_model == gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))) + assert args[0].traffic_split == {'key_value': 541} + + +@pytest.mark.asyncio +async def test_deploy_model_flattened_error_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.deploy_model( + endpoint_service.DeployModelRequest(), + endpoint='endpoint_value', + deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))), + traffic_split={'key_value': 541}, + ) + + +def test_undeploy_model(transport: str = 'grpc', request_type=endpoint_service.UndeployModelRequest): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.undeploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.UndeployModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_undeploy_model_from_dict(): + test_undeploy_model(request_type=dict) + + +def test_undeploy_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + client.undeploy_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.UndeployModelRequest() + + +@pytest.mark.asyncio +async def test_undeploy_model_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.UndeployModelRequest): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.undeploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.UndeployModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_undeploy_model_async_from_dict(): + await test_undeploy_model_async(request_type=dict) + + +def test_undeploy_model_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.UndeployModelRequest() + + request.endpoint = 'endpoint/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.undeploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_undeploy_model_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.UndeployModelRequest() + + request.endpoint = 'endpoint/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.undeploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint/value', + ) in kw['metadata'] + + +def test_undeploy_model_flattened(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.undeploy_model( + endpoint='endpoint_value', + deployed_model_id='deployed_model_id_value', + traffic_split={'key_value': 541}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].endpoint == 'endpoint_value' + assert args[0].deployed_model_id == 'deployed_model_id_value' + assert args[0].traffic_split == {'key_value': 541} + + +def test_undeploy_model_flattened_error(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.undeploy_model( + endpoint_service.UndeployModelRequest(), + endpoint='endpoint_value', + deployed_model_id='deployed_model_id_value', + traffic_split={'key_value': 541}, + ) + + +@pytest.mark.asyncio +async def test_undeploy_model_flattened_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.undeploy_model( + endpoint='endpoint_value', + deployed_model_id='deployed_model_id_value', + traffic_split={'key_value': 541}, + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].endpoint == 'endpoint_value' + assert args[0].deployed_model_id == 'deployed_model_id_value' + assert args[0].traffic_split == {'key_value': 541} + + +@pytest.mark.asyncio +async def test_undeploy_model_flattened_error_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.undeploy_model( + endpoint_service.UndeployModelRequest(), + endpoint='endpoint_value', + deployed_model_id='deployed_model_id_value', + traffic_split={'key_value': 541}, + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.EndpointServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.EndpointServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = EndpointServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.EndpointServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = EndpointServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.EndpointServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = EndpointServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.EndpointServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.EndpointServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.EndpointServiceGrpcTransport, + transports.EndpointServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. 
+ client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.EndpointServiceGrpcTransport, + ) + +def test_endpoint_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.EndpointServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_endpoint_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.EndpointServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'create_endpoint', + 'get_endpoint', + 'list_endpoints', + 'update_endpoint', + 'delete_endpoint', + 'deploy_model', + 'undeploy_model', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +@requires_google_auth_gte_1_25_0 +def test_endpoint_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.EndpointServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_endpoint_service_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.EndpointServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + ), + quota_project_id="octopus", + ) + + +def test_endpoint_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.EndpointServiceTransport() + adc.assert_called_once() + + +@requires_google_auth_gte_1_25_0 +def test_endpoint_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + EndpointServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_endpoint_service_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + EndpointServiceClient() + adc.assert_called_once_with( + scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.EndpointServiceGrpcTransport, + transports.EndpointServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_endpoint_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.EndpointServiceGrpcTransport, + transports.EndpointServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_endpoint_service_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") + adc.assert_called_once_with(scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.EndpointServiceGrpcTransport, grpc_helpers), + (transports.EndpointServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_endpoint_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
+        grpc_helpers, "create_channel", autospec=True
+    ) as create_channel:
+        creds = ga_credentials.AnonymousCredentials()
+        adc.return_value = (creds, None)
+        transport_class(
+            quota_project_id="octopus",
+            scopes=["1", "2"]
+        )
+
+        create_channel.assert_called_with(
+            "aiplatform.googleapis.com:443",
+            credentials=creds,
+            credentials_file=None,
+            quota_project_id="octopus",
+            default_scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+),
+            scopes=["1", "2"],
+            default_host="aiplatform.googleapis.com",
+            ssl_credentials=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+
+@pytest.mark.parametrize("transport_class", [transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport])
+def test_endpoint_service_grpc_transport_client_cert_source_for_mtls(
+    transport_class
+):
+    cred = ga_credentials.AnonymousCredentials()
+
+    # Check ssl_channel_credentials is used if provided.
+    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+        mock_ssl_channel_creds = mock.Mock()
+        transport_class(
+            host="squid.clam.whelk",
+            credentials=cred,
+            ssl_channel_credentials=mock_ssl_channel_creds
+        )
+        mock_create_channel.assert_called_once_with(
+            "squid.clam.whelk:443",
+            credentials=cred,
+            credentials_file=None,
+            scopes=None,
+            ssl_credentials=mock_ssl_channel_creds,
+            quota_project_id=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+    # is used.
+    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+            transport_class(
+                credentials=cred,
+                client_cert_source_for_mtls=client_cert_source_callback
+            )
+            expected_cert, expected_key = client_cert_source_callback()
+            mock_ssl_cred.assert_called_once_with(
+                certificate_chain=expected_cert,
+                private_key=expected_key
+            )
+
+
+def test_endpoint_service_host_no_port():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
+    )
+    assert client.transport._host == 'aiplatform.googleapis.com:443'
+
+
+def test_endpoint_service_host_with_port():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
+    )
+    assert client.transport._host == 'aiplatform.googleapis.com:8000'
+
+def test_endpoint_service_grpc_transport_channel():
+    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.EndpointServiceGrpcTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+def test_endpoint_service_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.EndpointServiceGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport])
+def test_endpoint_service_transport_channel_mtls_with_client_cert_source(
+    transport_class
+):
+    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
+        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+            mock_ssl_cred = mock.Mock()
+            grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+
+            cred = ga_credentials.AnonymousCredentials()
+            with pytest.warns(DeprecationWarning):
+                with mock.patch.object(google.auth, 'default') as adc:
+                    adc.return_value = (cred, None)
+                    transport = transport_class(
+                        host="squid.clam.whelk",
+                        api_mtls_endpoint="mtls.squid.clam.whelk",
+                        client_cert_source=client_cert_source_callback,
+                    )
+                    adc.assert_called_once()
+
+            grpc_ssl_channel_cred.assert_called_once_with(
+                certificate_chain=b"cert bytes", private_key=b"key bytes"
+            )
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+            assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport])
+def test_endpoint_service_transport_channel_mtls_with_adc(
+    transport_class
+):
+    mock_ssl_cred = mock.Mock()
+    with mock.patch.multiple(
+        "google.auth.transport.grpc.SslCredentials",
+        __init__=mock.Mock(return_value=None),
+        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+    ):
+        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+            mock_cred = mock.Mock()
+
+            with pytest.warns(DeprecationWarning):
+                transport = transport_class(
+                    host="squid.clam.whelk",
+                    credentials=mock_cred,
+                    api_mtls_endpoint="mtls.squid.clam.whelk",
+                    client_cert_source=None,
+                )
+
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=mock_cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_endpoint_service_grpc_lro_client():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(
+        transport.operations_client,
+        operations_v1.OperationsClient,
+    )
+
+    # Ensure that subsequent calls to the property send the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_endpoint_service_grpc_lro_async_client():
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc_asyncio',
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(
+        transport.operations_client,
+        operations_v1.OperationsAsyncClient,
+    )
+
+    # Ensure that subsequent calls to the property send the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_endpoint_path():
+    project = "squid"
+    location = "clam"
+    endpoint = "whelk"
+    expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint)
+    actual = EndpointServiceClient.endpoint_path(project, location, endpoint)
+    assert expected == actual
+
+
+def test_parse_endpoint_path():
+    expected = {
+        "project": "octopus",
+        "location": "oyster",
+        "endpoint": "nudibranch",
+    }
+    path = EndpointServiceClient.endpoint_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = EndpointServiceClient.parse_endpoint_path(path)
+    assert expected == actual
+
+
+def test_model_path():
+    project = "cuttlefish"
+    location = "mussel"
+    model = "winkle"
+    expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model)
+    actual = EndpointServiceClient.model_path(project, location, model)
+    assert expected == actual
+
+
+def test_parse_model_path():
+    expected = {
+        "project": "nautilus",
+        "location": "scallop",
+        "model": "abalone",
+    }
+    path = EndpointServiceClient.model_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = EndpointServiceClient.parse_model_path(path)
+    assert expected == actual
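+
+
+# The four tests above each check one direction; the same reversibility
+# property can be stated as a single round trip. A compact sketch (the field
+# values here are arbitrary):
+@pytest.mark.parametrize("make_path,parse_path,fields", [
+    (EndpointServiceClient.endpoint_path, EndpointServiceClient.parse_endpoint_path,
+     {"project": "p", "location": "l", "endpoint": "e"}),
+    (EndpointServiceClient.model_path, EndpointServiceClient.parse_model_path,
+     {"project": "p", "location": "l", "model": "m"}),
+])
+def test_path_round_trip_sketch(make_path, parse_path, fields):
+    assert parse_path(make_path(**fields)) == fields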
+
+
+def test_common_billing_account_path():
+    billing_account = "squid"
+    expected = "billingAccounts/{billing_account}".format(billing_account=billing_account)
+    actual = EndpointServiceClient.common_billing_account_path(billing_account)
+    assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+    expected = {
+        "billing_account": "clam",
+    }
+    path = EndpointServiceClient.common_billing_account_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = EndpointServiceClient.parse_common_billing_account_path(path)
+    assert expected == actual
+
+
+def test_common_folder_path():
+    folder = "whelk"
+    expected = "folders/{folder}".format(folder=folder)
+    actual = EndpointServiceClient.common_folder_path(folder)
+    assert expected == actual
+
+
+def test_parse_common_folder_path():
+    expected = {
+        "folder": "octopus",
+    }
+    path = EndpointServiceClient.common_folder_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = EndpointServiceClient.parse_common_folder_path(path)
+    assert expected == actual
+
+
+def test_common_organization_path():
+    organization = "oyster"
+    expected = "organizations/{organization}".format(organization=organization)
+    actual = EndpointServiceClient.common_organization_path(organization)
+    assert expected == actual
+
+
+def test_parse_common_organization_path():
+    expected = {
+        "organization": "nudibranch",
+    }
+    path = EndpointServiceClient.common_organization_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = EndpointServiceClient.parse_common_organization_path(path)
+    assert expected == actual
+
+
+def test_common_project_path():
+    project = "cuttlefish"
+    expected = "projects/{project}".format(project=project)
+    actual = EndpointServiceClient.common_project_path(project)
+    assert expected == actual
+
+
+def test_parse_common_project_path():
+    expected = {
+        "project": "mussel",
+    }
+    path = EndpointServiceClient.common_project_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = EndpointServiceClient.parse_common_project_path(path)
+    assert expected == actual
+
+
+def test_common_location_path():
+    project = "winkle"
+    location = "nautilus"
+    expected = "projects/{project}/locations/{location}".format(project=project, location=location)
+    actual = EndpointServiceClient.common_location_path(project, location)
+    assert expected == actual
+
+
+def test_parse_common_location_path():
+    expected = {
+        "project": "scallop",
+        "location": "abalone",
+    }
+    path = EndpointServiceClient.common_location_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = EndpointServiceClient.parse_common_location_path(path)
+    assert expected == actual
+
+
+def test_client_with_default_client_info():
+    client_info = gapic_v1.client_info.ClientInfo()
+
+    with mock.patch.object(transports.EndpointServiceTransport, '_prep_wrapped_messages') as prep:
+        client = EndpointServiceClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            client_info=client_info,
+        )
+        prep.assert_called_once_with(client_info)
+
+    with mock.patch.object(transports.EndpointServiceTransport, '_prep_wrapped_messages') as prep:
+        transport_class = EndpointServiceClient.get_transport_class()
+        transport = transport_class(
+            credentials=ga_credentials.AnonymousCredentials(),
+            client_info=client_info,
+        )
+        prep.assert_called_once_with(client_info)
diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_job_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_job_service.py
new file mode 100644
index 0000000000..5f08ed6740
--- /dev/null
+++ b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_job_service.py
@@ -0,0 +1,6666 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1.services.job_service import JobServiceAsyncClient +from google.cloud.aiplatform_v1.services.job_service import JobServiceClient +from google.cloud.aiplatform_v1.services.job_service import pagers +from google.cloud.aiplatform_v1.services.job_service import transports +from google.cloud.aiplatform_v1.services.job_service.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.aiplatform_v1.types import accelerator_type +from google.cloud.aiplatform_v1.types import batch_prediction_job +from google.cloud.aiplatform_v1.types import batch_prediction_job as gca_batch_prediction_job +from google.cloud.aiplatform_v1.types import completion_stats +from google.cloud.aiplatform_v1.types import custom_job +from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job +from google.cloud.aiplatform_v1.types import data_labeling_job +from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import env_var +from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job +from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job +from google.cloud.aiplatform_v1.types import io +from google.cloud.aiplatform_v1.types import job_service +from google.cloud.aiplatform_v1.types import job_state +from google.cloud.aiplatform_v1.types import machine_resources +from google.cloud.aiplatform_v1.types import manual_batch_tuning_parameters +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import study +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import any_pb2 # type: ignore +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from google.type import money_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). 
+requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert JobServiceClient._get_default_mtls_endpoint(None) is None + assert JobServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert JobServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert JobServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert JobServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert JobServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + JobServiceClient, + JobServiceAsyncClient, +]) +def test_job_service_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +@pytest.mark.parametrize("client_class", [ + JobServiceClient, + JobServiceAsyncClient, +]) +def test_job_service_client_service_account_always_use_jwt(client_class): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.JobServiceGrpcTransport, "grpc"), + (transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_job_service_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + JobServiceClient, + JobServiceAsyncClient, +]) +def test_job_service_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = 
client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +def test_job_service_client_get_transport_class(): + transport = JobServiceClient.get_transport_class() + available_transports = [ + transports.JobServiceGrpcTransport, + ] + assert transport in available_transports + + transport = JobServiceClient.get_transport_class("grpc") + assert transport == transports.JobServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (JobServiceClient, transports.JobServiceGrpcTransport, "grpc"), + (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient)) +@mock.patch.object(JobServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceAsyncClient)) +def test_job_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(JobServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(JobServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", "true"), + (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", "false"), + (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient)) +@mock.patch.object(JobServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_job_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (JobServiceClient, transports.JobServiceGrpcTransport, "grpc"), + (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_job_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (JobServiceClient, transports.JobServiceGrpcTransport, "grpc"), + (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_job_service_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_job_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1.services.job_service.transports.JobServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = JobServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_custom_job(transport: str = 'grpc', request_type=job_service.CreateCustomJobRequest): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_custom_job.CustomJob( + name='name_value', + display_name='display_name_value', + state=job_state.JobState.JOB_STATE_QUEUED, + ) + response = client.create_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateCustomJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_custom_job.CustomJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +def test_create_custom_job_from_dict(): + test_create_custom_job(request_type=dict) + + +def test_create_custom_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_custom_job), + '__call__') as call: + client.create_custom_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateCustomJobRequest() + + +@pytest.mark.asyncio +async def test_create_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateCustomJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.create_custom_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_custom_job.CustomJob(
+            name='name_value',
+            display_name='display_name_value',
+            state=job_state.JobState.JOB_STATE_QUEUED,
+        ))
+        response = await client.create_custom_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.CreateCustomJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, gca_custom_job.CustomJob)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.state == job_state.JobState.JOB_STATE_QUEUED
+
+
+@pytest.mark.asyncio
+async def test_create_custom_job_async_from_dict():
+    await test_create_custom_job_async(request_type=dict)
+
+
+def test_create_custom_job_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.CreateCustomJobRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_custom_job),
+            '__call__') as call:
+        call.return_value = gca_custom_job.CustomJob()
+        client.create_custom_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_create_custom_job_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.CreateCustomJobRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_custom_job),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_custom_job.CustomJob())
+        await client.create_custom_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
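+
+
+# The 'x-goog-request-params' entries asserted above are produced by the
+# shared routing-header helper. A minimal sketch of the metadata it builds,
+# assuming the helper preserves path slashes as the assertions above imply:
+def test_routing_header_metadata_sketch():
+    metadata = gapic_v1.routing_header.to_grpc_metadata((('parent', 'parent/value'),))
+    assert metadata == ('x-goog-request-params', 'parent=parent/value')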
+
+
+def test_create_custom_job_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_custom_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gca_custom_job.CustomJob()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.create_custom_job(
+            parent='parent_value',
+            custom_job=gca_custom_job.CustomJob(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].custom_job == gca_custom_job.CustomJob(name='name_value')
+
+
+def test_create_custom_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_custom_job(
+            job_service.CreateCustomJobRequest(),
+            parent='parent_value',
+            custom_job=gca_custom_job.CustomJob(name='name_value'),
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_custom_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_custom_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_custom_job.CustomJob())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_custom_job(
+            parent='parent_value',
+            custom_job=gca_custom_job.CustomJob(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].custom_job == gca_custom_job.CustomJob(name='name_value')
+
+
+@pytest.mark.asyncio
+async def test_create_custom_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.create_custom_job(
+            job_service.CreateCustomJobRequest(),
+            parent='parent_value',
+            custom_job=gca_custom_job.CustomJob(name='name_value'),
+        )
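+
+
+# The async tests above wrap every stubbed response in FakeUnaryUnaryCall so
+# that the client has an awaitable to consume. A minimal sketch of that
+# pattern in isolation (the CustomJob payload is arbitrary):
+@pytest.mark.asyncio
+async def test_fake_unary_unary_call_sketch():
+    call = grpc_helpers_async.FakeUnaryUnaryCall(custom_job.CustomJob(name='name_value'))
+    response = await call
+    assert response.name == 'name_value'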
+
+
+def test_get_custom_job(transport: str = 'grpc', request_type=job_service.GetCustomJobRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_custom_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = custom_job.CustomJob(
+            name='name_value',
+            display_name='display_name_value',
+            state=job_state.JobState.JOB_STATE_QUEUED,
+        )
+        response = client.get_custom_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.GetCustomJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, custom_job.CustomJob)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.state == job_state.JobState.JOB_STATE_QUEUED
+
+
+def test_get_custom_job_from_dict():
+    test_get_custom_job(request_type=dict)
+
+
+def test_get_custom_job_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_custom_job),
+            '__call__') as call:
+        client.get_custom_job()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.GetCustomJobRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetCustomJobRequest):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_custom_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(custom_job.CustomJob(
+            name='name_value',
+            display_name='display_name_value',
+            state=job_state.JobState.JOB_STATE_QUEUED,
+        ))
+        response = await client.get_custom_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.GetCustomJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, custom_job.CustomJob)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.state == job_state.JobState.JOB_STATE_QUEUED
+
+
+@pytest.mark.asyncio
+async def test_get_custom_job_async_from_dict():
+    await test_get_custom_job_async(request_type=dict)
+
+
+def test_get_custom_job_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.GetCustomJobRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_custom_job),
+            '__call__') as call:
+        call.return_value = custom_job.CustomJob()
+        client.get_custom_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_custom_job_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.GetCustomJobRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_custom_job),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(custom_job.CustomJob())
+        await client.get_custom_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_get_custom_job_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_custom_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = custom_job.CustomJob()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_custom_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_get_custom_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_custom_job(
+            job_service.GetCustomJobRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_custom_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_custom_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(custom_job.CustomJob())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_custom_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_custom_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_custom_job(
+            job_service.GetCustomJobRequest(),
+            name='name_value',
+        )
+
+
+def test_list_custom_jobs(transport: str = 'grpc', request_type=job_service.ListCustomJobsRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_custom_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = job_service.ListCustomJobsResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_custom_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.ListCustomJobsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListCustomJobsPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_custom_jobs_from_dict():
+    test_list_custom_jobs(request_type=dict)
+
+
+def test_list_custom_jobs_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_custom_jobs),
+            '__call__') as call:
+        client.list_custom_jobs()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.ListCustomJobsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_custom_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListCustomJobsRequest):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_custom_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListCustomJobsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_custom_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.ListCustomJobsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListCustomJobsAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_custom_jobs_async_from_dict():
+    await test_list_custom_jobs_async(request_type=dict)
+
+
+def test_list_custom_jobs_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.ListCustomJobsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_custom_jobs),
+            '__call__') as call:
+        call.return_value = job_service.ListCustomJobsResponse()
+        client.list_custom_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_custom_jobs_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.ListCustomJobsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_custom_jobs),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListCustomJobsResponse())
+        await client.list_custom_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_list_custom_jobs_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_custom_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = job_service.ListCustomJobsResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_custom_jobs(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+def test_list_custom_jobs_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_custom_jobs(
+            job_service.ListCustomJobsRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_custom_jobs_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_custom_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListCustomJobsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_custom_jobs(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_custom_jobs_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_custom_jobs(
+            job_service.ListCustomJobsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_custom_jobs_pager():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_custom_jobs),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_custom_jobs(request={})
+
+        assert pager._metadata == metadata
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, custom_job.CustomJob) for i in results)
+
+
+def test_list_custom_jobs_pages():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_custom_jobs),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_custom_jobs(request={}).pages)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_custom_jobs_async_pager():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_custom_jobs),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_custom_jobs(request={})
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, custom_job.CustomJob) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_custom_jobs_async_pages():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_custom_jobs),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_custom_jobs(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+
+def test_delete_custom_job(transport: str = 'grpc', request_type=job_service.DeleteCustomJobRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_custom_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.delete_custom_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.DeleteCustomJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+def test_delete_custom_job_from_dict():
+    test_delete_custom_job(request_type=dict)
+
+
+def test_delete_custom_job_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.delete_custom_job), + '__call__') as call: + client.delete_custom_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteCustomJobRequest() + + +@pytest.mark.asyncio +async def test_delete_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteCustomJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteCustomJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_custom_job_async_from_dict(): + await test_delete_custom_job_async(request_type=dict) + + +def test_delete_custom_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteCustomJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_custom_job), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_custom_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteCustomJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_custom_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_delete_custom_job_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_custom_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_custom_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_delete_custom_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_custom_job(
+            job_service.DeleteCustomJobRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_custom_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_custom_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_custom_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_custom_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.delete_custom_job(
+            job_service.DeleteCustomJobRequest(),
+            name='name_value',
+        )
+
+
+def test_cancel_custom_job(transport: str = 'grpc', request_type=job_service.CancelCustomJobRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_custom_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.cancel_custom_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelCustomJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_custom_job_from_dict(): + test_cancel_custom_job(request_type=dict) + + +def test_cancel_custom_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_custom_job), + '__call__') as call: + client.cancel_custom_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelCustomJobRequest() + + +@pytest.mark.asyncio +async def test_cancel_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelCustomJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelCustomJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_custom_job_async_from_dict(): + await test_cancel_custom_job_async(request_type=dict) + + +def test_cancel_custom_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CancelCustomJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_custom_job), + '__call__') as call: + call.return_value = None + client.cancel_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_cancel_custom_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CancelCustomJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.cancel_custom_job),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        await client.cancel_custom_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_cancel_custom_job_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_custom_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.cancel_custom_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_cancel_custom_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.cancel_custom_job(
+            job_service.CancelCustomJobRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_cancel_custom_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_custom_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.cancel_custom_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_cancel_custom_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.cancel_custom_job(
+            job_service.CancelCustomJobRequest(),
+            name='name_value',
+        )
+
+
+def test_create_data_labeling_job(transport: str = 'grpc', request_type=job_service.CreateDataLabelingJobRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_data_labeling_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gca_data_labeling_job.DataLabelingJob(
+            name='name_value',
+            display_name='display_name_value',
+            datasets=['datasets_value'],
+            labeler_count=1375,
+            instruction_uri='instruction_uri_value',
+            inputs_schema_uri='inputs_schema_uri_value',
+            state=job_state.JobState.JOB_STATE_QUEUED,
+            labeling_progress=1810,
+            specialist_pools=['specialist_pools_value'],
+        )
+        response = client.create_data_labeling_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.CreateDataLabelingJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, gca_data_labeling_job.DataLabelingJob)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.datasets == ['datasets_value']
+    assert response.labeler_count == 1375
+    assert response.instruction_uri == 'instruction_uri_value'
+    assert response.inputs_schema_uri == 'inputs_schema_uri_value'
+    assert response.state == job_state.JobState.JOB_STATE_QUEUED
+    assert response.labeling_progress == 1810
+    assert response.specialist_pools == ['specialist_pools_value']
+
+
+def test_create_data_labeling_job_from_dict():
+    test_create_data_labeling_job(request_type=dict)
+
+
+def test_create_data_labeling_job_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_data_labeling_job),
+            '__call__') as call:
+        client.create_data_labeling_job()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.CreateDataLabelingJobRequest()
+
+
+@pytest.mark.asyncio
+async def test_create_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateDataLabelingJobRequest):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_data_labeling_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_data_labeling_job.DataLabelingJob(
+            name='name_value',
+            display_name='display_name_value',
+            datasets=['datasets_value'],
+            labeler_count=1375,
+            instruction_uri='instruction_uri_value',
+            inputs_schema_uri='inputs_schema_uri_value',
+            state=job_state.JobState.JOB_STATE_QUEUED,
+            labeling_progress=1810,
+            specialist_pools=['specialist_pools_value'],
+        ))
+        response = await client.create_data_labeling_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.CreateDataLabelingJobRequest()
+
+    # Establish that the response is the type that we expect.
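+    # Awaiting the FakeUnaryUnaryCall unwraps it, so the async client returns
+    # the plain DataLabelingJob message, just like the sync client.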
+ assert isinstance(response, gca_data_labeling_job.DataLabelingJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.datasets == ['datasets_value'] + assert response.labeler_count == 1375 + assert response.instruction_uri == 'instruction_uri_value' + assert response.inputs_schema_uri == 'inputs_schema_uri_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.labeling_progress == 1810 + assert response.specialist_pools == ['specialist_pools_value'] + + +@pytest.mark.asyncio +async def test_create_data_labeling_job_async_from_dict(): + await test_create_data_labeling_job_async(request_type=dict) + + +def test_create_data_labeling_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CreateDataLabelingJobRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_data_labeling_job), + '__call__') as call: + call.return_value = gca_data_labeling_job.DataLabelingJob() + client.create_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_data_labeling_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CreateDataLabelingJobRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_data_labeling_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_data_labeling_job.DataLabelingJob()) + await client.create_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_create_data_labeling_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_data_labeling_job.DataLabelingJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_data_labeling_job( + parent='parent_value', + data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
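+        # The flattened keyword arguments are merged into a single
+        # CreateDataLabelingJobRequest before the stub is invoked, which is
+        # why the assertions below inspect fields on args[0].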
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].data_labeling_job == gca_data_labeling_job.DataLabelingJob(name='name_value')
+
+
+def test_create_data_labeling_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_data_labeling_job(
+            job_service.CreateDataLabelingJobRequest(),
+            parent='parent_value',
+            data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'),
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_data_labeling_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_data_labeling_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_data_labeling_job.DataLabelingJob())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_data_labeling_job(
+            parent='parent_value',
+            data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].data_labeling_job == gca_data_labeling_job.DataLabelingJob(name='name_value')
+
+
+@pytest.mark.asyncio
+async def test_create_data_labeling_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.create_data_labeling_job(
+            job_service.CreateDataLabelingJobRequest(),
+            parent='parent_value',
+            data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'),
+        )
+
+
+def test_get_data_labeling_job(transport: str = 'grpc', request_type=job_service.GetDataLabelingJobRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_data_labeling_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = data_labeling_job.DataLabelingJob(
+            name='name_value',
+            display_name='display_name_value',
+            datasets=['datasets_value'],
+            labeler_count=1375,
+            instruction_uri='instruction_uri_value',
+            inputs_schema_uri='inputs_schema_uri_value',
+            state=job_state.JobState.JOB_STATE_QUEUED,
+            labeling_progress=1810,
+            specialist_pools=['specialist_pools_value'],
+        )
+        response = client.get_data_labeling_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.GetDataLabelingJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, data_labeling_job.DataLabelingJob)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.datasets == ['datasets_value']
+    assert response.labeler_count == 1375
+    assert response.instruction_uri == 'instruction_uri_value'
+    assert response.inputs_schema_uri == 'inputs_schema_uri_value'
+    assert response.state == job_state.JobState.JOB_STATE_QUEUED
+    assert response.labeling_progress == 1810
+    assert response.specialist_pools == ['specialist_pools_value']
+
+
+def test_get_data_labeling_job_from_dict():
+    test_get_data_labeling_job(request_type=dict)
+
+
+def test_get_data_labeling_job_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_data_labeling_job),
+            '__call__') as call:
+        client.get_data_labeling_job()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.GetDataLabelingJobRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetDataLabelingJobRequest):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_data_labeling_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(data_labeling_job.DataLabelingJob(
+            name='name_value',
+            display_name='display_name_value',
+            datasets=['datasets_value'],
+            labeler_count=1375,
+            instruction_uri='instruction_uri_value',
+            inputs_schema_uri='inputs_schema_uri_value',
+            state=job_state.JobState.JOB_STATE_QUEUED,
+            labeling_progress=1810,
+            specialist_pools=['specialist_pools_value'],
+        ))
+        response = await client.get_data_labeling_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.GetDataLabelingJobRequest()
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, data_labeling_job.DataLabelingJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.datasets == ['datasets_value'] + assert response.labeler_count == 1375 + assert response.instruction_uri == 'instruction_uri_value' + assert response.inputs_schema_uri == 'inputs_schema_uri_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.labeling_progress == 1810 + assert response.specialist_pools == ['specialist_pools_value'] + + +@pytest.mark.asyncio +async def test_get_data_labeling_job_async_from_dict(): + await test_get_data_labeling_job_async(request_type=dict) + + +def test_get_data_labeling_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetDataLabelingJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_data_labeling_job), + '__call__') as call: + call.return_value = data_labeling_job.DataLabelingJob() + client.get_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_data_labeling_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetDataLabelingJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_data_labeling_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(data_labeling_job.DataLabelingJob()) + await client.get_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_get_data_labeling_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = data_labeling_job.DataLabelingJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_data_labeling_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_get_data_labeling_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_data_labeling_job(
+            job_service.GetDataLabelingJobRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_data_labeling_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_data_labeling_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(data_labeling_job.DataLabelingJob())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_data_labeling_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_data_labeling_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_data_labeling_job(
+            job_service.GetDataLabelingJobRequest(),
+            name='name_value',
+        )
+
+
+def test_list_data_labeling_jobs(transport: str = 'grpc', request_type=job_service.ListDataLabelingJobsRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_data_labeling_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = job_service.ListDataLabelingJobsResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_data_labeling_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.ListDataLabelingJobsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListDataLabelingJobsPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_data_labeling_jobs_from_dict():
+    test_list_data_labeling_jobs(request_type=dict)
+
+
+def test_list_data_labeling_jobs_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_data_labeling_jobs),
+            '__call__') as call:
+        client.list_data_labeling_jobs()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.ListDataLabelingJobsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_data_labeling_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListDataLabelingJobsRequest):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_data_labeling_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListDataLabelingJobsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_data_labeling_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.ListDataLabelingJobsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListDataLabelingJobsAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_data_labeling_jobs_async_from_dict():
+    await test_list_data_labeling_jobs_async(request_type=dict)
+
+
+def test_list_data_labeling_jobs_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.ListDataLabelingJobsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_data_labeling_jobs),
+            '__call__') as call:
+        call.return_value = job_service.ListDataLabelingJobsResponse()
+        client.list_data_labeling_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_data_labeling_jobs_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.ListDataLabelingJobsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_data_labeling_jobs),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListDataLabelingJobsResponse())
+        await client.list_data_labeling_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_list_data_labeling_jobs_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_data_labeling_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = job_service.ListDataLabelingJobsResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_data_labeling_jobs(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+def test_list_data_labeling_jobs_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_data_labeling_jobs(
+            job_service.ListDataLabelingJobsRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_data_labeling_jobs_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_data_labeling_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListDataLabelingJobsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_data_labeling_jobs(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_data_labeling_jobs_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_data_labeling_jobs(
+            job_service.ListDataLabelingJobsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_data_labeling_jobs_pager():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_data_labeling_jobs),
+            '__call__') as call:
+        # Set the response to a series of pages.
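+        # The trailing RuntimeError is a sentinel: it would only be raised if
+        # the pager requested more pages than the test supplies.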
+        call.side_effect = (
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[
+                    data_labeling_job.DataLabelingJob(),
+                    data_labeling_job.DataLabelingJob(),
+                    data_labeling_job.DataLabelingJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[
+                    data_labeling_job.DataLabelingJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[
+                    data_labeling_job.DataLabelingJob(),
+                    data_labeling_job.DataLabelingJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_data_labeling_jobs(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, data_labeling_job.DataLabelingJob)
+                   for i in results)
+
+
+def test_list_data_labeling_jobs_pages():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_data_labeling_jobs),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[
+                    data_labeling_job.DataLabelingJob(),
+                    data_labeling_job.DataLabelingJob(),
+                    data_labeling_job.DataLabelingJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[
+                    data_labeling_job.DataLabelingJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[
+                    data_labeling_job.DataLabelingJob(),
+                    data_labeling_job.DataLabelingJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_data_labeling_jobs(request={}).pages)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_data_labeling_jobs_async_pager():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_data_labeling_jobs),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
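+        # Same four pages as the sync pager test; the async pager should
+        # surface the same six items in order.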
+        call.side_effect = (
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[
+                    data_labeling_job.DataLabelingJob(),
+                    data_labeling_job.DataLabelingJob(),
+                    data_labeling_job.DataLabelingJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[
+                    data_labeling_job.DataLabelingJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[
+                    data_labeling_job.DataLabelingJob(),
+                    data_labeling_job.DataLabelingJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_data_labeling_jobs(request={})
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, data_labeling_job.DataLabelingJob)
+                   for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_data_labeling_jobs_async_pages():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_data_labeling_jobs),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[
+                    data_labeling_job.DataLabelingJob(),
+                    data_labeling_job.DataLabelingJob(),
+                    data_labeling_job.DataLabelingJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[
+                    data_labeling_job.DataLabelingJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[
+                    data_labeling_job.DataLabelingJob(),
+                    data_labeling_job.DataLabelingJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_data_labeling_jobs(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+
+def test_delete_data_labeling_job(transport: str = 'grpc', request_type=job_service.DeleteDataLabelingJobRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_data_labeling_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.delete_data_labeling_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.DeleteDataLabelingJobRequest()
+
+    # Establish that the response is the type that we expect.
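+    # Delete is a long-running operation, so the client wraps the raw
+    # Operation proto in an operation future.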
+ assert isinstance(response, future.Future) + + +def test_delete_data_labeling_job_from_dict(): + test_delete_data_labeling_job(request_type=dict) + + +def test_delete_data_labeling_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_data_labeling_job), + '__call__') as call: + client.delete_data_labeling_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteDataLabelingJobRequest() + + +@pytest.mark.asyncio +async def test_delete_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteDataLabelingJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteDataLabelingJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_data_labeling_job_async_from_dict(): + await test_delete_data_labeling_job_async(request_type=dict) + + +def test_delete_data_labeling_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteDataLabelingJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_data_labeling_job), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_data_labeling_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteDataLabelingJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.delete_data_labeling_job),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
+        await client.delete_data_labeling_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_delete_data_labeling_job_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_data_labeling_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_data_labeling_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_delete_data_labeling_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_data_labeling_job(
+            job_service.DeleteDataLabelingJobRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_data_labeling_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_data_labeling_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_data_labeling_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_data_labeling_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.delete_data_labeling_job(
+            job_service.DeleteDataLabelingJobRequest(),
+            name='name_value',
+        )
+
+
+def test_cancel_data_labeling_job(transport: str = 'grpc', request_type=job_service.CancelDataLabelingJobRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelDataLabelingJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_data_labeling_job_from_dict(): + test_cancel_data_labeling_job(request_type=dict) + + +def test_cancel_data_labeling_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_data_labeling_job), + '__call__') as call: + client.cancel_data_labeling_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelDataLabelingJobRequest() + + +@pytest.mark.asyncio +async def test_cancel_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelDataLabelingJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelDataLabelingJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_data_labeling_job_async_from_dict(): + await test_cancel_data_labeling_job_async(request_type=dict) + + +def test_cancel_data_labeling_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CancelDataLabelingJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_data_labeling_job), + '__call__') as call: + call.return_value = None + client.cancel_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_cancel_data_labeling_job_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.CancelDataLabelingJobRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_data_labeling_job),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        await client.cancel_data_labeling_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_cancel_data_labeling_job_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_data_labeling_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.cancel_data_labeling_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_cancel_data_labeling_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.cancel_data_labeling_job(
+            job_service.CancelDataLabelingJobRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_cancel_data_labeling_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_data_labeling_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.cancel_data_labeling_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_cancel_data_labeling_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.cancel_data_labeling_job( + job_service.CancelDataLabelingJobRequest(), + name='name_value', + ) + + +def test_create_hyperparameter_tuning_job(transport: str = 'grpc', request_type=job_service.CreateHyperparameterTuningJobRequest): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob( + name='name_value', + display_name='display_name_value', + max_trial_count=1609, + parallel_trial_count=2128, + max_failed_trial_count=2317, + state=job_state.JobState.JOB_STATE_QUEUED, + ) + response = client.create_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateHyperparameterTuningJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_hyperparameter_tuning_job.HyperparameterTuningJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.max_trial_count == 1609 + assert response.parallel_trial_count == 2128 + assert response.max_failed_trial_count == 2317 + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +def test_create_hyperparameter_tuning_job_from_dict(): + test_create_hyperparameter_tuning_job(request_type=dict) + + +def test_create_hyperparameter_tuning_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_hyperparameter_tuning_job), + '__call__') as call: + client.create_hyperparameter_tuning_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateHyperparameterTuningJobRequest() + + +@pytest.mark.asyncio +async def test_create_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateHyperparameterTuningJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_hyperparameter_tuning_job.HyperparameterTuningJob(
+            name='name_value',
+            display_name='display_name_value',
+            max_trial_count=1609,
+            parallel_trial_count=2128,
+            max_failed_trial_count=2317,
+            state=job_state.JobState.JOB_STATE_QUEUED,
+        ))
+        response = await client.create_hyperparameter_tuning_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.CreateHyperparameterTuningJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, gca_hyperparameter_tuning_job.HyperparameterTuningJob)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.max_trial_count == 1609
+    assert response.parallel_trial_count == 2128
+    assert response.max_failed_trial_count == 2317
+    assert response.state == job_state.JobState.JOB_STATE_QUEUED
+
+
+@pytest.mark.asyncio
+async def test_create_hyperparameter_tuning_job_async_from_dict():
+    await test_create_hyperparameter_tuning_job_async(request_type=dict)
+
+
+def test_create_hyperparameter_tuning_job_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.CreateHyperparameterTuningJobRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_hyperparameter_tuning_job),
+            '__call__') as call:
+        call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob()
+        client.create_hyperparameter_tuning_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_create_hyperparameter_tuning_job_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.CreateHyperparameterTuningJobRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_hyperparameter_tuning_job),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_hyperparameter_tuning_job.HyperparameterTuningJob())
+        await client.create_hyperparameter_tuning_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_create_hyperparameter_tuning_job_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_hyperparameter_tuning_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.create_hyperparameter_tuning_job(
+            parent='parent_value',
+            hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].hyperparameter_tuning_job == gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value')
+
+
+def test_create_hyperparameter_tuning_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_hyperparameter_tuning_job(
+            job_service.CreateHyperparameterTuningJobRequest(),
+            parent='parent_value',
+            hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'),
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_hyperparameter_tuning_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_hyperparameter_tuning_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_hyperparameter_tuning_job.HyperparameterTuningJob())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_hyperparameter_tuning_job(
+            parent='parent_value',
+            hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].hyperparameter_tuning_job == gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value')
+
+
+@pytest.mark.asyncio
+async def test_create_hyperparameter_tuning_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.create_hyperparameter_tuning_job(
+            job_service.CreateHyperparameterTuningJobRequest(),
+            parent='parent_value',
+            hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'),
+        )
+
+
+def test_get_hyperparameter_tuning_job(transport: str = 'grpc', request_type=job_service.GetHyperparameterTuningJobRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
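+    # Note: request_type is parametrized; the *_from_dict variant reruns this
+    # test with a plain dict, which the client coerces into the proto message.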
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_hyperparameter_tuning_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob(
+            name='name_value',
+            display_name='display_name_value',
+            max_trial_count=1609,
+            parallel_trial_count=2128,
+            max_failed_trial_count=2317,
+            state=job_state.JobState.JOB_STATE_QUEUED,
+        )
+        response = client.get_hyperparameter_tuning_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.GetHyperparameterTuningJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, hyperparameter_tuning_job.HyperparameterTuningJob)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.max_trial_count == 1609
+    assert response.parallel_trial_count == 2128
+    assert response.max_failed_trial_count == 2317
+    assert response.state == job_state.JobState.JOB_STATE_QUEUED
+
+
+def test_get_hyperparameter_tuning_job_from_dict():
+    test_get_hyperparameter_tuning_job(request_type=dict)
+
+
+def test_get_hyperparameter_tuning_job_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_hyperparameter_tuning_job),
+            '__call__') as call:
+        client.get_hyperparameter_tuning_job()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.GetHyperparameterTuningJobRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetHyperparameterTuningJobRequest):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_hyperparameter_tuning_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(hyperparameter_tuning_job.HyperparameterTuningJob(
+            name='name_value',
+            display_name='display_name_value',
+            max_trial_count=1609,
+            parallel_trial_count=2128,
+            max_failed_trial_count=2317,
+            state=job_state.JobState.JOB_STATE_QUEUED,
+        ))
+        response = await client.get_hyperparameter_tuning_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.GetHyperparameterTuningJobRequest()
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, hyperparameter_tuning_job.HyperparameterTuningJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.max_trial_count == 1609 + assert response.parallel_trial_count == 2128 + assert response.max_failed_trial_count == 2317 + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +@pytest.mark.asyncio +async def test_get_hyperparameter_tuning_job_async_from_dict(): + await test_get_hyperparameter_tuning_job_async(request_type=dict) + + +def test_get_hyperparameter_tuning_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetHyperparameterTuningJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_hyperparameter_tuning_job), + '__call__') as call: + call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob() + client.get_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_hyperparameter_tuning_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetHyperparameterTuningJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_hyperparameter_tuning_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(hyperparameter_tuning_job.HyperparameterTuningJob()) + await client.get_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_get_hyperparameter_tuning_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_hyperparameter_tuning_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
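+        # Note: the client merges the flattened keyword arguments into a single
+        # request object before invoking the stub, which is why the expectations
+        # below inspect fields of args[0] rather than keyword arguments.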
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_get_hyperparameter_tuning_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_hyperparameter_tuning_job(
+            job_service.GetHyperparameterTuningJobRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_hyperparameter_tuning_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_hyperparameter_tuning_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(hyperparameter_tuning_job.HyperparameterTuningJob())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_hyperparameter_tuning_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_hyperparameter_tuning_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_hyperparameter_tuning_job(
+            job_service.GetHyperparameterTuningJobRequest(),
+            name='name_value',
+        )
+
+
+def test_list_hyperparameter_tuning_jobs(transport: str = 'grpc', request_type=job_service.ListHyperparameterTuningJobsRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_hyperparameter_tuning_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = job_service.ListHyperparameterTuningJobsResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_hyperparameter_tuning_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.ListHyperparameterTuningJobsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListHyperparameterTuningJobsPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_hyperparameter_tuning_jobs_from_dict():
+    test_list_hyperparameter_tuning_jobs(request_type=dict)
+
+
+def test_list_hyperparameter_tuning_jobs_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
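+    # Note: with no request argument at all, the client is expected to build a
+    # default (empty) request message before reaching the transport.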
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_hyperparameter_tuning_jobs),
+            '__call__') as call:
+        client.list_hyperparameter_tuning_jobs()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.ListHyperparameterTuningJobsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_hyperparameter_tuning_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListHyperparameterTuningJobsRequest):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_hyperparameter_tuning_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListHyperparameterTuningJobsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_hyperparameter_tuning_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.ListHyperparameterTuningJobsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListHyperparameterTuningJobsAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_hyperparameter_tuning_jobs_async_from_dict():
+    await test_list_hyperparameter_tuning_jobs_async(request_type=dict)
+
+
+def test_list_hyperparameter_tuning_jobs_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.ListHyperparameterTuningJobsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_hyperparameter_tuning_jobs),
+            '__call__') as call:
+        call.return_value = job_service.ListHyperparameterTuningJobsResponse()
+        client.list_hyperparameter_tuning_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_hyperparameter_tuning_jobs_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.ListHyperparameterTuningJobsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_hyperparameter_tuning_jobs),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListHyperparameterTuningJobsResponse())
+        await client.list_hyperparameter_tuning_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_list_hyperparameter_tuning_jobs_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_hyperparameter_tuning_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = job_service.ListHyperparameterTuningJobsResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_hyperparameter_tuning_jobs(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+def test_list_hyperparameter_tuning_jobs_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_hyperparameter_tuning_jobs(
+            job_service.ListHyperparameterTuningJobsRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_hyperparameter_tuning_jobs_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_hyperparameter_tuning_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListHyperparameterTuningJobsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_hyperparameter_tuning_jobs(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_hyperparameter_tuning_jobs_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_hyperparameter_tuning_jobs(
+            job_service.ListHyperparameterTuningJobsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_hyperparameter_tuning_jobs_pager():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
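+    # Note: each element of side_effect below is one page; the trailing
+    # RuntimeError is a sentinel that fails the test if the pager tries to
+    # fetch beyond the final page.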
+    with mock.patch.object(
+            type(client.transport.list_hyperparameter_tuning_jobs),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_hyperparameter_tuning_jobs(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, hyperparameter_tuning_job.HyperparameterTuningJob)
+                   for i in results)
+
+def test_list_hyperparameter_tuning_jobs_pages():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_hyperparameter_tuning_jobs),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_hyperparameter_tuning_jobs(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_hyperparameter_tuning_jobs_async_pager():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_hyperparameter_tuning_jobs),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
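+        # Note: new_callable=mock.AsyncMock makes the patched stub awaitable,
+        # so the async pager can await each page like a real grpc.aio call.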
+        call.side_effect = (
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_hyperparameter_tuning_jobs(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, hyperparameter_tuning_job.HyperparameterTuningJob)
+                   for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_hyperparameter_tuning_jobs_async_pages():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_hyperparameter_tuning_jobs),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_hyperparameter_tuning_jobs(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+def test_delete_hyperparameter_tuning_job(transport: str = 'grpc', request_type=job_service.DeleteHyperparameterTuningJobRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_hyperparameter_tuning_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.delete_hyperparameter_tuning_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
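+        # Note: delete is a long-running operation; the transport returned a raw
+        # operations_pb2.Operation, which the client wraps in an api_core future
+        # (see the isinstance check below).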
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteHyperparameterTuningJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_hyperparameter_tuning_job_from_dict(): + test_delete_hyperparameter_tuning_job(request_type=dict) + + +def test_delete_hyperparameter_tuning_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_hyperparameter_tuning_job), + '__call__') as call: + client.delete_hyperparameter_tuning_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteHyperparameterTuningJobRequest() + + +@pytest.mark.asyncio +async def test_delete_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteHyperparameterTuningJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteHyperparameterTuningJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_hyperparameter_tuning_job_async_from_dict(): + await test_delete_hyperparameter_tuning_job_async(request_type=dict) + + +def test_delete_hyperparameter_tuning_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteHyperparameterTuningJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_hyperparameter_tuning_job), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
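+    # Note: kw['metadata'] holds the gRPC metadata pairs attached by the client;
+    # the routing header value is derived from the `name` set on the request.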
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_delete_hyperparameter_tuning_job_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.DeleteHyperparameterTuningJobRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_hyperparameter_tuning_job),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
+        await client.delete_hyperparameter_tuning_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_delete_hyperparameter_tuning_job_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_hyperparameter_tuning_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_hyperparameter_tuning_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_delete_hyperparameter_tuning_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_hyperparameter_tuning_job(
+            job_service.DeleteHyperparameterTuningJobRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_hyperparameter_tuning_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_hyperparameter_tuning_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_hyperparameter_tuning_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_delete_hyperparameter_tuning_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_hyperparameter_tuning_job( + job_service.DeleteHyperparameterTuningJobRequest(), + name='name_value', + ) + + +def test_cancel_hyperparameter_tuning_job(transport: str = 'grpc', request_type=job_service.CancelHyperparameterTuningJobRequest): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelHyperparameterTuningJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_hyperparameter_tuning_job_from_dict(): + test_cancel_hyperparameter_tuning_job(request_type=dict) + + +def test_cancel_hyperparameter_tuning_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_hyperparameter_tuning_job), + '__call__') as call: + client.cancel_hyperparameter_tuning_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelHyperparameterTuningJobRequest() + + +@pytest.mark.asyncio +async def test_cancel_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelHyperparameterTuningJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelHyperparameterTuningJobRequest() + + # Establish that the response is the type that we expect. 
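+    # Note: Cancel maps to google.protobuf.Empty on the wire, which the client
+    # surfaces to callers as None.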
+ assert response is None + + +@pytest.mark.asyncio +async def test_cancel_hyperparameter_tuning_job_async_from_dict(): + await test_cancel_hyperparameter_tuning_job_async(request_type=dict) + + +def test_cancel_hyperparameter_tuning_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CancelHyperparameterTuningJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_hyperparameter_tuning_job), + '__call__') as call: + call.return_value = None + client.cancel_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_cancel_hyperparameter_tuning_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CancelHyperparameterTuningJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_hyperparameter_tuning_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_cancel_hyperparameter_tuning_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.cancel_hyperparameter_tuning_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_cancel_hyperparameter_tuning_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.cancel_hyperparameter_tuning_job(
+            job_service.CancelHyperparameterTuningJobRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_cancel_hyperparameter_tuning_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_hyperparameter_tuning_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.cancel_hyperparameter_tuning_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_cancel_hyperparameter_tuning_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.cancel_hyperparameter_tuning_job(
+            job_service.CancelHyperparameterTuningJobRequest(),
+            name='name_value',
+        )
+
+
+def test_create_batch_prediction_job(transport: str = 'grpc', request_type=job_service.CreateBatchPredictionJobRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_batch_prediction_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gca_batch_prediction_job.BatchPredictionJob(
+            name='name_value',
+            display_name='display_name_value',
+            model='model_value',
+            state=job_state.JobState.JOB_STATE_QUEUED,
+        )
+        response = client.create_batch_prediction_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.CreateBatchPredictionJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, gca_batch_prediction_job.BatchPredictionJob)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.model == 'model_value'
+    assert response.state == job_state.JobState.JOB_STATE_QUEUED
+
+
+def test_create_batch_prediction_job_from_dict():
+    test_create_batch_prediction_job(request_type=dict)
+
+
+def test_create_batch_prediction_job_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_batch_prediction_job),
+            '__call__') as call:
+        client.create_batch_prediction_job()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.CreateBatchPredictionJobRequest()
+
+
+@pytest.mark.asyncio
+async def test_create_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateBatchPredictionJobRequest):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_batch_prediction_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_batch_prediction_job.BatchPredictionJob(
+            name='name_value',
+            display_name='display_name_value',
+            model='model_value',
+            state=job_state.JobState.JOB_STATE_QUEUED,
+        ))
+        response = await client.create_batch_prediction_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.CreateBatchPredictionJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, gca_batch_prediction_job.BatchPredictionJob)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.model == 'model_value'
+    assert response.state == job_state.JobState.JOB_STATE_QUEUED
+
+
+@pytest.mark.asyncio
+async def test_create_batch_prediction_job_async_from_dict():
+    await test_create_batch_prediction_job_async(request_type=dict)
+
+
+def test_create_batch_prediction_job_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.CreateBatchPredictionJobRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_batch_prediction_job),
+            '__call__') as call:
+        call.return_value = gca_batch_prediction_job.BatchPredictionJob()
+        client.create_batch_prediction_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_create_batch_prediction_job_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.CreateBatchPredictionJobRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_batch_prediction_job),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_batch_prediction_job.BatchPredictionJob())
+        await client.create_batch_prediction_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_create_batch_prediction_job_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_batch_prediction_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gca_batch_prediction_job.BatchPredictionJob()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.create_batch_prediction_job(
+            parent='parent_value',
+            batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].batch_prediction_job == gca_batch_prediction_job.BatchPredictionJob(name='name_value')
+
+
+def test_create_batch_prediction_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_batch_prediction_job(
+            job_service.CreateBatchPredictionJobRequest(),
+            parent='parent_value',
+            batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'),
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_batch_prediction_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_batch_prediction_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_batch_prediction_job.BatchPredictionJob())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_batch_prediction_job(
+            parent='parent_value',
+            batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].batch_prediction_job == gca_batch_prediction_job.BatchPredictionJob(name='name_value')
+
+
+@pytest.mark.asyncio
+async def test_create_batch_prediction_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.create_batch_prediction_job(
+            job_service.CreateBatchPredictionJobRequest(),
+            parent='parent_value',
+            batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'),
+        )
+
+
+def test_get_batch_prediction_job(transport: str = 'grpc', request_type=job_service.GetBatchPredictionJobRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_batch_prediction_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = batch_prediction_job.BatchPredictionJob(
+            name='name_value',
+            display_name='display_name_value',
+            model='model_value',
+            state=job_state.JobState.JOB_STATE_QUEUED,
+        )
+        response = client.get_batch_prediction_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.GetBatchPredictionJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, batch_prediction_job.BatchPredictionJob)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.model == 'model_value'
+    assert response.state == job_state.JobState.JOB_STATE_QUEUED
+
+
+def test_get_batch_prediction_job_from_dict():
+    test_get_batch_prediction_job(request_type=dict)
+
+
+def test_get_batch_prediction_job_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_batch_prediction_job),
+            '__call__') as call:
+        client.get_batch_prediction_job()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.GetBatchPredictionJobRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetBatchPredictionJobRequest):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_batch_prediction_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(batch_prediction_job.BatchPredictionJob(
+            name='name_value',
+            display_name='display_name_value',
+            model='model_value',
+            state=job_state.JobState.JOB_STATE_QUEUED,
+        ))
+        response = await client.get_batch_prediction_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetBatchPredictionJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, batch_prediction_job.BatchPredictionJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.model == 'model_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +@pytest.mark.asyncio +async def test_get_batch_prediction_job_async_from_dict(): + await test_get_batch_prediction_job_async(request_type=dict) + + +def test_get_batch_prediction_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetBatchPredictionJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_batch_prediction_job), + '__call__') as call: + call.return_value = batch_prediction_job.BatchPredictionJob() + client.get_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_batch_prediction_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetBatchPredictionJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_batch_prediction_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(batch_prediction_job.BatchPredictionJob()) + await client.get_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_get_batch_prediction_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = batch_prediction_job.BatchPredictionJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_batch_prediction_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_get_batch_prediction_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_batch_prediction_job(
+            job_service.GetBatchPredictionJobRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_batch_prediction_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_batch_prediction_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(batch_prediction_job.BatchPredictionJob())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_batch_prediction_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_batch_prediction_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_batch_prediction_job(
+            job_service.GetBatchPredictionJobRequest(),
+            name='name_value',
+        )
+
+
+def test_list_batch_prediction_jobs(transport: str = 'grpc', request_type=job_service.ListBatchPredictionJobsRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_batch_prediction_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = job_service.ListBatchPredictionJobsResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_batch_prediction_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.ListBatchPredictionJobsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListBatchPredictionJobsPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_batch_prediction_jobs_from_dict():
+    test_list_batch_prediction_jobs(request_type=dict)
+
+
+def test_list_batch_prediction_jobs_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
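+    # (Patching '__call__' on the type of the bound stub callable intercepts
+    # the RPC at the transport layer, so no real channel is ever used.)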
+    with mock.patch.object(
+            type(client.transport.list_batch_prediction_jobs),
+            '__call__') as call:
+        client.list_batch_prediction_jobs()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.ListBatchPredictionJobsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListBatchPredictionJobsRequest):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_batch_prediction_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListBatchPredictionJobsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_batch_prediction_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.ListBatchPredictionJobsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListBatchPredictionJobsAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_async_from_dict():
+    await test_list_batch_prediction_jobs_async(request_type=dict)
+
+
+def test_list_batch_prediction_jobs_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.ListBatchPredictionJobsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_batch_prediction_jobs),
+            '__call__') as call:
+        call.return_value = job_service.ListBatchPredictionJobsResponse()
+        client.list_batch_prediction_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.ListBatchPredictionJobsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_batch_prediction_jobs),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListBatchPredictionJobsResponse())
+        await client.list_batch_prediction_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_list_batch_prediction_jobs_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_batch_prediction_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = job_service.ListBatchPredictionJobsResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_batch_prediction_jobs(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+def test_list_batch_prediction_jobs_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_batch_prediction_jobs(
+            job_service.ListBatchPredictionJobsRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_batch_prediction_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListBatchPredictionJobsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_batch_prediction_jobs(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_batch_prediction_jobs(
+            job_service.ListBatchPredictionJobsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_batch_prediction_jobs_pager():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_batch_prediction_jobs),
+            '__call__') as call:
+        # Set the response to a series of pages.
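+        # (mock consumes one side_effect element per call; the trailing
+        # RuntimeError guards against the pager requesting more pages than
+        # the test provides.)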
+        call.side_effect = (
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_batch_prediction_jobs(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, batch_prediction_job.BatchPredictionJob)
+                   for i in results)
+
+def test_list_batch_prediction_jobs_pages():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_batch_prediction_jobs),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_batch_prediction_jobs(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_async_pager():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_batch_prediction_jobs),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_batch_prediction_jobs(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, batch_prediction_job.BatchPredictionJob)
+                   for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_async_pages():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_batch_prediction_jobs),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_batch_prediction_jobs(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+def test_delete_batch_prediction_job(transport: str = 'grpc', request_type=job_service.DeleteBatchPredictionJobRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_batch_prediction_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.delete_batch_prediction_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.DeleteBatchPredictionJobRequest()
+
+    # Establish that the response is the type that we expect.
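+    # (Delete is a long-running operation, so the client wraps the returned
+    # operations_pb2.Operation in an api-core future.)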
+ assert isinstance(response, future.Future) + + +def test_delete_batch_prediction_job_from_dict(): + test_delete_batch_prediction_job(request_type=dict) + + +def test_delete_batch_prediction_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_batch_prediction_job), + '__call__') as call: + client.delete_batch_prediction_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteBatchPredictionJobRequest() + + +@pytest.mark.asyncio +async def test_delete_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteBatchPredictionJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteBatchPredictionJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_batch_prediction_job_async_from_dict(): + await test_delete_batch_prediction_job_async(request_type=dict) + + +def test_delete_batch_prediction_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteBatchPredictionJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_batch_prediction_job), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_batch_prediction_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteBatchPredictionJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.delete_batch_prediction_job),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
+        await client.delete_batch_prediction_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_delete_batch_prediction_job_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_batch_prediction_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_batch_prediction_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_delete_batch_prediction_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_batch_prediction_job(
+            job_service.DeleteBatchPredictionJobRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_batch_prediction_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_batch_prediction_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_batch_prediction_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_batch_prediction_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
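+    # (Accepting both would make it ambiguous which values take precedence,
+    # so the generated clients reject the combination outright.)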
+ with pytest.raises(ValueError): + await client.delete_batch_prediction_job( + job_service.DeleteBatchPredictionJobRequest(), + name='name_value', + ) + + +def test_cancel_batch_prediction_job(transport: str = 'grpc', request_type=job_service.CancelBatchPredictionJobRequest): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelBatchPredictionJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_batch_prediction_job_from_dict(): + test_cancel_batch_prediction_job(request_type=dict) + + +def test_cancel_batch_prediction_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_batch_prediction_job), + '__call__') as call: + client.cancel_batch_prediction_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelBatchPredictionJobRequest() + + +@pytest.mark.asyncio +async def test_cancel_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelBatchPredictionJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_batch_prediction_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_batch_prediction_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelBatchPredictionJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_batch_prediction_job_async_from_dict(): + await test_cancel_batch_prediction_job_async(request_type=dict) + + +def test_cancel_batch_prediction_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
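+    # (The x-goog-request-params metadata entry allows the backend to route
+    # the request without having to parse the message body.)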
+    request = job_service.CancelBatchPredictionJobRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_batch_prediction_job),
+            '__call__') as call:
+        call.return_value = None
+        client.cancel_batch_prediction_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_cancel_batch_prediction_job_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.CancelBatchPredictionJobRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_batch_prediction_job),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        await client.cancel_batch_prediction_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_cancel_batch_prediction_job_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_batch_prediction_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.cancel_batch_prediction_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_cancel_batch_prediction_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.cancel_batch_prediction_job(
+            job_service.CancelBatchPredictionJobRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_cancel_batch_prediction_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_batch_prediction_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+ response = await client.cancel_batch_prediction_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_cancel_batch_prediction_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.cancel_batch_prediction_job( + job_service.CancelBatchPredictionJobRequest(), + name='name_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.JobServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.JobServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = JobServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.JobServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = JobServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.JobServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = JobServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.JobServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.JobServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.JobServiceGrpcTransport, + transports.JobServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.JobServiceGrpcTransport, + ) + +def test_job_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.JobServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_job_service_base_transport(): + # Instantiate the base transport. 
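+    # (JobServiceTransport is the abstract base class; every RPC stub on it
+    # should raise NotImplementedError until a concrete transport overrides it.)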
+    with mock.patch('google.cloud.aiplatform_v1.services.job_service.transports.JobServiceTransport.__init__') as Transport:
+        Transport.return_value = None
+        transport = transports.JobServiceTransport(
+            credentials=ga_credentials.AnonymousCredentials(),
+        )
+
+    # Every method on the transport should just blindly
+    # raise NotImplementedError.
+    methods = (
+        'create_custom_job',
+        'get_custom_job',
+        'list_custom_jobs',
+        'delete_custom_job',
+        'cancel_custom_job',
+        'create_data_labeling_job',
+        'get_data_labeling_job',
+        'list_data_labeling_jobs',
+        'delete_data_labeling_job',
+        'cancel_data_labeling_job',
+        'create_hyperparameter_tuning_job',
+        'get_hyperparameter_tuning_job',
+        'list_hyperparameter_tuning_jobs',
+        'delete_hyperparameter_tuning_job',
+        'cancel_hyperparameter_tuning_job',
+        'create_batch_prediction_job',
+        'get_batch_prediction_job',
+        'list_batch_prediction_jobs',
+        'delete_batch_prediction_job',
+        'cancel_batch_prediction_job',
+    )
+    for method in methods:
+        with pytest.raises(NotImplementedError):
+            getattr(transport, method)(request=object())
+
+    # Additionally, the LRO client (a property) should
+    # also raise NotImplementedError
+    with pytest.raises(NotImplementedError):
+        transport.operations_client
+
+
+@requires_google_auth_gte_1_25_0
+def test_job_service_base_transport_with_credentials_file():
+    # Instantiate the base transport with a credentials file
+    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.JobServiceTransport(
+            credentials_file="credentials.json",
+            quota_project_id="octopus",
+        )
+        load_creds.assert_called_once_with(
+            "credentials.json",
+            scopes=None,
+            default_scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id="octopus",
+        )
+
+
+@requires_google_auth_lt_1_25_0
+def test_job_service_base_transport_with_credentials_file_old_google_auth():
+    # Instantiate the base transport with a credentials file
+    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.JobServiceTransport(
+            credentials_file="credentials.json",
+            quota_project_id="octopus",
+        )
+        load_creds.assert_called_once_with(
+            "credentials.json",
+            scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id="octopus",
+        )
+
+
+def test_job_service_base_transport_with_adc():
+    # Test the default credentials are used if credentials and credentials_file are None.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.JobServiceTransport()
+        adc.assert_called_once()
+
+
+@requires_google_auth_gte_1_25_0
+def test_job_service_auth_adc():
+    # If no credentials are provided, we should use ADC credentials.
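+    # (ADC here means Application Default Credentials, resolved through
+    # google.auth.default(); the mock below stands in for that lookup.)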
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        JobServiceClient()
+        adc.assert_called_once_with(
+            scopes=None,
+            default_scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id=None,
+        )
+
+
+@requires_google_auth_lt_1_25_0
+def test_job_service_auth_adc_old_google_auth():
+    # If no credentials are provided, we should use ADC credentials.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        JobServiceClient()
+        adc.assert_called_once_with(
+            scopes=('https://www.googleapis.com/auth/cloud-platform',),
+            quota_project_id=None,
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.JobServiceGrpcTransport,
+        transports.JobServiceGrpcAsyncIOTransport,
+    ],
+)
+@requires_google_auth_gte_1_25_0
+def test_job_service_transport_auth_adc(transport_class):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport_class(quota_project_id="octopus", scopes=["1", "2"])
+        adc.assert_called_once_with(
+            scopes=["1", "2"],
+            default_scopes=('https://www.googleapis.com/auth/cloud-platform',),
+            quota_project_id="octopus",
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.JobServiceGrpcTransport,
+        transports.JobServiceGrpcAsyncIOTransport,
+    ],
+)
+@requires_google_auth_lt_1_25_0
+def test_job_service_transport_auth_adc_old_google_auth(transport_class):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
+    with mock.patch.object(google.auth, "default", autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport_class(quota_project_id="octopus")
+        adc.assert_called_once_with(
+            scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id="octopus",
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class,grpc_helpers",
+    [
+        (transports.JobServiceGrpcTransport, grpc_helpers),
+        (transports.JobServiceGrpcAsyncIOTransport, grpc_helpers_async)
+    ],
+)
+def test_job_service_transport_create_channel(transport_class, grpc_helpers):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
+    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
+        grpc_helpers, "create_channel", autospec=True
+    ) as create_channel:
+        creds = ga_credentials.AnonymousCredentials()
+        adc.return_value = (creds, None)
+        transport_class(
+            quota_project_id="octopus",
+            scopes=["1", "2"]
+        )
+
+        create_channel.assert_called_with(
+            "aiplatform.googleapis.com:443",
+            credentials=creds,
+            credentials_file=None,
+            quota_project_id="octopus",
+            default_scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            scopes=["1", "2"],
+            default_host="aiplatform.googleapis.com",
+            ssl_credentials=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+
+@pytest.mark.parametrize("transport_class", [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport])
+def test_job_service_grpc_transport_client_cert_source_for_mtls(
+    transport_class
+):
+    cred = ga_credentials.AnonymousCredentials()
+
+    # Check ssl_channel_credentials is used if provided.
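+    # (Explicitly supplied channel credentials take precedence; the
+    # client_cert_source_for_mtls callback should only be consulted when none
+    # are given, which the second half of this test verifies.)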
+    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+        mock_ssl_channel_creds = mock.Mock()
+        transport_class(
+            host="squid.clam.whelk",
+            credentials=cred,
+            ssl_channel_credentials=mock_ssl_channel_creds
+        )
+        mock_create_channel.assert_called_once_with(
+            "squid.clam.whelk:443",
+            credentials=cred,
+            credentials_file=None,
+            scopes=None,
+            ssl_credentials=mock_ssl_channel_creds,
+            quota_project_id=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+    # is used.
+    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+            transport_class(
+                credentials=cred,
+                client_cert_source_for_mtls=client_cert_source_callback
+            )
+            expected_cert, expected_key = client_cert_source_callback()
+            mock_ssl_cred.assert_called_once_with(
+                certificate_chain=expected_cert,
+                private_key=expected_key
+            )
+
+
+def test_job_service_host_no_port():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
+    )
+    assert client.transport._host == 'aiplatform.googleapis.com:443'
+
+
+def test_job_service_host_with_port():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
+    )
+    assert client.transport._host == 'aiplatform.googleapis.com:8000'
+
+def test_job_service_grpc_transport_channel():
+    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.JobServiceGrpcTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+def test_job_service_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.JobServiceGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport])
+def test_job_service_transport_channel_mtls_with_client_cert_source(
+    transport_class
+):
+    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
+        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+            mock_ssl_cred = mock.Mock()
+            grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+
+            cred = ga_credentials.AnonymousCredentials()
+            with pytest.warns(DeprecationWarning):
+                with mock.patch.object(google.auth, 'default') as adc:
+                    adc.return_value = (cred, None)
+                    transport = transport_class(
+                        host="squid.clam.whelk",
+                        api_mtls_endpoint="mtls.squid.clam.whelk",
+                        client_cert_source=client_cert_source_callback,
+                    )
+                    adc.assert_called_once()
+
+            grpc_ssl_channel_cred.assert_called_once_with(
+                certificate_chain=b"cert bytes", private_key=b"key bytes"
+            )
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+            assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport])
+def test_job_service_transport_channel_mtls_with_adc(
+    transport_class
+):
+    mock_ssl_cred = mock.Mock()
+    with mock.patch.multiple(
+        "google.auth.transport.grpc.SslCredentials",
+        __init__=mock.Mock(return_value=None),
+        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+    ):
+        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+            mock_cred = mock.Mock()
+
+            with pytest.warns(DeprecationWarning):
+                transport = transport_class(
+                    host="squid.clam.whelk",
+                    credentials=mock_cred,
+                    api_mtls_endpoint="mtls.squid.clam.whelk",
+                    client_cert_source=None,
+                )
+
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=mock_cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_job_service_grpc_lro_client():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(
+        transport.operations_client,
+        operations_v1.OperationsClient,
+    )
+
+    # Ensure that subsequent calls to the property send the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_job_service_grpc_lro_async_client():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc_asyncio',
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
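+    # (The async transport exposes OperationsAsyncClient instead, and the
+    # property memoizes, as the identity check below confirms.)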
+ assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_batch_prediction_job_path(): + project = "squid" + location = "clam" + batch_prediction_job = "whelk" + expected = "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format(project=project, location=location, batch_prediction_job=batch_prediction_job, ) + actual = JobServiceClient.batch_prediction_job_path(project, location, batch_prediction_job) + assert expected == actual + + +def test_parse_batch_prediction_job_path(): + expected = { + "project": "octopus", + "location": "oyster", + "batch_prediction_job": "nudibranch", + } + path = JobServiceClient.batch_prediction_job_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_batch_prediction_job_path(path) + assert expected == actual + +def test_custom_job_path(): + project = "cuttlefish" + location = "mussel" + custom_job = "winkle" + expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) + actual = JobServiceClient.custom_job_path(project, location, custom_job) + assert expected == actual + + +def test_parse_custom_job_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "custom_job": "abalone", + } + path = JobServiceClient.custom_job_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_custom_job_path(path) + assert expected == actual + +def test_data_labeling_job_path(): + project = "squid" + location = "clam" + data_labeling_job = "whelk" + expected = "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}".format(project=project, location=location, data_labeling_job=data_labeling_job, ) + actual = JobServiceClient.data_labeling_job_path(project, location, data_labeling_job) + assert expected == actual + + +def test_parse_data_labeling_job_path(): + expected = { + "project": "octopus", + "location": "oyster", + "data_labeling_job": "nudibranch", + } + path = JobServiceClient.data_labeling_job_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_data_labeling_job_path(path) + assert expected == actual + +def test_dataset_path(): + project = "cuttlefish" + location = "mussel" + dataset = "winkle" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) + actual = JobServiceClient.dataset_path(project, location, dataset) + assert expected == actual + + +def test_parse_dataset_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "dataset": "abalone", + } + path = JobServiceClient.dataset_path(**expected) + + # Check that the path construction is reversible. 
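+    # (Each parse_* helper inverts the corresponding *_path template,
+    # recovering the original components from the formatted resource name.)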
+ actual = JobServiceClient.parse_dataset_path(path) + assert expected == actual + +def test_hyperparameter_tuning_job_path(): + project = "squid" + location = "clam" + hyperparameter_tuning_job = "whelk" + expected = "projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}".format(project=project, location=location, hyperparameter_tuning_job=hyperparameter_tuning_job, ) + actual = JobServiceClient.hyperparameter_tuning_job_path(project, location, hyperparameter_tuning_job) + assert expected == actual + + +def test_parse_hyperparameter_tuning_job_path(): + expected = { + "project": "octopus", + "location": "oyster", + "hyperparameter_tuning_job": "nudibranch", + } + path = JobServiceClient.hyperparameter_tuning_job_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_hyperparameter_tuning_job_path(path) + assert expected == actual + +def test_model_path(): + project = "cuttlefish" + location = "mussel" + model = "winkle" + expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + actual = JobServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "model": "abalone", + } + path = JobServiceClient.model_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_model_path(path) + assert expected == actual + +def test_network_path(): + project = "squid" + network = "clam" + expected = "projects/{project}/global/networks/{network}".format(project=project, network=network, ) + actual = JobServiceClient.network_path(project, network) + assert expected == actual + + +def test_parse_network_path(): + expected = { + "project": "whelk", + "network": "octopus", + } + path = JobServiceClient.network_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_network_path(path) + assert expected == actual + +def test_trial_path(): + project = "oyster" + location = "nudibranch" + study = "cuttlefish" + trial = "mussel" + expected = "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, ) + actual = JobServiceClient.trial_path(project, location, study, trial) + assert expected == actual + + +def test_parse_trial_path(): + expected = { + "project": "winkle", + "location": "nautilus", + "study": "scallop", + "trial": "abalone", + } + path = JobServiceClient.trial_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_trial_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = JobServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = JobServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = JobServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = JobServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = JobServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = JobServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = JobServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = JobServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = JobServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = JobServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = JobServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.JobServiceTransport, '_prep_wrapped_messages') as prep: + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.JobServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = JobServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_migration_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_migration_service.py new file mode 100644 index 0000000000..829b03be6e --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_migration_service.py @@ -0,0 +1,1753 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1.services.migration_service import MigrationServiceAsyncClient +from google.cloud.aiplatform_v1.services.migration_service import MigrationServiceClient +from google.cloud.aiplatform_v1.services.migration_service import pagers +from google.cloud.aiplatform_v1.services.migration_service import transports +from google.cloud.aiplatform_v1.services.migration_service.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.aiplatform_v1.types import migratable_resource +from google.cloud.aiplatform_v1.types import migration_service +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
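+# (When the default endpoint already points at localhost, the helper below
+# substitutes "foo.googleapis.com" so the mTLS-endpoint tests still exercise
+# a non-local hostname.)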
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert MigrationServiceClient._get_default_mtls_endpoint(None) is None + assert MigrationServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert MigrationServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert MigrationServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert MigrationServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert MigrationServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + MigrationServiceClient, + MigrationServiceAsyncClient, +]) +def test_migration_service_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +@pytest.mark.parametrize("client_class", [ + MigrationServiceClient, + MigrationServiceAsyncClient, +]) +def test_migration_service_client_service_account_always_use_jwt(client_class): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.MigrationServiceGrpcTransport, "grpc"), + (transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_migration_service_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + MigrationServiceClient, + MigrationServiceAsyncClient, +]) +def test_migration_service_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +def test_migration_service_client_get_transport_class(): + transport = MigrationServiceClient.get_transport_class() + available_transports = [ + 
transports.MigrationServiceGrpcTransport,
+    ]
+    assert transport in available_transports
+
+    transport = MigrationServiceClient.get_transport_class("grpc")
+    assert transport == transports.MigrationServiceGrpcTransport
+
+
+@pytest.mark.parametrize("client_class,transport_class,transport_name", [
+    (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"),
+    (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"),
+])
+@mock.patch.object(MigrationServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceClient))
+@mock.patch.object(MigrationServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceAsyncClient))
+def test_migration_service_client_client_options(client_class, transport_class, transport_name):
+    # Check that if a transport instance is provided, no new transport is created.
+    with mock.patch.object(MigrationServiceClient, 'get_transport_class') as gtc:
+        transport = transport_class(
+            credentials=ga_credentials.AnonymousCredentials()
+        )
+        client = client_class(transport=transport)
+        gtc.assert_not_called()
+
+    # Check that if a transport name is provided as a str, a new transport is created.
+    with mock.patch.object(MigrationServiceClient, 'get_transport_class') as gtc:
+        client = client_class(transport=transport_name)
+        gtc.assert_called()
+
+    # Check the case api_endpoint is provided.
+    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
+    with mock.patch.object(transport_class, '__init__') as patched:
+        patched.return_value = None
+        client = client_class(client_options=options)
+        patched.assert_called_once_with(
+            credentials=None,
+            credentials_file=None,
+            host="squid.clam.whelk",
+            scopes=None,
+            client_cert_source_for_mtls=None,
+            quota_project_id=None,
+            client_info=transports.base.DEFAULT_CLIENT_INFO,
+        )
+
+    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+    # "never".
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+        with mock.patch.object(transport_class, '__init__') as patched:
+            patched.return_value = None
+            client = client_class()
+            patched.assert_called_once_with(
+                credentials=None,
+                credentials_file=None,
+                host=client.DEFAULT_ENDPOINT,
+                scopes=None,
+                client_cert_source_for_mtls=None,
+                quota_project_id=None,
+                client_info=transports.base.DEFAULT_CLIENT_INFO,
+            )
+
+    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+    # "always".
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+        with mock.patch.object(transport_class, '__init__') as patched:
+            patched.return_value = None
+            client = client_class()
+            patched.assert_called_once_with(
+                credentials=None,
+                credentials_file=None,
+                host=client.DEFAULT_MTLS_ENDPOINT,
+                scopes=None,
+                client_cert_source_for_mtls=None,
+                quota_project_id=None,
+                client_info=transports.base.DEFAULT_CLIENT_INFO,
+            )
+
+    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+    # unsupported value.
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+        with pytest.raises(MutualTLSChannelError):
+            client = client_class()
+
+    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
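+    # Only the strings "true" and "false" are accepted for this variable; any
+    # other value should cause the client constructor to raise a ValueError.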
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc", "true"), + (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc", "false"), + (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(MigrationServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceClient)) +@mock.patch.object(MigrationServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_migration_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
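+    # Unlike the case above, no explicit client_cert_source option is passed
+    # here; google.auth.transport.mtls is patched instead, so that a default
+    # (ADC-provided) client certificate appears to be available.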
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
+        with mock.patch.object(transport_class, '__init__') as patched:
+            with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
+                with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
+                    if use_client_cert_env == "false":
+                        expected_host = client_class.DEFAULT_ENDPOINT
+                        expected_client_cert_source = None
+                    else:
+                        expected_host = client_class.DEFAULT_MTLS_ENDPOINT
+                        expected_client_cert_source = client_cert_source_callback
+
+                    patched.return_value = None
+                    client = client_class()
+                    patched.assert_called_once_with(
+                        credentials=None,
+                        credentials_file=None,
+                        host=expected_host,
+                        scopes=None,
+                        client_cert_source_for_mtls=expected_client_cert_source,
+                        quota_project_id=None,
+                        client_info=transports.base.DEFAULT_CLIENT_INFO,
+                    )
+
+    # Check the case client_cert_source and ADC client cert are not provided.
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
+        with mock.patch.object(transport_class, '__init__') as patched:
+            with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
+                patched.return_value = None
+                client = client_class()
+                patched.assert_called_once_with(
+                    credentials=None,
+                    credentials_file=None,
+                    host=client.DEFAULT_ENDPOINT,
+                    scopes=None,
+                    client_cert_source_for_mtls=None,
+                    quota_project_id=None,
+                    client_info=transports.base.DEFAULT_CLIENT_INFO,
+                )
+
+
+@pytest.mark.parametrize("client_class,transport_class,transport_name", [
+    (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"),
+    (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"),
+])
+def test_migration_service_client_client_options_scopes(client_class, transport_class, transport_name):
+    # Check the case scopes are provided.
+    options = client_options.ClientOptions(
+        scopes=["1", "2"],
+    )
+    with mock.patch.object(transport_class, '__init__') as patched:
+        patched.return_value = None
+        client = client_class(client_options=options)
+        patched.assert_called_once_with(
+            credentials=None,
+            credentials_file=None,
+            host=client.DEFAULT_ENDPOINT,
+            scopes=["1", "2"],
+            client_cert_source_for_mtls=None,
+            quota_project_id=None,
+            client_info=transports.base.DEFAULT_CLIENT_INFO,
+        )
+
+@pytest.mark.parametrize("client_class,transport_class,transport_name", [
+    (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"),
+    (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"),
+])
+def test_migration_service_client_client_options_credentials_file(client_class, transport_class, transport_name):
+    # Check the case credentials file is provided.
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_migration_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1.services.migration_service.transports.MigrationServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = MigrationServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_search_migratable_resources(transport: str = 'grpc', request_type=migration_service.SearchMigratableResourcesRequest): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_migratable_resources), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = migration_service.SearchMigratableResourcesResponse( + next_page_token='next_page_token_value', + ) + response = client.search_migratable_resources(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == migration_service.SearchMigratableResourcesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.SearchMigratableResourcesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_search_migratable_resources_from_dict(): + test_search_migratable_resources(request_type=dict) + + +def test_search_migratable_resources_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_migratable_resources), + '__call__') as call: + client.search_migratable_resources() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == migration_service.SearchMigratableResourcesRequest() + + +@pytest.mark.asyncio +async def test_search_migratable_resources_async(transport: str = 'grpc_asyncio', request_type=migration_service.SearchMigratableResourcesRequest): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_migratable_resources),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_service.SearchMigratableResourcesResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.search_migratable_resources(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == migration_service.SearchMigratableResourcesRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.SearchMigratableResourcesAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_search_migratable_resources_async_from_dict():
+    await test_search_migratable_resources_async(request_type=dict)
+
+
+def test_search_migratable_resources_field_headers():
+    client = MigrationServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = migration_service.SearchMigratableResourcesRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_migratable_resources),
+            '__call__') as call:
+        call.return_value = migration_service.SearchMigratableResourcesResponse()
+        client.search_migratable_resources(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_search_migratable_resources_field_headers_async():
+    client = MigrationServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = migration_service.SearchMigratableResourcesRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_migratable_resources),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_service.SearchMigratableResourcesResponse())
+        await client.search_migratable_resources(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_search_migratable_resources_flattened():
+    client = MigrationServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_migratable_resources),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = migration_service.SearchMigratableResourcesResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.search_migratable_resources(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+def test_search_migratable_resources_flattened_error():
+    client = MigrationServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.search_migratable_resources(
+            migration_service.SearchMigratableResourcesRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_search_migratable_resources_flattened_async():
+    client = MigrationServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_migratable_resources),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_service.SearchMigratableResourcesResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.search_migratable_resources(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_search_migratable_resources_flattened_error_async():
+    client = MigrationServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.search_migratable_resources(
+            migration_service.SearchMigratableResourcesRequest(),
+            parent='parent_value',
+        )
+
+
+def test_search_migratable_resources_pager():
+    client = MigrationServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_migratable_resources),
+            '__call__') as call:
+        # Set the response to a series of pages.
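+        # Four responses are queued (3 + 0 + 1 + 2 = 6 resources in total); the
+        # trailing RuntimeError is a sentinel that would surface if the pager
+        # ever requested a page past the final one.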
+        call.side_effect = (
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[
+                    migratable_resource.MigratableResource(),
+                    migratable_resource.MigratableResource(),
+                    migratable_resource.MigratableResource(),
+                ],
+                next_page_token='abc',
+            ),
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[],
+                next_page_token='def',
+            ),
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[
+                    migratable_resource.MigratableResource(),
+                ],
+                next_page_token='ghi',
+            ),
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[
+                    migratable_resource.MigratableResource(),
+                    migratable_resource.MigratableResource(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.search_migratable_resources(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, migratable_resource.MigratableResource)
+                   for i in results)
+
+def test_search_migratable_resources_pages():
+    client = MigrationServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_migratable_resources),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[
+                    migratable_resource.MigratableResource(),
+                    migratable_resource.MigratableResource(),
+                    migratable_resource.MigratableResource(),
+                ],
+                next_page_token='abc',
+            ),
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[],
+                next_page_token='def',
+            ),
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[
+                    migratable_resource.MigratableResource(),
+                ],
+                next_page_token='ghi',
+            ),
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[
+                    migratable_resource.MigratableResource(),
+                    migratable_resource.MigratableResource(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.search_migratable_resources(request={}).pages)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_search_migratable_resources_async_pager():
+    client = MigrationServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_migratable_resources),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[
+                    migratable_resource.MigratableResource(),
+                    migratable_resource.MigratableResource(),
+                    migratable_resource.MigratableResource(),
+                ],
+                next_page_token='abc',
+            ),
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[],
+                next_page_token='def',
+            ),
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[
+                    migratable_resource.MigratableResource(),
+                ],
+                next_page_token='ghi',
+            ),
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[
+                    migratable_resource.MigratableResource(),
+                    migratable_resource.MigratableResource(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.search_migratable_resources(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, migratable_resource.MigratableResource)
+                   for i in responses)
+
+@pytest.mark.asyncio
+async def test_search_migratable_resources_async_pages():
+    client = MigrationServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_migratable_resources),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[
+                    migratable_resource.MigratableResource(),
+                    migratable_resource.MigratableResource(),
+                    migratable_resource.MigratableResource(),
+                ],
+                next_page_token='abc',
+            ),
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[],
+                next_page_token='def',
+            ),
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[
+                    migratable_resource.MigratableResource(),
+                ],
+                next_page_token='ghi',
+            ),
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[
+                    migratable_resource.MigratableResource(),
+                    migratable_resource.MigratableResource(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.search_migratable_resources(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+def test_batch_migrate_resources(transport: str = 'grpc', request_type=migration_service.BatchMigrateResourcesRequest):
+    client = MigrationServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.batch_migrate_resources),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.batch_migrate_resources(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == migration_service.BatchMigrateResourcesRequest()
+
+    # Establish that the response is the type that we expect.
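+    # BatchMigrateResources is a long-running operation, so the client wraps
+    # the raw operations_pb2.Operation in an api-core future rather than
+    # returning a plain response message.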
+ assert isinstance(response, future.Future) + + +def test_batch_migrate_resources_from_dict(): + test_batch_migrate_resources(request_type=dict) + + +def test_batch_migrate_resources_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_migrate_resources), + '__call__') as call: + client.batch_migrate_resources() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == migration_service.BatchMigrateResourcesRequest() + + +@pytest.mark.asyncio +async def test_batch_migrate_resources_async(transport: str = 'grpc_asyncio', request_type=migration_service.BatchMigrateResourcesRequest): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_migrate_resources), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.batch_migrate_resources(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == migration_service.BatchMigrateResourcesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_batch_migrate_resources_async_from_dict(): + await test_batch_migrate_resources_async(request_type=dict) + + +def test_batch_migrate_resources_field_headers(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = migration_service.BatchMigrateResourcesRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_migrate_resources), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.batch_migrate_resources(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_batch_migrate_resources_field_headers_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = migration_service.BatchMigrateResourcesRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.batch_migrate_resources),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
+        await client.batch_migrate_resources(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_batch_migrate_resources_flattened():
+    client = MigrationServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.batch_migrate_resources),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.batch_migrate_resources(
+            parent='parent_value',
+            migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))],
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].migrate_resource_requests == [migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))]
+
+
+def test_batch_migrate_resources_flattened_error():
+    client = MigrationServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.batch_migrate_resources(
+            migration_service.BatchMigrateResourcesRequest(),
+            parent='parent_value',
+            migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))],
+        )
+
+
+@pytest.mark.asyncio
+async def test_batch_migrate_resources_flattened_async():
+    client = MigrationServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.batch_migrate_resources),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.batch_migrate_resources(
+            parent='parent_value',
+            migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))],
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].migrate_resource_requests == [migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))] + + +@pytest.mark.asyncio +async def test_batch_migrate_resources_flattened_error_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.batch_migrate_resources( + migration_service.BatchMigrateResourcesRequest(), + parent='parent_value', + migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))], + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.MigrationServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.MigrationServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MigrationServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.MigrationServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MigrationServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.MigrationServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = MigrationServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.MigrationServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.MigrationServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.MigrationServiceGrpcTransport, + transports.MigrationServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. 
+    client = MigrationServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    assert isinstance(
+        client.transport,
+        transports.MigrationServiceGrpcTransport,
+    )
+
+def test_migration_service_base_transport_error():
+    # Passing both a credentials object and credentials_file should raise an error
+    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
+        transport = transports.MigrationServiceTransport(
+            credentials=ga_credentials.AnonymousCredentials(),
+            credentials_file="credentials.json"
+        )
+
+
+def test_migration_service_base_transport():
+    # Instantiate the base transport.
+    with mock.patch('google.cloud.aiplatform_v1.services.migration_service.transports.MigrationServiceTransport.__init__') as Transport:
+        Transport.return_value = None
+        transport = transports.MigrationServiceTransport(
+            credentials=ga_credentials.AnonymousCredentials(),
+        )
+
+    # Every method on the transport should just blindly
+    # raise NotImplementedError.
+    methods = (
+        'search_migratable_resources',
+        'batch_migrate_resources',
+    )
+    for method in methods:
+        with pytest.raises(NotImplementedError):
+            getattr(transport, method)(request=object())
+
+    # Additionally, the LRO client (a property) should
+    # also raise NotImplementedError
+    with pytest.raises(NotImplementedError):
+        transport.operations_client
+
+
+@requires_google_auth_gte_1_25_0
+def test_migration_service_base_transport_with_credentials_file():
+    # Instantiate the base transport with a credentials file
+    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.migration_service.transports.MigrationServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.MigrationServiceTransport(
+            credentials_file="credentials.json",
+            quota_project_id="octopus",
+        )
+        load_creds.assert_called_once_with("credentials.json",
+            scopes=None,
+            default_scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id="octopus",
+        )
+
+
+@requires_google_auth_lt_1_25_0
+def test_migration_service_base_transport_with_credentials_file_old_google_auth():
+    # Instantiate the base transport with a credentials file
+    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.migration_service.transports.MigrationServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.MigrationServiceTransport(
+            credentials_file="credentials.json",
+            quota_project_id="octopus",
+        )
+        load_creds.assert_called_once_with("credentials.json",
+            scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id="octopus",
+        )
+
+
+def test_migration_service_base_transport_with_adc():
+    # Test the default credentials are used if credentials and credentials_file are None.
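+    # google.auth.default is patched below, so no real ADC lookup happens; the
+    # assertion only verifies that the transport falls back to ADC when neither
+    # credentials nor credentials_file is given.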
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.migration_service.transports.MigrationServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.MigrationServiceTransport()
+        adc.assert_called_once()
+
+
+@requires_google_auth_gte_1_25_0
+def test_migration_service_auth_adc():
+    # If no credentials are provided, we should use ADC credentials.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        MigrationServiceClient()
+        adc.assert_called_once_with(
+            scopes=None,
+            default_scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id=None,
+        )
+
+
+@requires_google_auth_lt_1_25_0
+def test_migration_service_auth_adc_old_google_auth():
+    # If no credentials are provided, we should use ADC credentials.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        MigrationServiceClient()
+        adc.assert_called_once_with(
+            scopes=('https://www.googleapis.com/auth/cloud-platform',),
+            quota_project_id=None,
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.MigrationServiceGrpcTransport,
+        transports.MigrationServiceGrpcAsyncIOTransport,
+    ],
+)
+@requires_google_auth_gte_1_25_0
+def test_migration_service_transport_auth_adc(transport_class):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport_class(quota_project_id="octopus", scopes=["1", "2"])
+        adc.assert_called_once_with(
+            scopes=["1", "2"],
+            default_scopes=('https://www.googleapis.com/auth/cloud-platform',),
+            quota_project_id="octopus",
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.MigrationServiceGrpcTransport,
+        transports.MigrationServiceGrpcAsyncIOTransport,
+    ],
+)
+@requires_google_auth_lt_1_25_0
+def test_migration_service_transport_auth_adc_old_google_auth(transport_class):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
+    with mock.patch.object(google.auth, "default", autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport_class(quota_project_id="octopus")
+        adc.assert_called_once_with(
+            scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id="octopus",
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class,grpc_helpers",
+    [
+        (transports.MigrationServiceGrpcTransport, grpc_helpers),
+        (transports.MigrationServiceGrpcAsyncIOTransport, grpc_helpers_async)
+    ],
+)
+def test_migration_service_transport_create_channel(transport_class, grpc_helpers):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
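+    # Both google.auth.default and grpc_helpers.create_channel are patched, so
+    # the test can assert that the channel is created against the default host
+    # with the expected scopes and unlimited gRPC message-size options.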
+    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
+        grpc_helpers, "create_channel", autospec=True
+    ) as create_channel:
+        creds = ga_credentials.AnonymousCredentials()
+        adc.return_value = (creds, None)
+        transport_class(
+            quota_project_id="octopus",
+            scopes=["1", "2"]
+        )
+
+        create_channel.assert_called_with(
+            "aiplatform.googleapis.com:443",
+            credentials=creds,
+            credentials_file=None,
+            quota_project_id="octopus",
+            default_scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            scopes=["1", "2"],
+            default_host="aiplatform.googleapis.com",
+            ssl_credentials=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+
+@pytest.mark.parametrize("transport_class", [transports.MigrationServiceGrpcTransport, transports.MigrationServiceGrpcAsyncIOTransport])
+def test_migration_service_grpc_transport_client_cert_source_for_mtls(
+    transport_class
+):
+    cred = ga_credentials.AnonymousCredentials()
+
+    # Check ssl_channel_credentials is used if provided.
+    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+        mock_ssl_channel_creds = mock.Mock()
+        transport_class(
+            host="squid.clam.whelk",
+            credentials=cred,
+            ssl_channel_credentials=mock_ssl_channel_creds
+        )
+        mock_create_channel.assert_called_once_with(
+            "squid.clam.whelk:443",
+            credentials=cred,
+            credentials_file=None,
+            scopes=None,
+            ssl_credentials=mock_ssl_channel_creds,
+            quota_project_id=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+    # is used.
+    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+            transport_class(
+                credentials=cred,
+                client_cert_source_for_mtls=client_cert_source_callback
+            )
+            expected_cert, expected_key = client_cert_source_callback()
+            mock_ssl_cred.assert_called_once_with(
+                certificate_chain=expected_cert,
+                private_key=expected_key
+            )
+
+
+def test_migration_service_host_no_port():
+    client = MigrationServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
+    )
+    assert client.transport._host == 'aiplatform.googleapis.com:443'
+
+
+def test_migration_service_host_with_port():
+    client = MigrationServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
+    )
+    assert client.transport._host == 'aiplatform.googleapis.com:8000'
+
+def test_migration_service_grpc_transport_channel():
+    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.MigrationServiceGrpcTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+def test_migration_service_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
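+    # As in the sync case above, passing an explicit channel means the
+    # transport skips channel creation entirely, so no SSL channel credentials
+    # are recorded.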
+    transport = transports.MigrationServiceGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.MigrationServiceGrpcTransport, transports.MigrationServiceGrpcAsyncIOTransport])
+def test_migration_service_transport_channel_mtls_with_client_cert_source(
+    transport_class
+):
+    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
+        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+            mock_ssl_cred = mock.Mock()
+            grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+
+            cred = ga_credentials.AnonymousCredentials()
+            with pytest.warns(DeprecationWarning):
+                with mock.patch.object(google.auth, 'default') as adc:
+                    adc.return_value = (cred, None)
+                    transport = transport_class(
+                        host="squid.clam.whelk",
+                        api_mtls_endpoint="mtls.squid.clam.whelk",
+                        client_cert_source=client_cert_source_callback,
+                    )
+                    adc.assert_called_once()
+
+            grpc_ssl_channel_cred.assert_called_once_with(
+                certificate_chain=b"cert bytes", private_key=b"key bytes"
+            )
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+            assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.MigrationServiceGrpcTransport, transports.MigrationServiceGrpcAsyncIOTransport])
+def test_migration_service_transport_channel_mtls_with_adc(
+    transport_class
+):
+    mock_ssl_cred = mock.Mock()
+    with mock.patch.multiple(
+        "google.auth.transport.grpc.SslCredentials",
+        __init__=mock.Mock(return_value=None),
+        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+    ):
+        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+            mock_cred = mock.Mock()
+
+            with pytest.warns(DeprecationWarning):
+                transport = transport_class(
+                    host="squid.clam.whelk",
+                    credentials=mock_cred,
+                    api_mtls_endpoint="mtls.squid.clam.whelk",
+                    client_cert_source=None,
+                )
+
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=mock_cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_migration_service_grpc_lro_client():
+    client = MigrationServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(
+        transport.operations_client,
+        operations_v1.OperationsClient,
+    )
+
+    # Ensure that subsequent calls to the property send the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_migration_service_grpc_lro_async_client():
+    client = MigrationServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc_asyncio',
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(
+        transport.operations_client,
+        operations_v1.OperationsAsyncClient,
+    )
+
+    # Ensure that subsequent calls to the property send the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_annotated_dataset_path():
+    project = "squid"
+    dataset = "clam"
+    annotated_dataset = "whelk"
+    expected = "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}".format(project=project, dataset=dataset, annotated_dataset=annotated_dataset, )
+    actual = MigrationServiceClient.annotated_dataset_path(project, dataset, annotated_dataset)
+    assert expected == actual
+
+
+def test_parse_annotated_dataset_path():
+    expected = {
+        "project": "octopus",
+        "dataset": "oyster",
+        "annotated_dataset": "nudibranch",
+    }
+    path = MigrationServiceClient.annotated_dataset_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = MigrationServiceClient.parse_annotated_dataset_path(path)
+    assert expected == actual
+
+# NOTE: test_dataset_path / test_parse_dataset_path are defined more than once
+# below because the migration service refers to datasets under several path
+# patterns; under pytest, each later definition shadows the earlier one,
+# matching the duplicated dataset_path helpers emitted in the generated client.
+def test_dataset_path():
+    project = "cuttlefish"
+    location = "mussel"
+    dataset = "winkle"
+    expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, )
+    actual = MigrationServiceClient.dataset_path(project, location, dataset)
+    assert expected == actual
+
+
+def test_parse_dataset_path():
+    expected = {
+        "project": "nautilus",
+        "location": "scallop",
+        "dataset": "abalone",
+    }
+    path = MigrationServiceClient.dataset_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = MigrationServiceClient.parse_dataset_path(path)
+    assert expected == actual
+
+def test_dataset_path():
+    project = "squid"
+    location = "clam"
+    dataset = "whelk"
+    expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, )
+    actual = MigrationServiceClient.dataset_path(project, location, dataset)
+    assert expected == actual
+
+
+def test_parse_dataset_path():
+    expected = {
+        "project": "octopus",
+        "location": "oyster",
+        "dataset": "nudibranch",
+    }
+    path = MigrationServiceClient.dataset_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = MigrationServiceClient.parse_dataset_path(path)
+    assert expected == actual
+
+def test_dataset_path():
+    project = "cuttlefish"
+    dataset = "mussel"
+    expected = "projects/{project}/datasets/{dataset}".format(project=project, dataset=dataset, )
+    actual = MigrationServiceClient.dataset_path(project, dataset)
+    assert expected == actual
+
+
+def test_parse_dataset_path():
+    expected = {
+        "project": "winkle",
+        "dataset": "nautilus",
+    }
+    path = MigrationServiceClient.dataset_path(**expected)
+
+    # Check that the path construction is reversible.
+ actual = MigrationServiceClient.parse_dataset_path(path) + assert expected == actual + +def test_model_path(): + project = "scallop" + location = "abalone" + model = "squid" + expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + actual = MigrationServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "clam", + "location": "whelk", + "model": "octopus", + } + path = MigrationServiceClient.model_path(**expected) + + # Check that the path construction is reversible. + actual = MigrationServiceClient.parse_model_path(path) + assert expected == actual + +def test_model_path(): + project = "oyster" + location = "nudibranch" + model = "cuttlefish" + expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + actual = MigrationServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "mussel", + "location": "winkle", + "model": "nautilus", + } + path = MigrationServiceClient.model_path(**expected) + + # Check that the path construction is reversible. + actual = MigrationServiceClient.parse_model_path(path) + assert expected == actual + +def test_version_path(): + project = "scallop" + model = "abalone" + version = "squid" + expected = "projects/{project}/models/{model}/versions/{version}".format(project=project, model=model, version=version, ) + actual = MigrationServiceClient.version_path(project, model, version) + assert expected == actual + + +def test_parse_version_path(): + expected = { + "project": "clam", + "model": "whelk", + "version": "octopus", + } + path = MigrationServiceClient.version_path(**expected) + + # Check that the path construction is reversible. + actual = MigrationServiceClient.parse_version_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "oyster" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = MigrationServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nudibranch", + } + path = MigrationServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = MigrationServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "cuttlefish" + expected = "folders/{folder}".format(folder=folder, ) + actual = MigrationServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "mussel", + } + path = MigrationServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = MigrationServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "winkle" + expected = "organizations/{organization}".format(organization=organization, ) + actual = MigrationServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nautilus", + } + path = MigrationServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MigrationServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "scallop" + expected = "projects/{project}".format(project=project, ) + actual = MigrationServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "abalone", + } + path = MigrationServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = MigrationServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "squid" + location = "clam" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = MigrationServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "whelk", + "location": "octopus", + } + path = MigrationServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = MigrationServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.MigrationServiceTransport, '_prep_wrapped_messages') as prep: + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.MigrationServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = MigrationServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_model_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_model_service.py new file mode 100644 index 0000000000..1db06d340b --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_model_service.py @@ -0,0 +1,4051 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1.services.model_service import ModelServiceAsyncClient +from google.cloud.aiplatform_v1.services.model_service import ModelServiceClient +from google.cloud.aiplatform_v1.services.model_service import pagers +from google.cloud.aiplatform_v1.services.model_service import transports +from google.cloud.aiplatform_v1.services.model_service.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.aiplatform_v1.types import deployed_model_ref +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import env_var +from google.cloud.aiplatform_v1.types import io +from google.cloud.aiplatform_v1.types import model +from google.cloud.aiplatform_v1.types import model as gca_model +from google.cloud.aiplatform_v1.types import model_evaluation +from google.cloud.aiplatform_v1.types import model_evaluation_slice +from google.cloud.aiplatform_v1.types import model_service +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
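+# The mapping itself is exercised in test__get_default_mtls_endpoint below:
+# "example.googleapis.com" is expected to become "example.mtls.googleapis.com",
+# which is only distinguishable from the default endpoint when that default is
+# not localhost.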
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ModelServiceClient._get_default_mtls_endpoint(None) is None + assert ModelServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert ModelServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert ModelServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert ModelServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert ModelServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + ModelServiceClient, + ModelServiceAsyncClient, +]) +def test_model_service_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +@pytest.mark.parametrize("client_class", [ + ModelServiceClient, + ModelServiceAsyncClient, +]) +def test_model_service_client_service_account_always_use_jwt(client_class): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.ModelServiceGrpcTransport, "grpc"), + (transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_model_service_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + ModelServiceClient, + ModelServiceAsyncClient, +]) +def test_model_service_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +def test_model_service_client_get_transport_class(): + transport = ModelServiceClient.get_transport_class() + available_transports = [ + transports.ModelServiceGrpcTransport, + ] + assert transport in available_transports + + transport = 
ModelServiceClient.get_transport_class("grpc") + assert transport == transports.ModelServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc"), + (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(ModelServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceClient)) +@mock.patch.object(ModelServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceAsyncClient)) +def test_model_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(ModelServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(ModelServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
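+    # Only the literal strings "true" and "false" are accepted for this
+    # variable, so any other value should surface as a ValueError before a
+    # transport is ever constructed.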
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc", "true"), + (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc", "false"), + (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(ModelServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceClient)) +@mock.patch.object(ModelServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_model_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
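+    # Unlike the previous case, the certificate here is not passed through
+    # client_options; it is discovered via google.auth's default mTLS hooks,
+    # which are mocked out below.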
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc"), + (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_model_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc"), + (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_model_service_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
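+    # The path is forwarded verbatim as credentials_file; the transport is
+    # expected to receive the string and do the loading itself.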
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_model_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1.services.model_service.transports.ModelServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = ModelServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_upload_model(transport: str = 'grpc', request_type=model_service.UploadModelRequest): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upload_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.upload_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.UploadModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_upload_model_from_dict(): + test_upload_model(request_type=dict) + + +def test_upload_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upload_model), + '__call__') as call: + client.upload_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.UploadModelRequest() + + +@pytest.mark.asyncio +async def test_upload_model_async(transport: str = 'grpc_asyncio', request_type=model_service.UploadModelRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upload_model), + '__call__') as call: + # Designate an appropriate return value for the call. 
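+        # FakeUnaryUnaryCall wraps the message in an awaitable, mimicking the
+        # call object a real grpc.aio stub would return.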
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.upload_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.UploadModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_upload_model_async_from_dict(): + await test_upload_model_async(request_type=dict) + + +def test_upload_model_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.UploadModelRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upload_model), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.upload_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_upload_model_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.UploadModelRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upload_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.upload_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_upload_model_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upload_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.upload_model( + parent='parent_value', + model=gca_model.Model(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
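+        # The client is expected to have merged the flattened keyword
+        # arguments into a single UploadModelRequest before invoking the stub.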
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].model == gca_model.Model(name='name_value') + + +def test_upload_model_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.upload_model( + model_service.UploadModelRequest(), + parent='parent_value', + model=gca_model.Model(name='name_value'), + ) + + +@pytest.mark.asyncio +async def test_upload_model_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upload_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.upload_model( + parent='parent_value', + model=gca_model.Model(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].model == gca_model.Model(name='name_value') + + +@pytest.mark.asyncio +async def test_upload_model_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.upload_model( + model_service.UploadModelRequest(), + parent='parent_value', + model=gca_model.Model(name='name_value'), + ) + + +def test_get_model(transport: str = 'grpc', request_type=model_service.GetModelRequest): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model.Model( + name='name_value', + display_name='display_name_value', + description='description_value', + metadata_schema_uri='metadata_schema_uri_value', + training_pipeline='training_pipeline_value', + artifact_uri='artifact_uri_value', + supported_deployment_resources_types=[model.Model.DeploymentResourcesType.DEDICATED_RESOURCES], + supported_input_storage_formats=['supported_input_storage_formats_value'], + supported_output_storage_formats=['supported_output_storage_formats_value'], + etag='etag_value', + ) + response = client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelRequest() + + # Establish that the response is the type that we expect. 
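+    # Each field set on the mocked Model above should round-trip to the
+    # response unchanged.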
+    assert isinstance(response, model.Model)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.description == 'description_value'
+    assert response.metadata_schema_uri == 'metadata_schema_uri_value'
+    assert response.training_pipeline == 'training_pipeline_value'
+    assert response.artifact_uri == 'artifact_uri_value'
+    assert response.supported_deployment_resources_types == [model.Model.DeploymentResourcesType.DEDICATED_RESOURCES]
+    assert response.supported_input_storage_formats == ['supported_input_storage_formats_value']
+    assert response.supported_output_storage_formats == ['supported_output_storage_formats_value']
+    assert response.etag == 'etag_value'
+
+
+def test_get_model_from_dict():
+    test_get_model(request_type=dict)
+
+
+def test_get_model_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model),
+            '__call__') as call:
+        client.get_model()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.GetModelRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_model_async(transport: str = 'grpc_asyncio', request_type=model_service.GetModelRequest):
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model(
+            name='name_value',
+            display_name='display_name_value',
+            description='description_value',
+            metadata_schema_uri='metadata_schema_uri_value',
+            training_pipeline='training_pipeline_value',
+            artifact_uri='artifact_uri_value',
+            supported_deployment_resources_types=[model.Model.DeploymentResourcesType.DEDICATED_RESOURCES],
+            supported_input_storage_formats=['supported_input_storage_formats_value'],
+            supported_output_storage_formats=['supported_output_storage_formats_value'],
+            etag='etag_value',
+        ))
+        response = await client.get_model(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.GetModelRequest()
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, model.Model) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.training_pipeline == 'training_pipeline_value' + assert response.artifact_uri == 'artifact_uri_value' + assert response.supported_deployment_resources_types == [model.Model.DeploymentResourcesType.DEDICATED_RESOURCES] + assert response.supported_input_storage_formats == ['supported_input_storage_formats_value'] + assert response.supported_output_storage_formats == ['supported_output_storage_formats_value'] + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_get_model_async_from_dict(): + await test_get_model_async(request_type=dict) + + +def test_get_model_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.GetModelRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + call.return_value = model.Model() + client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_model_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.GetModelRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) + await client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_get_model_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model.Model() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_model( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
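+        # The flattened "name" keyword should have landed on request.name.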
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_get_model_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_model( + model_service.GetModelRequest(), + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_get_model_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model.Model() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_model( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_get_model_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_model( + model_service.GetModelRequest(), + name='name_value', + ) + + +def test_list_models(transport: str = 'grpc', request_type=model_service.ListModelsRequest): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_models), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_models(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_models_from_dict(): + test_list_models(request_type=dict) + + +def test_list_models_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.list_models),
+            '__call__') as call:
+        client.list_models()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.ListModelsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_models_async(transport: str = 'grpc_asyncio', request_type=model_service.ListModelsRequest):
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_models),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_models(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.ListModelsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListModelsAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_models_async_from_dict():
+    await test_list_models_async(request_type=dict)
+
+
+def test_list_models_field_headers():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = model_service.ListModelsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_models),
+            '__call__') as call:
+        call.return_value = model_service.ListModelsResponse()
+        client.list_models(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_models_field_headers_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = model_service.ListModelsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_models),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelsResponse())
+        await client.list_models(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
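+    # The routing header travels in the call metadata as an
+    # ('x-goog-request-params', 'parent=parent/value') tuple.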
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_list_models_flattened():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_models),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = model_service.ListModelsResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_models(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+def test_list_models_flattened_error():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_models(
+            model_service.ListModelsRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_models_flattened_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_models),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = model_service.ListModelsResponse()
+
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_models(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_models_flattened_error_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_models(
+            model_service.ListModelsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_models_pager():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_models),
+            '__call__') as call:
+        # Set the response to a series of pages.
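+        # Four pages of 3, 0, 1 and 2 models; the trailing RuntimeError makes
+        # the test fail loudly if the pager asks for a page past the last
+        # next_page_token.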
+        call.side_effect = (
+            model_service.ListModelsResponse(
+                models=[
+                    model.Model(),
+                    model.Model(),
+                    model.Model(),
+                ],
+                next_page_token='abc',
+            ),
+            model_service.ListModelsResponse(
+                models=[],
+                next_page_token='def',
+            ),
+            model_service.ListModelsResponse(
+                models=[
+                    model.Model(),
+                ],
+                next_page_token='ghi',
+            ),
+            model_service.ListModelsResponse(
+                models=[
+                    model.Model(),
+                    model.Model(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_models(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, model.Model)
+                   for i in results)
+
+def test_list_models_pages():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_models),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            model_service.ListModelsResponse(
+                models=[
+                    model.Model(),
+                    model.Model(),
+                    model.Model(),
+                ],
+                next_page_token='abc',
+            ),
+            model_service.ListModelsResponse(
+                models=[],
+                next_page_token='def',
+            ),
+            model_service.ListModelsResponse(
+                models=[
+                    model.Model(),
+                ],
+                next_page_token='ghi',
+            ),
+            model_service.ListModelsResponse(
+                models=[
+                    model.Model(),
+                    model.Model(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_models(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_models_async_pager():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_models),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            model_service.ListModelsResponse(
+                models=[
+                    model.Model(),
+                    model.Model(),
+                    model.Model(),
+                ],
+                next_page_token='abc',
+            ),
+            model_service.ListModelsResponse(
+                models=[],
+                next_page_token='def',
+            ),
+            model_service.ListModelsResponse(
+                models=[
+                    model.Model(),
+                ],
+                next_page_token='ghi',
+            ),
+            model_service.ListModelsResponse(
+                models=[
+                    model.Model(),
+                    model.Model(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_models(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, model.Model)
+                   for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_models_async_pages():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_models),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
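+        # Same four-page sequence as the sync tests above, consumed here via
+        # the awaitable .pages iterator.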
+ call.side_effect = ( + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + model.Model(), + ], + next_page_token='abc', + ), + model_service.ListModelsResponse( + models=[], + next_page_token='def', + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + ], + next_page_token='ghi', + ), + model_service.ListModelsResponse( + models=[ + model.Model(), + model.Model(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_models(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +def test_update_model(transport: str = 'grpc', request_type=model_service.UpdateModelRequest): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model.Model( + name='name_value', + display_name='display_name_value', + description='description_value', + metadata_schema_uri='metadata_schema_uri_value', + training_pipeline='training_pipeline_value', + artifact_uri='artifact_uri_value', + supported_deployment_resources_types=[gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES], + supported_input_storage_formats=['supported_input_storage_formats_value'], + supported_output_storage_formats=['supported_output_storage_formats_value'], + etag='etag_value', + ) + response = client.update_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.UpdateModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_model.Model) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.training_pipeline == 'training_pipeline_value' + assert response.artifact_uri == 'artifact_uri_value' + assert response.supported_deployment_resources_types == [gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES] + assert response.supported_input_storage_formats == ['supported_input_storage_formats_value'] + assert response.supported_output_storage_formats == ['supported_output_storage_formats_value'] + assert response.etag == 'etag_value' + + +def test_update_model_from_dict(): + test_update_model(request_type=dict) + + +def test_update_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.update_model),
+            '__call__') as call:
+        client.update_model()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.UpdateModelRequest()
+
+
+@pytest.mark.asyncio
+async def test_update_model_async(transport: str = 'grpc_asyncio', request_type=model_service.UpdateModelRequest):
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_model),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model(
+            name='name_value',
+            display_name='display_name_value',
+            description='description_value',
+            metadata_schema_uri='metadata_schema_uri_value',
+            training_pipeline='training_pipeline_value',
+            artifact_uri='artifact_uri_value',
+            supported_deployment_resources_types=[gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES],
+            supported_input_storage_formats=['supported_input_storage_formats_value'],
+            supported_output_storage_formats=['supported_output_storage_formats_value'],
+            etag='etag_value',
+        ))
+        response = await client.update_model(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.UpdateModelRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, gca_model.Model)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.description == 'description_value'
+    assert response.metadata_schema_uri == 'metadata_schema_uri_value'
+    assert response.training_pipeline == 'training_pipeline_value'
+    assert response.artifact_uri == 'artifact_uri_value'
+    assert response.supported_deployment_resources_types == [gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES]
+    assert response.supported_input_storage_formats == ['supported_input_storage_formats_value']
+    assert response.supported_output_storage_formats == ['supported_output_storage_formats_value']
+    assert response.etag == 'etag_value'
+
+
+@pytest.mark.asyncio
+async def test_update_model_async_from_dict():
+    await test_update_model_async(request_type=dict)
+
+
+def test_update_model_field_headers():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = model_service.UpdateModelRequest()
+
+    request.model.name = 'model.name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_model),
+            '__call__') as call:
+        call.return_value = gca_model.Model()
+        client.update_model(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'model.name=model.name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_model_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.UpdateModelRequest() + + request.model.name = 'model.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model()) + await client.update_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'model.name=model.name/value', + ) in kw['metadata'] + + +def test_update_model_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model.Model() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_model( + model=gca_model.Model(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].model == gca_model.Model(name='name_value') + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) + + +def test_update_model_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_model( + model_service.UpdateModelRequest(), + model=gca_model.Model(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.asyncio +async def test_update_model_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_model.Model() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_model( + model=gca_model.Model(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].model == gca_model.Model(name='name_value') + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) + + +@pytest.mark.asyncio +async def test_update_model_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_model( + model_service.UpdateModelRequest(), + model=gca_model.Model(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +def test_delete_model(transport: str = 'grpc', request_type=model_service.DeleteModelRequest): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.DeleteModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_model_from_dict(): + test_delete_model(request_type=dict) + + +def test_delete_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + client.delete_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.DeleteModelRequest() + + +@pytest.mark.asyncio +async def test_delete_model_async(transport: str = 'grpc_asyncio', request_type=model_service.DeleteModelRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.DeleteModelRequest() + + # Establish that the response is the type that we expect. 
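+    # DeleteModel is a long-running operation, so the Operation proto returned
+    # by the stub is surfaced to the caller as an operation future.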
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_model_async_from_dict(): + await test_delete_model_async(request_type=dict) + + +def test_delete_model_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.DeleteModelRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_model_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.DeleteModelRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_delete_model_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_model( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_delete_model_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_model( + model_service.DeleteModelRequest(), + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_delete_model_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.delete_model),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_model(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_model_flattened_error_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.delete_model(
+            model_service.DeleteModelRequest(),
+            name='name_value',
+        )
+
+
+def test_export_model(transport: str = 'grpc', request_type=model_service.ExportModelRequest):
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.export_model),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.export_model(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.ExportModelRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+def test_export_model_from_dict():
+    test_export_model(request_type=dict)
+
+
+def test_export_model_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.export_model),
+            '__call__') as call:
+        client.export_model()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.ExportModelRequest()
+
+
+@pytest.mark.asyncio
+async def test_export_model_async(transport: str = 'grpc_asyncio', request_type=model_service.ExportModelRequest):
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.export_model),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
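+        # The async stub must return an awaitable; FakeUnaryUnaryCall wraps
+        # the response so that awaiting it yields the Operation proto, much
+        # like a real unary-unary gRPC call would.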
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.export_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ExportModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_export_model_async_from_dict(): + await test_export_model_async(request_type=dict) + + +def test_export_model_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.ExportModelRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.export_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_export_model_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.ExportModelRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.export_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_export_model_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.export_model( + name='name_value', + output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
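+        # Flattened keyword arguments are folded into a single request
+        # message, so args[0] carries the merged request fields.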
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+        assert args[0].output_config == model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value')
+
+
+def test_export_model_flattened_error():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.export_model(
+            model_service.ExportModelRequest(),
+            name='name_value',
+            output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'),
+        )
+
+
+@pytest.mark.asyncio
+async def test_export_model_flattened_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.export_model),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.export_model(
+            name='name_value',
+            output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+        assert args[0].output_config == model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value')
+
+
+@pytest.mark.asyncio
+async def test_export_model_flattened_error_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.export_model(
+            model_service.ExportModelRequest(),
+            name='name_value',
+            output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'),
+        )
+
+
+def test_get_model_evaluation(transport: str = 'grpc', request_type=model_service.GetModelEvaluationRequest):
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model_evaluation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = model_evaluation.ModelEvaluation(
+            name='name_value',
+            metrics_schema_uri='metrics_schema_uri_value',
+            slice_dimensions=['slice_dimensions_value'],
+        )
+        response = client.get_model_evaluation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.GetModelEvaluationRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, model_evaluation.ModelEvaluation)
+    assert response.name == 'name_value'
+    assert response.metrics_schema_uri == 'metrics_schema_uri_value'
+    assert response.slice_dimensions == ['slice_dimensions_value']
+
+
+def test_get_model_evaluation_from_dict():
+    test_get_model_evaluation(request_type=dict)
+
+
+def test_get_model_evaluation_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model_evaluation),
+            '__call__') as call:
+        client.get_model_evaluation()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.GetModelEvaluationRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_model_evaluation_async(transport: str = 'grpc_asyncio', request_type=model_service.GetModelEvaluationRequest):
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model_evaluation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation(
+            name='name_value',
+            metrics_schema_uri='metrics_schema_uri_value',
+            slice_dimensions=['slice_dimensions_value'],
+        ))
+        response = await client.get_model_evaluation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.GetModelEvaluationRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, model_evaluation.ModelEvaluation)
+    assert response.name == 'name_value'
+    assert response.metrics_schema_uri == 'metrics_schema_uri_value'
+    assert response.slice_dimensions == ['slice_dimensions_value']
+
+
+@pytest.mark.asyncio
+async def test_get_model_evaluation_async_from_dict():
+    await test_get_model_evaluation_async(request_type=dict)
+
+
+def test_get_model_evaluation_field_headers():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = model_service.GetModelEvaluationRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model_evaluation),
+            '__call__') as call:
+        call.return_value = model_evaluation.ModelEvaluation()
+        client.get_model_evaluation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_model_evaluation_field_headers_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = model_service.GetModelEvaluationRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model_evaluation),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation())
+        await client.get_model_evaluation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_get_model_evaluation_flattened():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model_evaluation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = model_evaluation.ModelEvaluation()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_model_evaluation(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_get_model_evaluation_flattened_error():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_model_evaluation(
+            model_service.GetModelEvaluationRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_model_evaluation_flattened_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model_evaluation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_model_evaluation(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_model_evaluation_flattened_error_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_model_evaluation(
+            model_service.GetModelEvaluationRequest(),
+            name='name_value',
+        )
+
+
+def test_list_model_evaluations(transport: str = 'grpc', request_type=model_service.ListModelEvaluationsRequest):
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluations),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = model_service.ListModelEvaluationsResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_model_evaluations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.ListModelEvaluationsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListModelEvaluationsPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_model_evaluations_from_dict():
+    test_list_model_evaluations(request_type=dict)
+
+
+def test_list_model_evaluations_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluations),
+            '__call__') as call:
+        client.list_model_evaluations()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.ListModelEvaluationsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_model_evaluations_async(transport: str = 'grpc_asyncio', request_type=model_service.ListModelEvaluationsRequest):
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluations),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_model_evaluations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.ListModelEvaluationsRequest()
+
+    # Establish that the response is the type that we expect.
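+    # The async client wraps paginated responses in an AsyncPager; attribute
+    # access such as next_page_token is delegated to the underlying response.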
+ assert isinstance(response, pagers.ListModelEvaluationsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_model_evaluations_async_from_dict(): + await test_list_model_evaluations_async(request_type=dict) + + +def test_list_model_evaluations_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.ListModelEvaluationsRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + call.return_value = model_service.ListModelEvaluationsResponse() + client.list_model_evaluations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_model_evaluations_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.ListModelEvaluationsRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationsResponse()) + await client.list_model_evaluations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_list_model_evaluations_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelEvaluationsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_model_evaluations( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +def test_list_model_evaluations_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.list_model_evaluations(
+            model_service.ListModelEvaluationsRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_model_evaluations_flattened_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluations),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_model_evaluations(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_model_evaluations_flattened_error_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_model_evaluations(
+            model_service.ListModelEvaluationsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_model_evaluations_pager():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluations),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            model_service.ListModelEvaluationsResponse(
+                model_evaluations=[
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                ],
+                next_page_token='abc',
+            ),
+            model_service.ListModelEvaluationsResponse(
+                model_evaluations=[],
+                next_page_token='def',
+            ),
+            model_service.ListModelEvaluationsResponse(
+                model_evaluations=[
+                    model_evaluation.ModelEvaluation(),
+                ],
+                next_page_token='ghi',
+            ),
+            model_service.ListModelEvaluationsResponse(
+                model_evaluations=[
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_model_evaluations(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, model_evaluation.ModelEvaluation)
+                   for i in results)
+
+def test_list_model_evaluations_pages():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluations),
+            '__call__') as call:
+        # Set the response to a series of pages.
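+        # Each element of side_effect is served as one page; the trailing
+        # RuntimeError is a sentinel that would surface if the pager ever
+        # requested more pages than the test provides.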
+        call.side_effect = (
+            model_service.ListModelEvaluationsResponse(
+                model_evaluations=[
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                ],
+                next_page_token='abc',
+            ),
+            model_service.ListModelEvaluationsResponse(
+                model_evaluations=[],
+                next_page_token='def',
+            ),
+            model_service.ListModelEvaluationsResponse(
+                model_evaluations=[
+                    model_evaluation.ModelEvaluation(),
+                ],
+                next_page_token='ghi',
+            ),
+            model_service.ListModelEvaluationsResponse(
+                model_evaluations=[
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_model_evaluations(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_model_evaluations_async_pager():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluations),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            model_service.ListModelEvaluationsResponse(
+                model_evaluations=[
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                ],
+                next_page_token='abc',
+            ),
+            model_service.ListModelEvaluationsResponse(
+                model_evaluations=[],
+                next_page_token='def',
+            ),
+            model_service.ListModelEvaluationsResponse(
+                model_evaluations=[
+                    model_evaluation.ModelEvaluation(),
+                ],
+                next_page_token='ghi',
+            ),
+            model_service.ListModelEvaluationsResponse(
+                model_evaluations=[
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_model_evaluations(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, model_evaluation.ModelEvaluation)
+                   for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_model_evaluations_async_pages():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluations),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+ call.side_effect = ( + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + next_page_token='abc', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[], + next_page_token='def', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + ], + next_page_token='ghi', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_model_evaluations(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +def test_get_model_evaluation_slice(transport: str = 'grpc', request_type=model_service.GetModelEvaluationSliceRequest): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation_slice), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_evaluation_slice.ModelEvaluationSlice( + name='name_value', + metrics_schema_uri='metrics_schema_uri_value', + ) + response = client.get_model_evaluation_slice(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelEvaluationSliceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, model_evaluation_slice.ModelEvaluationSlice) + assert response.name == 'name_value' + assert response.metrics_schema_uri == 'metrics_schema_uri_value' + + +def test_get_model_evaluation_slice_from_dict(): + test_get_model_evaluation_slice(request_type=dict) + + +def test_get_model_evaluation_slice_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation_slice), + '__call__') as call: + client.get_model_evaluation_slice() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelEvaluationSliceRequest() + + +@pytest.mark.asyncio +async def test_get_model_evaluation_slice_async(transport: str = 'grpc_asyncio', request_type=model_service.GetModelEvaluationSliceRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.get_model_evaluation_slice),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation_slice.ModelEvaluationSlice(
+            name='name_value',
+            metrics_schema_uri='metrics_schema_uri_value',
+        ))
+        response = await client.get_model_evaluation_slice(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.GetModelEvaluationSliceRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, model_evaluation_slice.ModelEvaluationSlice)
+    assert response.name == 'name_value'
+    assert response.metrics_schema_uri == 'metrics_schema_uri_value'
+
+
+@pytest.mark.asyncio
+async def test_get_model_evaluation_slice_async_from_dict():
+    await test_get_model_evaluation_slice_async(request_type=dict)
+
+
+def test_get_model_evaluation_slice_field_headers():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = model_service.GetModelEvaluationSliceRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model_evaluation_slice),
+            '__call__') as call:
+        call.return_value = model_evaluation_slice.ModelEvaluationSlice()
+        client.get_model_evaluation_slice(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_model_evaluation_slice_field_headers_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = model_service.GetModelEvaluationSliceRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model_evaluation_slice),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation_slice.ModelEvaluationSlice())
+        await client.get_model_evaluation_slice(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_get_model_evaluation_slice_flattened():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model_evaluation_slice),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = model_evaluation_slice.ModelEvaluationSlice()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_model_evaluation_slice(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_get_model_evaluation_slice_flattened_error():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_model_evaluation_slice(
+            model_service.GetModelEvaluationSliceRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_model_evaluation_slice_flattened_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model_evaluation_slice),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation_slice.ModelEvaluationSlice())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_model_evaluation_slice(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_model_evaluation_slice_flattened_error_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_model_evaluation_slice(
+            model_service.GetModelEvaluationSliceRequest(),
+            name='name_value',
+        )
+
+
+def test_list_model_evaluation_slices(transport: str = 'grpc', request_type=model_service.ListModelEvaluationSlicesRequest):
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluation_slices),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = model_service.ListModelEvaluationSlicesResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_model_evaluation_slices(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.ListModelEvaluationSlicesRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListModelEvaluationSlicesPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_model_evaluation_slices_from_dict():
+    test_list_model_evaluation_slices(request_type=dict)
+
+
+def test_list_model_evaluation_slices_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluation_slices),
+            '__call__') as call:
+        client.list_model_evaluation_slices()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.ListModelEvaluationSlicesRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_model_evaluation_slices_async(transport: str = 'grpc_asyncio', request_type=model_service.ListModelEvaluationSlicesRequest):
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluation_slices),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationSlicesResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_model_evaluation_slices(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.ListModelEvaluationSlicesRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListModelEvaluationSlicesAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_model_evaluation_slices_async_from_dict():
+    await test_list_model_evaluation_slices_async(request_type=dict)
+
+
+def test_list_model_evaluation_slices_field_headers():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = model_service.ListModelEvaluationSlicesRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluation_slices),
+            '__call__') as call:
+        call.return_value = model_service.ListModelEvaluationSlicesResponse()
+        client.list_model_evaluation_slices(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_model_evaluation_slices_field_headers_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = model_service.ListModelEvaluationSlicesRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluation_slices),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationSlicesResponse())
+        await client.list_model_evaluation_slices(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_list_model_evaluation_slices_flattened():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluation_slices),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = model_service.ListModelEvaluationSlicesResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_model_evaluation_slices(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+def test_list_model_evaluation_slices_flattened_error():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_model_evaluation_slices(
+            model_service.ListModelEvaluationSlicesRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_model_evaluation_slices_flattened_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluation_slices),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationSlicesResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_model_evaluation_slices(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_model_evaluation_slices_flattened_error_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_model_evaluation_slices(
+            model_service.ListModelEvaluationSlicesRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_model_evaluation_slices_pager():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluation_slices),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            model_service.ListModelEvaluationSlicesResponse(
+                model_evaluation_slices=[
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                ],
+                next_page_token='abc',
+            ),
+            model_service.ListModelEvaluationSlicesResponse(
+                model_evaluation_slices=[],
+                next_page_token='def',
+            ),
+            model_service.ListModelEvaluationSlicesResponse(
+                model_evaluation_slices=[
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                ],
+                next_page_token='ghi',
+            ),
+            model_service.ListModelEvaluationSlicesResponse(
+                model_evaluation_slices=[
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_model_evaluation_slices(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, model_evaluation_slice.ModelEvaluationSlice)
+                   for i in results)
+
+def test_list_model_evaluation_slices_pages():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluation_slices),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            model_service.ListModelEvaluationSlicesResponse(
+                model_evaluation_slices=[
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                ],
+                next_page_token='abc',
+            ),
+            model_service.ListModelEvaluationSlicesResponse(
+                model_evaluation_slices=[],
+                next_page_token='def',
+            ),
+            model_service.ListModelEvaluationSlicesResponse(
+                model_evaluation_slices=[
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                ],
+                next_page_token='ghi',
+            ),
+            model_service.ListModelEvaluationSlicesResponse(
+                model_evaluation_slices=[
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_model_evaluation_slices(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_model_evaluation_slices_async_pager():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluation_slices),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            model_service.ListModelEvaluationSlicesResponse(
+                model_evaluation_slices=[
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                ],
+                next_page_token='abc',
+            ),
+            model_service.ListModelEvaluationSlicesResponse(
+                model_evaluation_slices=[],
+                next_page_token='def',
+            ),
+            model_service.ListModelEvaluationSlicesResponse(
+                model_evaluation_slices=[
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                ],
+                next_page_token='ghi',
+            ),
+            model_service.ListModelEvaluationSlicesResponse(
+                model_evaluation_slices=[
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_model_evaluation_slices(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, model_evaluation_slice.ModelEvaluationSlice)
+                   for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_model_evaluation_slices_async_pages():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluation_slices),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            model_service.ListModelEvaluationSlicesResponse(
+                model_evaluation_slices=[
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                ],
+                next_page_token='abc',
+            ),
+            model_service.ListModelEvaluationSlicesResponse(
+                model_evaluation_slices=[],
+                next_page_token='def',
+            ),
+            model_service.ListModelEvaluationSlicesResponse(
+                model_evaluation_slices=[
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                ],
+                next_page_token='ghi',
+            ),
+            model_service.ListModelEvaluationSlicesResponse(
+                model_evaluation_slices=[
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                    model_evaluation_slice.ModelEvaluationSlice(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_model_evaluation_slices(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+
+def test_credentials_transport_error():
+    # It is an error to provide credentials and a transport instance.
+    transport = transports.ModelServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = ModelServiceClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport,
+        )
+
+    # It is an error to provide a credentials file and a transport instance.
+    transport = transports.ModelServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = ModelServiceClient(
+            client_options={"credentials_file": "credentials.json"},
+            transport=transport,
+        )
+
+    # It is an error to provide scopes and a transport instance.
+ transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ModelServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = ModelServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.ModelServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.ModelServiceGrpcTransport, + transports.ModelServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.ModelServiceGrpcTransport, + ) + +def test_model_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.ModelServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_model_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1.services.model_service.transports.ModelServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.ModelServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
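The ADC tests above pin down the base transport's credential-resolution order. A simplified sketch of that order follows; it paraphrases the behavior under test and is not the library's actual code:

```python
import google.auth
from google.api_core import exceptions as core_exceptions


def resolve_credentials(credentials=None, credentials_file=None, scopes=None):
    # Explicit credentials and a credentials file at once is an error,
    # matching test_model_service_base_transport_error above.
    if credentials and credentials_file:
        raise core_exceptions.DuplicateCredentialArgs(
            "'credentials' and 'credentials_file' are mutually exclusive"
        )
    if credentials_file:
        credentials, _ = google.auth.load_credentials_from_file(
            credentials_file, scopes=scopes
        )
    elif credentials is None:
        # Fall back to Application Default Credentials, as test_transport_adc
        # and test_model_service_base_transport_with_adc verify.
        credentials, _ = google.auth.default(scopes=scopes)
    return credentials
```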
+ methods = ( + 'upload_model', + 'get_model', + 'list_models', + 'update_model', + 'delete_model', + 'export_model', + 'get_model_evaluation', + 'list_model_evaluations', + 'get_model_evaluation_slice', + 'list_model_evaluation_slices', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +@requires_google_auth_gte_1_25_0 +def test_model_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.model_service.transports.ModelServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ModelServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_model_service_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.model_service.transports.ModelServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ModelServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + ), + quota_project_id="octopus", + ) + + +def test_model_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.model_service.transports.ModelServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ModelServiceTransport() + adc.assert_called_once() + + +@requires_google_auth_gte_1_25_0 +def test_model_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ModelServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_model_service_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ModelServiceClient() + adc.assert_called_once_with( + scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ModelServiceGrpcTransport, + transports.ModelServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_model_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ModelServiceGrpcTransport, + transports.ModelServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_model_service_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") + adc.assert_called_once_with(scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.ModelServiceGrpcTransport, grpc_helpers), + (transports.ModelServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_model_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport]) +def test_model_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
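The paired `gte_1_25_0` / `lt_1_25_0` test variants exist because google-auth 1.25.0 split scope handling into user-supplied `scopes` and service-defined `default_scopes`; older releases accept only `scopes`. A sketch of the version gate these assertions imply; the real check lives in the generated `transports/base.py` as `_GOOGLE_AUTH_VERSION`, and this standalone version is an approximation:

```python
import google.auth
import packaging.version

_GOOGLE_AUTH_VERSION = getattr(google.auth, "__version__", "1.0.0")


def adc_scope_kwargs(scopes, default_scopes):
    # Newer google-auth forwards both keyword arguments; older releases only
    # understand `scopes`, so the service defaults stand in when the caller
    # supplied none.
    if packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"):
        return {"scopes": scopes, "default_scopes": default_scopes}
    return {"scopes": scopes or default_scopes}
```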
+    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+        mock_ssl_channel_creds = mock.Mock()
+        transport_class(
+            host="squid.clam.whelk",
+            credentials=cred,
+            ssl_channel_credentials=mock_ssl_channel_creds
+        )
+        mock_create_channel.assert_called_once_with(
+            "squid.clam.whelk:443",
+            credentials=cred,
+            credentials_file=None,
+            scopes=None,
+            ssl_credentials=mock_ssl_channel_creds,
+            quota_project_id=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+    # is used.
+    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+            transport_class(
+                credentials=cred,
+                client_cert_source_for_mtls=client_cert_source_callback
+            )
+            expected_cert, expected_key = client_cert_source_callback()
+            mock_ssl_cred.assert_called_once_with(
+                certificate_chain=expected_cert,
+                private_key=expected_key
+            )
+
+
+def test_model_service_host_no_port():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
+    )
+    assert client.transport._host == 'aiplatform.googleapis.com:443'
+
+
+def test_model_service_host_with_port():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
+    )
+    assert client.transport._host == 'aiplatform.googleapis.com:8000'
+
+def test_model_service_grpc_transport_channel():
+    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.ModelServiceGrpcTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+def test_model_service_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.ModelServiceGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport]) +def test_model_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport]) +def test_model_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_model_service_grpc_lro_client(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_model_service_grpc_lro_async_client(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. 
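The `client_cert_source_for_mtls` and mTLS channel tests above assert that the callback's certificate/key pair is fed straight into `grpc.ssl_channel_credentials`. A minimal sketch of that wiring, with a placeholder callback standing in for real certificate material:

```python
import grpc


def client_cert_source():
    # Placeholder: a real callback would return PEM-encoded client
    # certificate and private key bytes.
    return b"cert bytes", b"key bytes"


certificate_chain, private_key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
    certificate_chain=certificate_chain,
    private_key=private_key,
)
# The transport then passes ssl_credentials into create_channel(), which is
# what mock_ssl_cred.assert_called_once_with(...) above verifies.
```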
+ assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_endpoint_path(): + project = "squid" + location = "clam" + endpoint = "whelk" + expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + actual = ModelServiceClient.endpoint_path(project, location, endpoint) + assert expected == actual + + +def test_parse_endpoint_path(): + expected = { + "project": "octopus", + "location": "oyster", + "endpoint": "nudibranch", + } + path = ModelServiceClient.endpoint_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_endpoint_path(path) + assert expected == actual + +def test_model_path(): + project = "cuttlefish" + location = "mussel" + model = "winkle" + expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + actual = ModelServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "model": "abalone", + } + path = ModelServiceClient.model_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_model_path(path) + assert expected == actual + +def test_model_evaluation_path(): + project = "squid" + location = "clam" + model = "whelk" + evaluation = "octopus" + expected = "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}".format(project=project, location=location, model=model, evaluation=evaluation, ) + actual = ModelServiceClient.model_evaluation_path(project, location, model, evaluation) + assert expected == actual + + +def test_parse_model_evaluation_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "model": "cuttlefish", + "evaluation": "mussel", + } + path = ModelServiceClient.model_evaluation_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_model_evaluation_path(path) + assert expected == actual + +def test_model_evaluation_slice_path(): + project = "winkle" + location = "nautilus" + model = "scallop" + evaluation = "abalone" + slice = "squid" + expected = "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}".format(project=project, location=location, model=model, evaluation=evaluation, slice=slice, ) + actual = ModelServiceClient.model_evaluation_slice_path(project, location, model, evaluation, slice) + assert expected == actual + + +def test_parse_model_evaluation_slice_path(): + expected = { + "project": "clam", + "location": "whelk", + "model": "octopus", + "evaluation": "oyster", + "slice": "nudibranch", + } + path = ModelServiceClient.model_evaluation_slice_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ModelServiceClient.parse_model_evaluation_slice_path(path) + assert expected == actual + +def test_training_pipeline_path(): + project = "cuttlefish" + location = "mussel" + training_pipeline = "winkle" + expected = "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, ) + actual = ModelServiceClient.training_pipeline_path(project, location, training_pipeline) + assert expected == actual + + +def test_parse_training_pipeline_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "training_pipeline": "abalone", + } + path = ModelServiceClient.training_pipeline_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_training_pipeline_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = ModelServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = ModelServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = ModelServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = ModelServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = ModelServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = ModelServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = ModelServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = ModelServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = ModelServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = ModelServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
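All of the `*_path` / `parse_*_path` tests follow one template: a classmethod interpolates IDs into a resource-name pattern, and a regex-based parser inverts it. A self-contained sketch of the pair for model names; the template matches the one asserted above, and the regex mirrors the style the generator uses:

```python
import re


def model_path(project: str, location: str, model: str) -> str:
    return "projects/{project}/locations/{location}/models/{model}".format(
        project=project, location=location, model=model,
    )


def parse_model_path(path: str) -> dict:
    # Non-greedy groups keep each segment from swallowing the next slash;
    # a non-matching path yields an empty dict.
    m = re.match(
        r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$",
        path,
    )
    return m.groupdict() if m else {}


# Round-trip, just like the test_parse_model_path assertion.
assert parse_model_path(model_path("p1", "l1", "m1")) == {
    "project": "p1", "location": "l1", "model": "m1",
}
```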
+    actual = ModelServiceClient.parse_common_location_path(path)
+    assert expected == actual
+
+
+def test_client_withDEFAULT_CLIENT_INFO():
+    client_info = gapic_v1.client_info.ClientInfo()
+
+    with mock.patch.object(transports.ModelServiceTransport, '_prep_wrapped_messages') as prep:
+        client = ModelServiceClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            client_info=client_info,
+        )
+        prep.assert_called_once_with(client_info)
+
+    with mock.patch.object(transports.ModelServiceTransport, '_prep_wrapped_messages') as prep:
+        transport_class = ModelServiceClient.get_transport_class()
+        transport = transport_class(
+            credentials=ga_credentials.AnonymousCredentials(),
+            client_info=client_info,
+        )
+        prep.assert_called_once_with(client_info)
diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py
new file mode 100644
index 0000000000..d4e51cd143
--- /dev/null
+++ b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py
@@ -0,0 +1,3916 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+import mock
+import packaging.version
+
+import grpc
+from grpc.experimental import aio
+import math
+import pytest
+from proto.marshal.rules.dates import DurationRule, TimestampRule
+
+
+from google.api_core import client_options
+from google.api_core import exceptions as core_exceptions
+from google.api_core import future
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.api_core import operation_async  # type: ignore
+from google.api_core import operations_v1
+from google.auth import credentials as ga_credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.aiplatform_v1.services.pipeline_service import PipelineServiceAsyncClient
+from google.cloud.aiplatform_v1.services.pipeline_service import PipelineServiceClient
+from google.cloud.aiplatform_v1.services.pipeline_service import pagers
+from google.cloud.aiplatform_v1.services.pipeline_service import transports
+from google.cloud.aiplatform_v1.services.pipeline_service.transports.base import _GOOGLE_AUTH_VERSION
+from google.cloud.aiplatform_v1.types import artifact
+from google.cloud.aiplatform_v1.types import context
+from google.cloud.aiplatform_v1.types import deployed_model_ref
+from google.cloud.aiplatform_v1.types import encryption_spec
+from google.cloud.aiplatform_v1.types import env_var
+from google.cloud.aiplatform_v1.types import execution
+from google.cloud.aiplatform_v1.types import io
+from google.cloud.aiplatform_v1.types import model
+from google.cloud.aiplatform_v1.types import operation as gca_operation
+from google.cloud.aiplatform_v1.types import pipeline_job
+from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job
+from google.cloud.aiplatform_v1.types import pipeline_service
+from google.cloud.aiplatform_v1.types import pipeline_state
+from google.cloud.aiplatform_v1.types import training_pipeline
+from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline
+from google.cloud.aiplatform_v1.types import value
+from google.longrunning import operations_pb2
+from google.oauth2 import service_account
+from google.protobuf import any_pb2  # type: ignore
+from google.protobuf import field_mask_pb2  # type: ignore
+from google.protobuf import struct_pb2  # type: ignore
+from google.protobuf import timestamp_pb2  # type: ignore
+from google.rpc import status_pb2  # type: ignore
+import google.auth
+
+
+# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
+# through google-api-core:
+# - Delete the auth "less than" test cases
+# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
+requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
+    packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
+    reason="This test requires google-auth < 1.25.0",
+)
+requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
+    packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
+    reason="This test requires google-auth >= 1.25.0",
+)
+
+def client_cert_source_callback():
+    return b"cert bytes", b"key bytes"
+
+
+# If default endpoint is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint(client):
+    return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
+
+
+def test__get_default_mtls_endpoint():
+    api_endpoint = "example.googleapis.com"
+    api_mtls_endpoint = "example.mtls.googleapis.com"
+    sandbox_endpoint = "example.sandbox.googleapis.com"
+    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
+    non_googleapi = "api.example.com"
+
+    assert PipelineServiceClient._get_default_mtls_endpoint(None) is None
+    assert PipelineServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
+    assert PipelineServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
+    assert PipelineServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
+    assert PipelineServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
+    assert PipelineServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
+
+
+@pytest.mark.parametrize("client_class", [
+    PipelineServiceClient,
+    PipelineServiceAsyncClient,
+])
+def test_pipeline_service_client_from_service_account_info(client_class):
+    creds = ga_credentials.AnonymousCredentials()
+    with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
+        factory.return_value = creds
+        info = {"valid": True}
+        client = client_class.from_service_account_info(info)
+        assert client.transport._credentials == creds
+        assert isinstance(client, client_class)
+
+        assert client.transport._host == 'aiplatform.googleapis.com:443'
+
+
+@pytest.mark.parametrize("client_class", [
+    PipelineServiceClient,
+    PipelineServiceAsyncClient,
+])
+def test_pipeline_service_client_service_account_always_use_jwt(client_class):
+    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
+        creds = service_account.Credentials(None, None, None)
+        client = client_class(credentials=creds)
use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.PipelineServiceGrpcTransport, "grpc"), + (transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_pipeline_service_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + PipelineServiceClient, + PipelineServiceAsyncClient, +]) +def test_pipeline_service_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +def test_pipeline_service_client_get_transport_class(): + transport = PipelineServiceClient.get_transport_class() + available_transports = [ + transports.PipelineServiceGrpcTransport, + ] + assert transport in available_transports + + transport = PipelineServiceClient.get_transport_class("grpc") + assert transport == transports.PipelineServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc"), + (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(PipelineServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceClient)) +@mock.patch.object(PipelineServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceAsyncClient)) +def test_pipeline_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(PipelineServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(PipelineServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
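test_pipeline_service_client_get_transport_class above relies on the client registering one transport class per label, with the first registration acting as the default. A small usage sketch; the `is` comparisons hold because the registry stores the classes themselves:

```python
from google.cloud.aiplatform_v1.services.pipeline_service import (
    PipelineServiceClient,
    transports,
)

# With no label, the default (currently gRPC) transport class is returned;
# the async client pairs with the "grpc_asyncio" label instead.
assert PipelineServiceClient.get_transport_class() is transports.PipelineServiceGrpcTransport
assert PipelineServiceClient.get_transport_class("grpc") is transports.PipelineServiceGrpcTransport
```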
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc", "true"), + (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc", "false"), + (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(PipelineServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceClient)) +@mock.patch.object(PipelineServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_pipeline_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc"), + (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_pipeline_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
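test_pipeline_service_client_mtls_env_auto walks a decision table driven by two environment variables. A condensed sketch of that table follows; `has_client_cert` stands in for the two mocked sources of a client certificate (an explicit `client_cert_source` or an ADC default cert), and this is a paraphrase of the behavior under test, not the client's actual code:

```python
import os

from google.auth.exceptions import MutualTLSChannelError


def select_endpoint(default_endpoint: str, mtls_endpoint: str, has_client_cert: bool) -> str:
    use_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
    use_mtls = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
    if use_cert not in ("true", "false"):
        # Matches the ValueError case exercised above.
        raise ValueError("GOOGLE_API_USE_CLIENT_CERTIFICATE must be 'true' or 'false'")
    if use_mtls == "never":
        return default_endpoint
    if use_mtls == "always":
        return mtls_endpoint
    if use_mtls == "auto":
        # Auto-switch to mTLS only when cert usage is enabled AND a cert exists.
        return mtls_endpoint if (use_cert == "true" and has_client_cert) else default_endpoint
    # Matches the MutualTLSChannelError case exercised above.
    raise MutualTLSChannelError("Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value")
```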
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc"), + (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_pipeline_service_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_pipeline_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1.services.pipeline_service.transports.PipelineServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = PipelineServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_training_pipeline(transport: str = 'grpc', request_type=pipeline_service.CreateTrainingPipelineRequest): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_training_pipeline.TrainingPipeline( + name='name_value', + display_name='display_name_value', + training_task_definition='training_task_definition_value', + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + ) + response = client.create_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CreateTrainingPipelineRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_training_pipeline.TrainingPipeline) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.training_task_definition == 'training_task_definition_value' + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + + +def test_create_training_pipeline_from_dict(): + test_create_training_pipeline(request_type=dict) + + +def test_create_training_pipeline_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_training_pipeline), + '__call__') as call: + client.create_training_pipeline() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CreateTrainingPipelineRequest() + + +@pytest.mark.asyncio +async def test_create_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CreateTrainingPipelineRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_training_pipeline.TrainingPipeline( + name='name_value', + display_name='display_name_value', + training_task_definition='training_task_definition_value', + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + )) + response = await client.create_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CreateTrainingPipelineRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_training_pipeline.TrainingPipeline) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.training_task_definition == 'training_task_definition_value' + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + + +@pytest.mark.asyncio +async def test_create_training_pipeline_async_from_dict(): + await test_create_training_pipeline_async(request_type=dict) + + +def test_create_training_pipeline_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CreateTrainingPipelineRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_training_pipeline), + '__call__') as call: + call.return_value = gca_training_pipeline.TrainingPipeline() + client.create_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_training_pipeline_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CreateTrainingPipelineRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_training_pipeline), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_training_pipeline.TrainingPipeline()) + await client.create_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_create_training_pipeline_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_training_pipeline.TrainingPipeline() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_training_pipeline( + parent='parent_value', + training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].training_pipeline == gca_training_pipeline.TrainingPipeline(name='name_value') + + +def test_create_training_pipeline_flattened_error(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_training_pipeline( + pipeline_service.CreateTrainingPipelineRequest(), + parent='parent_value', + training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'), + ) + + +@pytest.mark.asyncio +async def test_create_training_pipeline_flattened_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_training_pipeline.TrainingPipeline() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_training_pipeline.TrainingPipeline()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
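The field-headers tests assert that any request field bound into the request URI also travels as `x-goog-request-params` gRPC metadata, which Google's frontends use for routing. A sketch of the pair's shape, built with the same api-core helper these test files import; the `parent` value is illustrative:

```python
from google.api_core import gapic_v1

parent = "projects/my-project/locations/us-central1"

metadata = gapic_v1.routing_header.to_grpc_metadata((("parent", parent),))
# One ("x-goog-request-params", "parent=<value>") tuple, with path separators
# left readable, which is the shape the `in kw['metadata']` asserts expect.
assert metadata == ("x-goog-request-params", "parent=projects/my-project/locations/us-central1")
```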
+ response = await client.create_training_pipeline( + parent='parent_value', + training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].training_pipeline == gca_training_pipeline.TrainingPipeline(name='name_value') + + +@pytest.mark.asyncio +async def test_create_training_pipeline_flattened_error_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_training_pipeline( + pipeline_service.CreateTrainingPipelineRequest(), + parent='parent_value', + training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'), + ) + + +def test_get_training_pipeline(transport: str = 'grpc', request_type=pipeline_service.GetTrainingPipelineRequest): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = training_pipeline.TrainingPipeline( + name='name_value', + display_name='display_name_value', + training_task_definition='training_task_definition_value', + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + ) + response = client.get_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.GetTrainingPipelineRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, training_pipeline.TrainingPipeline) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.training_task_definition == 'training_task_definition_value' + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + + +def test_get_training_pipeline_from_dict(): + test_get_training_pipeline(request_type=dict) + + +def test_get_training_pipeline_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_training_pipeline), + '__call__') as call: + client.get_training_pipeline() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.GetTrainingPipelineRequest() + + +@pytest.mark.asyncio +async def test_get_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.GetTrainingPipelineRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(training_pipeline.TrainingPipeline( + name='name_value', + display_name='display_name_value', + training_task_definition='training_task_definition_value', + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + )) + response = await client.get_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.GetTrainingPipelineRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, training_pipeline.TrainingPipeline) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.training_task_definition == 'training_task_definition_value' + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + + +@pytest.mark.asyncio +async def test_get_training_pipeline_async_from_dict(): + await test_get_training_pipeline_async(request_type=dict) + + +def test_get_training_pipeline_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.GetTrainingPipelineRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_training_pipeline), + '__call__') as call: + call.return_value = training_pipeline.TrainingPipeline() + client.get_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_training_pipeline_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.GetTrainingPipelineRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_training_pipeline), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(training_pipeline.TrainingPipeline()) + await client.get_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_get_training_pipeline_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = training_pipeline.TrainingPipeline() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_training_pipeline( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_get_training_pipeline_flattened_error(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_training_pipeline( + pipeline_service.GetTrainingPipelineRequest(), + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_get_training_pipeline_flattened_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = training_pipeline.TrainingPipeline() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(training_pipeline.TrainingPipeline()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_training_pipeline( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_get_training_pipeline_flattened_error_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_training_pipeline( + pipeline_service.GetTrainingPipelineRequest(), + name='name_value', + ) + + +def test_list_training_pipelines(transport: str = 'grpc', request_type=pipeline_service.ListTrainingPipelinesRequest): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
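The `*_flattened_error` tests pin down a GAPIC convention: a method accepts either a fully formed request proto or flattened keyword fields, never both. A hypothetical helper showing the guard these tests expect; the generated clients implement the same rule inline:

```python
from google.cloud.aiplatform_v1.types import pipeline_service


def build_get_request(request=None, *, name=None):
    # Mixing a request object with flattened fields raises ValueError,
    # which is exactly what pytest.raises(ValueError) checks above.
    if request is not None and name is not None:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    request = pipeline_service.GetTrainingPipelineRequest(request)
    if name is not None:
        request.name = name
    return request


# Flattened usage, mirroring test_get_training_pipeline_flattened.
req = build_get_request(name="name_value")
assert req.name == "name_value"
```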
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_training_pipelines),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = pipeline_service.ListTrainingPipelinesResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_training_pipelines(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == pipeline_service.ListTrainingPipelinesRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListTrainingPipelinesPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_training_pipelines_from_dict():
+    test_list_training_pipelines(request_type=dict)
+
+
+def test_list_training_pipelines_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_training_pipelines),
+            '__call__') as call:
+        client.list_training_pipelines()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == pipeline_service.ListTrainingPipelinesRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_training_pipelines_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.ListTrainingPipelinesRequest):
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_training_pipelines),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListTrainingPipelinesResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_training_pipelines(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == pipeline_service.ListTrainingPipelinesRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListTrainingPipelinesAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_training_pipelines_async_from_dict():
+    await test_list_training_pipelines_async(request_type=dict)
+
+
+def test_list_training_pipelines_field_headers():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = pipeline_service.ListTrainingPipelinesRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_training_pipelines),
+            '__call__') as call:
+        call.return_value = pipeline_service.ListTrainingPipelinesResponse()
+        client.list_training_pipelines(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_training_pipelines_field_headers_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = pipeline_service.ListTrainingPipelinesRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_training_pipelines),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListTrainingPipelinesResponse())
+        await client.list_training_pipelines(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_list_training_pipelines_flattened():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_training_pipelines),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = pipeline_service.ListTrainingPipelinesResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_training_pipelines(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+def test_list_training_pipelines_flattened_error():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_training_pipelines(
+            pipeline_service.ListTrainingPipelinesRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_training_pipelines_flattened_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_training_pipelines),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListTrainingPipelinesResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
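+        # Flattened kwargs are copied into a ListTrainingPipelinesRequest
+        # by the client before the RPC is invoked, which is why the
+        # assertions below inspect args[0].parent on the request object.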
+        response = await client.list_training_pipelines(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_training_pipelines_flattened_error_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_training_pipelines(
+            pipeline_service.ListTrainingPipelinesRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_training_pipelines_pager():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_training_pipelines),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[
+                    training_pipeline.TrainingPipeline(),
+                    training_pipeline.TrainingPipeline(),
+                    training_pipeline.TrainingPipeline(),
+                ],
+                next_page_token='abc',
+            ),
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[],
+                next_page_token='def',
+            ),
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[
+                    training_pipeline.TrainingPipeline(),
+                ],
+                next_page_token='ghi',
+            ),
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[
+                    training_pipeline.TrainingPipeline(),
+                    training_pipeline.TrainingPipeline(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_training_pipelines(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, training_pipeline.TrainingPipeline)
+                   for i in results)
+
+def test_list_training_pipelines_pages():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_training_pipelines),
+            '__call__') as call:
+        # Set the response to a series of pages.
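+        # Each element of side_effect is consumed by one underlying RPC:
+        # three pages with tokens, a final page with no token, and a
+        # trailing RuntimeError that fails the test if an extra page is
+        # ever requested.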
+        call.side_effect = (
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[
+                    training_pipeline.TrainingPipeline(),
+                    training_pipeline.TrainingPipeline(),
+                    training_pipeline.TrainingPipeline(),
+                ],
+                next_page_token='abc',
+            ),
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[],
+                next_page_token='def',
+            ),
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[
+                    training_pipeline.TrainingPipeline(),
+                ],
+                next_page_token='ghi',
+            ),
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[
+                    training_pipeline.TrainingPipeline(),
+                    training_pipeline.TrainingPipeline(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_training_pipelines(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_training_pipelines_async_pager():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_training_pipelines),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[
+                    training_pipeline.TrainingPipeline(),
+                    training_pipeline.TrainingPipeline(),
+                    training_pipeline.TrainingPipeline(),
+                ],
+                next_page_token='abc',
+            ),
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[],
+                next_page_token='def',
+            ),
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[
+                    training_pipeline.TrainingPipeline(),
+                ],
+                next_page_token='ghi',
+            ),
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[
+                    training_pipeline.TrainingPipeline(),
+                    training_pipeline.TrainingPipeline(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_training_pipelines(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, training_pipeline.TrainingPipeline)
+                   for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_training_pipelines_async_pages():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_training_pipelines),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+ call.side_effect = ( + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[ + training_pipeline.TrainingPipeline(), + training_pipeline.TrainingPipeline(), + training_pipeline.TrainingPipeline(), + ], + next_page_token='abc', + ), + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[], + next_page_token='def', + ), + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[ + training_pipeline.TrainingPipeline(), + ], + next_page_token='ghi', + ), + pipeline_service.ListTrainingPipelinesResponse( + training_pipelines=[ + training_pipeline.TrainingPipeline(), + training_pipeline.TrainingPipeline(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_training_pipelines(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +def test_delete_training_pipeline(transport: str = 'grpc', request_type=pipeline_service.DeleteTrainingPipelineRequest): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeleteTrainingPipelineRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_training_pipeline_from_dict(): + test_delete_training_pipeline(request_type=dict) + + +def test_delete_training_pipeline_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_training_pipeline), + '__call__') as call: + client.delete_training_pipeline() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeleteTrainingPipelineRequest() + + +@pytest.mark.asyncio +async def test_delete_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.DeleteTrainingPipelineRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. 
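+        # Delete is a long-running operation: the stub yields a raw
+        # operations_pb2.Operation, which the client wraps in an operation
+        # future (hence the future.Future assertion below).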
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeleteTrainingPipelineRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_training_pipeline_async_from_dict(): + await test_delete_training_pipeline_async(request_type=dict) + + +def test_delete_training_pipeline_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.DeleteTrainingPipelineRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_training_pipeline), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_training_pipeline_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.DeleteTrainingPipelineRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_training_pipeline), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_delete_training_pipeline_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_training_pipeline( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_delete_training_pipeline_flattened_error():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_training_pipeline(
+            pipeline_service.DeleteTrainingPipelineRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_training_pipeline_flattened_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_training_pipeline),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_training_pipeline(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_training_pipeline_flattened_error_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.delete_training_pipeline(
+            pipeline_service.DeleteTrainingPipelineRequest(),
+            name='name_value',
+        )
+
+
+def test_cancel_training_pipeline(transport: str = 'grpc', request_type=pipeline_service.CancelTrainingPipelineRequest):
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_training_pipeline),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.cancel_training_pipeline(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == pipeline_service.CancelTrainingPipelineRequest()
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+
+def test_cancel_training_pipeline_from_dict():
+    test_cancel_training_pipeline(request_type=dict)
+
+
+def test_cancel_training_pipeline_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.cancel_training_pipeline), + '__call__') as call: + client.cancel_training_pipeline() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CancelTrainingPipelineRequest() + + +@pytest.mark.asyncio +async def test_cancel_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CancelTrainingPipelineRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CancelTrainingPipelineRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_training_pipeline_async_from_dict(): + await test_cancel_training_pipeline_async(request_type=dict) + + +def test_cancel_training_pipeline_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CancelTrainingPipelineRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_training_pipeline), + '__call__') as call: + call.return_value = None + client.cancel_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_cancel_training_pipeline_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CancelTrainingPipelineRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_training_pipeline), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
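+    # The routing header mirrors URI path parameters; with a realistic
+    # resource name (e.g. 'projects/p/locations/l/trainingPipelines/t',
+    # hypothetical) the metadata entry would read 'name=projects/p/...'.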
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_cancel_training_pipeline_flattened():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_training_pipeline),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.cancel_training_pipeline(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_cancel_training_pipeline_flattened_error():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.cancel_training_pipeline(
+            pipeline_service.CancelTrainingPipelineRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_cancel_training_pipeline_flattened_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_training_pipeline),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.cancel_training_pipeline(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_cancel_training_pipeline_flattened_error_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.cancel_training_pipeline(
+            pipeline_service.CancelTrainingPipelineRequest(),
+            name='name_value',
+        )
+
+
+def test_create_pipeline_job(transport: str = 'grpc', request_type=pipeline_service.CreatePipelineJobRequest):
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_pipeline_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gca_pipeline_job.PipelineJob(
+            name='name_value',
+            display_name='display_name_value',
+            state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+            service_account='service_account_value',
+            network='network_value',
+        )
+        response = client.create_pipeline_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == pipeline_service.CreatePipelineJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, gca_pipeline_job.PipelineJob)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
+    assert response.service_account == 'service_account_value'
+    assert response.network == 'network_value'
+
+
+def test_create_pipeline_job_from_dict():
+    test_create_pipeline_job(request_type=dict)
+
+
+def test_create_pipeline_job_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_pipeline_job),
+            '__call__') as call:
+        client.create_pipeline_job()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == pipeline_service.CreatePipelineJobRequest()
+
+
+@pytest.mark.asyncio
+async def test_create_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CreatePipelineJobRequest):
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_pipeline_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_pipeline_job.PipelineJob(
+            name='name_value',
+            display_name='display_name_value',
+            state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+            service_account='service_account_value',
+            network='network_value',
+        ))
+        response = await client.create_pipeline_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == pipeline_service.CreatePipelineJobRequest()
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_pipeline_job.PipelineJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + assert response.service_account == 'service_account_value' + assert response.network == 'network_value' + + +@pytest.mark.asyncio +async def test_create_pipeline_job_async_from_dict(): + await test_create_pipeline_job_async(request_type=dict) + + +def test_create_pipeline_job_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CreatePipelineJobRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), + '__call__') as call: + call.return_value = gca_pipeline_job.PipelineJob() + client.create_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_pipeline_job_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CreatePipelineJobRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_pipeline_job.PipelineJob()) + await client.create_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_create_pipeline_job_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_pipeline_job.PipelineJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_pipeline_job( + parent='parent_value', + pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'), + pipeline_job_id='pipeline_job_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].pipeline_job == gca_pipeline_job.PipelineJob(name='name_value')
+        assert args[0].pipeline_job_id == 'pipeline_job_id_value'
+
+
+def test_create_pipeline_job_flattened_error():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_pipeline_job(
+            pipeline_service.CreatePipelineJobRequest(),
+            parent='parent_value',
+            pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'),
+            pipeline_job_id='pipeline_job_id_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_pipeline_job_flattened_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_pipeline_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_pipeline_job.PipelineJob())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_pipeline_job(
+            parent='parent_value',
+            pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'),
+            pipeline_job_id='pipeline_job_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].pipeline_job == gca_pipeline_job.PipelineJob(name='name_value')
+        assert args[0].pipeline_job_id == 'pipeline_job_id_value'
+
+
+@pytest.mark.asyncio
+async def test_create_pipeline_job_flattened_error_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.create_pipeline_job(
+            pipeline_service.CreatePipelineJobRequest(),
+            parent='parent_value',
+            pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'),
+            pipeline_job_id='pipeline_job_id_value',
+        )
+
+
+def test_get_pipeline_job(transport: str = 'grpc', request_type=pipeline_service.GetPipelineJobRequest):
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_pipeline_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = pipeline_job.PipelineJob(
+            name='name_value',
+            display_name='display_name_value',
+            state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+            service_account='service_account_value',
+            network='network_value',
+        )
+        response = client.get_pipeline_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == pipeline_service.GetPipelineJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pipeline_job.PipelineJob)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
+    assert response.service_account == 'service_account_value'
+    assert response.network == 'network_value'
+
+
+def test_get_pipeline_job_from_dict():
+    test_get_pipeline_job(request_type=dict)
+
+
+def test_get_pipeline_job_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_pipeline_job),
+            '__call__') as call:
+        client.get_pipeline_job()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == pipeline_service.GetPipelineJobRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.GetPipelineJobRequest):
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_pipeline_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_job.PipelineJob(
+            name='name_value',
+            display_name='display_name_value',
+            state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+            service_account='service_account_value',
+            network='network_value',
+        ))
+        response = await client.get_pipeline_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == pipeline_service.GetPipelineJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pipeline_job.PipelineJob)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
+    assert response.service_account == 'service_account_value'
+    assert response.network == 'network_value'
+
+
+@pytest.mark.asyncio
+async def test_get_pipeline_job_async_from_dict():
+    await test_get_pipeline_job_async(request_type=dict)
+
+
+def test_get_pipeline_job_field_headers():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = pipeline_service.GetPipelineJobRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_pipeline_job),
+            '__call__') as call:
+        call.return_value = pipeline_job.PipelineJob()
+        client.get_pipeline_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_pipeline_job_field_headers_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = pipeline_service.GetPipelineJobRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_pipeline_job),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_job.PipelineJob())
+        await client.get_pipeline_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_get_pipeline_job_flattened():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_pipeline_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = pipeline_job.PipelineJob()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_pipeline_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_get_pipeline_job_flattened_error():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_pipeline_job(
+            pipeline_service.GetPipelineJobRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_pipeline_job_flattened_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_pipeline_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_job.PipelineJob())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_pipeline_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_pipeline_job_flattened_error_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_pipeline_job(
+            pipeline_service.GetPipelineJobRequest(),
+            name='name_value',
+        )
+
+
+def test_list_pipeline_jobs(transport: str = 'grpc', request_type=pipeline_service.ListPipelineJobsRequest):
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_pipeline_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = pipeline_service.ListPipelineJobsResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_pipeline_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == pipeline_service.ListPipelineJobsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListPipelineJobsPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_pipeline_jobs_from_dict():
+    test_list_pipeline_jobs(request_type=dict)
+
+
+def test_list_pipeline_jobs_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_pipeline_jobs),
+            '__call__') as call:
+        client.list_pipeline_jobs()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == pipeline_service.ListPipelineJobsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.ListPipelineJobsRequest):
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_pipeline_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListPipelineJobsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_pipeline_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == pipeline_service.ListPipelineJobsRequest()
+
+    # Establish that the response is the type that we expect.
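+    # The async surface returns an AsyncPager immediately; further pages
+    # are fetched lazily only when the caller iterates with `async for`.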
+ assert isinstance(response, pagers.ListPipelineJobsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_async_from_dict(): + await test_list_pipeline_jobs_async(request_type=dict) + + +def test_list_pipeline_jobs_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.ListPipelineJobsRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), + '__call__') as call: + call.return_value = pipeline_service.ListPipelineJobsResponse() + client.list_pipeline_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.ListPipelineJobsRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListPipelineJobsResponse()) + await client.list_pipeline_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_list_pipeline_jobs_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_service.ListPipelineJobsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_pipeline_jobs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +def test_list_pipeline_jobs_flattened_error(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.list_pipeline_jobs(
+            pipeline_service.ListPipelineJobsRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_flattened_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_pipeline_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListPipelineJobsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_pipeline_jobs(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_flattened_error_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_pipeline_jobs(
+            pipeline_service.ListPipelineJobsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_pipeline_jobs_pager():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_pipeline_jobs),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            pipeline_service.ListPipelineJobsResponse(
+                pipeline_jobs=[
+                    pipeline_job.PipelineJob(),
+                    pipeline_job.PipelineJob(),
+                    pipeline_job.PipelineJob(),
+                ],
+                next_page_token='abc',
+            ),
+            pipeline_service.ListPipelineJobsResponse(
+                pipeline_jobs=[],
+                next_page_token='def',
+            ),
+            pipeline_service.ListPipelineJobsResponse(
+                pipeline_jobs=[
+                    pipeline_job.PipelineJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            pipeline_service.ListPipelineJobsResponse(
+                pipeline_jobs=[
+                    pipeline_job.PipelineJob(),
+                    pipeline_job.PipelineJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_pipeline_jobs(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, pipeline_job.PipelineJob)
+                   for i in results)
+
+def test_list_pipeline_jobs_pages():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_pipeline_jobs),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            pipeline_service.ListPipelineJobsResponse(
+                pipeline_jobs=[
+                    pipeline_job.PipelineJob(),
+                    pipeline_job.PipelineJob(),
+                    pipeline_job.PipelineJob(),
+                ],
+                next_page_token='abc',
+            ),
+            pipeline_service.ListPipelineJobsResponse(
+                pipeline_jobs=[],
+                next_page_token='def',
+            ),
+            pipeline_service.ListPipelineJobsResponse(
+                pipeline_jobs=[
+                    pipeline_job.PipelineJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            pipeline_service.ListPipelineJobsResponse(
+                pipeline_jobs=[
+                    pipeline_job.PipelineJob(),
+                    pipeline_job.PipelineJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_pipeline_jobs(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_async_pager():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_pipeline_jobs),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            pipeline_service.ListPipelineJobsResponse(
+                pipeline_jobs=[
+                    pipeline_job.PipelineJob(),
+                    pipeline_job.PipelineJob(),
+                    pipeline_job.PipelineJob(),
+                ],
+                next_page_token='abc',
+            ),
+            pipeline_service.ListPipelineJobsResponse(
+                pipeline_jobs=[],
+                next_page_token='def',
+            ),
+            pipeline_service.ListPipelineJobsResponse(
+                pipeline_jobs=[
+                    pipeline_job.PipelineJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            pipeline_service.ListPipelineJobsResponse(
+                pipeline_jobs=[
+                    pipeline_job.PipelineJob(),
+                    pipeline_job.PipelineJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_pipeline_jobs(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, pipeline_job.PipelineJob)
+                   for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_async_pages():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_pipeline_jobs),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+ call.side_effect = ( + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + ], + next_page_token='abc', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[], + next_page_token='def', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + ], + next_page_token='ghi', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_pipeline_jobs(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +def test_delete_pipeline_job(transport: str = 'grpc', request_type=pipeline_service.DeletePipelineJobRequest): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeletePipelineJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_pipeline_job_from_dict(): + test_delete_pipeline_job(request_type=dict) + + +def test_delete_pipeline_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), + '__call__') as call: + client.delete_pipeline_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeletePipelineJobRequest() + + +@pytest.mark.asyncio +async def test_delete_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.DeletePipelineJobRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeletePipelineJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_pipeline_job_async_from_dict(): + await test_delete_pipeline_job_async(request_type=dict) + + +def test_delete_pipeline_job_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.DeletePipelineJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_pipeline_job_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.DeletePipelineJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_delete_pipeline_job_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_pipeline_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_delete_pipeline_job_flattened_error(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
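+    # (request and the flattened keyword arguments are mutually exclusive
+    # ways of building the same request message.)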
+    with pytest.raises(ValueError):
+        client.delete_pipeline_job(
+            pipeline_service.DeletePipelineJobRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_pipeline_job_flattened_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_pipeline_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_pipeline_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_pipeline_job_flattened_error_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.delete_pipeline_job(
+            pipeline_service.DeletePipelineJobRequest(),
+            name='name_value',
+        )
+
+
+def test_cancel_pipeline_job(transport: str = 'grpc', request_type=pipeline_service.CancelPipelineJobRequest):
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_pipeline_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.cancel_pipeline_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == pipeline_service.CancelPipelineJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+
+def test_cancel_pipeline_job_from_dict():
+    test_cancel_pipeline_job(request_type=dict)
+
+
+def test_cancel_pipeline_job_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.cancel_pipeline_job), + '__call__') as call: + client.cancel_pipeline_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CancelPipelineJobRequest() + + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CancelPipelineJobRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CancelPipelineJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_async_from_dict(): + await test_cancel_pipeline_job_async(request_type=dict) + + +def test_cancel_pipeline_job_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CancelPipelineJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), + '__call__') as call: + call.return_value = None + client.cancel_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CancelPipelineJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
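+    # The routing header travels in the metadata kwarg of the stub call as an
+    # ('x-goog-request-params', 'name=name/value') tuple.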
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_cancel_pipeline_job_flattened():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_pipeline_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.cancel_pipeline_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_cancel_pipeline_job_flattened_error():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.cancel_pipeline_job(
+            pipeline_service.CancelPipelineJobRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_cancel_pipeline_job_flattened_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_pipeline_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.cancel_pipeline_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_cancel_pipeline_job_flattened_error_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.cancel_pipeline_job(
+            pipeline_service.CancelPipelineJobRequest(),
+            name='name_value',
+        )
+
+
+def test_credentials_transport_error():
+    # It is an error to provide credentials and a transport instance.
+    transport = transports.PipelineServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = PipelineServiceClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport,
+        )
+
+    # It is an error to provide a credentials file and a transport instance.
+    transport = transports.PipelineServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = PipelineServiceClient(
+            client_options={"credentials_file": "credentials.json"},
+            transport=transport,
+        )
+
+    # It is an error to provide scopes and a transport instance.
+ transport = transports.PipelineServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PipelineServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.PipelineServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = PipelineServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.PipelineServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.PipelineServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.PipelineServiceGrpcTransport, + transports.PipelineServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.PipelineServiceGrpcTransport, + ) + +def test_pipeline_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.PipelineServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_pipeline_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1.services.pipeline_service.transports.PipelineServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.PipelineServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
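+    # (The concrete gRPC transports override each of these; only the abstract
+    # base is expected to raise.)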
+    methods = (
+        'create_training_pipeline',
+        'get_training_pipeline',
+        'list_training_pipelines',
+        'delete_training_pipeline',
+        'cancel_training_pipeline',
+        'create_pipeline_job',
+        'get_pipeline_job',
+        'list_pipeline_jobs',
+        'delete_pipeline_job',
+        'cancel_pipeline_job',
+    )
+    for method in methods:
+        with pytest.raises(NotImplementedError):
+            getattr(transport, method)(request=object())
+
+    # Additionally, the LRO client (a property) should
+    # also raise NotImplementedError
+    with pytest.raises(NotImplementedError):
+        transport.operations_client
+
+
+@requires_google_auth_gte_1_25_0
+def test_pipeline_service_base_transport_with_credentials_file():
+    # Instantiate the base transport with a credentials file
+    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.pipeline_service.transports.PipelineServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.PipelineServiceTransport(
+            credentials_file="credentials.json",
+            quota_project_id="octopus",
+        )
+        load_creds.assert_called_once_with("credentials.json",
+            scopes=None,
+            default_scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id="octopus",
+        )
+
+
+@requires_google_auth_lt_1_25_0
+def test_pipeline_service_base_transport_with_credentials_file_old_google_auth():
+    # Instantiate the base transport with a credentials file
+    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.pipeline_service.transports.PipelineServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.PipelineServiceTransport(
+            credentials_file="credentials.json",
+            quota_project_id="octopus",
+        )
+        load_creds.assert_called_once_with("credentials.json",
+            scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id="octopus",
+        )
+
+
+def test_pipeline_service_base_transport_with_adc():
+    # Test the default credentials are used if credentials and credentials_file are None.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.pipeline_service.transports.PipelineServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.PipelineServiceTransport()
+        adc.assert_called_once()
+
+
+@requires_google_auth_gte_1_25_0
+def test_pipeline_service_auth_adc():
+    # If no credentials are provided, we should use ADC credentials.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        PipelineServiceClient()
+        adc.assert_called_once_with(
+            scopes=None,
+            default_scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id=None,
+        )
+
+
+@requires_google_auth_lt_1_25_0
+def test_pipeline_service_auth_adc_old_google_auth():
+    # If no credentials are provided, we should use ADC credentials.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        PipelineServiceClient()
+        adc.assert_called_once_with(
+            scopes=('https://www.googleapis.com/auth/cloud-platform',),
+            quota_project_id=None,
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.PipelineServiceGrpcTransport,
+        transports.PipelineServiceGrpcAsyncIOTransport,
+    ],
+)
+@requires_google_auth_gte_1_25_0
+def test_pipeline_service_transport_auth_adc(transport_class):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport_class(quota_project_id="octopus", scopes=["1", "2"])
+        adc.assert_called_once_with(
+            scopes=["1", "2"],
+            default_scopes=('https://www.googleapis.com/auth/cloud-platform',),
+            quota_project_id="octopus",
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.PipelineServiceGrpcTransport,
+        transports.PipelineServiceGrpcAsyncIOTransport,
+    ],
+)
+@requires_google_auth_lt_1_25_0
+def test_pipeline_service_transport_auth_adc_old_google_auth(transport_class):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
+    with mock.patch.object(google.auth, "default", autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport_class(quota_project_id="octopus")
+        adc.assert_called_once_with(
+            scopes=('https://www.googleapis.com/auth/cloud-platform',),
+            quota_project_id="octopus",
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class,grpc_helpers",
+    [
+        (transports.PipelineServiceGrpcTransport, grpc_helpers),
+        (transports.PipelineServiceGrpcAsyncIOTransport, grpc_helpers_async)
+    ],
+)
+def test_pipeline_service_transport_create_channel(transport_class, grpc_helpers):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
+    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
+        grpc_helpers, "create_channel", autospec=True
+    ) as create_channel:
+        creds = ga_credentials.AnonymousCredentials()
+        adc.return_value = (creds, None)
+        transport_class(
+            quota_project_id="octopus",
+            scopes=["1", "2"]
+        )
+
+        create_channel.assert_called_with(
+            "aiplatform.googleapis.com:443",
+            credentials=creds,
+            credentials_file=None,
+            quota_project_id="octopus",
+            default_scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            scopes=["1", "2"],
+            default_host="aiplatform.googleapis.com",
+            ssl_credentials=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+
+@pytest.mark.parametrize("transport_class", [transports.PipelineServiceGrpcTransport, transports.PipelineServiceGrpcAsyncIOTransport])
+def test_pipeline_service_grpc_transport_client_cert_source_for_mtls(
+    transport_class
+):
+    cred = ga_credentials.AnonymousCredentials()
+
+    # Check ssl_channel_credentials is used if provided.
+    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+        mock_ssl_channel_creds = mock.Mock()
+        transport_class(
+            host="squid.clam.whelk",
+            credentials=cred,
+            ssl_channel_credentials=mock_ssl_channel_creds
+        )
+        mock_create_channel.assert_called_once_with(
+            "squid.clam.whelk:443",
+            credentials=cred,
+            credentials_file=None,
+            scopes=None,
+            ssl_credentials=mock_ssl_channel_creds,
+            quota_project_id=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+    # is used.
+    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+            transport_class(
+                credentials=cred,
+                client_cert_source_for_mtls=client_cert_source_callback
+            )
+            expected_cert, expected_key = client_cert_source_callback()
+            mock_ssl_cred.assert_called_once_with(
+                certificate_chain=expected_cert,
+                private_key=expected_key
+            )
+
+
+def test_pipeline_service_host_no_port():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
+    )
+    assert client.transport._host == 'aiplatform.googleapis.com:443'
+
+
+def test_pipeline_service_host_with_port():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
+    )
+    assert client.transport._host == 'aiplatform.googleapis.com:8000'
+
+def test_pipeline_service_grpc_transport_channel():
+    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.PipelineServiceGrpcTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+def test_pipeline_service_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.PipelineServiceGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.PipelineServiceGrpcTransport, transports.PipelineServiceGrpcAsyncIOTransport])
+def test_pipeline_service_transport_channel_mtls_with_client_cert_source(
+    transport_class
+):
+    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
+        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+            mock_ssl_cred = mock.Mock()
+            grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+
+            cred = ga_credentials.AnonymousCredentials()
+            with pytest.warns(DeprecationWarning):
+                with mock.patch.object(google.auth, 'default') as adc:
+                    adc.return_value = (cred, None)
+                    transport = transport_class(
+                        host="squid.clam.whelk",
+                        api_mtls_endpoint="mtls.squid.clam.whelk",
+                        client_cert_source=client_cert_source_callback,
+                    )
+                    adc.assert_called_once()
+
+            grpc_ssl_channel_cred.assert_called_once_with(
+                certificate_chain=b"cert bytes", private_key=b"key bytes"
+            )
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+            assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.PipelineServiceGrpcTransport, transports.PipelineServiceGrpcAsyncIOTransport])
+def test_pipeline_service_transport_channel_mtls_with_adc(
+    transport_class
+):
+    mock_ssl_cred = mock.Mock()
+    with mock.patch.multiple(
+        "google.auth.transport.grpc.SslCredentials",
+        __init__=mock.Mock(return_value=None),
+        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+    ):
+        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+            mock_cred = mock.Mock()
+
+            with pytest.warns(DeprecationWarning):
+                transport = transport_class(
+                    host="squid.clam.whelk",
+                    credentials=mock_cred,
+                    api_mtls_endpoint="mtls.squid.clam.whelk",
+                    client_cert_source=None,
+                )
+
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=mock_cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_pipeline_service_grpc_lro_client():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(
+        transport.operations_client,
+        operations_v1.OperationsClient,
+    )
+
+    # Ensure that subsequent calls to the property return the exact same object.
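+    # (The transport caches the operations client on first access.)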
+    assert transport.operations_client is transport.operations_client
+
+
+def test_pipeline_service_grpc_lro_async_client():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc_asyncio',
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(
+        transport.operations_client,
+        operations_v1.OperationsAsyncClient,
+    )
+
+    # Ensure that subsequent calls to the property return the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_artifact_path():
+    project = "squid"
+    location = "clam"
+    metadata_store = "whelk"
+    artifact = "octopus"
+    expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(project=project, location=location, metadata_store=metadata_store, artifact=artifact, )
+    actual = PipelineServiceClient.artifact_path(project, location, metadata_store, artifact)
+    assert expected == actual
+
+
+def test_parse_artifact_path():
+    expected = {
+        "project": "oyster",
+        "location": "nudibranch",
+        "metadata_store": "cuttlefish",
+        "artifact": "mussel",
+    }
+    path = PipelineServiceClient.artifact_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = PipelineServiceClient.parse_artifact_path(path)
+    assert expected == actual
+
+def test_context_path():
+    project = "winkle"
+    location = "nautilus"
+    metadata_store = "scallop"
+    context = "abalone"
+    expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(project=project, location=location, metadata_store=metadata_store, context=context, )
+    actual = PipelineServiceClient.context_path(project, location, metadata_store, context)
+    assert expected == actual
+
+
+def test_parse_context_path():
+    expected = {
+        "project": "squid",
+        "location": "clam",
+        "metadata_store": "whelk",
+        "context": "octopus",
+    }
+    path = PipelineServiceClient.context_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = PipelineServiceClient.parse_context_path(path)
+    assert expected == actual
+
+def test_custom_job_path():
+    project = "oyster"
+    location = "nudibranch"
+    custom_job = "cuttlefish"
+    expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, )
+    actual = PipelineServiceClient.custom_job_path(project, location, custom_job)
+    assert expected == actual
+
+
+def test_parse_custom_job_path():
+    expected = {
+        "project": "mussel",
+        "location": "winkle",
+        "custom_job": "nautilus",
+    }
+    path = PipelineServiceClient.custom_job_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = PipelineServiceClient.parse_custom_job_path(path)
+    assert expected == actual
+
+def test_endpoint_path():
+    project = "scallop"
+    location = "abalone"
+    endpoint = "squid"
+    expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, )
+    actual = PipelineServiceClient.endpoint_path(project, location, endpoint)
+    assert expected == actual
+
+
+def test_parse_endpoint_path():
+    expected = {
+        "project": "clam",
+        "location": "whelk",
+        "endpoint": "octopus",
+    }
+    path = PipelineServiceClient.endpoint_path(**expected)
+
+    # Check that the path construction is reversible.
+ actual = PipelineServiceClient.parse_endpoint_path(path) + assert expected == actual + +def test_execution_path(): + project = "oyster" + location = "nudibranch" + metadata_store = "cuttlefish" + execution = "mussel" + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(project=project, location=location, metadata_store=metadata_store, execution=execution, ) + actual = PipelineServiceClient.execution_path(project, location, metadata_store, execution) + assert expected == actual + + +def test_parse_execution_path(): + expected = { + "project": "winkle", + "location": "nautilus", + "metadata_store": "scallop", + "execution": "abalone", + } + path = PipelineServiceClient.execution_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_execution_path(path) + assert expected == actual + +def test_model_path(): + project = "squid" + location = "clam" + model = "whelk" + expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + actual = PipelineServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "octopus", + "location": "oyster", + "model": "nudibranch", + } + path = PipelineServiceClient.model_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_model_path(path) + assert expected == actual + +def test_network_path(): + project = "cuttlefish" + network = "mussel" + expected = "projects/{project}/global/networks/{network}".format(project=project, network=network, ) + actual = PipelineServiceClient.network_path(project, network) + assert expected == actual + + +def test_parse_network_path(): + expected = { + "project": "winkle", + "network": "nautilus", + } + path = PipelineServiceClient.network_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_network_path(path) + assert expected == actual + +def test_pipeline_job_path(): + project = "scallop" + location = "abalone" + pipeline_job = "squid" + expected = "projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}".format(project=project, location=location, pipeline_job=pipeline_job, ) + actual = PipelineServiceClient.pipeline_job_path(project, location, pipeline_job) + assert expected == actual + + +def test_parse_pipeline_job_path(): + expected = { + "project": "clam", + "location": "whelk", + "pipeline_job": "octopus", + } + path = PipelineServiceClient.pipeline_job_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_pipeline_job_path(path) + assert expected == actual + +def test_training_pipeline_path(): + project = "oyster" + location = "nudibranch" + training_pipeline = "cuttlefish" + expected = "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, ) + actual = PipelineServiceClient.training_pipeline_path(project, location, training_pipeline) + assert expected == actual + + +def test_parse_training_pipeline_path(): + expected = { + "project": "mussel", + "location": "winkle", + "training_pipeline": "nautilus", + } + path = PipelineServiceClient.training_pipeline_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PipelineServiceClient.parse_training_pipeline_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "scallop" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = PipelineServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "abalone", + } + path = PipelineServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "squid" + expected = "folders/{folder}".format(folder=folder, ) + actual = PipelineServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "clam", + } + path = PipelineServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "whelk" + expected = "organizations/{organization}".format(organization=organization, ) + actual = PipelineServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "octopus", + } + path = PipelineServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "oyster" + expected = "projects/{project}".format(project=project, ) + actual = PipelineServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nudibranch", + } + path = PipelineServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "cuttlefish" + location = "mussel" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = PipelineServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "winkle", + "location": "nautilus", + } + path = PipelineServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PipelineServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.PipelineServiceTransport, '_prep_wrapped_messages') as prep: + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.PipelineServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = PipelineServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_prediction_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_prediction_service.py new file mode 100644 index 0000000000..d0f9db1820 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_prediction_service.py @@ -0,0 +1,1175 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1.services.prediction_service import PredictionServiceAsyncClient +from google.cloud.aiplatform_v1.services.prediction_service import PredictionServiceClient +from google.cloud.aiplatform_v1.services.prediction_service import transports +from google.cloud.aiplatform_v1.services.prediction_service.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.aiplatform_v1.types import prediction_service +from google.oauth2 import service_account +from google.protobuf import struct_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). 
+requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert PredictionServiceClient._get_default_mtls_endpoint(None) is None + assert PredictionServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert PredictionServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert PredictionServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert PredictionServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert PredictionServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + PredictionServiceClient, + PredictionServiceAsyncClient, +]) +def test_prediction_service_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +@pytest.mark.parametrize("client_class", [ + PredictionServiceClient, + PredictionServiceAsyncClient, +]) +def test_prediction_service_client_service_account_always_use_jwt(client_class): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.PredictionServiceGrpcTransport, "grpc"), + (transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_prediction_service_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + PredictionServiceClient, + PredictionServiceAsyncClient, +]) +def test_prediction_service_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with 
mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +def test_prediction_service_client_get_transport_class(): + transport = PredictionServiceClient.get_transport_class() + available_transports = [ + transports.PredictionServiceGrpcTransport, + ] + assert transport in available_transports + + transport = PredictionServiceClient.get_transport_class("grpc") + assert transport == transports.PredictionServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) +@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) +def test_prediction_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(PredictionServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(PredictionServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", "true"), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", "false"), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) +@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_prediction_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_prediction_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_prediction_service_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_prediction_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1.services.prediction_service.transports.PredictionServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = PredictionServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_predict(transport: str = 'grpc', request_type=prediction_service.PredictRequest): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = prediction_service.PredictResponse( + deployed_model_id='deployed_model_id_value', + ) + response = client.predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.PredictRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, prediction_service.PredictResponse) + assert response.deployed_model_id == 'deployed_model_id_value' + + +def test_predict_from_dict(): + test_predict(request_type=dict) + + +def test_predict_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.predict),
+            '__call__') as call:
+        client.predict()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == prediction_service.PredictRequest()
+
+
+@pytest.mark.asyncio
+async def test_predict_async(transport: str = 'grpc_asyncio', request_type=prediction_service.PredictRequest):
+    client = PredictionServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.predict),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse(
+            deployed_model_id='deployed_model_id_value',
+        ))
+        response = await client.predict(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == prediction_service.PredictRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, prediction_service.PredictResponse)
+    assert response.deployed_model_id == 'deployed_model_id_value'
+
+
+@pytest.mark.asyncio
+async def test_predict_async_from_dict():
+    await test_predict_async(request_type=dict)
+
+
+def test_predict_field_headers():
+    client = PredictionServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = prediction_service.PredictRequest()
+
+    request.endpoint = 'endpoint/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.predict),
+            '__call__') as call:
+        call.return_value = prediction_service.PredictResponse()
+        client.predict(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'endpoint=endpoint/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_predict_field_headers_async():
+    client = PredictionServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = prediction_service.PredictRequest()
+
+    request.endpoint = 'endpoint/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.predict),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse())
+        await client.predict(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
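+    # The routing header is delivered through the gRPC metadata passed to the stub.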
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'endpoint=endpoint/value',
+    ) in kw['metadata']
+
+
+def test_predict_flattened():
+    client = PredictionServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.predict),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = prediction_service.PredictResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.predict(
+            endpoint='endpoint_value',
+            parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE),
+            instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].endpoint == 'endpoint_value'
+        assert args[0].parameters == struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)
+        assert args[0].instances == [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)]
+
+
+def test_predict_flattened_error():
+    client = PredictionServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.predict(
+            prediction_service.PredictRequest(),
+            endpoint='endpoint_value',
+            parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE),
+            instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
+        )
+
+
+@pytest.mark.asyncio
+async def test_predict_flattened_async():
+    client = PredictionServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.predict),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.predict(
+            endpoint='endpoint_value',
+            parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE),
+            instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].endpoint == 'endpoint_value'
+        assert args[0].parameters == struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)
+        assert args[0].instances == [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)]
+
+
+@pytest.mark.asyncio
+async def test_predict_flattened_error_async():
+    client = PredictionServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
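+    # The client refuses to merge the two request styles and raises instead.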
+ with pytest.raises(ValueError): + await client.predict( + prediction_service.PredictRequest(), + endpoint='endpoint_value', + parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), + instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PredictionServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PredictionServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = PredictionServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.PredictionServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.PredictionServiceGrpcTransport, + transports.PredictionServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.PredictionServiceGrpcTransport, + ) + +def test_prediction_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.PredictionServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_prediction_service_base_transport(): + # Instantiate the base transport. 
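+    # The base transport only declares the RPC surface; concrete transports override it.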
+    with mock.patch('google.cloud.aiplatform_v1.services.prediction_service.transports.PredictionServiceTransport.__init__') as Transport:
+        Transport.return_value = None
+        transport = transports.PredictionServiceTransport(
+            credentials=ga_credentials.AnonymousCredentials(),
+        )
+
+    # Every method on the transport should just blindly
+    # raise NotImplementedError.
+    methods = (
+        'predict',
+    )
+    for method in methods:
+        with pytest.raises(NotImplementedError):
+            getattr(transport, method)(request=object())
+
+
+@requires_google_auth_gte_1_25_0
+def test_prediction_service_base_transport_with_credentials_file():
+    # Instantiate the base transport with a credentials file
+    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.PredictionServiceTransport(
+            credentials_file="credentials.json",
+            quota_project_id="octopus",
+        )
+        load_creds.assert_called_once_with("credentials.json",
+            scopes=None,
+            default_scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id="octopus",
+        )
+
+
+@requires_google_auth_lt_1_25_0
+def test_prediction_service_base_transport_with_credentials_file_old_google_auth():
+    # Instantiate the base transport with a credentials file
+    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.PredictionServiceTransport(
+            credentials_file="credentials.json",
+            quota_project_id="octopus",
+        )
+        load_creds.assert_called_once_with("credentials.json",
+            scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id="octopus",
+        )
+
+
+def test_prediction_service_base_transport_with_adc():
+    # Test the default credentials are used if credentials and credentials_file are None.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.PredictionServiceTransport()
+        adc.assert_called_once()
+
+
+@requires_google_auth_gte_1_25_0
+def test_prediction_service_auth_adc():
+    # If no credentials are provided, we should use ADC credentials.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        PredictionServiceClient()
+        adc.assert_called_once_with(
+            scopes=None,
+            default_scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id=None,
+        )
+
+
+@requires_google_auth_lt_1_25_0
+def test_prediction_service_auth_adc_old_google_auth():
+    # If no credentials are provided, we should use ADC credentials.
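+    # Patch google.auth.default so the test controls what ADC resolution returns.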
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        PredictionServiceClient()
+        adc.assert_called_once_with(
+            scopes=('https://www.googleapis.com/auth/cloud-platform',),
+            quota_project_id=None,
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.PredictionServiceGrpcTransport,
+        transports.PredictionServiceGrpcAsyncIOTransport,
+    ],
+)
+@requires_google_auth_gte_1_25_0
+def test_prediction_service_transport_auth_adc(transport_class):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport_class(quota_project_id="octopus", scopes=["1", "2"])
+        adc.assert_called_once_with(
+            scopes=["1", "2"],
+            default_scopes=('https://www.googleapis.com/auth/cloud-platform',),
+            quota_project_id="octopus",
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.PredictionServiceGrpcTransport,
+        transports.PredictionServiceGrpcAsyncIOTransport,
+    ],
+)
+@requires_google_auth_lt_1_25_0
+def test_prediction_service_transport_auth_adc_old_google_auth(transport_class):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
+    with mock.patch.object(google.auth, "default", autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport_class(quota_project_id="octopus")
+        adc.assert_called_once_with(
+            scopes=('https://www.googleapis.com/auth/cloud-platform',),
+            quota_project_id="octopus",
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class,grpc_helpers",
+    [
+        (transports.PredictionServiceGrpcTransport, grpc_helpers),
+        (transports.PredictionServiceGrpcAsyncIOTransport, grpc_helpers_async)
+    ],
+)
+def test_prediction_service_transport_create_channel(transport_class, grpc_helpers):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
+    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
+        grpc_helpers, "create_channel", autospec=True
+    ) as create_channel:
+        creds = ga_credentials.AnonymousCredentials()
+        adc.return_value = (creds, None)
+        transport_class(
+            quota_project_id="octopus",
+            scopes=["1", "2"]
+        )
+
+        create_channel.assert_called_with(
+            "aiplatform.googleapis.com:443",
+            credentials=creds,
+            credentials_file=None,
+            quota_project_id="octopus",
+            default_scopes=('https://www.googleapis.com/auth/cloud-platform',),
+            scopes=["1", "2"],
+            default_host="aiplatform.googleapis.com",
+            ssl_credentials=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+
+@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport])
+def test_prediction_service_grpc_transport_client_cert_source_for_mtls(
+    transport_class
+):
+    cred = ga_credentials.AnonymousCredentials()
+
+    # Check ssl_channel_credentials is used if provided.
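+    # An explicit ssl_channel_credentials value should take precedence over any client cert source.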
+    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+        mock_ssl_channel_creds = mock.Mock()
+        transport_class(
+            host="squid.clam.whelk",
+            credentials=cred,
+            ssl_channel_credentials=mock_ssl_channel_creds
+        )
+        mock_create_channel.assert_called_once_with(
+            "squid.clam.whelk:443",
+            credentials=cred,
+            credentials_file=None,
+            scopes=None,
+            ssl_credentials=mock_ssl_channel_creds,
+            quota_project_id=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+    # is used.
+    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+            transport_class(
+                credentials=cred,
+                client_cert_source_for_mtls=client_cert_source_callback
+            )
+            expected_cert, expected_key = client_cert_source_callback()
+            mock_ssl_cred.assert_called_once_with(
+                certificate_chain=expected_cert,
+                private_key=expected_key
+            )
+
+
+def test_prediction_service_host_no_port():
+    client = PredictionServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
+    )
+    assert client.transport._host == 'aiplatform.googleapis.com:443'
+
+
+def test_prediction_service_host_with_port():
+    client = PredictionServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
+    )
+    assert client.transport._host == 'aiplatform.googleapis.com:8000'
+
+def test_prediction_service_grpc_transport_channel():
+    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.PredictionServiceGrpcTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+def test_prediction_service_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.PredictionServiceGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
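+# (Both arguments still work but emit a DeprecationWarning, which the tests below assert.)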
+@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) +def test_prediction_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) +def test_prediction_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_endpoint_path(): + project = "squid" + location = "clam" + endpoint = "whelk" + expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + actual = PredictionServiceClient.endpoint_path(project, location, endpoint) + assert expected == actual + + +def test_parse_endpoint_path(): + expected = { + "project": "octopus", + "location": "oyster", + "endpoint": "nudibranch", + } + path = PredictionServiceClient.endpoint_path(**expected) + + # Check that the path construction is reversible. 
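+    # parse_endpoint_path should invert endpoint_path for any well-formed resource name.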
+ actual = PredictionServiceClient.parse_endpoint_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "cuttlefish" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = PredictionServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "mussel", + } + path = PredictionServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = PredictionServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "winkle" + expected = "folders/{folder}".format(folder=folder, ) + actual = PredictionServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nautilus", + } + path = PredictionServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = PredictionServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "scallop" + expected = "organizations/{organization}".format(organization=organization, ) + actual = PredictionServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "abalone", + } + path = PredictionServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = PredictionServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "squid" + expected = "projects/{project}".format(project=project, ) + actual = PredictionServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "clam", + } + path = PredictionServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = PredictionServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "whelk" + location = "octopus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = PredictionServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + } + path = PredictionServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PredictionServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.PredictionServiceTransport, '_prep_wrapped_messages') as prep: + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.PredictionServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = PredictionServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py new file mode 100644 index 0000000000..09242fe3e6 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py @@ -0,0 +1,2346 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1.services.specialist_pool_service import SpecialistPoolServiceAsyncClient +from google.cloud.aiplatform_v1.services.specialist_pool_service import SpecialistPoolServiceClient +from google.cloud.aiplatform_v1.services.specialist_pool_service import pagers +from google.cloud.aiplatform_v1.services.specialist_pool_service import transports +from google.cloud.aiplatform_v1.services.specialist_pool_service.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import specialist_pool +from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool +from google.cloud.aiplatform_v1.types import specialist_pool_service +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make 
the "greater than or equal to" tests the default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert SpecialistPoolServiceClient._get_default_mtls_endpoint(None) is None + assert SpecialistPoolServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert SpecialistPoolServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert SpecialistPoolServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert SpecialistPoolServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert SpecialistPoolServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + SpecialistPoolServiceClient, + SpecialistPoolServiceAsyncClient, +]) +def test_specialist_pool_service_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +@pytest.mark.parametrize("client_class", [ + SpecialistPoolServiceClient, + SpecialistPoolServiceAsyncClient, +]) +def test_specialist_pool_service_client_service_account_always_use_jwt(client_class): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.SpecialistPoolServiceGrpcTransport, "grpc"), + (transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_specialist_pool_service_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + SpecialistPoolServiceClient, + SpecialistPoolServiceAsyncClient, +]) +def 
test_specialist_pool_service_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +def test_specialist_pool_service_client_get_transport_class(): + transport = SpecialistPoolServiceClient.get_transport_class() + available_transports = [ + transports.SpecialistPoolServiceGrpcTransport, + ] + assert transport in available_transports + + transport = SpecialistPoolServiceClient.get_transport_class("grpc") + assert transport == transports.SpecialistPoolServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc"), + (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(SpecialistPoolServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceClient)) +@mock.patch.object(SpecialistPoolServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceAsyncClient)) +def test_specialist_pool_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(SpecialistPoolServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(SpecialistPoolServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
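+    # "always" forces the mTLS endpoint even when no client certificate is configured.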
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc", "true"), + (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc", "false"), + (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(SpecialistPoolServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceClient)) +@mock.patch.object(SpecialistPoolServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_specialist_pool_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
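+    # With "true" the callback's certificate is used and the mTLS endpoint is targeted;
+    # with "false" the certificate is ignored.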
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc"), + (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_specialist_pool_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc"), + (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_specialist_pool_service_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_specialist_pool_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1.services.specialist_pool_service.transports.SpecialistPoolServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = SpecialistPoolServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_specialist_pool(transport: str = 'grpc', request_type=specialist_pool_service.CreateSpecialistPoolRequest): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.CreateSpecialistPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_specialist_pool_from_dict(): + test_create_specialist_pool(request_type=dict) + + +def test_create_specialist_pool_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
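+    # The client should fall back to sending a default (empty) request message.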
+ client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_specialist_pool), + '__call__') as call: + client.create_specialist_pool() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.CreateSpecialistPoolRequest() + + +@pytest.mark.asyncio +async def test_create_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.CreateSpecialistPoolRequest): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.CreateSpecialistPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_specialist_pool_async_from_dict(): + await test_create_specialist_pool_async(request_type=dict) + + +def test_create_specialist_pool_field_headers(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = specialist_pool_service.CreateSpecialistPoolRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_specialist_pool), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_specialist_pool_field_headers_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = specialist_pool_service.CreateSpecialistPoolRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_specialist_pool), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. 
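+        # The async path only asserts that at least one call was recorded on the mock.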
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_create_specialist_pool_flattened():
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_specialist_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.create_specialist_pool(
+            parent='parent_value',
+            specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].specialist_pool == gca_specialist_pool.SpecialistPool(name='name_value')
+
+
+def test_create_specialist_pool_flattened_error():
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_specialist_pool(
+            specialist_pool_service.CreateSpecialistPoolRequest(),
+            parent='parent_value',
+            specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'),
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_specialist_pool_flattened_async():
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_specialist_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_specialist_pool(
+            parent='parent_value',
+            specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].specialist_pool == gca_specialist_pool.SpecialistPool(name='name_value')
+
+
+@pytest.mark.asyncio
+async def test_create_specialist_pool_flattened_error_async():
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.create_specialist_pool( + specialist_pool_service.CreateSpecialistPoolRequest(), + parent='parent_value', + specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), + ) + + +def test_get_specialist_pool(transport: str = 'grpc', request_type=specialist_pool_service.GetSpecialistPoolRequest): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = specialist_pool.SpecialistPool( + name='name_value', + display_name='display_name_value', + specialist_managers_count=2662, + specialist_manager_emails=['specialist_manager_emails_value'], + pending_data_labeling_jobs=['pending_data_labeling_jobs_value'], + ) + response = client.get_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.GetSpecialistPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, specialist_pool.SpecialistPool) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.specialist_managers_count == 2662 + assert response.specialist_manager_emails == ['specialist_manager_emails_value'] + assert response.pending_data_labeling_jobs == ['pending_data_labeling_jobs_value'] + + +def test_get_specialist_pool_from_dict(): + test_get_specialist_pool(request_type=dict) + + +def test_get_specialist_pool_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_specialist_pool), + '__call__') as call: + client.get_specialist_pool() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.GetSpecialistPoolRequest() + + +@pytest.mark.asyncio +async def test_get_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.GetSpecialistPoolRequest): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. 
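+        # Wrapping the message in FakeUnaryUnaryCall makes it awaitable, like a real RPC.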
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool.SpecialistPool(
+            name='name_value',
+            display_name='display_name_value',
+            specialist_managers_count=2662,
+            specialist_manager_emails=['specialist_manager_emails_value'],
+            pending_data_labeling_jobs=['pending_data_labeling_jobs_value'],
+        ))
+        response = await client.get_specialist_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == specialist_pool_service.GetSpecialistPoolRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, specialist_pool.SpecialistPool)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.specialist_managers_count == 2662
+    assert response.specialist_manager_emails == ['specialist_manager_emails_value']
+    assert response.pending_data_labeling_jobs == ['pending_data_labeling_jobs_value']
+
+
+@pytest.mark.asyncio
+async def test_get_specialist_pool_async_from_dict():
+    await test_get_specialist_pool_async(request_type=dict)
+
+
+def test_get_specialist_pool_field_headers():
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = specialist_pool_service.GetSpecialistPoolRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_specialist_pool),
+            '__call__') as call:
+        call.return_value = specialist_pool.SpecialistPool()
+        client.get_specialist_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_specialist_pool_field_headers_async():
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = specialist_pool_service.GetSpecialistPoolRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_specialist_pool),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool.SpecialistPool())
+        await client.get_specialist_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_get_specialist_pool_flattened():
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_specialist_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = specialist_pool.SpecialistPool()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_specialist_pool(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_get_specialist_pool_flattened_error():
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_specialist_pool(
+            specialist_pool_service.GetSpecialistPoolRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_specialist_pool_flattened_async():
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_specialist_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool.SpecialistPool())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_specialist_pool(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_specialist_pool_flattened_error_async():
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_specialist_pool(
+            specialist_pool_service.GetSpecialistPoolRequest(),
+            name='name_value',
+        )
+
+
+def test_list_specialist_pools(transport: str = 'grpc', request_type=specialist_pool_service.ListSpecialistPoolsRequest):
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_specialist_pools),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = specialist_pool_service.ListSpecialistPoolsResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_specialist_pools(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == specialist_pool_service.ListSpecialistPoolsRequest()
+
+    # Establish that the response is the type that we expect.
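+    # (The client wraps the raw ListSpecialistPoolsResponse in a pager that can
+    # lazily fetch any remaining pages while exposing the first page's fields.)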
+    assert isinstance(response, pagers.ListSpecialistPoolsPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_specialist_pools_from_dict():
+    test_list_specialist_pools(request_type=dict)
+
+
+def test_list_specialist_pools_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_specialist_pools),
+            '__call__') as call:
+        client.list_specialist_pools()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == specialist_pool_service.ListSpecialistPoolsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_specialist_pools_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.ListSpecialistPoolsRequest):
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_specialist_pools),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool_service.ListSpecialistPoolsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_specialist_pools(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == specialist_pool_service.ListSpecialistPoolsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListSpecialistPoolsAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_specialist_pools_async_from_dict():
+    await test_list_specialist_pools_async(request_type=dict)
+
+
+def test_list_specialist_pools_field_headers():
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = specialist_pool_service.ListSpecialistPoolsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_specialist_pools),
+            '__call__') as call:
+        call.return_value = specialist_pool_service.ListSpecialistPoolsResponse()
+        client.list_specialist_pools(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
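+    # (GAPIC clients propagate the resource name in the x-goog-request-params
+    # metadata entry so the backend can route the request.)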
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_specialist_pools_field_headers_async():
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = specialist_pool_service.ListSpecialistPoolsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_specialist_pools),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool_service.ListSpecialistPoolsResponse())
+        await client.list_specialist_pools(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_list_specialist_pools_flattened():
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_specialist_pools),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = specialist_pool_service.ListSpecialistPoolsResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_specialist_pools(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+def test_list_specialist_pools_flattened_error():
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_specialist_pools(
+            specialist_pool_service.ListSpecialistPoolsRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_specialist_pools_flattened_async():
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_specialist_pools),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool_service.ListSpecialistPoolsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_specialist_pools(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_specialist_pools_flattened_error_async():
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_specialist_pools(
+            specialist_pool_service.ListSpecialistPoolsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_specialist_pools_pager():
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_specialist_pools),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[
+                    specialist_pool.SpecialistPool(),
+                    specialist_pool.SpecialistPool(),
+                    specialist_pool.SpecialistPool(),
+                ],
+                next_page_token='abc',
+            ),
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[],
+                next_page_token='def',
+            ),
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[
+                    specialist_pool.SpecialistPool(),
+                ],
+                next_page_token='ghi',
+            ),
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[
+                    specialist_pool.SpecialistPool(),
+                    specialist_pool.SpecialistPool(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_specialist_pools(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, specialist_pool.SpecialistPool)
+                   for i in results)
+
+def test_list_specialist_pools_pages():
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_specialist_pools),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[
+                    specialist_pool.SpecialistPool(),
+                    specialist_pool.SpecialistPool(),
+                    specialist_pool.SpecialistPool(),
+                ],
+                next_page_token='abc',
+            ),
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[],
+                next_page_token='def',
+            ),
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[
+                    specialist_pool.SpecialistPool(),
+                ],
+                next_page_token='ghi',
+            ),
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[
+                    specialist_pool.SpecialistPool(),
+                    specialist_pool.SpecialistPool(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_specialist_pools(request={}).pages)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_specialist_pools_async_pager():
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_specialist_pools),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
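+        # (Each response below is consumed by one page fetch; the trailing
+        # RuntimeError would surface if the pager ever over-fetched.)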
+        call.side_effect = (
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[
+                    specialist_pool.SpecialistPool(),
+                    specialist_pool.SpecialistPool(),
+                    specialist_pool.SpecialistPool(),
+                ],
+                next_page_token='abc',
+            ),
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[],
+                next_page_token='def',
+            ),
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[
+                    specialist_pool.SpecialistPool(),
+                ],
+                next_page_token='ghi',
+            ),
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[
+                    specialist_pool.SpecialistPool(),
+                    specialist_pool.SpecialistPool(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_specialist_pools(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, specialist_pool.SpecialistPool)
+                   for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_specialist_pools_async_pages():
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_specialist_pools),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[
+                    specialist_pool.SpecialistPool(),
+                    specialist_pool.SpecialistPool(),
+                    specialist_pool.SpecialistPool(),
+                ],
+                next_page_token='abc',
+            ),
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[],
+                next_page_token='def',
+            ),
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[
+                    specialist_pool.SpecialistPool(),
+                ],
+                next_page_token='ghi',
+            ),
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[
+                    specialist_pool.SpecialistPool(),
+                    specialist_pool.SpecialistPool(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_specialist_pools(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+def test_delete_specialist_pool(transport: str = 'grpc', request_type=specialist_pool_service.DeleteSpecialistPoolRequest):
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_specialist_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.delete_specialist_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == specialist_pool_service.DeleteSpecialistPoolRequest()
+
+    # Establish that the response is the type that we expect.
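+    # (Delete is a long-running operation, so the client wraps the returned
+    # Operation in an api_core future that resolves on completion.)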
+ assert isinstance(response, future.Future) + + +def test_delete_specialist_pool_from_dict(): + test_delete_specialist_pool(request_type=dict) + + +def test_delete_specialist_pool_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_specialist_pool), + '__call__') as call: + client.delete_specialist_pool() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.DeleteSpecialistPoolRequest() + + +@pytest.mark.asyncio +async def test_delete_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.DeleteSpecialistPoolRequest): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.DeleteSpecialistPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_specialist_pool_async_from_dict(): + await test_delete_specialist_pool_async(request_type=dict) + + +def test_delete_specialist_pool_field_headers(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = specialist_pool_service.DeleteSpecialistPoolRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_specialist_pool), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_specialist_pool_field_headers_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+    request = specialist_pool_service.DeleteSpecialistPoolRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_specialist_pool),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
+        await client.delete_specialist_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_delete_specialist_pool_flattened():
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_specialist_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_specialist_pool(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_delete_specialist_pool_flattened_error():
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_specialist_pool(
+            specialist_pool_service.DeleteSpecialistPoolRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_specialist_pool_flattened_async():
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_specialist_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_specialist_pool(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_specialist_pool_flattened_error_async():
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.delete_specialist_pool( + specialist_pool_service.DeleteSpecialistPoolRequest(), + name='name_value', + ) + + +def test_update_specialist_pool(transport: str = 'grpc', request_type=specialist_pool_service.UpdateSpecialistPoolRequest): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.update_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.UpdateSpecialistPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_specialist_pool_from_dict(): + test_update_specialist_pool(request_type=dict) + + +def test_update_specialist_pool_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_specialist_pool), + '__call__') as call: + client.update_specialist_pool() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.UpdateSpecialistPoolRequest() + + +@pytest.mark.asyncio +async def test_update_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.UpdateSpecialistPoolRequest): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.update_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.UpdateSpecialistPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_specialist_pool_async_from_dict(): + await test_update_specialist_pool_async(request_type=dict) + + +def test_update_specialist_pool_field_headers(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
+    # Set these to a non-empty value.
+    request = specialist_pool_service.UpdateSpecialistPoolRequest()
+
+    request.specialist_pool.name = 'specialist_pool.name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_specialist_pool),
+            '__call__') as call:
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        client.update_specialist_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'specialist_pool.name=specialist_pool.name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_update_specialist_pool_field_headers_async():
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = specialist_pool_service.UpdateSpecialistPoolRequest()
+
+    request.specialist_pool.name = 'specialist_pool.name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_specialist_pool),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
+        await client.update_specialist_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'specialist_pool.name=specialist_pool.name/value',
+    ) in kw['metadata']
+
+
+def test_update_specialist_pool_flattened():
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_specialist_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.update_specialist_pool(
+            specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].specialist_pool == gca_specialist_pool.SpecialistPool(name='name_value')
+        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+def test_update_specialist_pool_flattened_error():
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.update_specialist_pool(
+            specialist_pool_service.UpdateSpecialistPoolRequest(),
+            specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+
+@pytest.mark.asyncio
+async def test_update_specialist_pool_flattened_async():
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_specialist_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.update_specialist_pool(
+            specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].specialist_pool == gca_specialist_pool.SpecialistPool(name='name_value')
+        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+@pytest.mark.asyncio
+async def test_update_specialist_pool_flattened_error_async():
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.update_specialist_pool(
+            specialist_pool_service.UpdateSpecialistPoolRequest(),
+            specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+
+def test_credentials_transport_error():
+    # It is an error to provide credentials and a transport instance.
+    transport = transports.SpecialistPoolServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = SpecialistPoolServiceClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport,
+        )
+
+    # It is an error to provide a credentials file and a transport instance.
+    transport = transports.SpecialistPoolServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = SpecialistPoolServiceClient(
+            client_options={"credentials_file": "credentials.json"},
+            transport=transport,
+        )
+
+    # It is an error to provide scopes and a transport instance.
+    transport = transports.SpecialistPoolServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = SpecialistPoolServiceClient(
+            client_options={"scopes": ["1", "2"]},
+            transport=transport,
+        )
+
+
+def test_transport_instance():
+    # A client may be instantiated with a custom transport instance.
+    transport = transports.SpecialistPoolServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    client = SpecialistPoolServiceClient(transport=transport)
+    assert client.transport is transport
+
+def test_transport_get_channel():
+    # A client may be instantiated with a custom transport instance.
+ transport = transports.SpecialistPoolServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.SpecialistPoolServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.SpecialistPoolServiceGrpcTransport, + transports.SpecialistPoolServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.SpecialistPoolServiceGrpcTransport, + ) + +def test_specialist_pool_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.SpecialistPoolServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_specialist_pool_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.SpecialistPoolServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
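+    # (The base transport only declares the interface; the concrete gRPC
+    # transports override these method stubs.)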
+ methods = ( + 'create_specialist_pool', + 'get_specialist_pool', + 'list_specialist_pools', + 'delete_specialist_pool', + 'update_specialist_pool', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +@requires_google_auth_gte_1_25_0 +def test_specialist_pool_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.SpecialistPoolServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_specialist_pool_service_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.SpecialistPoolServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + ), + quota_project_id="octopus", + ) + + +def test_specialist_pool_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.SpecialistPoolServiceTransport() + adc.assert_called_once() + + +@requires_google_auth_gte_1_25_0 +def test_specialist_pool_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + SpecialistPoolServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_specialist_pool_service_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + SpecialistPoolServiceClient() + adc.assert_called_once_with( + scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.SpecialistPoolServiceGrpcTransport, + transports.SpecialistPoolServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_specialist_pool_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.SpecialistPoolServiceGrpcTransport, + transports.SpecialistPoolServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_specialist_pool_service_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") + adc.assert_called_once_with(scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.SpecialistPoolServiceGrpcTransport, grpc_helpers), + (transports.SpecialistPoolServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_specialist_pool_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.SpecialistPoolServiceGrpcTransport, transports.SpecialistPoolServiceGrpcAsyncIOTransport]) +def test_specialist_pool_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
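+    # (Explicitly provided channel credentials take precedence over a
+    # client_cert_source_for_mtls callback.)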
+    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+        mock_ssl_channel_creds = mock.Mock()
+        transport_class(
+            host="squid.clam.whelk",
+            credentials=cred,
+            ssl_channel_credentials=mock_ssl_channel_creds
+        )
+        mock_create_channel.assert_called_once_with(
+            "squid.clam.whelk:443",
+            credentials=cred,
+            credentials_file=None,
+            scopes=None,
+            ssl_credentials=mock_ssl_channel_creds,
+            quota_project_id=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+    # is used.
+    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+            transport_class(
+                credentials=cred,
+                client_cert_source_for_mtls=client_cert_source_callback
+            )
+            expected_cert, expected_key = client_cert_source_callback()
+            mock_ssl_cred.assert_called_once_with(
+                certificate_chain=expected_cert,
+                private_key=expected_key
+            )
+
+
+def test_specialist_pool_service_host_no_port():
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
+    )
+    assert client.transport._host == 'aiplatform.googleapis.com:443'
+
+
+def test_specialist_pool_service_host_with_port():
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
+    )
+    assert client.transport._host == 'aiplatform.googleapis.com:8000'
+
+def test_specialist_pool_service_grpc_transport_channel():
+    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.SpecialistPoolServiceGrpcTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+def test_specialist_pool_service_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.SpecialistPoolServiceGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.SpecialistPoolServiceGrpcTransport, transports.SpecialistPoolServiceGrpcAsyncIOTransport]) +def test_specialist_pool_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.SpecialistPoolServiceGrpcTransport, transports.SpecialistPoolServiceGrpcAsyncIOTransport]) +def test_specialist_pool_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_specialist_pool_service_grpc_lro_client(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. 
+ assert transport.operations_client is transport.operations_client + + +def test_specialist_pool_service_grpc_lro_async_client(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_specialist_pool_path(): + project = "squid" + location = "clam" + specialist_pool = "whelk" + expected = "projects/{project}/locations/{location}/specialistPools/{specialist_pool}".format(project=project, location=location, specialist_pool=specialist_pool, ) + actual = SpecialistPoolServiceClient.specialist_pool_path(project, location, specialist_pool) + assert expected == actual + + +def test_parse_specialist_pool_path(): + expected = { + "project": "octopus", + "location": "oyster", + "specialist_pool": "nudibranch", + } + path = SpecialistPoolServiceClient.specialist_pool_path(**expected) + + # Check that the path construction is reversible. + actual = SpecialistPoolServiceClient.parse_specialist_pool_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "cuttlefish" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = SpecialistPoolServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "mussel", + } + path = SpecialistPoolServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = SpecialistPoolServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "winkle" + expected = "folders/{folder}".format(folder=folder, ) + actual = SpecialistPoolServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nautilus", + } + path = SpecialistPoolServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = SpecialistPoolServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "scallop" + expected = "organizations/{organization}".format(organization=organization, ) + actual = SpecialistPoolServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "abalone", + } + path = SpecialistPoolServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = SpecialistPoolServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "squid" + expected = "projects/{project}".format(project=project, ) + actual = SpecialistPoolServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "clam", + } + path = SpecialistPoolServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = SpecialistPoolServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "whelk" + location = "octopus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = SpecialistPoolServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + } + path = SpecialistPoolServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = SpecialistPoolServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.SpecialistPoolServiceTransport, '_prep_wrapped_messages') as prep: + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.SpecialistPoolServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = SpecialistPoolServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1/tests/unit/gapic/definition_v1/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/definition_v1/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/definition_v1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/tests/unit/gapic/instance_v1/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/instance_v1/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/instance_v1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
diff --git a/owl-bot-staging/v1/tests/unit/gapic/params_v1/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/params_v1/__init__.py
new file mode 100644
index 0000000000..b54a5fcc42
--- /dev/null
+++ b/owl-bot-staging/v1/tests/unit/gapic/params_v1/__init__.py
@@ -0,0 +1,16 @@
+
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/owl-bot-staging/v1/tests/unit/gapic/prediction_v1/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/prediction_v1/__init__.py
new file mode 100644
index 0000000000..b54a5fcc42
--- /dev/null
+++ b/owl-bot-staging/v1/tests/unit/gapic/prediction_v1/__init__.py
@@ -0,0 +1,16 @@
+
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/owl-bot-staging/v1beta1/.coveragerc b/owl-bot-staging/v1beta1/.coveragerc
new file mode 100644
index 0000000000..a328166917
--- /dev/null
+++ b/owl-bot-staging/v1beta1/.coveragerc
@@ -0,0 +1,17 @@
+[run]
+branch = True
+
+[report]
+show_missing = True
+omit =
+  google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py
+exclude_lines =
+  # Re-enable the standard pragma
+  pragma: NO COVER
+  # Ignore debug-only repr
+  def __repr__
+  # Ignore pkg_resources exceptions.
+  # This is added at the module level as a safeguard for if someone
+  # generates the code and tries to run it without pip installing. This
+  # makes it virtually impossible to test properly.
+  except pkg_resources.DistributionNotFound
diff --git a/owl-bot-staging/v1beta1/MANIFEST.in b/owl-bot-staging/v1beta1/MANIFEST.in
new file mode 100644
index 0000000000..e386e05fec
--- /dev/null
+++ b/owl-bot-staging/v1beta1/MANIFEST.in
@@ -0,0 +1,2 @@
+recursive-include google/cloud/aiplatform/v1beta1/schema/trainingjob/definition *.py
+recursive-include google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1 *.py
diff --git a/owl-bot-staging/v1beta1/README.rst b/owl-bot-staging/v1beta1/README.rst
new file mode 100644
index 0000000000..c0e4d26d4d
--- /dev/null
+++ b/owl-bot-staging/v1beta1/README.rst
@@ -0,0 +1,49 @@
+Python Client for Google Cloud Aiplatform V1beta1 Schema Trainingjob Definition API
+====================================================================================
+
+Quick Start
+-----------
+
+In order to use this library, you first need to go through the following steps:
+
+1. `Select or create a Cloud Platform project.`_
+2. `Enable billing for your project.`_
+3. Enable the Google Cloud Aiplatform V1beta1 Schema Trainingjob Definition API.
+4. `Setup Authentication.`_
`Setup Authentication.`_ + +.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project +.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project +.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html + +Installation +~~~~~~~~~~~~ + +Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to +create isolated Python environments. The basic problem it addresses is one of +dependencies and versions, and indirectly permissions. + +With `virtualenv`_, it's possible to install this library without needing system +install permissions, and without clashing with the installed system +dependencies. + +.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ + + +Mac/Linux +^^^^^^^^^ + +.. code-block:: console + + python3 -m venv <your-env> + source <your-env>/bin/activate + <your-env>/bin/pip install /path/to/library + + +Windows +^^^^^^^ + +.. code-block:: console + + python3 -m venv <your-env> + <your-env>\Scripts\activate + <your-env>\Scripts\pip.exe install \path\to\library diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/dataset_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/dataset_service.rst new file mode 100644 index 0000000000..43fad30e55 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/dataset_service.rst @@ -0,0 +1,10 @@ +DatasetService +-------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.dataset_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1beta1.services.dataset_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/endpoint_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/endpoint_service.rst new file mode 100644 index 0000000000..022799a059 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/endpoint_service.rst @@ -0,0 +1,10 @@ +EndpointService +--------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.endpoint_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1beta1.services.endpoint_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_online_serving_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_online_serving_service.rst new file mode 100644 index 0000000000..21013eb751 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_online_serving_service.rst @@ -0,0 +1,6 @@ +FeaturestoreOnlineServingService +-------------------------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_service.rst new file mode 100644 index 0000000000..8d2f33039e --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_service.rst @@ -0,0 +1,10 @@ +FeaturestoreService +------------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.featurestore_service + :members: + :inherited-members: + +.. 
automodule:: google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_endpoint_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_endpoint_service.rst new file mode 100644 index 0000000000..65c910142e --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_endpoint_service.rst @@ -0,0 +1,10 @@ +IndexEndpointService +-------------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.index_endpoint_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1beta1.services.index_endpoint_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_service.rst new file mode 100644 index 0000000000..96afb58594 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_service.rst @@ -0,0 +1,10 @@ +IndexService +------------------------------ + +.. automodule:: google.cloud.aiplatform_v1beta1.services.index_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1beta1.services.index_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/job_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/job_service.rst new file mode 100644 index 0000000000..46b1268166 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/job_service.rst @@ -0,0 +1,10 @@ +JobService +---------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.job_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1beta1.services.job_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/metadata_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/metadata_service.rst new file mode 100644 index 0000000000..3c07725687 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/metadata_service.rst @@ -0,0 +1,10 @@ +MetadataService +--------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.metadata_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1beta1.services.metadata_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/migration_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/migration_service.rst new file mode 100644 index 0000000000..be164d59ba --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/migration_service.rst @@ -0,0 +1,10 @@ +MigrationService +---------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.migration_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1beta1.services.migration_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/model_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/model_service.rst new file mode 100644 index 0000000000..be68f796b0 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/model_service.rst @@ -0,0 +1,10 @@ +ModelService +------------------------------ + +.. automodule:: google.cloud.aiplatform_v1beta1.services.model_service + :members: + :inherited-members: + +.. 
automodule:: google.cloud.aiplatform_v1beta1.services.model_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/pipeline_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/pipeline_service.rst new file mode 100644 index 0000000000..1180370863 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/pipeline_service.rst @@ -0,0 +1,10 @@ +PipelineService +--------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.pipeline_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/prediction_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/prediction_service.rst new file mode 100644 index 0000000000..03c1150df0 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/prediction_service.rst @@ -0,0 +1,6 @@ +PredictionService +----------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.prediction_service + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/services.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/services.rst new file mode 100644 index 0000000000..490112c7d9 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/services.rst @@ -0,0 +1,20 @@ +Services for Google Cloud Aiplatform v1beta1 API +================================================ +.. toctree:: + :maxdepth: 2 + + dataset_service + endpoint_service + featurestore_online_serving_service + featurestore_service + index_endpoint_service + index_service + job_service + metadata_service + migration_service + model_service + pipeline_service + prediction_service + specialist_pool_service + tensorboard_service + vizier_service diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/specialist_pool_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/specialist_pool_service.rst new file mode 100644 index 0000000000..2f13b68844 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/specialist_pool_service.rst @@ -0,0 +1,10 @@ +SpecialistPoolService +--------------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.specialist_pool_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1beta1.services.specialist_pool_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/tensorboard_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/tensorboard_service.rst new file mode 100644 index 0000000000..97d94feedc --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/tensorboard_service.rst @@ -0,0 +1,10 @@ +TensorboardService +------------------------------------ + +.. automodule:: google.cloud.aiplatform_v1beta1.services.tensorboard_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/types.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/types.rst new file mode 100644 index 0000000000..770675f8ea --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Aiplatform v1beta1 API +============================================= + +.. 
automodule:: google.cloud.aiplatform_v1beta1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/vizier_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/vizier_service.rst new file mode 100644 index 0000000000..8cad590f6c --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/vizier_service.rst @@ -0,0 +1,10 @@ +VizierService +------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.vizier_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1beta1.services.vizier_service.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/conf.py b/owl-bot-staging/v1beta1/docs/conf.py new file mode 100644 index 0000000000..7a174dcfc0 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/conf.py @@ -0,0 +1,376 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# +# google-cloud-aiplatform-v1beta1-schema-trainingjob-definition documentation build configuration file +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys +import os +import shlex + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.insert(0, os.path.abspath("..")) + +__version__ = "0.1.0" + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +needs_sphinx = "1.6.3" + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.intersphinx", + "sphinx.ext.coverage", + "sphinx.ext.napoleon", + "sphinx.ext.todo", + "sphinx.ext.viewcode", +] + +# autodoc/autosummary flags +autoclass_content = "both" +autodoc_default_flags = ["members"] +autosummary_generate = True + + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# Allow markdown includes (so releases.md can include CHANGELOG.md) +# http://www.sphinx-doc.org/en/master/markdown.html +source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} + +# The suffix(es) of source filenames. +# You can specify multiple suffixes as a list of strings: +source_suffix = [".rst", ".md"] + +# The encoding of source files. +# source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = "index" + +# General information about the project. 
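+# The project, copyright, and author values below are also reused by the +# LaTeX, man page, and Texinfo title pages configured later in this file.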
+project = u"google-cloud-aiplatform-v1beta1-schema-trainingjob-definition" +copyright = u"2020, Google, LLC" +author = u"Google APIs" # TODO: autogenerate this bit + +# The version info for the project you're documenting, acts as a replacement +# for |version| and |release|, also used in various other places throughout +# the built documents. +# +# The full version, including alpha/beta/rc tags. +release = __version__ +# The short X.Y version. +version = ".".join(release.split(".")[0:2]) + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ["_build"] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = "sphinx" + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = "alabaster" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +html_theme_options = { + "description": "Google Cloud Aiplatform V1beta1 Schema Trainingjob Client Libraries for Python", + "github_user": "googleapis", + "github_repo": "google-cloud-python", + "github_banner": True, + "font_family": "'Roboto', Georgia, sans", + "head_font_family": "'Roboto', Georgia, serif", + "code_font_family": "'Roboto Mono', 'Consolas', monospace", +} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# "<project> v<release> documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. 
+# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +# html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a <link> tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' +# html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# Now only 'ja' uses this config value +# html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +# html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. +htmlhelp_basename = "google-cloud-aiplatform-v1beta1-schema-trainingjob-definition-doc" + +# -- Options for warnings ------------------------------------------------------ + + +suppress_warnings = [ + # Temporarily suppress this to avoid "more than one target found for + # cross-reference" warnings, which are intractable for us to avoid while in + # a mono-repo. + # See https://github.com/sphinx-doc/sphinx/blob + # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 + "ref.python" +] + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + # 'preamble': '', + # Latex figure (float) alignment + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. 
List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ( + master_doc, + "google-cloud-aiplatform-v1beta1-schema-trainingjob-definition.tex", + u"google-cloud-aiplatform-v1beta1-schema-trainingjob-definition Documentation", + author, + "manual", + ) +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ( + master_doc, + "google-cloud-aiplatform-v1beta1-schema-trainingjob-definition", + u"Google Cloud Aiplatform V1beta1 Schema Trainingjob Definition Documentation", + [author], + 1, + ) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + master_doc, + "google-cloud-aiplatform-v1beta1-schema-trainingjob-definition", + u"google-cloud-aiplatform-v1beta1-schema-trainingjob-definition Documentation", + author, + "google-cloud-aiplatform-v1beta1-schema-trainingjob-definition", + "GAPIC library for Google Cloud Aiplatform V1beta1 Schema Trainingjob Definition API", + "APIs", + ) +] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# texinfo_no_detailmenu = False + + +# Example configuration for intersphinx: refer to the Python standard library. 
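+# Each value below is a (target, inventory) pair; with the inventory set to +# None, Sphinx fetches the objects.inv file from the target URL so that +# cross-references to objects in those projects resolve to external links.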
+intersphinx_mapping = { + "python": ("http://python.readthedocs.org/en/latest/", None), + "gax": ("https://gax-python.readthedocs.org/en/latest/", None), + "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), + "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), + "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), + "grpc": ("https://grpc.io/grpc/python/", None), + "requests": ("http://requests.kennethreitz.org/en/stable/", None), + "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), + "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), +} + + +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = True +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = True diff --git a/owl-bot-staging/v1beta1/docs/definition_v1beta1/services.rst b/owl-bot-staging/v1beta1/docs/definition_v1beta1/services.rst new file mode 100644 index 0000000000..5f1ed5f2b7 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/definition_v1beta1/services.rst @@ -0,0 +1,4 @@ +Services for Google Cloud Aiplatform V1beta1 Schema Trainingjob Definition v1beta1 API +====================================================================================== +.. toctree:: + :maxdepth: 2 diff --git a/owl-bot-staging/v1beta1/docs/definition_v1beta1/types.rst b/owl-bot-staging/v1beta1/docs/definition_v1beta1/types.rst new file mode 100644 index 0000000000..f4fe7a5301 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/definition_v1beta1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Aiplatform V1beta1 Schema Trainingjob Definition v1beta1 API +=================================================================================== + +.. automodule:: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1beta1/docs/index.rst b/owl-bot-staging/v1beta1/docs/index.rst new file mode 100644 index 0000000000..ec6c42c2ed --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/index.rst @@ -0,0 +1,7 @@ +API Reference +------------- +.. toctree:: + :maxdepth: 2 + + definition_v1beta1/services + definition_v1beta1/types diff --git a/owl-bot-staging/v1beta1/docs/instance_v1beta1/services.rst b/owl-bot-staging/v1beta1/docs/instance_v1beta1/services.rst new file mode 100644 index 0000000000..941dbcca59 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/instance_v1beta1/services.rst @@ -0,0 +1,4 @@ +Services for Google Cloud Aiplatform V1beta1 Schema Predict Instance v1beta1 API +================================================================================ +.. toctree:: + :maxdepth: 2 diff --git a/owl-bot-staging/v1beta1/docs/instance_v1beta1/types.rst b/owl-bot-staging/v1beta1/docs/instance_v1beta1/types.rst new file mode 100644 index 0000000000..7caa088065 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/instance_v1beta1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Aiplatform V1beta1 Schema Predict Instance v1beta1 API +============================================================================= + +.. 
automodule:: google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1beta1/docs/params_v1beta1/services.rst b/owl-bot-staging/v1beta1/docs/params_v1beta1/services.rst new file mode 100644 index 0000000000..b3b897a0f4 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/params_v1beta1/services.rst @@ -0,0 +1,4 @@ +Services for Google Cloud Aiplatform V1beta1 Schema Predict Params v1beta1 API +============================================================================== +.. toctree:: + :maxdepth: 2 diff --git a/owl-bot-staging/v1beta1/docs/params_v1beta1/types.rst b/owl-bot-staging/v1beta1/docs/params_v1beta1/types.rst new file mode 100644 index 0000000000..722a1d8ba0 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/params_v1beta1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Aiplatform V1beta1 Schema Predict Params v1beta1 API +=========================================================================== + +.. automodule:: google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1beta1/docs/prediction_v1beta1/services.rst b/owl-bot-staging/v1beta1/docs/prediction_v1beta1/services.rst new file mode 100644 index 0000000000..6de5e17520 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/prediction_v1beta1/services.rst @@ -0,0 +1,4 @@ +Services for Google Cloud Aiplatform V1beta1 Schema Predict Prediction v1beta1 API +================================================================================== +.. toctree:: + :maxdepth: 2 diff --git a/owl-bot-staging/v1beta1/docs/prediction_v1beta1/types.rst b/owl-bot-staging/v1beta1/docs/prediction_v1beta1/types.rst new file mode 100644 index 0000000000..b14182d6d7 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/prediction_v1beta1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Aiplatform V1beta1 Schema Predict Prediction v1beta1 API +=============================================================================== + +.. automodule:: google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/__init__.py new file mode 100644 index 0000000000..009ab39ee1 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/__init__.py @@ -0,0 +1,875 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.cloud.aiplatform_v1beta1.services.dataset_service.client import DatasetServiceClient +from google.cloud.aiplatform_v1beta1.services.dataset_service.async_client import DatasetServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.endpoint_service.client import EndpointServiceClient +from google.cloud.aiplatform_v1beta1.services.endpoint_service.async_client import EndpointServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.client import FeaturestoreOnlineServingServiceClient +from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.async_client import FeaturestoreOnlineServingServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.featurestore_service.client import FeaturestoreServiceClient +from google.cloud.aiplatform_v1beta1.services.featurestore_service.async_client import FeaturestoreServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.index_endpoint_service.client import IndexEndpointServiceClient +from google.cloud.aiplatform_v1beta1.services.index_endpoint_service.async_client import IndexEndpointServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.index_service.client import IndexServiceClient +from google.cloud.aiplatform_v1beta1.services.index_service.async_client import IndexServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.job_service.client import JobServiceClient +from google.cloud.aiplatform_v1beta1.services.job_service.async_client import JobServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.metadata_service.client import MetadataServiceClient +from google.cloud.aiplatform_v1beta1.services.metadata_service.async_client import MetadataServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.migration_service.client import MigrationServiceClient +from google.cloud.aiplatform_v1beta1.services.migration_service.async_client import MigrationServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.model_service.client import ModelServiceClient +from google.cloud.aiplatform_v1beta1.services.model_service.async_client import ModelServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.pipeline_service.client import PipelineServiceClient +from google.cloud.aiplatform_v1beta1.services.pipeline_service.async_client import PipelineServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.prediction_service.client import PredictionServiceClient +from google.cloud.aiplatform_v1beta1.services.prediction_service.async_client import PredictionServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.specialist_pool_service.client import SpecialistPoolServiceClient +from google.cloud.aiplatform_v1beta1.services.specialist_pool_service.async_client import SpecialistPoolServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.tensorboard_service.client import TensorboardServiceClient +from google.cloud.aiplatform_v1beta1.services.tensorboard_service.async_client import TensorboardServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.vizier_service.client import VizierServiceClient +from google.cloud.aiplatform_v1beta1.services.vizier_service.async_client import VizierServiceAsyncClient + +from google.cloud.aiplatform_v1beta1.types.accelerator_type import AcceleratorType +from google.cloud.aiplatform_v1beta1.types.annotation import Annotation +from google.cloud.aiplatform_v1beta1.types.annotation_spec import AnnotationSpec +from 
google.cloud.aiplatform_v1beta1.types.artifact import Artifact +from google.cloud.aiplatform_v1beta1.types.batch_prediction_job import BatchPredictionJob +from google.cloud.aiplatform_v1beta1.types.completion_stats import CompletionStats +from google.cloud.aiplatform_v1beta1.types.context import Context +from google.cloud.aiplatform_v1beta1.types.custom_job import ContainerSpec +from google.cloud.aiplatform_v1beta1.types.custom_job import CustomJob +from google.cloud.aiplatform_v1beta1.types.custom_job import CustomJobSpec +from google.cloud.aiplatform_v1beta1.types.custom_job import PythonPackageSpec +from google.cloud.aiplatform_v1beta1.types.custom_job import Scheduling +from google.cloud.aiplatform_v1beta1.types.custom_job import WorkerPoolSpec +from google.cloud.aiplatform_v1beta1.types.data_item import DataItem +from google.cloud.aiplatform_v1beta1.types.data_labeling_job import ActiveLearningConfig +from google.cloud.aiplatform_v1beta1.types.data_labeling_job import DataLabelingJob +from google.cloud.aiplatform_v1beta1.types.data_labeling_job import SampleConfig +from google.cloud.aiplatform_v1beta1.types.data_labeling_job import TrainingConfig +from google.cloud.aiplatform_v1beta1.types.dataset import Dataset +from google.cloud.aiplatform_v1beta1.types.dataset import ExportDataConfig +from google.cloud.aiplatform_v1beta1.types.dataset import ImportDataConfig +from google.cloud.aiplatform_v1beta1.types.dataset_service import CreateDatasetOperationMetadata +from google.cloud.aiplatform_v1beta1.types.dataset_service import CreateDatasetRequest +from google.cloud.aiplatform_v1beta1.types.dataset_service import DeleteDatasetRequest +from google.cloud.aiplatform_v1beta1.types.dataset_service import ExportDataOperationMetadata +from google.cloud.aiplatform_v1beta1.types.dataset_service import ExportDataRequest +from google.cloud.aiplatform_v1beta1.types.dataset_service import ExportDataResponse +from google.cloud.aiplatform_v1beta1.types.dataset_service import GetAnnotationSpecRequest +from google.cloud.aiplatform_v1beta1.types.dataset_service import GetDatasetRequest +from google.cloud.aiplatform_v1beta1.types.dataset_service import ImportDataOperationMetadata +from google.cloud.aiplatform_v1beta1.types.dataset_service import ImportDataRequest +from google.cloud.aiplatform_v1beta1.types.dataset_service import ImportDataResponse +from google.cloud.aiplatform_v1beta1.types.dataset_service import ListAnnotationsRequest +from google.cloud.aiplatform_v1beta1.types.dataset_service import ListAnnotationsResponse +from google.cloud.aiplatform_v1beta1.types.dataset_service import ListDataItemsRequest +from google.cloud.aiplatform_v1beta1.types.dataset_service import ListDataItemsResponse +from google.cloud.aiplatform_v1beta1.types.dataset_service import ListDatasetsRequest +from google.cloud.aiplatform_v1beta1.types.dataset_service import ListDatasetsResponse +from google.cloud.aiplatform_v1beta1.types.dataset_service import UpdateDatasetRequest +from google.cloud.aiplatform_v1beta1.types.deployed_index_ref import DeployedIndexRef +from google.cloud.aiplatform_v1beta1.types.deployed_model_ref import DeployedModelRef +from google.cloud.aiplatform_v1beta1.types.encryption_spec import EncryptionSpec +from google.cloud.aiplatform_v1beta1.types.endpoint import DeployedModel +from google.cloud.aiplatform_v1beta1.types.endpoint import Endpoint +from google.cloud.aiplatform_v1beta1.types.endpoint_service import CreateEndpointOperationMetadata +from google.cloud.aiplatform_v1beta1.types.endpoint_service 
import CreateEndpointRequest +from google.cloud.aiplatform_v1beta1.types.endpoint_service import DeleteEndpointRequest +from google.cloud.aiplatform_v1beta1.types.endpoint_service import DeployModelOperationMetadata +from google.cloud.aiplatform_v1beta1.types.endpoint_service import DeployModelRequest +from google.cloud.aiplatform_v1beta1.types.endpoint_service import DeployModelResponse +from google.cloud.aiplatform_v1beta1.types.endpoint_service import GetEndpointRequest +from google.cloud.aiplatform_v1beta1.types.endpoint_service import ListEndpointsRequest +from google.cloud.aiplatform_v1beta1.types.endpoint_service import ListEndpointsResponse +from google.cloud.aiplatform_v1beta1.types.endpoint_service import UndeployModelOperationMetadata +from google.cloud.aiplatform_v1beta1.types.endpoint_service import UndeployModelRequest +from google.cloud.aiplatform_v1beta1.types.endpoint_service import UndeployModelResponse +from google.cloud.aiplatform_v1beta1.types.endpoint_service import UpdateEndpointRequest +from google.cloud.aiplatform_v1beta1.types.entity_type import EntityType +from google.cloud.aiplatform_v1beta1.types.env_var import EnvVar +from google.cloud.aiplatform_v1beta1.types.event import Event +from google.cloud.aiplatform_v1beta1.types.execution import Execution +from google.cloud.aiplatform_v1beta1.types.explanation import Attribution +from google.cloud.aiplatform_v1beta1.types.explanation import Explanation +from google.cloud.aiplatform_v1beta1.types.explanation import ExplanationMetadataOverride +from google.cloud.aiplatform_v1beta1.types.explanation import ExplanationParameters +from google.cloud.aiplatform_v1beta1.types.explanation import ExplanationSpec +from google.cloud.aiplatform_v1beta1.types.explanation import ExplanationSpecOverride +from google.cloud.aiplatform_v1beta1.types.explanation import FeatureNoiseSigma +from google.cloud.aiplatform_v1beta1.types.explanation import IntegratedGradientsAttribution +from google.cloud.aiplatform_v1beta1.types.explanation import ModelExplanation +from google.cloud.aiplatform_v1beta1.types.explanation import SampledShapleyAttribution +from google.cloud.aiplatform_v1beta1.types.explanation import SmoothGradConfig +from google.cloud.aiplatform_v1beta1.types.explanation import XraiAttribution +from google.cloud.aiplatform_v1beta1.types.explanation_metadata import ExplanationMetadata +from google.cloud.aiplatform_v1beta1.types.feature import Feature +from google.cloud.aiplatform_v1beta1.types.feature_monitoring_stats import FeatureStatsAnomaly +from google.cloud.aiplatform_v1beta1.types.feature_selector import FeatureSelector +from google.cloud.aiplatform_v1beta1.types.feature_selector import IdMatcher +from google.cloud.aiplatform_v1beta1.types.featurestore import Featurestore +from google.cloud.aiplatform_v1beta1.types.featurestore_monitoring import FeaturestoreMonitoringConfig +from google.cloud.aiplatform_v1beta1.types.featurestore_online_service import FeatureValue +from google.cloud.aiplatform_v1beta1.types.featurestore_online_service import FeatureValueList +from google.cloud.aiplatform_v1beta1.types.featurestore_online_service import ReadFeatureValuesRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_online_service import ReadFeatureValuesResponse +from google.cloud.aiplatform_v1beta1.types.featurestore_online_service import StreamingReadFeatureValuesRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import BatchCreateFeaturesOperationMetadata +from 
google.cloud.aiplatform_v1beta1.types.featurestore_service import BatchCreateFeaturesRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import BatchCreateFeaturesResponse +from google.cloud.aiplatform_v1beta1.types.featurestore_service import BatchReadFeatureValuesOperationMetadata +from google.cloud.aiplatform_v1beta1.types.featurestore_service import BatchReadFeatureValuesRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import BatchReadFeatureValuesResponse +from google.cloud.aiplatform_v1beta1.types.featurestore_service import CreateEntityTypeOperationMetadata +from google.cloud.aiplatform_v1beta1.types.featurestore_service import CreateEntityTypeRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import CreateFeatureOperationMetadata +from google.cloud.aiplatform_v1beta1.types.featurestore_service import CreateFeatureRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import CreateFeaturestoreOperationMetadata +from google.cloud.aiplatform_v1beta1.types.featurestore_service import CreateFeaturestoreRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import DeleteEntityTypeRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import DeleteFeatureRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import DeleteFeaturestoreRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import DestinationFeatureSetting +from google.cloud.aiplatform_v1beta1.types.featurestore_service import ExportFeatureValuesOperationMetadata +from google.cloud.aiplatform_v1beta1.types.featurestore_service import ExportFeatureValuesRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import ExportFeatureValuesResponse +from google.cloud.aiplatform_v1beta1.types.featurestore_service import FeatureValueDestination +from google.cloud.aiplatform_v1beta1.types.featurestore_service import GetEntityTypeRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import GetFeatureRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import GetFeaturestoreRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import ImportFeatureValuesOperationMetadata +from google.cloud.aiplatform_v1beta1.types.featurestore_service import ImportFeatureValuesRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import ImportFeatureValuesResponse +from google.cloud.aiplatform_v1beta1.types.featurestore_service import ListEntityTypesRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import ListEntityTypesResponse +from google.cloud.aiplatform_v1beta1.types.featurestore_service import ListFeaturesRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import ListFeaturesResponse +from google.cloud.aiplatform_v1beta1.types.featurestore_service import ListFeaturestoresRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import ListFeaturestoresResponse +from google.cloud.aiplatform_v1beta1.types.featurestore_service import SearchFeaturesRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import SearchFeaturesResponse +from google.cloud.aiplatform_v1beta1.types.featurestore_service import UpdateEntityTypeRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import UpdateFeatureRequest +from google.cloud.aiplatform_v1beta1.types.featurestore_service import UpdateFeaturestoreOperationMetadata +from 
google.cloud.aiplatform_v1beta1.types.featurestore_service import UpdateFeaturestoreRequest +from google.cloud.aiplatform_v1beta1.types.hyperparameter_tuning_job import HyperparameterTuningJob +from google.cloud.aiplatform_v1beta1.types.index import Index +from google.cloud.aiplatform_v1beta1.types.index_endpoint import DeployedIndex +from google.cloud.aiplatform_v1beta1.types.index_endpoint import DeployedIndexAuthConfig +from google.cloud.aiplatform_v1beta1.types.index_endpoint import IndexEndpoint +from google.cloud.aiplatform_v1beta1.types.index_endpoint import IndexPrivateEndpoints +from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import CreateIndexEndpointOperationMetadata +from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import CreateIndexEndpointRequest +from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import DeleteIndexEndpointRequest +from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import DeployIndexOperationMetadata +from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import DeployIndexRequest +from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import DeployIndexResponse +from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import GetIndexEndpointRequest +from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import ListIndexEndpointsRequest +from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import ListIndexEndpointsResponse +from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import UndeployIndexOperationMetadata +from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import UndeployIndexRequest +from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import UndeployIndexResponse +from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import UpdateIndexEndpointRequest +from google.cloud.aiplatform_v1beta1.types.index_service import CreateIndexOperationMetadata +from google.cloud.aiplatform_v1beta1.types.index_service import CreateIndexRequest +from google.cloud.aiplatform_v1beta1.types.index_service import DeleteIndexRequest +from google.cloud.aiplatform_v1beta1.types.index_service import GetIndexRequest +from google.cloud.aiplatform_v1beta1.types.index_service import ListIndexesRequest +from google.cloud.aiplatform_v1beta1.types.index_service import ListIndexesResponse +from google.cloud.aiplatform_v1beta1.types.index_service import NearestNeighborSearchOperationMetadata +from google.cloud.aiplatform_v1beta1.types.index_service import UpdateIndexOperationMetadata +from google.cloud.aiplatform_v1beta1.types.index_service import UpdateIndexRequest +from google.cloud.aiplatform_v1beta1.types.io import AvroSource +from google.cloud.aiplatform_v1beta1.types.io import BigQueryDestination +from google.cloud.aiplatform_v1beta1.types.io import BigQuerySource +from google.cloud.aiplatform_v1beta1.types.io import ContainerRegistryDestination +from google.cloud.aiplatform_v1beta1.types.io import CsvDestination +from google.cloud.aiplatform_v1beta1.types.io import CsvSource +from google.cloud.aiplatform_v1beta1.types.io import GcsDestination +from google.cloud.aiplatform_v1beta1.types.io import GcsSource +from google.cloud.aiplatform_v1beta1.types.io import TFRecordDestination +from google.cloud.aiplatform_v1beta1.types.job_service import CancelBatchPredictionJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import CancelCustomJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service 
import CancelDataLabelingJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import CancelHyperparameterTuningJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import CreateBatchPredictionJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import CreateCustomJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import CreateDataLabelingJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import CreateHyperparameterTuningJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import CreateModelDeploymentMonitoringJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import DeleteBatchPredictionJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import DeleteCustomJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import DeleteDataLabelingJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import DeleteHyperparameterTuningJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import DeleteModelDeploymentMonitoringJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import GetBatchPredictionJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import GetCustomJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import GetDataLabelingJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import GetHyperparameterTuningJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import GetModelDeploymentMonitoringJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import ListBatchPredictionJobsRequest +from google.cloud.aiplatform_v1beta1.types.job_service import ListBatchPredictionJobsResponse +from google.cloud.aiplatform_v1beta1.types.job_service import ListCustomJobsRequest +from google.cloud.aiplatform_v1beta1.types.job_service import ListCustomJobsResponse +from google.cloud.aiplatform_v1beta1.types.job_service import ListDataLabelingJobsRequest +from google.cloud.aiplatform_v1beta1.types.job_service import ListDataLabelingJobsResponse +from google.cloud.aiplatform_v1beta1.types.job_service import ListHyperparameterTuningJobsRequest +from google.cloud.aiplatform_v1beta1.types.job_service import ListHyperparameterTuningJobsResponse +from google.cloud.aiplatform_v1beta1.types.job_service import ListModelDeploymentMonitoringJobsRequest +from google.cloud.aiplatform_v1beta1.types.job_service import ListModelDeploymentMonitoringJobsResponse +from google.cloud.aiplatform_v1beta1.types.job_service import PauseModelDeploymentMonitoringJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import ResumeModelDeploymentMonitoringJobRequest +from google.cloud.aiplatform_v1beta1.types.job_service import SearchModelDeploymentMonitoringStatsAnomaliesRequest +from google.cloud.aiplatform_v1beta1.types.job_service import SearchModelDeploymentMonitoringStatsAnomaliesResponse +from google.cloud.aiplatform_v1beta1.types.job_service import UpdateModelDeploymentMonitoringJobOperationMetadata +from google.cloud.aiplatform_v1beta1.types.job_service import UpdateModelDeploymentMonitoringJobRequest +from google.cloud.aiplatform_v1beta1.types.job_state import JobState +from google.cloud.aiplatform_v1beta1.types.lineage_subgraph import LineageSubgraph +from google.cloud.aiplatform_v1beta1.types.machine_resources import AutomaticResources +from google.cloud.aiplatform_v1beta1.types.machine_resources import AutoscalingMetricSpec +from 
google.cloud.aiplatform_v1beta1.types.machine_resources import BatchDedicatedResources +from google.cloud.aiplatform_v1beta1.types.machine_resources import DedicatedResources +from google.cloud.aiplatform_v1beta1.types.machine_resources import DiskSpec +from google.cloud.aiplatform_v1beta1.types.machine_resources import MachineSpec +from google.cloud.aiplatform_v1beta1.types.machine_resources import ResourcesConsumed +from google.cloud.aiplatform_v1beta1.types.manual_batch_tuning_parameters import ManualBatchTuningParameters +from google.cloud.aiplatform_v1beta1.types.metadata_schema import MetadataSchema +from google.cloud.aiplatform_v1beta1.types.metadata_service import AddContextArtifactsAndExecutionsRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import AddContextArtifactsAndExecutionsResponse +from google.cloud.aiplatform_v1beta1.types.metadata_service import AddContextChildrenRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import AddContextChildrenResponse +from google.cloud.aiplatform_v1beta1.types.metadata_service import AddExecutionEventsRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import AddExecutionEventsResponse +from google.cloud.aiplatform_v1beta1.types.metadata_service import CreateArtifactRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import CreateContextRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import CreateExecutionRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import CreateMetadataSchemaRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import CreateMetadataStoreOperationMetadata +from google.cloud.aiplatform_v1beta1.types.metadata_service import CreateMetadataStoreRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import DeleteContextRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import DeleteMetadataStoreOperationMetadata +from google.cloud.aiplatform_v1beta1.types.metadata_service import DeleteMetadataStoreRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import GetArtifactRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import GetContextRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import GetExecutionRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import GetMetadataSchemaRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import GetMetadataStoreRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import ListArtifactsRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import ListArtifactsResponse +from google.cloud.aiplatform_v1beta1.types.metadata_service import ListContextsRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import ListContextsResponse +from google.cloud.aiplatform_v1beta1.types.metadata_service import ListExecutionsRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import ListExecutionsResponse +from google.cloud.aiplatform_v1beta1.types.metadata_service import ListMetadataSchemasRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import ListMetadataSchemasResponse +from google.cloud.aiplatform_v1beta1.types.metadata_service import ListMetadataStoresRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import ListMetadataStoresResponse +from google.cloud.aiplatform_v1beta1.types.metadata_service import QueryArtifactLineageSubgraphRequest +from 
google.cloud.aiplatform_v1beta1.types.metadata_service import QueryContextLineageSubgraphRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import QueryExecutionInputsAndOutputsRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import UpdateArtifactRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import UpdateContextRequest +from google.cloud.aiplatform_v1beta1.types.metadata_service import UpdateExecutionRequest +from google.cloud.aiplatform_v1beta1.types.metadata_store import MetadataStore +from google.cloud.aiplatform_v1beta1.types.migratable_resource import MigratableResource +from google.cloud.aiplatform_v1beta1.types.migration_service import BatchMigrateResourcesOperationMetadata +from google.cloud.aiplatform_v1beta1.types.migration_service import BatchMigrateResourcesRequest +from google.cloud.aiplatform_v1beta1.types.migration_service import BatchMigrateResourcesResponse +from google.cloud.aiplatform_v1beta1.types.migration_service import MigrateResourceRequest +from google.cloud.aiplatform_v1beta1.types.migration_service import MigrateResourceResponse +from google.cloud.aiplatform_v1beta1.types.migration_service import SearchMigratableResourcesRequest +from google.cloud.aiplatform_v1beta1.types.migration_service import SearchMigratableResourcesResponse +from google.cloud.aiplatform_v1beta1.types.model import Model +from google.cloud.aiplatform_v1beta1.types.model import ModelContainerSpec +from google.cloud.aiplatform_v1beta1.types.model import Port +from google.cloud.aiplatform_v1beta1.types.model import PredictSchemata +from google.cloud.aiplatform_v1beta1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringBigQueryTable +from google.cloud.aiplatform_v1beta1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringJob +from google.cloud.aiplatform_v1beta1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringObjectiveConfig +from google.cloud.aiplatform_v1beta1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringScheduleConfig +from google.cloud.aiplatform_v1beta1.types.model_deployment_monitoring_job import ModelMonitoringStatsAnomalies +from google.cloud.aiplatform_v1beta1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringObjectiveType +from google.cloud.aiplatform_v1beta1.types.model_evaluation import ModelEvaluation +from google.cloud.aiplatform_v1beta1.types.model_evaluation_slice import ModelEvaluationSlice +from google.cloud.aiplatform_v1beta1.types.model_monitoring import ModelMonitoringAlertConfig +from google.cloud.aiplatform_v1beta1.types.model_monitoring import ModelMonitoringObjectiveConfig +from google.cloud.aiplatform_v1beta1.types.model_monitoring import SamplingStrategy +from google.cloud.aiplatform_v1beta1.types.model_monitoring import ThresholdConfig +from google.cloud.aiplatform_v1beta1.types.model_service import DeleteModelRequest +from google.cloud.aiplatform_v1beta1.types.model_service import ExportModelOperationMetadata +from google.cloud.aiplatform_v1beta1.types.model_service import ExportModelRequest +from google.cloud.aiplatform_v1beta1.types.model_service import ExportModelResponse +from google.cloud.aiplatform_v1beta1.types.model_service import GetModelEvaluationRequest +from google.cloud.aiplatform_v1beta1.types.model_service import GetModelEvaluationSliceRequest +from google.cloud.aiplatform_v1beta1.types.model_service import GetModelRequest +from google.cloud.aiplatform_v1beta1.types.model_service import 
ListModelEvaluationSlicesRequest +from google.cloud.aiplatform_v1beta1.types.model_service import ListModelEvaluationSlicesResponse +from google.cloud.aiplatform_v1beta1.types.model_service import ListModelEvaluationsRequest +from google.cloud.aiplatform_v1beta1.types.model_service import ListModelEvaluationsResponse +from google.cloud.aiplatform_v1beta1.types.model_service import ListModelsRequest +from google.cloud.aiplatform_v1beta1.types.model_service import ListModelsResponse +from google.cloud.aiplatform_v1beta1.types.model_service import UpdateModelRequest +from google.cloud.aiplatform_v1beta1.types.model_service import UploadModelOperationMetadata +from google.cloud.aiplatform_v1beta1.types.model_service import UploadModelRequest +from google.cloud.aiplatform_v1beta1.types.model_service import UploadModelResponse +from google.cloud.aiplatform_v1beta1.types.operation import DeleteOperationMetadata +from google.cloud.aiplatform_v1beta1.types.operation import GenericOperationMetadata +from google.cloud.aiplatform_v1beta1.types.pipeline_job import PipelineJob +from google.cloud.aiplatform_v1beta1.types.pipeline_job import PipelineJobDetail +from google.cloud.aiplatform_v1beta1.types.pipeline_job import PipelineTaskDetail +from google.cloud.aiplatform_v1beta1.types.pipeline_job import PipelineTaskExecutorDetail +from google.cloud.aiplatform_v1beta1.types.pipeline_service import CancelPipelineJobRequest +from google.cloud.aiplatform_v1beta1.types.pipeline_service import CancelTrainingPipelineRequest +from google.cloud.aiplatform_v1beta1.types.pipeline_service import CreatePipelineJobRequest +from google.cloud.aiplatform_v1beta1.types.pipeline_service import CreateTrainingPipelineRequest +from google.cloud.aiplatform_v1beta1.types.pipeline_service import DeletePipelineJobRequest +from google.cloud.aiplatform_v1beta1.types.pipeline_service import DeleteTrainingPipelineRequest +from google.cloud.aiplatform_v1beta1.types.pipeline_service import GetPipelineJobRequest +from google.cloud.aiplatform_v1beta1.types.pipeline_service import GetTrainingPipelineRequest +from google.cloud.aiplatform_v1beta1.types.pipeline_service import ListPipelineJobsRequest +from google.cloud.aiplatform_v1beta1.types.pipeline_service import ListPipelineJobsResponse +from google.cloud.aiplatform_v1beta1.types.pipeline_service import ListTrainingPipelinesRequest +from google.cloud.aiplatform_v1beta1.types.pipeline_service import ListTrainingPipelinesResponse +from google.cloud.aiplatform_v1beta1.types.pipeline_state import PipelineState +from google.cloud.aiplatform_v1beta1.types.prediction_service import ExplainRequest +from google.cloud.aiplatform_v1beta1.types.prediction_service import ExplainResponse +from google.cloud.aiplatform_v1beta1.types.prediction_service import PredictRequest +from google.cloud.aiplatform_v1beta1.types.prediction_service import PredictResponse +from google.cloud.aiplatform_v1beta1.types.specialist_pool import SpecialistPool +from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import CreateSpecialistPoolOperationMetadata +from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import CreateSpecialistPoolRequest +from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import DeleteSpecialistPoolRequest +from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import GetSpecialistPoolRequest +from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import ListSpecialistPoolsRequest +from 
google.cloud.aiplatform_v1beta1.types.specialist_pool_service import ListSpecialistPoolsResponse +from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import UpdateSpecialistPoolOperationMetadata +from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import UpdateSpecialistPoolRequest +from google.cloud.aiplatform_v1beta1.types.study import Measurement +from google.cloud.aiplatform_v1beta1.types.study import Study +from google.cloud.aiplatform_v1beta1.types.study import StudySpec +from google.cloud.aiplatform_v1beta1.types.study import Trial +from google.cloud.aiplatform_v1beta1.types.tensorboard import Tensorboard +from google.cloud.aiplatform_v1beta1.types.tensorboard_data import Scalar +from google.cloud.aiplatform_v1beta1.types.tensorboard_data import TensorboardBlob +from google.cloud.aiplatform_v1beta1.types.tensorboard_data import TensorboardBlobSequence +from google.cloud.aiplatform_v1beta1.types.tensorboard_data import TensorboardTensor +from google.cloud.aiplatform_v1beta1.types.tensorboard_data import TimeSeriesData +from google.cloud.aiplatform_v1beta1.types.tensorboard_data import TimeSeriesDataPoint +from google.cloud.aiplatform_v1beta1.types.tensorboard_experiment import TensorboardExperiment +from google.cloud.aiplatform_v1beta1.types.tensorboard_run import TensorboardRun +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import CreateTensorboardExperimentRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import CreateTensorboardOperationMetadata +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import CreateTensorboardRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import CreateTensorboardRunRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import CreateTensorboardTimeSeriesRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import DeleteTensorboardExperimentRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import DeleteTensorboardRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import DeleteTensorboardRunRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import DeleteTensorboardTimeSeriesRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ExportTensorboardTimeSeriesDataRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ExportTensorboardTimeSeriesDataResponse +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import GetTensorboardExperimentRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import GetTensorboardRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import GetTensorboardRunRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import GetTensorboardTimeSeriesRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardExperimentsRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardExperimentsResponse +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardRunsRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardRunsResponse +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardsRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardsResponse +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import 
ListTensorboardTimeSeriesRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardTimeSeriesResponse +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ReadTensorboardBlobDataRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ReadTensorboardBlobDataResponse +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ReadTensorboardTimeSeriesDataRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ReadTensorboardTimeSeriesDataResponse +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import UpdateTensorboardExperimentRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import UpdateTensorboardOperationMetadata +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import UpdateTensorboardRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import UpdateTensorboardRunRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import UpdateTensorboardTimeSeriesRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import WriteTensorboardRunDataRequest +from google.cloud.aiplatform_v1beta1.types.tensorboard_service import WriteTensorboardRunDataResponse +from google.cloud.aiplatform_v1beta1.types.tensorboard_time_series import TensorboardTimeSeries +from google.cloud.aiplatform_v1beta1.types.training_pipeline import FilterSplit +from google.cloud.aiplatform_v1beta1.types.training_pipeline import FractionSplit +from google.cloud.aiplatform_v1beta1.types.training_pipeline import InputDataConfig +from google.cloud.aiplatform_v1beta1.types.training_pipeline import PredefinedSplit +from google.cloud.aiplatform_v1beta1.types.training_pipeline import TimestampSplit +from google.cloud.aiplatform_v1beta1.types.training_pipeline import TrainingPipeline +from google.cloud.aiplatform_v1beta1.types.types import BoolArray +from google.cloud.aiplatform_v1beta1.types.types import DoubleArray +from google.cloud.aiplatform_v1beta1.types.types import Int64Array +from google.cloud.aiplatform_v1beta1.types.types import StringArray +from google.cloud.aiplatform_v1beta1.types.user_action_reference import UserActionReference +from google.cloud.aiplatform_v1beta1.types.value import Value +from google.cloud.aiplatform_v1beta1.types.vizier_service import AddTrialMeasurementRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import CheckTrialEarlyStoppingStateMetatdata +from google.cloud.aiplatform_v1beta1.types.vizier_service import CheckTrialEarlyStoppingStateRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import CheckTrialEarlyStoppingStateResponse +from google.cloud.aiplatform_v1beta1.types.vizier_service import CompleteTrialRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import CreateStudyRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import CreateTrialRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import DeleteStudyRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import DeleteTrialRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import GetStudyRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import GetTrialRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import ListOptimalTrialsRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import ListOptimalTrialsResponse +from google.cloud.aiplatform_v1beta1.types.vizier_service import 
ListStudiesRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import ListStudiesResponse +from google.cloud.aiplatform_v1beta1.types.vizier_service import ListTrialsRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import ListTrialsResponse +from google.cloud.aiplatform_v1beta1.types.vizier_service import LookupStudyRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import StopTrialRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import SuggestTrialsMetadata +from google.cloud.aiplatform_v1beta1.types.vizier_service import SuggestTrialsRequest +from google.cloud.aiplatform_v1beta1.types.vizier_service import SuggestTrialsResponse + +__all__ = ('DatasetServiceClient', + 'DatasetServiceAsyncClient', + 'EndpointServiceClient', + 'EndpointServiceAsyncClient', + 'FeaturestoreOnlineServingServiceClient', + 'FeaturestoreOnlineServingServiceAsyncClient', + 'FeaturestoreServiceClient', + 'FeaturestoreServiceAsyncClient', + 'IndexEndpointServiceClient', + 'IndexEndpointServiceAsyncClient', + 'IndexServiceClient', + 'IndexServiceAsyncClient', + 'JobServiceClient', + 'JobServiceAsyncClient', + 'MetadataServiceClient', + 'MetadataServiceAsyncClient', + 'MigrationServiceClient', + 'MigrationServiceAsyncClient', + 'ModelServiceClient', + 'ModelServiceAsyncClient', + 'PipelineServiceClient', + 'PipelineServiceAsyncClient', + 'PredictionServiceClient', + 'PredictionServiceAsyncClient', + 'SpecialistPoolServiceClient', + 'SpecialistPoolServiceAsyncClient', + 'TensorboardServiceClient', + 'TensorboardServiceAsyncClient', + 'VizierServiceClient', + 'VizierServiceAsyncClient', + 'AcceleratorType', + 'Annotation', + 'AnnotationSpec', + 'Artifact', + 'BatchPredictionJob', + 'CompletionStats', + 'Context', + 'ContainerSpec', + 'CustomJob', + 'CustomJobSpec', + 'PythonPackageSpec', + 'Scheduling', + 'WorkerPoolSpec', + 'DataItem', + 'ActiveLearningConfig', + 'DataLabelingJob', + 'SampleConfig', + 'TrainingConfig', + 'Dataset', + 'ExportDataConfig', + 'ImportDataConfig', + 'CreateDatasetOperationMetadata', + 'CreateDatasetRequest', + 'DeleteDatasetRequest', + 'ExportDataOperationMetadata', + 'ExportDataRequest', + 'ExportDataResponse', + 'GetAnnotationSpecRequest', + 'GetDatasetRequest', + 'ImportDataOperationMetadata', + 'ImportDataRequest', + 'ImportDataResponse', + 'ListAnnotationsRequest', + 'ListAnnotationsResponse', + 'ListDataItemsRequest', + 'ListDataItemsResponse', + 'ListDatasetsRequest', + 'ListDatasetsResponse', + 'UpdateDatasetRequest', + 'DeployedIndexRef', + 'DeployedModelRef', + 'EncryptionSpec', + 'DeployedModel', + 'Endpoint', + 'CreateEndpointOperationMetadata', + 'CreateEndpointRequest', + 'DeleteEndpointRequest', + 'DeployModelOperationMetadata', + 'DeployModelRequest', + 'DeployModelResponse', + 'GetEndpointRequest', + 'ListEndpointsRequest', + 'ListEndpointsResponse', + 'UndeployModelOperationMetadata', + 'UndeployModelRequest', + 'UndeployModelResponse', + 'UpdateEndpointRequest', + 'EntityType', + 'EnvVar', + 'Event', + 'Execution', + 'Attribution', + 'Explanation', + 'ExplanationMetadataOverride', + 'ExplanationParameters', + 'ExplanationSpec', + 'ExplanationSpecOverride', + 'FeatureNoiseSigma', + 'IntegratedGradientsAttribution', + 'ModelExplanation', + 'SampledShapleyAttribution', + 'SmoothGradConfig', + 'XraiAttribution', + 'ExplanationMetadata', + 'Feature', + 'FeatureStatsAnomaly', + 'FeatureSelector', + 'IdMatcher', + 'Featurestore', + 'FeaturestoreMonitoringConfig', + 'FeatureValue', + 'FeatureValueList', + 
'ReadFeatureValuesRequest', + 'ReadFeatureValuesResponse', + 'StreamingReadFeatureValuesRequest', + 'BatchCreateFeaturesOperationMetadata', + 'BatchCreateFeaturesRequest', + 'BatchCreateFeaturesResponse', + 'BatchReadFeatureValuesOperationMetadata', + 'BatchReadFeatureValuesRequest', + 'BatchReadFeatureValuesResponse', + 'CreateEntityTypeOperationMetadata', + 'CreateEntityTypeRequest', + 'CreateFeatureOperationMetadata', + 'CreateFeatureRequest', + 'CreateFeaturestoreOperationMetadata', + 'CreateFeaturestoreRequest', + 'DeleteEntityTypeRequest', + 'DeleteFeatureRequest', + 'DeleteFeaturestoreRequest', + 'DestinationFeatureSetting', + 'ExportFeatureValuesOperationMetadata', + 'ExportFeatureValuesRequest', + 'ExportFeatureValuesResponse', + 'FeatureValueDestination', + 'GetEntityTypeRequest', + 'GetFeatureRequest', + 'GetFeaturestoreRequest', + 'ImportFeatureValuesOperationMetadata', + 'ImportFeatureValuesRequest', + 'ImportFeatureValuesResponse', + 'ListEntityTypesRequest', + 'ListEntityTypesResponse', + 'ListFeaturesRequest', + 'ListFeaturesResponse', + 'ListFeaturestoresRequest', + 'ListFeaturestoresResponse', + 'SearchFeaturesRequest', + 'SearchFeaturesResponse', + 'UpdateEntityTypeRequest', + 'UpdateFeatureRequest', + 'UpdateFeaturestoreOperationMetadata', + 'UpdateFeaturestoreRequest', + 'HyperparameterTuningJob', + 'Index', + 'DeployedIndex', + 'DeployedIndexAuthConfig', + 'IndexEndpoint', + 'IndexPrivateEndpoints', + 'CreateIndexEndpointOperationMetadata', + 'CreateIndexEndpointRequest', + 'DeleteIndexEndpointRequest', + 'DeployIndexOperationMetadata', + 'DeployIndexRequest', + 'DeployIndexResponse', + 'GetIndexEndpointRequest', + 'ListIndexEndpointsRequest', + 'ListIndexEndpointsResponse', + 'UndeployIndexOperationMetadata', + 'UndeployIndexRequest', + 'UndeployIndexResponse', + 'UpdateIndexEndpointRequest', + 'CreateIndexOperationMetadata', + 'CreateIndexRequest', + 'DeleteIndexRequest', + 'GetIndexRequest', + 'ListIndexesRequest', + 'ListIndexesResponse', + 'NearestNeighborSearchOperationMetadata', + 'UpdateIndexOperationMetadata', + 'UpdateIndexRequest', + 'AvroSource', + 'BigQueryDestination', + 'BigQuerySource', + 'ContainerRegistryDestination', + 'CsvDestination', + 'CsvSource', + 'GcsDestination', + 'GcsSource', + 'TFRecordDestination', + 'CancelBatchPredictionJobRequest', + 'CancelCustomJobRequest', + 'CancelDataLabelingJobRequest', + 'CancelHyperparameterTuningJobRequest', + 'CreateBatchPredictionJobRequest', + 'CreateCustomJobRequest', + 'CreateDataLabelingJobRequest', + 'CreateHyperparameterTuningJobRequest', + 'CreateModelDeploymentMonitoringJobRequest', + 'DeleteBatchPredictionJobRequest', + 'DeleteCustomJobRequest', + 'DeleteDataLabelingJobRequest', + 'DeleteHyperparameterTuningJobRequest', + 'DeleteModelDeploymentMonitoringJobRequest', + 'GetBatchPredictionJobRequest', + 'GetCustomJobRequest', + 'GetDataLabelingJobRequest', + 'GetHyperparameterTuningJobRequest', + 'GetModelDeploymentMonitoringJobRequest', + 'ListBatchPredictionJobsRequest', + 'ListBatchPredictionJobsResponse', + 'ListCustomJobsRequest', + 'ListCustomJobsResponse', + 'ListDataLabelingJobsRequest', + 'ListDataLabelingJobsResponse', + 'ListHyperparameterTuningJobsRequest', + 'ListHyperparameterTuningJobsResponse', + 'ListModelDeploymentMonitoringJobsRequest', + 'ListModelDeploymentMonitoringJobsResponse', + 'PauseModelDeploymentMonitoringJobRequest', + 'ResumeModelDeploymentMonitoringJobRequest', + 'SearchModelDeploymentMonitoringStatsAnomaliesRequest', + 
'SearchModelDeploymentMonitoringStatsAnomaliesResponse', + 'UpdateModelDeploymentMonitoringJobOperationMetadata', + 'UpdateModelDeploymentMonitoringJobRequest', + 'JobState', + 'LineageSubgraph', + 'AutomaticResources', + 'AutoscalingMetricSpec', + 'BatchDedicatedResources', + 'DedicatedResources', + 'DiskSpec', + 'MachineSpec', + 'ResourcesConsumed', + 'ManualBatchTuningParameters', + 'MetadataSchema', + 'AddContextArtifactsAndExecutionsRequest', + 'AddContextArtifactsAndExecutionsResponse', + 'AddContextChildrenRequest', + 'AddContextChildrenResponse', + 'AddExecutionEventsRequest', + 'AddExecutionEventsResponse', + 'CreateArtifactRequest', + 'CreateContextRequest', + 'CreateExecutionRequest', + 'CreateMetadataSchemaRequest', + 'CreateMetadataStoreOperationMetadata', + 'CreateMetadataStoreRequest', + 'DeleteContextRequest', + 'DeleteMetadataStoreOperationMetadata', + 'DeleteMetadataStoreRequest', + 'GetArtifactRequest', + 'GetContextRequest', + 'GetExecutionRequest', + 'GetMetadataSchemaRequest', + 'GetMetadataStoreRequest', + 'ListArtifactsRequest', + 'ListArtifactsResponse', + 'ListContextsRequest', + 'ListContextsResponse', + 'ListExecutionsRequest', + 'ListExecutionsResponse', + 'ListMetadataSchemasRequest', + 'ListMetadataSchemasResponse', + 'ListMetadataStoresRequest', + 'ListMetadataStoresResponse', + 'QueryArtifactLineageSubgraphRequest', + 'QueryContextLineageSubgraphRequest', + 'QueryExecutionInputsAndOutputsRequest', + 'UpdateArtifactRequest', + 'UpdateContextRequest', + 'UpdateExecutionRequest', + 'MetadataStore', + 'MigratableResource', + 'BatchMigrateResourcesOperationMetadata', + 'BatchMigrateResourcesRequest', + 'BatchMigrateResourcesResponse', + 'MigrateResourceRequest', + 'MigrateResourceResponse', + 'SearchMigratableResourcesRequest', + 'SearchMigratableResourcesResponse', + 'Model', + 'ModelContainerSpec', + 'Port', + 'PredictSchemata', + 'ModelDeploymentMonitoringBigQueryTable', + 'ModelDeploymentMonitoringJob', + 'ModelDeploymentMonitoringObjectiveConfig', + 'ModelDeploymentMonitoringScheduleConfig', + 'ModelMonitoringStatsAnomalies', + 'ModelDeploymentMonitoringObjectiveType', + 'ModelEvaluation', + 'ModelEvaluationSlice', + 'ModelMonitoringAlertConfig', + 'ModelMonitoringObjectiveConfig', + 'SamplingStrategy', + 'ThresholdConfig', + 'DeleteModelRequest', + 'ExportModelOperationMetadata', + 'ExportModelRequest', + 'ExportModelResponse', + 'GetModelEvaluationRequest', + 'GetModelEvaluationSliceRequest', + 'GetModelRequest', + 'ListModelEvaluationSlicesRequest', + 'ListModelEvaluationSlicesResponse', + 'ListModelEvaluationsRequest', + 'ListModelEvaluationsResponse', + 'ListModelsRequest', + 'ListModelsResponse', + 'UpdateModelRequest', + 'UploadModelOperationMetadata', + 'UploadModelRequest', + 'UploadModelResponse', + 'DeleteOperationMetadata', + 'GenericOperationMetadata', + 'PipelineJob', + 'PipelineJobDetail', + 'PipelineTaskDetail', + 'PipelineTaskExecutorDetail', + 'CancelPipelineJobRequest', + 'CancelTrainingPipelineRequest', + 'CreatePipelineJobRequest', + 'CreateTrainingPipelineRequest', + 'DeletePipelineJobRequest', + 'DeleteTrainingPipelineRequest', + 'GetPipelineJobRequest', + 'GetTrainingPipelineRequest', + 'ListPipelineJobsRequest', + 'ListPipelineJobsResponse', + 'ListTrainingPipelinesRequest', + 'ListTrainingPipelinesResponse', + 'PipelineState', + 'ExplainRequest', + 'ExplainResponse', + 'PredictRequest', + 'PredictResponse', + 'SpecialistPool', + 'CreateSpecialistPoolOperationMetadata', + 'CreateSpecialistPoolRequest', + 
'DeleteSpecialistPoolRequest', + 'GetSpecialistPoolRequest', + 'ListSpecialistPoolsRequest', + 'ListSpecialistPoolsResponse', + 'UpdateSpecialistPoolOperationMetadata', + 'UpdateSpecialistPoolRequest', + 'Measurement', + 'Study', + 'StudySpec', + 'Trial', + 'Tensorboard', + 'Scalar', + 'TensorboardBlob', + 'TensorboardBlobSequence', + 'TensorboardTensor', + 'TimeSeriesData', + 'TimeSeriesDataPoint', + 'TensorboardExperiment', + 'TensorboardRun', + 'CreateTensorboardExperimentRequest', + 'CreateTensorboardOperationMetadata', + 'CreateTensorboardRequest', + 'CreateTensorboardRunRequest', + 'CreateTensorboardTimeSeriesRequest', + 'DeleteTensorboardExperimentRequest', + 'DeleteTensorboardRequest', + 'DeleteTensorboardRunRequest', + 'DeleteTensorboardTimeSeriesRequest', + 'ExportTensorboardTimeSeriesDataRequest', + 'ExportTensorboardTimeSeriesDataResponse', + 'GetTensorboardExperimentRequest', + 'GetTensorboardRequest', + 'GetTensorboardRunRequest', + 'GetTensorboardTimeSeriesRequest', + 'ListTensorboardExperimentsRequest', + 'ListTensorboardExperimentsResponse', + 'ListTensorboardRunsRequest', + 'ListTensorboardRunsResponse', + 'ListTensorboardsRequest', + 'ListTensorboardsResponse', + 'ListTensorboardTimeSeriesRequest', + 'ListTensorboardTimeSeriesResponse', + 'ReadTensorboardBlobDataRequest', + 'ReadTensorboardBlobDataResponse', + 'ReadTensorboardTimeSeriesDataRequest', + 'ReadTensorboardTimeSeriesDataResponse', + 'UpdateTensorboardExperimentRequest', + 'UpdateTensorboardOperationMetadata', + 'UpdateTensorboardRequest', + 'UpdateTensorboardRunRequest', + 'UpdateTensorboardTimeSeriesRequest', + 'WriteTensorboardRunDataRequest', + 'WriteTensorboardRunDataResponse', + 'TensorboardTimeSeries', + 'FilterSplit', + 'FractionSplit', + 'InputDataConfig', + 'PredefinedSplit', + 'TimestampSplit', + 'TrainingPipeline', + 'BoolArray', + 'DoubleArray', + 'Int64Array', + 'StringArray', + 'UserActionReference', + 'Value', + 'AddTrialMeasurementRequest', + 'CheckTrialEarlyStoppingStateMetatdata', + 'CheckTrialEarlyStoppingStateRequest', + 'CheckTrialEarlyStoppingStateResponse', + 'CompleteTrialRequest', + 'CreateStudyRequest', + 'CreateTrialRequest', + 'DeleteStudyRequest', + 'DeleteTrialRequest', + 'GetStudyRequest', + 'GetTrialRequest', + 'ListOptimalTrialsRequest', + 'ListOptimalTrialsResponse', + 'ListStudiesRequest', + 'ListStudiesResponse', + 'ListTrialsRequest', + 'ListTrialsResponse', + 'LookupStudyRequest', + 'StopTrialRequest', + 'SuggestTrialsMetadata', + 'SuggestTrialsRequest', + 'SuggestTrialsResponse', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/py.typed new file mode 100644 index 0000000000..228f1c51c6 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py new file mode 100644 index 0000000000..5f9e065de0 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_classification import ImageClassificationPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_object_detection import ImageObjectDetectionPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_segmentation import ImageSegmentationPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.text_classification import TextClassificationPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.text_extraction import TextExtractionPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.text_sentiment import TextSentimentPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.video_action_recognition import VideoActionRecognitionPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.video_classification import VideoClassificationPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.video_object_tracking import VideoObjectTrackingPredictionInstance + +__all__ = ('ImageClassificationPredictionInstance', + 'ImageObjectDetectionPredictionInstance', + 'ImageSegmentationPredictionInstance', + 'TextClassificationPredictionInstance', + 'TextExtractionPredictionInstance', + 'TextSentimentPredictionInstance', + 'VideoActionRecognitionPredictionInstance', + 'VideoClassificationPredictionInstance', + 'VideoObjectTrackingPredictionInstance', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed new file mode 100644 index 0000000000..46ccbaf568 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-instance package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py new file mode 100644 index 0000000000..41ab5407a7 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from .types.image_classification import ImageClassificationPredictionInstance +from .types.image_object_detection import ImageObjectDetectionPredictionInstance +from .types.image_segmentation import ImageSegmentationPredictionInstance +from .types.text_classification import TextClassificationPredictionInstance +from .types.text_extraction import TextExtractionPredictionInstance +from .types.text_sentiment import TextSentimentPredictionInstance +from .types.video_action_recognition import VideoActionRecognitionPredictionInstance +from .types.video_classification import VideoClassificationPredictionInstance +from .types.video_object_tracking import VideoObjectTrackingPredictionInstance + +__all__ = ( +'ImageClassificationPredictionInstance', +'ImageObjectDetectionPredictionInstance', +'ImageSegmentationPredictionInstance', +'TextClassificationPredictionInstance', +'TextExtractionPredictionInstance', +'TextSentimentPredictionInstance', +'VideoActionRecognitionPredictionInstance', +'VideoClassificationPredictionInstance', +'VideoObjectTrackingPredictionInstance', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_metadata.json new file mode 100644 index 0000000000..38379e8208 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_metadata.json @@ -0,0 +1,7 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1", + "protoPackage": "google.cloud.aiplatform.v1beta1.schema.predict.instance", + "schema": "1.0" +} diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed new file mode 100644 index 0000000000..46ccbaf568 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-instance package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/services/__init__.py new file mode 100644 index 0000000000..4de65971c2 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
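The two __init__.py files above wire the same prediction-schema types into both a versioned module (instance_v1beta1) and a version-less alias (instance). A minimal sketch of what that layout gives an importer, assuming the staged v1beta1 distribution is installed as laid out here:

    # Both import paths resolve to the same proto-plus class objects.
    from google.cloud.aiplatform.v1beta1.schema.predict import instance_v1beta1
    from google.cloud.aiplatform.v1beta1.schema.predict.instance import (
        ImageClassificationPredictionInstance,
    )

    instance = instance_v1beta1.ImageClassificationPredictionInstance(
        content="gs://my-bucket/cat.jpg",  # hypothetical Cloud Storage URI
        mime_type="image/jpeg",
    )
    assert type(instance) is ImageClassificationPredictionInstance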
+# diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py new file mode 100644 index 0000000000..80a5332604 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .image_classification import ( + ImageClassificationPredictionInstance, +) +from .image_object_detection import ( + ImageObjectDetectionPredictionInstance, +) +from .image_segmentation import ( + ImageSegmentationPredictionInstance, +) +from .text_classification import ( + TextClassificationPredictionInstance, +) +from .text_extraction import ( + TextExtractionPredictionInstance, +) +from .text_sentiment import ( + TextSentimentPredictionInstance, +) +from .video_action_recognition import ( + VideoActionRecognitionPredictionInstance, +) +from .video_classification import ( + VideoClassificationPredictionInstance, +) +from .video_object_tracking import ( + VideoObjectTrackingPredictionInstance, +) + +__all__ = ( + 'ImageClassificationPredictionInstance', + 'ImageObjectDetectionPredictionInstance', + 'ImageSegmentationPredictionInstance', + 'TextClassificationPredictionInstance', + 'TextExtractionPredictionInstance', + 'TextSentimentPredictionInstance', + 'VideoActionRecognitionPredictionInstance', + 'VideoClassificationPredictionInstance', + 'VideoObjectTrackingPredictionInstance', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py new file mode 100644 index 0000000000..aac9e2bc91 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'ImageClassificationPredictionInstance', + }, +) + + +class ImageClassificationPredictionInstance(proto.Message): + r"""Prediction input format for Image Classification. 
+ Attributes: + content (str): + The image bytes or Cloud Storage URI to make + the prediction on. + mime_type (str): + The MIME type of the content of the image. + Only images in the MIME types listed below are + supported. - image/jpeg + - image/gif + - image/png + - image/webp + - image/bmp + - image/tiff + - image/vnd.microsoft.icon + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py new file mode 100644 index 0000000000..80a5d797a1 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto  # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'ImageObjectDetectionPredictionInstance', + }, +) + + +class ImageObjectDetectionPredictionInstance(proto.Message): + r"""Prediction input format for Image Object Detection. + Attributes: + content (str): + The image bytes or Cloud Storage URI to make + the prediction on. + mime_type (str): + The MIME type of the content of the image. + Only images in the MIME types listed below are + supported. - image/jpeg + - image/gif + - image/png + - image/webp + - image/bmp + - image/tiff + - image/vnd.microsoft.icon + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py new file mode 100644 index 0000000000..e1b5cfc21f --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py @@ -0,0 +1,49 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
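Both image instance types above share the same two-field shape. When the image is passed inline rather than by Cloud Storage URI, the bytes go into content base64-encoded; a hedged construction sketch, with the local file name invented for illustration:

    import base64

    from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1 import (
        ImageObjectDetectionPredictionInstance,
    )

    # Inline image bytes are base64-encoded before being set on `content`;
    # alternatively, `content` may carry a Cloud Storage URI directly.
    with open("street.jpg", "rb") as f:  # hypothetical local file
        encoded = base64.b64encode(f.read()).decode("utf-8")

    instance = ImageObjectDetectionPredictionInstance(
        content=encoded,
        mime_type="image/jpeg",
    )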
+# +import proto  # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'ImageSegmentationPredictionInstance', + }, +) + + +class ImageSegmentationPredictionInstance(proto.Message): + r"""Prediction input format for Image Segmentation. + Attributes: + content (str): + The image bytes to make the predictions on. + mime_type (str): + The MIME type of the content of the image. + Only images in the MIME types listed below are + supported. - image/jpeg + - image/png + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py new file mode 100644 index 0000000000..0c1ea43a72 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto  # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'TextClassificationPredictionInstance', + }, +) + + +class TextClassificationPredictionInstance(proto.Message): + r"""Prediction input format for Text Classification. + Attributes: + content (str): + The text snippet to make the predictions on. + mime_type (str): + The MIME type of the text snippet. The + supported MIME types are listed below. + - text/plain + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py new file mode 100644 index 0000000000..8f47b27080 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+# +import proto  # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'TextExtractionPredictionInstance', + }, +) + + +class TextExtractionPredictionInstance(proto.Message): + r"""Prediction input format for Text Extraction. + Attributes: + content (str): + The text snippet to make the predictions on. + mime_type (str): + The MIME type of the text snippet. The + supported MIME types are listed below. + - text/plain + key (str): + This field is only used for batch prediction. + If a key is provided, the batch prediction + result will be mapped to this key. If omitted, + then the batch prediction result will contain + the entire input instance. Vertex AI will not + check if keys in the request are duplicates, so + it is up to the caller to ensure the keys are + unique. + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + key = proto.Field( + proto.STRING, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py new file mode 100644 index 0000000000..ab416779b6 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto  # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'TextSentimentPredictionInstance', + }, +) + + +class TextSentimentPredictionInstance(proto.Message): + r"""Prediction input format for Text Sentiment. + Attributes: + content (str): + The text snippet to make the predictions on. + mime_type (str): + The MIME type of the text snippet. The + supported MIME types are listed below. + - text/plain + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py new file mode 100644 index 0000000000..c7a76efda2 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
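The key field on TextExtractionPredictionInstance above applies only to batch prediction: it lets the caller correlate each result with its input without the whole snippet being echoed back. A short sketch, with the snippet and key invented for illustration:

    from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1 import (
        TextExtractionPredictionInstance,
    )

    instance = TextExtractionPredictionInstance(
        content="Alan Turing was born in London.",  # hypothetical snippet
        mime_type="text/plain",
        key="doc-0001",  # caller-chosen; keeping keys unique is the caller's job
    )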
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'VideoActionRecognitionPredictionInstance', + }, +) + + +class VideoActionRecognitionPredictionInstance(proto.Message): + r"""Prediction input format for Video Action Recognition. + Attributes: + content (str): + The Google Cloud Storage location of the + video on which to perform the prediction. + mime_type (str): + The MIME type of the content of the video. + Only the following are supported: video/mp4 + video/avi video/quicktime + time_segment_start (str): + The beginning, inclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision. + time_segment_end (str): + The end, exclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision, and "inf" or "Infinity" + is allowed, which means the end of the video. + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.STRING, + number=3, + ) + time_segment_end = proto.Field( + proto.STRING, + number=4, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py new file mode 100644 index 0000000000..56d662ef88 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'VideoClassificationPredictionInstance', + }, +) + + +class VideoClassificationPredictionInstance(proto.Message): + r"""Prediction input format for Video Classification. + Attributes: + content (str): + The Google Cloud Storage location of the + video on which to perform the prediction. + mime_type (str): + The MIME type of the content of the video. 
+ Only the following are supported: video/mp4 + video/avi video/quicktime + time_segment_start (str): + The beginning, inclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision. + time_segment_end (str): + The end, exclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision, and "inf" or "Infinity" + is allowed, which means the end of the video. + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.STRING, + number=3, + ) + time_segment_end = proto.Field( + proto.STRING, + number=4, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py new file mode 100644 index 0000000000..7344d419a8 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'VideoObjectTrackingPredictionInstance', + }, +) + + +class VideoObjectTrackingPredictionInstance(proto.Message): + r"""Prediction input format for Video Object Tracking. + Attributes: + content (str): + The Google Cloud Storage location of the + video on which to perform the prediction. + mime_type (str): + The MIME type of the content of the video. + Only the following are supported: video/mp4 + video/avi video/quicktime + time_segment_start (str): + The beginning, inclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision. + time_segment_end (str): + The end, exclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision, and "inf" or "Infinity" + is allowed, which means the end of the video. 
+ """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.STRING, + number=3, + ) + time_segment_end = proto.Field( + proto.STRING, + number=4, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py new file mode 100644 index 0000000000..464c39f26c --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_classification import ImageClassificationPredictionParams +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_object_detection import ImageObjectDetectionPredictionParams +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_segmentation import ImageSegmentationPredictionParams +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.video_action_recognition import VideoActionRecognitionPredictionParams +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.video_classification import VideoClassificationPredictionParams +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.video_object_tracking import VideoObjectTrackingPredictionParams + +__all__ = ('ImageClassificationPredictionParams', + 'ImageObjectDetectionPredictionParams', + 'ImageSegmentationPredictionParams', + 'VideoActionRecognitionPredictionParams', + 'VideoClassificationPredictionParams', + 'VideoObjectTrackingPredictionParams', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed new file mode 100644 index 0000000000..acdcd7bc60 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-params package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py new file mode 100644 index 0000000000..91b718b437 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
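All three video instance types above encode time offsets the same way: seconds from the start of the video as a string with a trailing "s", with "inf" (or "Infinity") standing for the end of the video. A construction sketch, with the Cloud Storage URI invented:

    from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1 import (
        VideoClassificationPredictionInstance,
    )

    instance = VideoClassificationPredictionInstance(
        content="gs://my-bucket/clip.mp4",  # hypothetical Cloud Storage URI
        mime_type="video/mp4",
        time_segment_start="0.0s",  # inclusive start; fractional seconds allowed
        time_segment_end="inf",     # exclusive end; "inf" means end of video
    )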
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from .types.image_classification import ImageClassificationPredictionParams +from .types.image_object_detection import ImageObjectDetectionPredictionParams +from .types.image_segmentation import ImageSegmentationPredictionParams +from .types.video_action_recognition import VideoActionRecognitionPredictionParams +from .types.video_classification import VideoClassificationPredictionParams +from .types.video_object_tracking import VideoObjectTrackingPredictionParams + +__all__ = ( +'ImageClassificationPredictionParams', +'ImageObjectDetectionPredictionParams', +'ImageSegmentationPredictionParams', +'VideoActionRecognitionPredictionParams', +'VideoClassificationPredictionParams', +'VideoObjectTrackingPredictionParams', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_metadata.json new file mode 100644 index 0000000000..6b925dd9dc --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_metadata.json @@ -0,0 +1,7 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1", + "protoPackage": "google.cloud.aiplatform.v1beta1.schema.predict.params", + "schema": "1.0" +} diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed new file mode 100644 index 0000000000..acdcd7bc60 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-params package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/services/__init__.py new file mode 100644 index 0000000000..4de65971c2 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py new file mode 100644 index 0000000000..70a92bb59c --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .image_classification import ( + ImageClassificationPredictionParams, +) +from .image_object_detection import ( + ImageObjectDetectionPredictionParams, +) +from .image_segmentation import ( + ImageSegmentationPredictionParams, +) +from .video_action_recognition import ( + VideoActionRecognitionPredictionParams, +) +from .video_classification import ( + VideoClassificationPredictionParams, +) +from .video_object_tracking import ( + VideoObjectTrackingPredictionParams, +) + +__all__ = ( + 'ImageClassificationPredictionParams', + 'ImageObjectDetectionPredictionParams', + 'ImageSegmentationPredictionParams', + 'VideoActionRecognitionPredictionParams', + 'VideoClassificationPredictionParams', + 'VideoObjectTrackingPredictionParams', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py new file mode 100644 index 0000000000..67c5453a93 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.params', + manifest={ + 'ImageClassificationPredictionParams', + }, +) + + +class ImageClassificationPredictionParams(proto.Message): + r"""Prediction model parameters for Image Classification. + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The Model only returns up to that many top, + by confidence score, predictions per instance. + If this number is very high, the Model may + return fewer predictions. Default value is 10. 
+ """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py new file mode 100644 index 0000000000..baed8905ee --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.params', + manifest={ + 'ImageObjectDetectionPredictionParams', + }, +) + + +class ImageObjectDetectionPredictionParams(proto.Message): + r"""Prediction model parameters for Image Object Detection. + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The Model only returns up to that many top, + by confidence score, predictions per instance. + Note that number of returned predictions is also + limited by metadata's predictionsLimit. Default + value is 10. + """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py new file mode 100644 index 0000000000..8a5e999504 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.params', + manifest={ + 'ImageSegmentationPredictionParams', + }, +) + + +class ImageSegmentationPredictionParams(proto.Message): + r"""Prediction model parameters for Image Segmentation. 
+ Attributes: + confidence_threshold (float): + When the model predicts category of pixels of + the image, it will only provide predictions for + pixels that it is at least this much confident + about. All other pixels will be classified as + background. Default value is 0.5. + """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py new file mode 100644 index 0000000000..37a8c2bc9c --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.params', + manifest={ + 'VideoActionRecognitionPredictionParams', + }, +) + + +class VideoActionRecognitionPredictionParams(proto.Message): + r"""Prediction model parameters for Video Action Recognition. + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The model only returns up to that many top, + by confidence score, predictions per frame of + the video. If this number is very high, the + Model may return fewer predictions per frame. + Default value is 50. + """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py new file mode 100644 index 0000000000..ff7bbd1b86 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py @@ -0,0 +1,94 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
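[Editor's note] The params messages above (image segmentation, video action recognition) are plain proto-plus types, so they can be built with keyword arguments and flattened to dicts for a Predict request body. A hedged sketch; the threshold value is invented for illustration:

    from google.cloud.aiplatform.v1beta1.schema.predict import params_v1beta1 as params

    # Pixels scoring below the threshold are classified as background.
    seg_params = params.ImageSegmentationPredictionParams(
        confidence_threshold=0.7,
    )
    # to_dict is the standard proto-plus helper for turning a message into
    # a JSON-compatible dict, e.g. for the `parameters` of a Predict call.
    print(params.ImageSegmentationPredictionParams.to_dict(seg_params))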
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1.schema.predict.params',
+    manifest={
+        'VideoClassificationPredictionParams',
+    },
+)
+
+
+class VideoClassificationPredictionParams(proto.Message):
+    r"""Prediction model parameters for Video Classification.
+    Attributes:
+        confidence_threshold (float):
+            The Model only returns predictions with at
+            least this confidence score. Default value is
+            0.0
+        max_predictions (int):
+            The Model only returns up to that many top,
+            by confidence score, predictions per instance.
+            If this number is very high, the Model may
+            return fewer predictions. Default value is
+            10,000.
+        segment_classification (bool):
+            Set to true to request segment-level
+            classification. Vertex AI returns labels and
+            their confidence scores for the entire time
+            segment of the video that user specified in the
+            input instance. Default value is true
+        shot_classification (bool):
+            Set to true to request shot-level
+            classification. Vertex AI determines the
+            boundaries for each camera shot in the entire
+            time segment of the video that user specified in
+            the input instance. Vertex AI then returns
+            labels and their confidence scores for each
+            detected shot, along with the start and end time
+            of the shot.
+            WARNING: Model evaluation is not done for this
+            classification type, the quality of it depends
+            on the training data, but there are no metrics
+            provided to describe that quality.
+            Default value is false
+        one_sec_interval_classification (bool):
+            Set to true to request classification for a
+            video at one-second intervals. Vertex AI returns
+            labels and their confidence scores for each
+            second of the entire time segment of the video
+            that user specified in the input instance.
+            WARNING: Model evaluation is not done for this
+            classification type, the quality of it depends
+            on the training data, but there are no metrics
+            provided to describe that quality. Default value
+            is false
+    """
+
+    confidence_threshold = proto.Field(
+        proto.FLOAT,
+        number=1,
+    )
+    max_predictions = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+    segment_classification = proto.Field(
+        proto.BOOL,
+        number=3,
+    )
+    shot_classification = proto.Field(
+        proto.BOOL,
+        number=4,
+    )
+    one_sec_interval_classification = proto.Field(
+        proto.BOOL,
+        number=5,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py
new file mode 100644
index 0000000000..4e0e97f8d6
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
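[Editor's note] For the video classification params just defined, the three boolean flags select which granularities appear in the response. A sketch of a typical request object; the values are illustrative only:

    from google.cloud.aiplatform.v1beta1.schema.predict import params_v1beta1 as params

    video_params = params.VideoClassificationPredictionParams(
        confidence_threshold=0.3,
        max_predictions=100,
        segment_classification=True,   # one result per requested time segment
        shot_classification=True,      # per detected camera shot (no eval metrics)
        one_sec_interval_classification=False,
    )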
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.params', + manifest={ + 'VideoObjectTrackingPredictionParams', + }, +) + + +class VideoObjectTrackingPredictionParams(proto.Message): + r"""Prediction model parameters for Video Object Tracking. + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. Default value is + 0.0 + max_predictions (int): + The model only returns up to that many top, + by confidence score, predictions per frame of + the video. If this number is very high, the + Model may return fewer predictions per frame. + Default value is 50. + min_bounding_box_size (float): + Only bounding boxes with shortest edge at + least that long as a relative value of video + frame size are returned. Default value is 0.0. + """ + + confidence_threshold = proto.Field( + proto.FLOAT, + number=1, + ) + max_predictions = proto.Field( + proto.INT32, + number=2, + ) + min_bounding_box_size = proto.Field( + proto.FLOAT, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py new file mode 100644 index 0000000000..0b54451ca0 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
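[Editor's note] The object tracking params above work the same way; `min_bounding_box_size` filters out boxes whose shortest edge falls below the given fraction of the frame size. A small illustrative sketch with invented values:

    from google.cloud.aiplatform.v1beta1.schema.predict import params_v1beta1 as params

    tracking_params = params.VideoObjectTrackingPredictionParams(
        confidence_threshold=0.5,
        max_predictions=25,
        min_bounding_box_size=0.1,  # drop boxes shorter than 10% of the frame edge
    )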
+# + + +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.classification import ClassificationPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.image_object_detection import ImageObjectDetectionPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.image_segmentation import ImageSegmentationPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.tabular_classification import TabularClassificationPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.tabular_regression import TabularRegressionPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.text_extraction import TextExtractionPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.text_sentiment import TextSentimentPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.time_series_forecasting import TimeSeriesForecastingPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.video_action_recognition import VideoActionRecognitionPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.video_classification import VideoClassificationPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.video_object_tracking import VideoObjectTrackingPredictionResult + +__all__ = ('ClassificationPredictionResult', + 'ImageObjectDetectionPredictionResult', + 'ImageSegmentationPredictionResult', + 'TabularClassificationPredictionResult', + 'TabularRegressionPredictionResult', + 'TextExtractionPredictionResult', + 'TextSentimentPredictionResult', + 'TimeSeriesForecastingPredictionResult', + 'VideoActionRecognitionPredictionResult', + 'VideoClassificationPredictionResult', + 'VideoObjectTrackingPredictionResult', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed new file mode 100644 index 0000000000..8cf97d7107 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-prediction package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py new file mode 100644 index 0000000000..495759c24b --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
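[Editor's note] Since the prediction package above re-exports the result messages, a raw prediction returned as JSON can be rehydrated into a typed message. A hedged sketch with invented sample values (int64 `ids` arrive as strings in JSON, which `from_json` accepts):

    import json

    from google.cloud.aiplatform.v1beta1.schema.predict import prediction_v1beta1 as prediction

    raw = {"ids": ["1", "2"], "displayNames": ["cat", "dog"], "confidences": [0.9, 0.1]}
    result = prediction.ClassificationPredictionResult.from_json(json.dumps(raw))
    print(list(result.display_names))  # ['cat', 'dog']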
+# + + +from .types.classification import ClassificationPredictionResult +from .types.image_object_detection import ImageObjectDetectionPredictionResult +from .types.image_segmentation import ImageSegmentationPredictionResult +from .types.tabular_classification import TabularClassificationPredictionResult +from .types.tabular_regression import TabularRegressionPredictionResult +from .types.text_extraction import TextExtractionPredictionResult +from .types.text_sentiment import TextSentimentPredictionResult +from .types.time_series_forecasting import TimeSeriesForecastingPredictionResult +from .types.video_action_recognition import VideoActionRecognitionPredictionResult +from .types.video_classification import VideoClassificationPredictionResult +from .types.video_object_tracking import VideoObjectTrackingPredictionResult + +__all__ = ( +'ClassificationPredictionResult', +'ImageObjectDetectionPredictionResult', +'ImageSegmentationPredictionResult', +'TabularClassificationPredictionResult', +'TabularRegressionPredictionResult', +'TextExtractionPredictionResult', +'TextSentimentPredictionResult', +'TimeSeriesForecastingPredictionResult', +'VideoActionRecognitionPredictionResult', +'VideoClassificationPredictionResult', +'VideoObjectTrackingPredictionResult', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_metadata.json new file mode 100644 index 0000000000..99d3dc6402 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_metadata.json @@ -0,0 +1,7 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1", + "protoPackage": "google.cloud.aiplatform.v1beta1.schema.predict.prediction", + "schema": "1.0" +} diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed new file mode 100644 index 0000000000..8cf97d7107 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-prediction package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/services/__init__.py new file mode 100644 index 0000000000..4de65971c2 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py new file mode 100644 index 0000000000..f3b70f66dd --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .classification import ( + ClassificationPredictionResult, +) +from .image_object_detection import ( + ImageObjectDetectionPredictionResult, +) +from .image_segmentation import ( + ImageSegmentationPredictionResult, +) +from .tabular_classification import ( + TabularClassificationPredictionResult, +) +from .tabular_regression import ( + TabularRegressionPredictionResult, +) +from .text_extraction import ( + TextExtractionPredictionResult, +) +from .text_sentiment import ( + TextSentimentPredictionResult, +) +from .time_series_forecasting import ( + TimeSeriesForecastingPredictionResult, +) +from .video_action_recognition import ( + VideoActionRecognitionPredictionResult, +) +from .video_classification import ( + VideoClassificationPredictionResult, +) +from .video_object_tracking import ( + VideoObjectTrackingPredictionResult, +) + +__all__ = ( + 'ClassificationPredictionResult', + 'ImageObjectDetectionPredictionResult', + 'ImageSegmentationPredictionResult', + 'TabularClassificationPredictionResult', + 'TabularRegressionPredictionResult', + 'TextExtractionPredictionResult', + 'TextSentimentPredictionResult', + 'TimeSeriesForecastingPredictionResult', + 'VideoActionRecognitionPredictionResult', + 'VideoClassificationPredictionResult', + 'VideoObjectTrackingPredictionResult', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py new file mode 100644 index 0000000000..ecd16c7250 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'ClassificationPredictionResult', + }, +) + + +class ClassificationPredictionResult(proto.Message): + r"""Prediction output format for Image and Text Classification. + Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. + confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the Ids. + """ + + ids = proto.RepeatedField( + proto.INT64, + number=1, + ) + display_names = proto.RepeatedField( + proto.STRING, + number=2, + ) + confidences = proto.RepeatedField( + proto.FLOAT, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py new file mode 100644 index 0000000000..d787871e99 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'ImageObjectDetectionPredictionResult', + }, +) + + +class ImageObjectDetectionPredictionResult(proto.Message): + r"""Prediction output format for Image Object Detection. + Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified, ordered by the confidence + score descendingly. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. + confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the Ids. + bboxes (Sequence[google.protobuf.struct_pb2.ListValue]): + Bounding boxes, i.e. the rectangles over the image, that + pinpoint the found AnnotationSpecs. Given in order that + matches the IDs. Each bounding box is an array of 4 numbers + ``xMin``, ``xMax``, ``yMin``, and ``yMax``, which represent + the extremal coordinates of the box. They are relative to + the image size, and the point 0,0 is in the top left of the + image. 
+    """
+
+    ids = proto.RepeatedField(
+        proto.INT64,
+        number=1,
+    )
+    display_names = proto.RepeatedField(
+        proto.STRING,
+        number=2,
+    )
+    confidences = proto.RepeatedField(
+        proto.FLOAT,
+        number=3,
+    )
+    bboxes = proto.RepeatedField(
+        proto.MESSAGE,
+        number=4,
+        message=struct_pb2.ListValue,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py
new file mode 100644
index 0000000000..92cc20720c
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1.schema.predict.prediction',
+    manifest={
+        'ImageSegmentationPredictionResult',
+    },
+)
+
+
+class ImageSegmentationPredictionResult(proto.Message):
+    r"""Prediction output format for Image Segmentation.
+    Attributes:
+        category_mask (str):
+            A PNG image where each pixel in the mask
+            represents the category to which the pixel in
+            the original image was predicted to belong.
+            The size of this image will be the same as the
+            original image. The mapping between the
+            AnnotationSpec and the color can be found in
+            the model's metadata. The model will choose the
+            most likely category and if none of the
+            categories reach the confidence threshold, the
+            pixel will be marked as background.
+        confidence_mask (str):
+            A one channel image which is encoded as an
+            8-bit lossless PNG. The size of the image will
+            be the same as the original image. For a
+            specific pixel, darker color means less
+            confidence in correctness of the category in
+            the categoryMask for the corresponding pixel.
+            Black means no confidence and white means
+            complete confidence.
+    """
+
+    category_mask = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    confidence_mask = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py
new file mode 100644
index 0000000000..8a437022fd
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1.schema.predict.prediction',
+    manifest={
+        'TabularClassificationPredictionResult',
+    },
+)
+
+
+class TabularClassificationPredictionResult(proto.Message):
+    r"""Prediction output format for Tabular Classification.
+    Attributes:
+        classes (Sequence[str]):
+            The names of the classes being classified,
+            containing all possible values of the target
+            column.
+        scores (Sequence[float]):
+            The model's confidence in each class being
+            correct, higher value means higher confidence.
+            The N-th score corresponds to the N-th class in
+            classes.
+    """
+
+    classes = proto.RepeatedField(
+        proto.STRING,
+        number=1,
+    )
+    scores = proto.RepeatedField(
+        proto.FLOAT,
+        number=2,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py
new file mode 100644
index 0000000000..a49f6f55ce
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1.schema.predict.prediction',
+    manifest={
+        'TabularRegressionPredictionResult',
+    },
+)
+
+
+class TabularRegressionPredictionResult(proto.Message):
+    r"""Prediction output format for Tabular Regression.
+    Attributes:
+        value (float):
+            The regression value.
+        lower_bound (float):
+            The lower bound of the prediction interval.
+        upper_bound (float):
+            The upper bound of the prediction interval.
+ """ + + value = proto.Field( + proto.FLOAT, + number=1, + ) + lower_bound = proto.Field( + proto.FLOAT, + number=2, + ) + upper_bound = proto.Field( + proto.FLOAT, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py new file mode 100644 index 0000000000..a92d9caefa --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'TextExtractionPredictionResult', + }, +) + + +class TextExtractionPredictionResult(proto.Message): + r"""Prediction output format for Text Extraction. + Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified, ordered by the confidence + score descendingly. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. + text_segment_start_offsets (Sequence[int]): + The start offsets, inclusive, of the text + segment in which the AnnotationSpec has been + identified. Expressed as a zero-based number of + characters as measured from the start of the + text snippet. + text_segment_end_offsets (Sequence[int]): + The end offsets, inclusive, of the text + segment in which the AnnotationSpec has been + identified. Expressed as a zero-based number of + characters as measured from the start of the + text snippet. + confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the Ids. 
+    """
+
+    ids = proto.RepeatedField(
+        proto.INT64,
+        number=1,
+    )
+    display_names = proto.RepeatedField(
+        proto.STRING,
+        number=2,
+    )
+    text_segment_start_offsets = proto.RepeatedField(
+        proto.INT64,
+        number=3,
+    )
+    text_segment_end_offsets = proto.RepeatedField(
+        proto.INT64,
+        number=4,
+    )
+    confidences = proto.RepeatedField(
+        proto.FLOAT,
+        number=5,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py
new file mode 100644
index 0000000000..4967b02aae
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1.schema.predict.prediction',
+    manifest={
+        'TextSentimentPredictionResult',
+    },
+)
+
+
+class TextSentimentPredictionResult(proto.Message):
+    r"""Prediction output format for Text Sentiment.
+    Attributes:
+        sentiment (int):
+            The integer sentiment label, between 0
+            (inclusive) and sentimentMax (inclusive), where
+            0 maps to the least positive sentiment and
+            sentimentMax maps to the most positive one. The
+            higher the score is, the more positive the
+            sentiment in the text snippet is. Note:
+            sentimentMax is an integer value between 1
+            (inclusive) and 10 (inclusive).
+    """
+
+    sentiment = proto.Field(
+        proto.INT32,
+        number=1,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py
new file mode 100644
index 0000000000..67a3cd9dff
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
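[Editor's note] The extraction result above stores its output as five parallel arrays, so consumers typically zip them back into per-entity records. A minimal sketch with fabricated values:

    from google.cloud.aiplatform.v1beta1.schema.predict import prediction_v1beta1 as prediction

    extraction = prediction.TextExtractionPredictionResult(
        ids=[1, 2],
        display_names=["person", "location"],
        text_segment_start_offsets=[0, 10],
        text_segment_end_offsets=[4, 18],
        confidences=[0.98, 0.87],
    )
    # The N-th entry of each field describes the same identified entity.
    for id_, name, start, end, conf in zip(
            extraction.ids, extraction.display_names,
            extraction.text_segment_start_offsets,
            extraction.text_segment_end_offsets, extraction.confidences):
        print(id_, name, start, end, conf)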
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1.schema.predict.prediction',
+    manifest={
+        'TimeSeriesForecastingPredictionResult',
+    },
+)
+
+
+class TimeSeriesForecastingPredictionResult(proto.Message):
+    r"""Prediction output format for Time Series Forecasting.
+    Attributes:
+        value (float):
+            The regression value.
+    """
+
+    value = proto.Field(
+        proto.FLOAT,
+        number=1,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py
new file mode 100644
index 0000000000..bc53328da4
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.protobuf import duration_pb2  # type: ignore
+from google.protobuf import wrappers_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1.schema.predict.prediction',
+    manifest={
+        'VideoActionRecognitionPredictionResult',
+    },
+)
+
+
+class VideoActionRecognitionPredictionResult(proto.Message):
+    r"""Prediction output format for Video Action Recognition.
+    Attributes:
+        id (str):
+            The resource ID of the AnnotationSpec that
+            had been identified.
+        display_name (str):
+            The display name of the AnnotationSpec that
+            had been identified.
+        time_segment_start (google.protobuf.duration_pb2.Duration):
+            The beginning, inclusive, of the video's time
+            segment in which the AnnotationSpec has been
+            identified. Expressed as a number of seconds as
+            measured from the start of the video, with
+            fractions up to a microsecond precision, and
+            with "s" appended at the end.
+        time_segment_end (google.protobuf.duration_pb2.Duration):
+            The end, exclusive, of the video's time
+            segment in which the AnnotationSpec has been
+            identified. Expressed as a number of seconds as
+            measured from the start of the video, with
+            fractions up to a microsecond precision, and
+            with "s" appended at the end.
+        confidence (google.protobuf.wrappers_pb2.FloatValue):
+            The Model's confidence in correctness of this
+            prediction, higher value means higher
+            confidence.
+ """ + + id = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.MESSAGE, + number=4, + message=duration_pb2.Duration, + ) + time_segment_end = proto.Field( + proto.MESSAGE, + number=5, + message=duration_pb2.Duration, + ) + confidence = proto.Field( + proto.MESSAGE, + number=6, + message=wrappers_pb2.FloatValue, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py new file mode 100644 index 0000000000..95439add5e --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py @@ -0,0 +1,102 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import wrappers_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'VideoClassificationPredictionResult', + }, +) + + +class VideoClassificationPredictionResult(proto.Message): + r"""Prediction output format for Video Classification. + Attributes: + id (str): + The resource ID of the AnnotationSpec that + had been identified. + display_name (str): + The display name of the AnnotationSpec that + had been identified. + type_ (str): + The type of the prediction. The requested + types can be configured via parameters. This + will be one of - segment-classification + - shot-classification + - one-sec-interval-classification + time_segment_start (google.protobuf.duration_pb2.Duration): + The beginning, inclusive, of the video's time + segment in which the AnnotationSpec has been + identified. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. Note that for + 'segment-classification' prediction type, this + equals the original 'timeSegmentStart' from the + input instance, for other types it is the start + of a shot or a 1 second interval respectively. + time_segment_end (google.protobuf.duration_pb2.Duration): + The end, exclusive, of the video's time + segment in which the AnnotationSpec has been + identified. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. Note that for + 'segment-classification' prediction type, this + equals the original 'timeSegmentEnd' from the + input instance, for other types it is the end of + a shot or a 1 second interval respectively. 
+        confidence (google.protobuf.wrappers_pb2.FloatValue):
+            The Model's confidence in correctness of this
+            prediction, higher value means higher
+            confidence.
+    """
+
+    id = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    display_name = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    type_ = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    time_segment_start = proto.Field(
+        proto.MESSAGE,
+        number=4,
+        message=duration_pb2.Duration,
+    )
+    time_segment_end = proto.Field(
+        proto.MESSAGE,
+        number=5,
+        message=duration_pb2.Duration,
+    )
+    confidence = proto.Field(
+        proto.MESSAGE,
+        number=6,
+        message=wrappers_pb2.FloatValue,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py
new file mode 100644
index 0000000000..34cf7ab1b9
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py
@@ -0,0 +1,144 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.protobuf import duration_pb2  # type: ignore
+from google.protobuf import wrappers_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1.schema.predict.prediction',
+    manifest={
+        'VideoObjectTrackingPredictionResult',
+    },
+)
+
+
+class VideoObjectTrackingPredictionResult(proto.Message):
+    r"""Prediction output format for Video Object Tracking.
+    Attributes:
+        id (str):
+            The resource ID of the AnnotationSpec that
+            had been identified.
+        display_name (str):
+            The display name of the AnnotationSpec that
+            had been identified.
+        time_segment_start (google.protobuf.duration_pb2.Duration):
+            The beginning, inclusive, of the video's time
+            segment in which the object instance has been
+            detected. Expressed as a number of seconds as
+            measured from the start of the video, with
+            fractions up to a microsecond precision, and
+            with "s" appended at the end.
+        time_segment_end (google.protobuf.duration_pb2.Duration):
+            The end, inclusive, of the video's time
+            segment in which the object instance has been
+            detected. Expressed as a number of seconds as
+            measured from the start of the video, with
+            fractions up to a microsecond precision, and
+            with "s" appended at the end.
+        confidence (google.protobuf.wrappers_pb2.FloatValue):
+            The Model's confidence in correctness of this
+            prediction, higher value means higher
+            confidence.
+        frames (Sequence[google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.VideoObjectTrackingPredictionResult.Frame]):
+            All of the frames of the video in which a
+            single object instance has been detected. The
+            bounding boxes in the frames identify the same
+            object.
+ """ + + class Frame(proto.Message): + r"""The fields ``xMin``, ``xMax``, ``yMin``, and ``yMax`` refer to a + bounding box, i.e. the rectangle over the video frame pinpointing + the found AnnotationSpec. The coordinates are relative to the frame + size, and the point 0,0 is in the top left of the frame. + + Attributes: + time_offset (google.protobuf.duration_pb2.Duration): + A time (frame) of a video in which the object + has been detected. Expressed as a number of + seconds as measured from the start of the video, + with fractions up to a microsecond precision, + and with "s" appended at the end. + x_min (google.protobuf.wrappers_pb2.FloatValue): + The leftmost coordinate of the bounding box. + x_max (google.protobuf.wrappers_pb2.FloatValue): + The rightmost coordinate of the bounding box. + y_min (google.protobuf.wrappers_pb2.FloatValue): + The topmost coordinate of the bounding box. + y_max (google.protobuf.wrappers_pb2.FloatValue): + The bottommost coordinate of the bounding + box. + """ + + time_offset = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + x_min = proto.Field( + proto.MESSAGE, + number=2, + message=wrappers_pb2.FloatValue, + ) + x_max = proto.Field( + proto.MESSAGE, + number=3, + message=wrappers_pb2.FloatValue, + ) + y_min = proto.Field( + proto.MESSAGE, + number=4, + message=wrappers_pb2.FloatValue, + ) + y_max = proto.Field( + proto.MESSAGE, + number=5, + message=wrappers_pb2.FloatValue, + ) + + id = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.MESSAGE, + number=3, + message=duration_pb2.Duration, + ) + time_segment_end = proto.Field( + proto.MESSAGE, + number=4, + message=duration_pb2.Duration, + ) + confidence = proto.Field( + proto.MESSAGE, + number=5, + message=wrappers_pb2.FloatValue, + ) + frames = proto.RepeatedField( + proto.MESSAGE, + number=6, + message=Frame, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py new file mode 100644 index 0000000000..b31ea1309d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
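[Editor's note] Because `frames` is a repeated nested message, reading a tracking result is mostly a matter of iterating it. As I understand proto-plus marshaling, `Duration` fields surface as `datetime.timedelta` and `FloatValue` wrappers as plain floats, so a sketch like the following (with invented values) reads naturally:

    import datetime

    from google.cloud.aiplatform.v1beta1.schema.predict import prediction_v1beta1 as prediction

    Frame = prediction.VideoObjectTrackingPredictionResult.Frame
    track = prediction.VideoObjectTrackingPredictionResult(
        id="1",
        display_name="car",
        confidence=0.92,
        frames=[Frame(
            time_offset=datetime.timedelta(seconds=1.5),
            x_min=0.1, x_max=0.4, y_min=0.2, y_max=0.6,
        )],
    )
    for frame in track.frames:
        # time_offset behaves as a timedelta; the coordinates as floats.
        print(frame.time_offset.total_seconds(), frame.x_min, frame.y_max)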
+# + + +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_classification import AutoMlImageClassification +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_classification import AutoMlImageClassificationInputs +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_classification import AutoMlImageClassificationMetadata +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_object_detection import AutoMlImageObjectDetection +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_object_detection import AutoMlImageObjectDetectionInputs +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_object_detection import AutoMlImageObjectDetectionMetadata +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_segmentation import AutoMlImageSegmentation +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_segmentation import AutoMlImageSegmentationInputs +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_segmentation import AutoMlImageSegmentationMetadata +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_tables import AutoMlTables +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_tables import AutoMlTablesInputs +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_tables import AutoMlTablesMetadata +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_classification import AutoMlTextClassification +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_classification import AutoMlTextClassificationInputs +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_extraction import AutoMlTextExtraction +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_extraction import AutoMlTextExtractionInputs +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_sentiment import AutoMlTextSentiment +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_sentiment import AutoMlTextSentimentInputs +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_time_series_forecasting import AutoMlForecasting +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_time_series_forecasting import AutoMlForecastingInputs +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_time_series_forecasting import AutoMlForecastingMetadata +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_action_recognition import AutoMlVideoActionRecognition +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_action_recognition import AutoMlVideoActionRecognitionInputs +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_classification import AutoMlVideoClassification +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_classification import AutoMlVideoClassificationInputs +from 
google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_object_tracking import AutoMlVideoObjectTracking +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_object_tracking import AutoMlVideoObjectTrackingInputs +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.export_evaluated_data_items_config import ExportEvaluatedDataItemsConfig + +__all__ = ('AutoMlImageClassification', + 'AutoMlImageClassificationInputs', + 'AutoMlImageClassificationMetadata', + 'AutoMlImageObjectDetection', + 'AutoMlImageObjectDetectionInputs', + 'AutoMlImageObjectDetectionMetadata', + 'AutoMlImageSegmentation', + 'AutoMlImageSegmentationInputs', + 'AutoMlImageSegmentationMetadata', + 'AutoMlTables', + 'AutoMlTablesInputs', + 'AutoMlTablesMetadata', + 'AutoMlTextClassification', + 'AutoMlTextClassificationInputs', + 'AutoMlTextExtraction', + 'AutoMlTextExtractionInputs', + 'AutoMlTextSentiment', + 'AutoMlTextSentimentInputs', + 'AutoMlForecasting', + 'AutoMlForecastingInputs', + 'AutoMlForecastingMetadata', + 'AutoMlVideoActionRecognition', + 'AutoMlVideoActionRecognitionInputs', + 'AutoMlVideoClassification', + 'AutoMlVideoClassificationInputs', + 'AutoMlVideoObjectTracking', + 'AutoMlVideoObjectTrackingInputs', + 'ExportEvaluatedDataItemsConfig', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/py.typed new file mode 100644 index 0000000000..98af260cd7 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-trainingjob-definition package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py new file mode 100644 index 0000000000..83f5b0d33f --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py @@ -0,0 +1,76 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
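Note for consumers of this staging output: the unversioned ``definition`` namespace above simply re-exports the versioned implementation, so the two import paths are interchangeable. A minimal sketch, assuming the staged packages are importable:

from google.cloud.aiplatform.v1beta1.schema.trainingjob import definition
from google.cloud.aiplatform.v1beta1.schema.trainingjob import definition_v1beta1

# The unversioned alias and the versioned package expose the same
# proto-plus message classes.
assert definition.AutoMlTablesInputs is definition_v1beta1.AutoMlTablesInputs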
+# + + +from .types.automl_image_classification import AutoMlImageClassification +from .types.automl_image_classification import AutoMlImageClassificationInputs +from .types.automl_image_classification import AutoMlImageClassificationMetadata +from .types.automl_image_object_detection import AutoMlImageObjectDetection +from .types.automl_image_object_detection import AutoMlImageObjectDetectionInputs +from .types.automl_image_object_detection import AutoMlImageObjectDetectionMetadata +from .types.automl_image_segmentation import AutoMlImageSegmentation +from .types.automl_image_segmentation import AutoMlImageSegmentationInputs +from .types.automl_image_segmentation import AutoMlImageSegmentationMetadata +from .types.automl_tables import AutoMlTables +from .types.automl_tables import AutoMlTablesInputs +from .types.automl_tables import AutoMlTablesMetadata +from .types.automl_text_classification import AutoMlTextClassification +from .types.automl_text_classification import AutoMlTextClassificationInputs +from .types.automl_text_extraction import AutoMlTextExtraction +from .types.automl_text_extraction import AutoMlTextExtractionInputs +from .types.automl_text_sentiment import AutoMlTextSentiment +from .types.automl_text_sentiment import AutoMlTextSentimentInputs +from .types.automl_time_series_forecasting import AutoMlForecasting +from .types.automl_time_series_forecasting import AutoMlForecastingInputs +from .types.automl_time_series_forecasting import AutoMlForecastingMetadata +from .types.automl_video_action_recognition import AutoMlVideoActionRecognition +from .types.automl_video_action_recognition import AutoMlVideoActionRecognitionInputs +from .types.automl_video_classification import AutoMlVideoClassification +from .types.automl_video_classification import AutoMlVideoClassificationInputs +from .types.automl_video_object_tracking import AutoMlVideoObjectTracking +from .types.automl_video_object_tracking import AutoMlVideoObjectTrackingInputs +from .types.export_evaluated_data_items_config import ExportEvaluatedDataItemsConfig + +__all__ = ( +'AutoMlForecasting', +'AutoMlForecastingInputs', +'AutoMlForecastingMetadata', +'AutoMlImageClassification', +'AutoMlImageClassificationInputs', +'AutoMlImageClassificationMetadata', +'AutoMlImageObjectDetection', +'AutoMlImageObjectDetectionInputs', +'AutoMlImageObjectDetectionMetadata', +'AutoMlImageSegmentation', +'AutoMlImageSegmentationInputs', +'AutoMlImageSegmentationMetadata', +'AutoMlTables', +'AutoMlTablesInputs', +'AutoMlTablesMetadata', +'AutoMlTextClassification', +'AutoMlTextClassificationInputs', +'AutoMlTextExtraction', +'AutoMlTextExtractionInputs', +'AutoMlTextSentiment', +'AutoMlTextSentimentInputs', +'AutoMlVideoActionRecognition', +'AutoMlVideoActionRecognitionInputs', +'AutoMlVideoClassification', +'AutoMlVideoClassificationInputs', +'AutoMlVideoObjectTracking', +'AutoMlVideoObjectTrackingInputs', +'ExportEvaluatedDataItemsConfig', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_metadata.json new file mode 100644 index 0000000000..6de794c90a --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_metadata.json @@ -0,0 +1,7 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": 
"google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1", + "protoPackage": "google.cloud.aiplatform.v1beta1.schema.trainingjob.definition", + "schema": "1.0" +} diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/py.typed new file mode 100644 index 0000000000..98af260cd7 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-trainingjob-definition package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/services/__init__.py new file mode 100644 index 0000000000..4de65971c2 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py new file mode 100644 index 0000000000..c120afb8d9 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .automl_image_classification import ( + AutoMlImageClassification, + AutoMlImageClassificationInputs, + AutoMlImageClassificationMetadata, +) +from .automl_image_object_detection import ( + AutoMlImageObjectDetection, + AutoMlImageObjectDetectionInputs, + AutoMlImageObjectDetectionMetadata, +) +from .automl_image_segmentation import ( + AutoMlImageSegmentation, + AutoMlImageSegmentationInputs, + AutoMlImageSegmentationMetadata, +) +from .automl_tables import ( + AutoMlTables, + AutoMlTablesInputs, + AutoMlTablesMetadata, +) +from .automl_text_classification import ( + AutoMlTextClassification, + AutoMlTextClassificationInputs, +) +from .automl_text_extraction import ( + AutoMlTextExtraction, + AutoMlTextExtractionInputs, +) +from .automl_text_sentiment import ( + AutoMlTextSentiment, + AutoMlTextSentimentInputs, +) +from .automl_time_series_forecasting import ( + AutoMlForecasting, + AutoMlForecastingInputs, + AutoMlForecastingMetadata, +) +from .automl_video_action_recognition import ( + AutoMlVideoActionRecognition, + AutoMlVideoActionRecognitionInputs, +) +from .automl_video_classification import ( + AutoMlVideoClassification, + AutoMlVideoClassificationInputs, +) +from .automl_video_object_tracking import ( + AutoMlVideoObjectTracking, + AutoMlVideoObjectTrackingInputs, +) +from .export_evaluated_data_items_config import ( + ExportEvaluatedDataItemsConfig, +) + +__all__ = ( + 'AutoMlImageClassification', + 'AutoMlImageClassificationInputs', + 'AutoMlImageClassificationMetadata', + 'AutoMlImageObjectDetection', + 'AutoMlImageObjectDetectionInputs', + 'AutoMlImageObjectDetectionMetadata', + 'AutoMlImageSegmentation', + 'AutoMlImageSegmentationInputs', + 'AutoMlImageSegmentationMetadata', + 'AutoMlTables', + 'AutoMlTablesInputs', + 'AutoMlTablesMetadata', + 'AutoMlTextClassification', + 'AutoMlTextClassificationInputs', + 'AutoMlTextExtraction', + 'AutoMlTextExtractionInputs', + 'AutoMlTextSentiment', + 'AutoMlTextSentimentInputs', + 'AutoMlForecasting', + 'AutoMlForecastingInputs', + 'AutoMlForecastingMetadata', + 'AutoMlVideoActionRecognition', + 'AutoMlVideoActionRecognitionInputs', + 'AutoMlVideoClassification', + 'AutoMlVideoClassificationInputs', + 'AutoMlVideoObjectTracking', + 'AutoMlVideoObjectTrackingInputs', + 'ExportEvaluatedDataItemsConfig', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py new file mode 100644 index 0000000000..70afa83c40 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py @@ -0,0 +1,156 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
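The grouped re-exports in ``types/__init__.py`` mean each message class is reachable at three depths. A short sketch, assuming the staged packages are importable:

from google.cloud.aiplatform.v1beta1.schema.trainingjob import definition_v1beta1
from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1 import types
from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types import automl_tables

# One class object, three import depths; __all__ keeps the surface explicit.
assert definition_v1beta1.AutoMlTables is types.AutoMlTables is automl_tables.AutoMlTables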
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', + manifest={ + 'AutoMlImageClassification', + 'AutoMlImageClassificationInputs', + 'AutoMlImageClassificationMetadata', + }, +) + + +class AutoMlImageClassification(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Image + Classification Model. + + Attributes: + inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageClassificationInputs): + The input parameters of this TrainingJob. + metadata (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageClassificationMetadata): + The metadata information. + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlImageClassificationInputs', + ) + metadata = proto.Field( + proto.MESSAGE, + number=2, + message='AutoMlImageClassificationMetadata', + ) + + +class AutoMlImageClassificationInputs(proto.Message): + r""" + Attributes: + model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageClassificationInputs.ModelType): + + base_model_id (str): + The ID of the ``base`` model. If it is specified, the new + model will be trained based on the ``base`` model. + Otherwise, the new model will be trained from scratch. The + ``base`` model must be in the same Project and Location as + the new Model to train, and have the same modelType. + budget_milli_node_hours (int): + The training budget of creating this model, expressed in + milli node hours i.e. 1,000 value in this field means 1 node + hour. The actual metadata.costMilliNodeHours will be equal + or less than this value. If further model training ceases to + provide any improvements, it will stop without using the + full budget and the metadata.successfulStopReason will be + ``model-converged``. Note, node_hour = actual_hour \* + number_of_nodes_involved. For modelType + ``cloud``\ (default), the budget must be between 8,000 and + 800,000 milli node hours, inclusive. The default value is + 192,000 which represents one day in wall time, considering 8 + nodes are used. For model types ``mobile-tf-low-latency-1``, + ``mobile-tf-versatile-1``, ``mobile-tf-high-accuracy-1``, + the training budget must be between 1,000 and 100,000 milli + node hours, inclusive. The default value is 24,000 which + represents one day in wall time on a single node that is + used. + disable_early_stopping (bool): + Use the entire training budget. This disables + the early stopping feature. When false the early + stopping feature is enabled, which means that + AutoML Image Classification might stop training + before the entire training budget has been used. + multi_label (bool): + If false, a single-label (multi-class) Model + will be trained (i.e. assuming that for each + image just up to one annotation may be + applicable). If true, a multi-label Model will + be trained (i.e. assuming that for each image + multiple annotations may be applicable). 
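As an illustrative usage sketch (values are hypothetical; the budget bounds restate the docstring above, and the package path assumes the staged layout):

from google.cloud.aiplatform.v1beta1.schema.trainingjob import definition_v1beta1

inputs = definition_v1beta1.AutoMlImageClassificationInputs(
    model_type=definition_v1beta1.AutoMlImageClassificationInputs.ModelType.MOBILE_TF_VERSATILE_1,
    # 8,000 milli node hours == 8 node hours; mobile model types accept
    # budgets between 1,000 and 100,000 milli node hours.
    budget_milli_node_hours=8_000,
    multi_label=False,
    disable_early_stopping=False,
)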
+ """ + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD = 1 + MOBILE_TF_LOW_LATENCY_1 = 2 + MOBILE_TF_VERSATILE_1 = 3 + MOBILE_TF_HIGH_ACCURACY_1 = 4 + + model_type = proto.Field( + proto.ENUM, + number=1, + enum=ModelType, + ) + base_model_id = proto.Field( + proto.STRING, + number=2, + ) + budget_milli_node_hours = proto.Field( + proto.INT64, + number=3, + ) + disable_early_stopping = proto.Field( + proto.BOOL, + number=4, + ) + multi_label = proto.Field( + proto.BOOL, + number=5, + ) + + +class AutoMlImageClassificationMetadata(proto.Message): + r""" + Attributes: + cost_milli_node_hours (int): + The actual training cost of creating this + model, expressed in milli node hours, i.e. 1,000 + value in this field means 1 node hour. + Guaranteed to not exceed + inputs.budgetMilliNodeHours. + successful_stop_reason (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageClassificationMetadata.SuccessfulStopReason): + For successful job completions, this is the + reason why the job has finished. + """ + class SuccessfulStopReason(proto.Enum): + r"""""" + SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 + BUDGET_REACHED = 1 + MODEL_CONVERGED = 2 + + cost_milli_node_hours = proto.Field( + proto.INT64, + number=1, + ) + successful_stop_reason = proto.Field( + proto.ENUM, + number=2, + enum=SuccessfulStopReason, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py new file mode 100644 index 0000000000..eba2aa5fce --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py @@ -0,0 +1,137 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', + manifest={ + 'AutoMlImageObjectDetection', + 'AutoMlImageObjectDetectionInputs', + 'AutoMlImageObjectDetectionMetadata', + }, +) + + +class AutoMlImageObjectDetection(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Image Object + Detection Model. + + Attributes: + inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageObjectDetectionInputs): + The input parameters of this TrainingJob. 
+ metadata (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageObjectDetectionMetadata): + The metadata information. + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlImageObjectDetectionInputs', + ) + metadata = proto.Field( + proto.MESSAGE, + number=2, + message='AutoMlImageObjectDetectionMetadata', + ) + + +class AutoMlImageObjectDetectionInputs(proto.Message): + r""" + Attributes: + model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageObjectDetectionInputs.ModelType): + + budget_milli_node_hours (int): + The training budget of creating this model, expressed in + milli node hours i.e. 1,000 value in this field means 1 node + hour. The actual metadata.costMilliNodeHours will be equal + or less than this value. If further model training ceases to + provide any improvements, it will stop without using the + full budget and the metadata.successfulStopReason will be + ``model-converged``. Note, node_hour = actual_hour \* + number_of_nodes_involved. For modelType + ``cloud``\ (default), the budget must be between 20,000 and + 900,000 milli node hours, inclusive. The default value is + 216,000 which represents one day in wall time, considering 9 + nodes are used. For model types ``mobile-tf-low-latency-1``, + ``mobile-tf-versatile-1``, ``mobile-tf-high-accuracy-1``, the + training budget must be between 1,000 and 100,000 milli node + hours, inclusive. The default value is 24,000 which + represents one day in wall time on a single node that is + used. + disable_early_stopping (bool): + Use the entire training budget. This disables + the early stopping feature. When false the early + stopping feature is enabled, which means that + AutoML Image Object Detection might stop + training before the entire training budget has + been used. + """ + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD_HIGH_ACCURACY_1 = 1 + CLOUD_LOW_LATENCY_1 = 2 + MOBILE_TF_LOW_LATENCY_1 = 3 + MOBILE_TF_VERSATILE_1 = 4 + MOBILE_TF_HIGH_ACCURACY_1 = 5 + + model_type = proto.Field( + proto.ENUM, + number=1, + enum=ModelType, + ) + budget_milli_node_hours = proto.Field( + proto.INT64, + number=2, + ) + disable_early_stopping = proto.Field( + proto.BOOL, + number=3, + ) + + +class AutoMlImageObjectDetectionMetadata(proto.Message): + r""" + Attributes: + cost_milli_node_hours (int): + The actual training cost of creating this + model, expressed in milli node hours, i.e. 1,000 + value in this field means 1 node hour. + Guaranteed to not exceed + inputs.budgetMilliNodeHours. + successful_stop_reason (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageObjectDetectionMetadata.SuccessfulStopReason): + For successful job completions, this is the + reason why the job has finished.
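A hedged sketch of inspecting the resulting metadata once a job finishes (``metadata`` here is a hypothetical, already-populated message; the field semantics restate the docstrings above):

from google.cloud.aiplatform.v1beta1.schema.trainingjob import definition_v1beta1

def describe_stop(metadata: definition_v1beta1.AutoMlImageObjectDetectionMetadata) -> str:
    """Summarize why a finished job stopped and what it actually cost."""
    hours = metadata.cost_milli_node_hours / 1000  # milli node hours -> node hours
    return f"{metadata.successful_stop_reason.name}: {hours:.1f} node hours"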
+ """ + class SuccessfulStopReason(proto.Enum): + r"""""" + SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 + BUDGET_REACHED = 1 + MODEL_CONVERGED = 2 + + cost_milli_node_hours = proto.Field( + proto.INT64, + number=1, + ) + successful_stop_reason = proto.Field( + proto.ENUM, + number=2, + enum=SuccessfulStopReason, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py new file mode 100644 index 0000000000..1bf67523b2 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py @@ -0,0 +1,131 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', + manifest={ + 'AutoMlImageSegmentation', + 'AutoMlImageSegmentationInputs', + 'AutoMlImageSegmentationMetadata', + }, +) + + +class AutoMlImageSegmentation(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Image + Segmentation Model. + + Attributes: + inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageSegmentationInputs): + The input parameters of this TrainingJob. + metadata (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageSegmentationMetadata): + The metadata information. + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlImageSegmentationInputs', + ) + metadata = proto.Field( + proto.MESSAGE, + number=2, + message='AutoMlImageSegmentationMetadata', + ) + + +class AutoMlImageSegmentationInputs(proto.Message): + r""" + Attributes: + model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageSegmentationInputs.ModelType): + + budget_milli_node_hours (int): + The training budget of creating this model, expressed in + milli node hours i.e. 1,000 value in this field means 1 node + hour. The actual metadata.costMilliNodeHours will be equal + or less than this value. If further model training ceases to + provide any improvements, it will stop without using the + full budget and the metadata.successfulStopReason will be + ``model-converged``. Note, node_hour = actual_hour \* + number_of_nodes_involved. Or actaul_wall_clock_hours = + train_budget_milli_node_hours / (number_of_nodes_involved \* + 1000) For modelType ``cloud-high-accuracy-1``\ (default), + the budget must be between 20,000 and 2,000,000 milli node + hours, inclusive. The default value is 192,000 which + represents one day in wall time (1000 milli \* 24 hours \* 8 + nodes). + base_model_id (str): + The ID of the ``base`` model. 
If it is specified, the new + model will be trained based on the ``base`` model. + Otherwise, the new model will be trained from scratch. The + ``base`` model must be in the same Project and Location as + the new Model to train, and have the same modelType. + """ + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD_HIGH_ACCURACY_1 = 1 + CLOUD_LOW_ACCURACY_1 = 2 + MOBILE_TF_LOW_LATENCY_1 = 3 + + model_type = proto.Field( + proto.ENUM, + number=1, + enum=ModelType, + ) + budget_milli_node_hours = proto.Field( + proto.INT64, + number=2, + ) + base_model_id = proto.Field( + proto.STRING, + number=3, + ) + + +class AutoMlImageSegmentationMetadata(proto.Message): + r""" + Attributes: + cost_milli_node_hours (int): + The actual training cost of creating this + model, expressed in milli node hours, i.e. 1,000 + value in this field means 1 node hour. + Guaranteed to not exceed + inputs.budgetMilliNodeHours. + successful_stop_reason (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageSegmentationMetadata.SuccessfulStopReason): + For successful job completions, this is the + reason why the job has finished. + """ + class SuccessfulStopReason(proto.Enum): + r"""""" + SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 + BUDGET_REACHED = 1 + MODEL_CONVERGED = 2 + + cost_milli_node_hours = proto.Field( + proto.INT64, + number=1, + ) + successful_stop_reason = proto.Field( + proto.ENUM, + number=2, + enum=SuccessfulStopReason, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py new file mode 100644 index 0000000000..23d1375aea --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py @@ -0,0 +1,499 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types import export_evaluated_data_items_config as gcastd_export_evaluated_data_items_config + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', + manifest={ + 'AutoMlTables', + 'AutoMlTablesInputs', + 'AutoMlTablesMetadata', + }, +) + + +class AutoMlTables(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Tables Model. + Attributes: + inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs): + The input parameters of this TrainingJob. + metadata (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesMetadata): + The metadata information. 
+ """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlTablesInputs', + ) + metadata = proto.Field( + proto.MESSAGE, + number=2, + message='AutoMlTablesMetadata', + ) + + +class AutoMlTablesInputs(proto.Message): + r""" + Attributes: + optimization_objective_recall_value (float): + Required when optimization_objective is + "maximize-precision-at-recall". Must be between 0 and 1, + inclusive. + optimization_objective_precision_value (float): + Required when optimization_objective is + "maximize-recall-at-precision". Must be between 0 and 1, + inclusive. + prediction_type (str): + The type of prediction the Model is to + produce. "classification" - Predict one out of + multiple target values is + picked for each row. + "regression" - Predict a value based on its + relation to other values. This + type is available only to columns that contain + semantically numeric values, i.e. integers or + floating point number, even if + stored as e.g. strings. + target_column (str): + The column name of the target column that the + model is to predict. + transformations (Sequence[google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation]): + Each transformation will apply transform + function to given input column. And the result + will be used for training. When creating + transformation for BigQuery Struct column, the + column should be flattened using "." as the + delimiter. + optimization_objective (str): + Objective function the model is optimizing + towards. The training process creates a model + that maximizes/minimizes the value of the + objective function over the validation set. + + The supported optimization objectives depend on + the prediction type. If the field is not set, a + default objective function is used. + classification (binary): + "maximize-au-roc" (default) - Maximize the + area under the receiver + operating characteristic (ROC) curve. + "minimize-log-loss" - Minimize log loss. + "maximize-au-prc" - Maximize the area under + the precision-recall curve. "maximize- + precision-at-recall" - Maximize precision for a + specified + recall value. "maximize-recall-at-precision" - + Maximize recall for a specified + precision value. + classification (multi-class): + "minimize-log-loss" (default) - Minimize log + loss. + regression: + "minimize-rmse" (default) - Minimize root- + mean-squared error (RMSE). "minimize-mae" - + Minimize mean-absolute error (MAE). "minimize- + rmsle" - Minimize root-mean-squared log error + (RMSLE). + train_budget_milli_node_hours (int): + Required. The train budget of creating this + model, expressed in milli node hours i.e. 1,000 + value in this field means 1 node hour. + The training cost of the model will not exceed + this budget. The final cost will be attempted to + be close to the budget, though may end up being + (even) noticeably smaller - at the backend's + discretion. This especially may happen when + further model training ceases to provide any + improvements. + If the budget is set to a value known to be + insufficient to train a model for the given + dataset, the training won't be attempted and + will error. + + The train budget must be between 1,000 and + 72,000 milli node hours, inclusive. + disable_early_stopping (bool): + Use the entire training budget. This disables + the early stopping feature. By default, the + early stopping feature is enabled, which means + that AutoML Tables might stop training before + the entire training budget has been used. 
+ weight_column_name (str): + Column name that should be used as the weight + column. Higher values in this column give more + importance to the row during model training. The + column must have numeric values between 0 and + 10000 inclusively; 0 means the row is ignored + for training. If weight column field is not set, + then all rows are assumed to have equal weight + of 1. + export_evaluated_data_items_config (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.ExportEvaluatedDataItemsConfig): + Configuration for exporting test set + predictions to a BigQuery table. If this + configuration is absent, then the export is not + performed. + additional_experiments (Sequence[str]): + Additional experiment flags for the Tables + training pipeline. + """ + + class Transformation(proto.Message): + r""" + Attributes: + auto (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.AutoTransformation): + + numeric (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.NumericTransformation): + + categorical (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.CategoricalTransformation): + + timestamp (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.TimestampTransformation): + + text (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.TextTransformation): + + repeated_numeric (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.NumericArrayTransformation): + + repeated_categorical (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.CategoricalArrayTransformation): + + repeated_text (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.TextArrayTransformation): + + """ + + class AutoTransformation(proto.Message): + r"""Training pipeline will infer the proper transformation based + on the statistics of the dataset. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + + class NumericTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - The value converted to float32. + - The z_score of the value. + - log(value+1) when the value is greater than or equal to 0. + Otherwise, this transformation is not applied and the value is + considered a missing value. + - z_score of log(value+1) when the value is greater than or equal + to 0. Otherwise, this transformation is not applied and the value + is considered a missing value. + - A boolean value that indicates whether the value is valid. + + Attributes: + column_name (str): + + invalid_values_allowed (bool): + If invalid values are allowed, the training + pipeline will create a boolean feature that + indicates whether the value is valid. Otherwise, + the training pipeline will discard the input row + from the training data. + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + invalid_values_allowed = proto.Field( + proto.BOOL, + number=2, + ) + + class CategoricalTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - The categorical string as is--no change to case, punctuation, + spelling, tense, and so on.
+ - Convert the category name to a dictionary lookup index and + generate an embedding for each index. + - Categories that appear less than 5 times in the training dataset + are treated as the "unknown" category. The "unknown" category + gets its own special lookup index and resulting embedding. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + + class TimestampTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - Apply the transformation functions for Numerical columns. + - Determine the year, month, day, and weekday. Treat each value from + the timestamp as a Categorical column. + - Invalid numerical values (for example, values that fall outside + of a typical timestamp range, or are extreme values) receive no + special treatment and are not removed. + + Attributes: + column_name (str): + + time_format (str): + The format in which that time field is expressed. The + time_format must either be one of: + + - ``unix-seconds`` + - ``unix-milliseconds`` + - ``unix-microseconds`` + - ``unix-nanoseconds`` (for respectively number of seconds, + milliseconds, microseconds and nanoseconds since start of + the Unix epoch); or be written in ``strftime`` syntax. If + time_format is not set, then the default format is RFC + 3339 ``date-time`` format, where ``time-offset`` = + ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z) + invalid_values_allowed (bool): + If invalid values are allowed, the training + pipeline will create a boolean feature that + indicates whether the value is valid. Otherwise, + the training pipeline will discard the input row + from the training data. + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + time_format = proto.Field( + proto.STRING, + number=2, + ) + invalid_values_allowed = proto.Field( + proto.BOOL, + number=3, + ) + + class TextTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - The text as is--no change to case, punctuation, spelling, tense, + and so on. + - Tokenize text to words. Convert each word to a dictionary lookup + index and generate an embedding for each index. Combine the + embedding of all elements into a single embedding using the mean. + - Tokenization is based on unicode script boundaries. + - Missing values get their own lookup index and resulting + embedding. + - Stop-words receive no special treatment and are not removed. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + + class NumericArrayTransformation(proto.Message): + r"""Treats the column as numerical array and performs following + transformation functions. + + - All transformations for Numerical types are applied to the average + of all elements. + - The average of empty arrays is treated as zero. + + Attributes: + column_name (str): + + invalid_values_allowed (bool): + If invalid values are allowed, the training + pipeline will create a boolean feature that + indicates whether the value is valid. Otherwise, + the training pipeline will discard the input row + from the training data. + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + invalid_values_allowed = proto.Field( + proto.BOOL, + number=2, + ) + + class CategoricalArrayTransformation(proto.Message): + r"""Treats the column as categorical array and performs following + transformation functions.
+ + - For each element in the array, convert the category name to a + dictionary lookup index and generate an embedding for each index. + Combine the embedding of all elements into a single embedding + using the mean. + - Empty arrays are treated as an embedding of zeroes. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + + class TextArrayTransformation(proto.Message): + r"""Treats the column as text array and performs following + transformation functions. + + - Concatenate all text values in the array into a single text value + using a space (" ") as a delimiter, and then treat the result as + a single text value. Apply the transformations for Text columns. + - Empty arrays are treated as empty text. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + + auto = proto.Field( + proto.MESSAGE, + number=1, + oneof='transformation_detail', + message='AutoMlTablesInputs.Transformation.AutoTransformation', + ) + numeric = proto.Field( + proto.MESSAGE, + number=2, + oneof='transformation_detail', + message='AutoMlTablesInputs.Transformation.NumericTransformation', + ) + categorical = proto.Field( + proto.MESSAGE, + number=3, + oneof='transformation_detail', + message='AutoMlTablesInputs.Transformation.CategoricalTransformation', + ) + timestamp = proto.Field( + proto.MESSAGE, + number=4, + oneof='transformation_detail', + message='AutoMlTablesInputs.Transformation.TimestampTransformation', + ) + text = proto.Field( + proto.MESSAGE, + number=5, + oneof='transformation_detail', + message='AutoMlTablesInputs.Transformation.TextTransformation', + ) + repeated_numeric = proto.Field( + proto.MESSAGE, + number=6, + oneof='transformation_detail', + message='AutoMlTablesInputs.Transformation.NumericArrayTransformation', + ) + repeated_categorical = proto.Field( + proto.MESSAGE, + number=7, + oneof='transformation_detail', + message='AutoMlTablesInputs.Transformation.CategoricalArrayTransformation', + ) + repeated_text = proto.Field( + proto.MESSAGE, + number=8, + oneof='transformation_detail', + message='AutoMlTablesInputs.Transformation.TextArrayTransformation', + ) + + optimization_objective_recall_value = proto.Field( + proto.FLOAT, + number=5, + oneof='additional_optimization_objective_config', + ) + optimization_objective_precision_value = proto.Field( + proto.FLOAT, + number=6, + oneof='additional_optimization_objective_config', + ) + prediction_type = proto.Field( + proto.STRING, + number=1, + ) + target_column = proto.Field( + proto.STRING, + number=2, + ) + transformations = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=Transformation, + ) + optimization_objective = proto.Field( + proto.STRING, + number=4, + ) + train_budget_milli_node_hours = proto.Field( + proto.INT64, + number=7, + ) + disable_early_stopping = proto.Field( + proto.BOOL, + number=8, + ) + weight_column_name = proto.Field( + proto.STRING, + number=9, + ) + export_evaluated_data_items_config = proto.Field( + proto.MESSAGE, + number=10, + message=gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig, + ) + additional_experiments = proto.RepeatedField( + proto.STRING, + number=11, + ) + + +class AutoMlTablesMetadata(proto.Message): + r"""Model metadata specific to AutoML Tables. + Attributes: + train_cost_milli_node_hours (int): + Output only. The actual training cost of the + model, expressed in milli node hours, i.e. 1,000 + value in this field means 1 node hour.
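Putting the Tables pieces together, a sketch of a binary-classification spec (column names are made up for illustration; each Transformation sets exactly one member of the ``transformation_detail`` oneof):

from google.cloud.aiplatform.v1beta1.schema.trainingjob import definition_v1beta1

Inputs = definition_v1beta1.AutoMlTablesInputs
Transformation = Inputs.Transformation

inputs = Inputs(
    prediction_type="classification",
    target_column="churned",                   # hypothetical column
    optimization_objective="maximize-precision-at-recall",
    optimization_objective_recall_value=0.9,   # required by this objective
    train_budget_milli_node_hours=1_000,       # minimum allowed budget
    transformations=[
        Transformation(auto=Transformation.AutoTransformation(column_name="age")),
        Transformation(numeric=Transformation.NumericTransformation(
            column_name="monthly_spend", invalid_values_allowed=True)),
    ],
)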
+ Guaranteed to not exceed the train budget. + """ + + train_cost_milli_node_hours = proto.Field( + proto.INT64, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py new file mode 100644 index 0000000000..6844219d37 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', + manifest={ + 'AutoMlTextClassification', + 'AutoMlTextClassificationInputs', + }, +) + + +class AutoMlTextClassification(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Text + Classification Model. + + Attributes: + inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTextClassificationInputs): + The input parameters of this TrainingJob. + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlTextClassificationInputs', + ) + + +class AutoMlTextClassificationInputs(proto.Message): + r""" + Attributes: + multi_label (bool): + + """ + + multi_label = proto.Field( + proto.BOOL, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py new file mode 100644 index 0000000000..0f03e2f581 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', + manifest={ + 'AutoMlTextExtraction', + 'AutoMlTextExtractionInputs', + }, +) + + +class AutoMlTextExtraction(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Text + Extraction Model. + + Attributes: + inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTextExtractionInputs): + The input parameters of this TrainingJob. + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlTextExtractionInputs', + ) + + +class AutoMlTextExtractionInputs(proto.Message): + r""" """ + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py new file mode 100644 index 0000000000..1b5505b69d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py @@ -0,0 +1,66 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', + manifest={ + 'AutoMlTextSentiment', + 'AutoMlTextSentimentInputs', + }, +) + + +class AutoMlTextSentiment(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Text + Sentiment Model. + + Attributes: + inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTextSentimentInputs): + The input parameters of this TrainingJob. + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlTextSentimentInputs', + ) + + +class AutoMlTextSentimentInputs(proto.Message): + r""" + Attributes: + sentiment_max (int): + A sentiment is expressed as an integer + ordinal, where higher value means a more + positive sentiment. The range of sentiments that + will be used is between 0 and sentimentMax + (inclusive on both ends), and all the values in + the range must be represented in the dataset + before a model can be created. + Only the Annotations with this sentimentMax will + be used for training. sentimentMax value must be + between 1 and 10 (inclusive). 
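A small sketch wrapping the documented bound in a helper (the helper name is ours, not part of the generated surface):

from google.cloud.aiplatform.v1beta1.schema.trainingjob import definition_v1beta1

def make_sentiment_inputs(sentiment_max: int) -> definition_v1beta1.AutoMlTextSentimentInputs:
    # Restates the docstring's constraint: sentimentMax must be 1-10 inclusive.
    if not 1 <= sentiment_max <= 10:
        raise ValueError("sentiment_max must be between 1 and 10, inclusive")
    return definition_v1beta1.AutoMlTextSentimentInputs(sentiment_max=sentiment_max)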
+ """ + + sentiment_max = proto.Field( + proto.INT32, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py new file mode 100644 index 0000000000..89caa6da88 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py @@ -0,0 +1,477 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types import export_evaluated_data_items_config as gcastd_export_evaluated_data_items_config + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', + manifest={ + 'AutoMlForecasting', + 'AutoMlForecastingInputs', + 'AutoMlForecastingMetadata', + }, +) + + +class AutoMlForecasting(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Forecasting + Model. + + Attributes: + inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs): + The input parameters of this TrainingJob. + metadata (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingMetadata): + The metadata information. + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlForecastingInputs', + ) + metadata = proto.Field( + proto.MESSAGE, + number=2, + message='AutoMlForecastingMetadata', + ) + + +class AutoMlForecastingInputs(proto.Message): + r""" + Attributes: + target_column (str): + The name of the column that the model is to + predict. + time_series_identifier_column (str): + The name of the column that identifies the + time series. + time_column (str): + The name of the column that identifies time + order in the time series. + transformations (Sequence[google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation]): + Each transformation will apply transform + function to given input column. And the result + will be used for training. When creating + transformation for BigQuery Struct column, the + column should be flattened using "." as the + delimiter. + optimization_objective (str): + Objective function the model is optimizing towards. The + training process creates a model that optimizes the value of + the objective function over the validation set. + + The supported optimization objectives: + + - "minimize-rmse" (default) - Minimize root-mean-squared + error (RMSE). + + - "minimize-mae" - Minimize mean-absolute error (MAE). + + - "minimize-rmsle" - Minimize root-mean-squared log error + (RMSLE). 
+ + - "minimize-rmspe" - Minimize root-mean-squared percentage + error (RMSPE). + + - "minimize-wape-mae" - Minimize the combination of + weighted absolute percentage error (WAPE) and + mean-absolute-error (MAE). + + - "minimize-quantile-loss" - Minimize the quantile loss at + the quantiles defined in ``quantiles``. + train_budget_milli_node_hours (int): + Required. The train budget of creating this + model, expressed in milli node hours i.e. 1,000 + value in this field means 1 node hour. + The training cost of the model will not exceed + this budget. The final cost will be attempted to + be close to the budget, though may end up being + (even) noticeably smaller - at the backend's + discretion. This especially may happen when + further model training ceases to provide any + improvements. + If the budget is set to a value known to be + insufficient to train a model for the given + dataset, the training won't be attempted and + will error. + + The train budget must be between 1,000 and + 72,000 milli node hours, inclusive. + weight_column (str): + Column name that should be used as the weight + column. Higher values in this column give more + importance to the row during model training. The + column must have numeric values between 0 and + 10000 inclusively; 0 means the row is ignored + for training. If weight column field is not set, + then all rows are assumed to have equal weight + of 1. + time_series_attribute_columns (Sequence[str]): + Column names that should be used as attribute + columns. The value of these columns does not + vary as a function of time. For example, store + ID or item color. + unavailable_at_forecast_columns (Sequence[str]): + Names of columns that are unavailable when a forecast is + requested. This column contains information for the given + entity (identified by the time_series_identifier_column) + that is unknown before the forecast For example, actual + weather on a given day. + available_at_forecast_columns (Sequence[str]): + Names of columns that are available and provided when a + forecast is requested. These columns contain information for + the given entity (identified by the + time_series_identifier_column column) that is known at + forecast. For example, predicted weather for a specific day. + data_granularity (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Granularity): + Expected difference in time granularity + between rows in the data. + forecast_horizon (int): + The amount of time into the future for which forecasted + values for the target are returned. Expressed in number of + units defined by the ``data_granularity`` field. + context_window (int): + The amount of time into the past training and prediction + data is used for model training and prediction respectively. + Expressed in number of units defined by the + ``data_granularity`` field. + export_evaluated_data_items_config (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.ExportEvaluatedDataItemsConfig): + Configuration for exporting test set + predictions to a BigQuery table. If this + configuration is absent, then the export is not + performed. + quantiles (Sequence[float]): + Quantiles to use for minimize-quantile-loss + ``optimization_objective``. Up to 5 quantiles are allowed of + values between 0 and 1, exclusive. Required if the value of + optimization_objective is minimize-quantile-loss. Represents + the percent quantiles to use for that objective. Quantiles + must be unique. 
+ validation_options (str): + Validation options for the data validation component. The + available options are: + + - "fail-pipeline" - default, will run the data + validation and fail the pipeline if validation fails. + + - "ignore-validation" - ignore the results of the + validation and continue. + additional_experiments (Sequence[str]): + Additional experiment flags for the time + series forecasting training. + """ + + class Transformation(proto.Message): + r""" + Attributes: + auto (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.AutoTransformation): + + numeric (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.NumericTransformation): + + categorical (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.CategoricalTransformation): + + timestamp (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.TimestampTransformation): + + text (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.TextTransformation): + + """ + + class AutoTransformation(proto.Message): + r"""Training pipeline will infer the proper transformation based + on the statistics of the dataset. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + + class NumericTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - The value converted to float32. + + - The z_score of the value. + + - log(value+1) when the value is greater than or equal to 0. + Otherwise, this transformation is not applied and the value is + considered a missing value. + + - z_score of log(value+1) when the value is greater than or equal + to 0. Otherwise, this transformation is not applied and the value + is considered a missing value. + + - A boolean value that indicates whether the value is valid. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + + class CategoricalTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - The categorical string as is--no change to case, punctuation, + spelling, tense, and so on. + + - Convert the category name to a dictionary lookup index and + generate an embedding for each index. + + - Categories that appear less than 5 times in the training dataset + are treated as the "unknown" category. The "unknown" category + gets its own special lookup index and resulting embedding. + + Attributes: + column_name (str): + + """ + + column_name = proto.Field( + proto.STRING, + number=1, + ) + + class TimestampTransformation(proto.Message): + r"""Training pipeline will perform following transformation functions. + + - Apply the transformation functions for Numerical columns. + + - Determine the year, month, day, and weekday. Treat each value from + the timestamp as a Categorical column. + + - Invalid numerical values (for example, values that fall outside + of a typical timestamp range, or are extreme values) receive no + special treatment and are not removed. + + Attributes: + column_name (str): + + time_format (str): + The format in which that time field is expressed.
+ The time_format must either be one of:
+
+ - ``unix-seconds``
+
+ - ``unix-milliseconds``
+
+ - ``unix-microseconds``
+
+ - ``unix-nanoseconds``
+
+ (for the number of seconds, milliseconds, microseconds,
+ and nanoseconds, respectively, since the start of the Unix
+ epoch);
+
+ or be written in ``strftime`` syntax.
+
+ If time_format is not set, then the default format is the
+ RFC 3339 ``date-time`` format, where ``time-offset`` =
+ ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z).
+ """
+
+ column_name = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ time_format = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+
+ class TextTransformation(proto.Message):
+ r"""The training pipeline will perform the following transformation
+ functions.
+
+ - The text as is--no change to case, punctuation, spelling, tense,
+ and so on.
+
+ - Convert the category name to a dictionary lookup index and
+ generate an embedding for each index.
+
+ Attributes:
+ column_name (str):
+
+ """
+
+ column_name = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+
+ auto = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ oneof='transformation_detail',
+ message='AutoMlForecastingInputs.Transformation.AutoTransformation',
+ )
+ numeric = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ oneof='transformation_detail',
+ message='AutoMlForecastingInputs.Transformation.NumericTransformation',
+ )
+ categorical = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ oneof='transformation_detail',
+ message='AutoMlForecastingInputs.Transformation.CategoricalTransformation',
+ )
+ timestamp = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ oneof='transformation_detail',
+ message='AutoMlForecastingInputs.Transformation.TimestampTransformation',
+ )
+ text = proto.Field(
+ proto.MESSAGE,
+ number=5,
+ oneof='transformation_detail',
+ message='AutoMlForecastingInputs.Transformation.TextTransformation',
+ )
+
+ class Granularity(proto.Message):
+ r"""A duration of time expressed in time granularity units.
+
+ Attributes:
+ unit (str):
+ The time granularity unit of this time period. The supported
+ units are:
+
+ - "minute"
+
+ - "hour"
+
+ - "day"
+
+ - "week"
+
+ - "month"
+
+ - "year"
+ quantity (int):
+ The number of granularity_units between data points in the
+ training data. If ``granularity_unit`` is ``minute``, it
+ can be 1, 5, 10, 15, or 30. For all other values of
+ ``granularity_unit``, it must be 1.
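+
+ For illustration (the values are hypothetical examples, not
+ defaults): daily training data would use ``unit`` = "day"
+ with ``quantity`` = 1, while data sampled every five minutes
+ would use ``unit`` = "minute" with ``quantity`` = 5.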
+ """
+
+ unit = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ quantity = proto.Field(
+ proto.INT64,
+ number=2,
+ )
+
+ target_column = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ time_series_identifier_column = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ time_column = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+ transformations = proto.RepeatedField(
+ proto.MESSAGE,
+ number=4,
+ message=Transformation,
+ )
+ optimization_objective = proto.Field(
+ proto.STRING,
+ number=5,
+ )
+ train_budget_milli_node_hours = proto.Field(
+ proto.INT64,
+ number=6,
+ )
+ weight_column = proto.Field(
+ proto.STRING,
+ number=7,
+ )
+ time_series_attribute_columns = proto.RepeatedField(
+ proto.STRING,
+ number=19,
+ )
+ unavailable_at_forecast_columns = proto.RepeatedField(
+ proto.STRING,
+ number=20,
+ )
+ available_at_forecast_columns = proto.RepeatedField(
+ proto.STRING,
+ number=21,
+ )
+ data_granularity = proto.Field(
+ proto.MESSAGE,
+ number=22,
+ message=Granularity,
+ )
+ forecast_horizon = proto.Field(
+ proto.INT64,
+ number=23,
+ )
+ context_window = proto.Field(
+ proto.INT64,
+ number=24,
+ )
+ export_evaluated_data_items_config = proto.Field(
+ proto.MESSAGE,
+ number=15,
+ message=gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig,
+ )
+ quantiles = proto.RepeatedField(
+ proto.DOUBLE,
+ number=16,
+ )
+ validation_options = proto.Field(
+ proto.STRING,
+ number=17,
+ )
+ additional_experiments = proto.RepeatedField(
+ proto.STRING,
+ number=25,
+ )
+
+
+class AutoMlForecastingMetadata(proto.Message):
+ r"""Model metadata specific to AutoML Forecasting.
+
+ Attributes:
+ train_cost_milli_node_hours (int):
+ Output only. The actual training cost of the
+ model, expressed in milli node hours, i.e. a
+ value of 1,000 in this field means 1 node hour.
+ Guaranteed to not exceed the train budget.
+ """
+
+ train_cost_milli_node_hours = proto.Field(
+ proto.INT64,
+ number=1,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py
new file mode 100644
index 0000000000..b9cce5bbe9
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py
@@ -0,0 +1,65 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition',
+ manifest={
+ 'AutoMlVideoActionRecognition',
+ 'AutoMlVideoActionRecognitionInputs',
+ },
+)
+
+
+class AutoMlVideoActionRecognition(proto.Message):
+ r"""A TrainingJob that trains and uploads an AutoML Video Action
+ Recognition Model.
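+
+ As a purely illustrative sketch (the chosen ``ModelType`` value
+ is an arbitrary example, not a recommendation), the inputs can
+ be constructed and attached to this TrainingJob as follows::
+
+ inputs = AutoMlVideoActionRecognitionInputs(
+ model_type=AutoMlVideoActionRecognitionInputs.ModelType.CLOUD,
+ )
+ job = AutoMlVideoActionRecognition(inputs=inputs)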
+ + Attributes: + inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoActionRecognitionInputs): + The input parameters of this TrainingJob. + """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlVideoActionRecognitionInputs', + ) + + +class AutoMlVideoActionRecognitionInputs(proto.Message): + r""" + Attributes: + model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoActionRecognitionInputs.ModelType): + + """ + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD = 1 + MOBILE_VERSATILE_1 = 2 + MOBILE_JETSON_VERSATILE_1 = 3 + MOBILE_CORAL_VERSATILE_1 = 4 + + model_type = proto.Field( + proto.ENUM, + number=1, + enum=ModelType, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py new file mode 100644 index 0000000000..e2f0bc89e3 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', + manifest={ + 'AutoMlVideoClassification', + 'AutoMlVideoClassificationInputs', + }, +) + + +class AutoMlVideoClassification(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Video + Classification Model. + + Attributes: + inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoClassificationInputs): + The input parameters of this TrainingJob. 
+ """ + + inputs = proto.Field( + proto.MESSAGE, + number=1, + message='AutoMlVideoClassificationInputs', + ) + + +class AutoMlVideoClassificationInputs(proto.Message): + r""" + Attributes: + model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoClassificationInputs.ModelType): + + """ + class ModelType(proto.Enum): + r"""""" + MODEL_TYPE_UNSPECIFIED = 0 + CLOUD = 1 + MOBILE_VERSATILE_1 = 2 + MOBILE_JETSON_VERSATILE_1 = 3 + + model_type = proto.Field( + proto.ENUM, + number=1, + enum=ModelType, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py new file mode 100644 index 0000000000..91f4d9d82a --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', + manifest={ + 'AutoMlVideoObjectTracking', + 'AutoMlVideoObjectTrackingInputs', + }, +) + + +class AutoMlVideoObjectTracking(proto.Message): + r"""A TrainingJob that trains and uploads an AutoML Video + ObjectTracking Model. + + Attributes: + inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoObjectTrackingInputs): + The input parameters of this TrainingJob. 
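+
+ As an illustrative sketch (the field values are arbitrary
+ examples), proto-plus messages such as this one can also be
+ constructed from a plain mapping::
+
+ job = AutoMlVideoObjectTracking(
+ {"inputs": {"model_type": AutoMlVideoObjectTrackingInputs.ModelType.MOBILE_CORAL_VERSATILE_1}}
+ )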
+ """
+
+ inputs = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message='AutoMlVideoObjectTrackingInputs',
+ )
+
+
+class AutoMlVideoObjectTrackingInputs(proto.Message):
+ r"""
+ Attributes:
+ model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoObjectTrackingInputs.ModelType):
+
+ """
+ class ModelType(proto.Enum):
+ r""""""
+ MODEL_TYPE_UNSPECIFIED = 0
+ CLOUD = 1
+ MOBILE_VERSATILE_1 = 2
+ MOBILE_CORAL_VERSATILE_1 = 3
+ MOBILE_CORAL_LOW_LATENCY_1 = 4
+ MOBILE_JETSON_VERSATILE_1 = 5
+ MOBILE_JETSON_LOW_LATENCY_1 = 6
+
+ model_type = proto.Field(
+ proto.ENUM,
+ number=1,
+ enum=ModelType,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py
new file mode 100644
index 0000000000..f2f42e233b
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition',
+ manifest={
+ 'ExportEvaluatedDataItemsConfig',
+ },
+)
+
+
+class ExportEvaluatedDataItemsConfig(proto.Message):
+ r"""Configuration for exporting test set predictions to a
+ BigQuery table.
+
+ Attributes:
+ destination_bigquery_uri (str):
+ URI of desired destination BigQuery table. Expected format:
+ bq://<project_id>:<dataset_id>:<table>
+
+ If not specified, then results are exported to the following
+ auto-created BigQuery table:
+ <project_id>:export_evaluated_examples_<model_name>_<yyyy_MM_dd'T'HH_mm_ss_SSS'Z'>.evaluated_examples
+ override_existing_table (bool):
+ If true and an export destination is
+ specified, then the contents of the destination
+ are overwritten. Otherwise, if the export
+ destination already exists, then the export
+ operation fails.
+ """
+
+ destination_bigquery_uri = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ override_existing_table = proto.Field(
+ proto.BOOL,
+ number=2,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/__init__.py
new file mode 100644
index 0000000000..d356a80d31
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/__init__.py
@@ -0,0 +1,876 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .services.dataset_service import DatasetServiceClient +from .services.dataset_service import DatasetServiceAsyncClient +from .services.endpoint_service import EndpointServiceClient +from .services.endpoint_service import EndpointServiceAsyncClient +from .services.featurestore_online_serving_service import FeaturestoreOnlineServingServiceClient +from .services.featurestore_online_serving_service import FeaturestoreOnlineServingServiceAsyncClient +from .services.featurestore_service import FeaturestoreServiceClient +from .services.featurestore_service import FeaturestoreServiceAsyncClient +from .services.index_endpoint_service import IndexEndpointServiceClient +from .services.index_endpoint_service import IndexEndpointServiceAsyncClient +from .services.index_service import IndexServiceClient +from .services.index_service import IndexServiceAsyncClient +from .services.job_service import JobServiceClient +from .services.job_service import JobServiceAsyncClient +from .services.metadata_service import MetadataServiceClient +from .services.metadata_service import MetadataServiceAsyncClient +from .services.migration_service import MigrationServiceClient +from .services.migration_service import MigrationServiceAsyncClient +from .services.model_service import ModelServiceClient +from .services.model_service import ModelServiceAsyncClient +from .services.pipeline_service import PipelineServiceClient +from .services.pipeline_service import PipelineServiceAsyncClient +from .services.prediction_service import PredictionServiceClient +from .services.prediction_service import PredictionServiceAsyncClient +from .services.specialist_pool_service import SpecialistPoolServiceClient +from .services.specialist_pool_service import SpecialistPoolServiceAsyncClient +from .services.tensorboard_service import TensorboardServiceClient +from .services.tensorboard_service import TensorboardServiceAsyncClient +from .services.vizier_service import VizierServiceClient +from .services.vizier_service import VizierServiceAsyncClient + +from .types.accelerator_type import AcceleratorType +from .types.annotation import Annotation +from .types.annotation_spec import AnnotationSpec +from .types.artifact import Artifact +from .types.batch_prediction_job import BatchPredictionJob +from .types.completion_stats import CompletionStats +from .types.context import Context +from .types.custom_job import ContainerSpec +from .types.custom_job import CustomJob +from .types.custom_job import CustomJobSpec +from .types.custom_job import PythonPackageSpec +from .types.custom_job import Scheduling +from .types.custom_job import WorkerPoolSpec +from .types.data_item import DataItem +from .types.data_labeling_job import ActiveLearningConfig +from .types.data_labeling_job import DataLabelingJob +from .types.data_labeling_job import SampleConfig +from .types.data_labeling_job import TrainingConfig +from .types.dataset import Dataset +from .types.dataset import ExportDataConfig +from .types.dataset import ImportDataConfig +from .types.dataset_service import CreateDatasetOperationMetadata +from .types.dataset_service import 
CreateDatasetRequest +from .types.dataset_service import DeleteDatasetRequest +from .types.dataset_service import ExportDataOperationMetadata +from .types.dataset_service import ExportDataRequest +from .types.dataset_service import ExportDataResponse +from .types.dataset_service import GetAnnotationSpecRequest +from .types.dataset_service import GetDatasetRequest +from .types.dataset_service import ImportDataOperationMetadata +from .types.dataset_service import ImportDataRequest +from .types.dataset_service import ImportDataResponse +from .types.dataset_service import ListAnnotationsRequest +from .types.dataset_service import ListAnnotationsResponse +from .types.dataset_service import ListDataItemsRequest +from .types.dataset_service import ListDataItemsResponse +from .types.dataset_service import ListDatasetsRequest +from .types.dataset_service import ListDatasetsResponse +from .types.dataset_service import UpdateDatasetRequest +from .types.deployed_index_ref import DeployedIndexRef +from .types.deployed_model_ref import DeployedModelRef +from .types.encryption_spec import EncryptionSpec +from .types.endpoint import DeployedModel +from .types.endpoint import Endpoint +from .types.endpoint_service import CreateEndpointOperationMetadata +from .types.endpoint_service import CreateEndpointRequest +from .types.endpoint_service import DeleteEndpointRequest +from .types.endpoint_service import DeployModelOperationMetadata +from .types.endpoint_service import DeployModelRequest +from .types.endpoint_service import DeployModelResponse +from .types.endpoint_service import GetEndpointRequest +from .types.endpoint_service import ListEndpointsRequest +from .types.endpoint_service import ListEndpointsResponse +from .types.endpoint_service import UndeployModelOperationMetadata +from .types.endpoint_service import UndeployModelRequest +from .types.endpoint_service import UndeployModelResponse +from .types.endpoint_service import UpdateEndpointRequest +from .types.entity_type import EntityType +from .types.env_var import EnvVar +from .types.event import Event +from .types.execution import Execution +from .types.explanation import Attribution +from .types.explanation import Explanation +from .types.explanation import ExplanationMetadataOverride +from .types.explanation import ExplanationParameters +from .types.explanation import ExplanationSpec +from .types.explanation import ExplanationSpecOverride +from .types.explanation import FeatureNoiseSigma +from .types.explanation import IntegratedGradientsAttribution +from .types.explanation import ModelExplanation +from .types.explanation import SampledShapleyAttribution +from .types.explanation import SmoothGradConfig +from .types.explanation import XraiAttribution +from .types.explanation_metadata import ExplanationMetadata +from .types.feature import Feature +from .types.feature_monitoring_stats import FeatureStatsAnomaly +from .types.feature_selector import FeatureSelector +from .types.feature_selector import IdMatcher +from .types.featurestore import Featurestore +from .types.featurestore_monitoring import FeaturestoreMonitoringConfig +from .types.featurestore_online_service import FeatureValue +from .types.featurestore_online_service import FeatureValueList +from .types.featurestore_online_service import ReadFeatureValuesRequest +from .types.featurestore_online_service import ReadFeatureValuesResponse +from .types.featurestore_online_service import StreamingReadFeatureValuesRequest +from .types.featurestore_service import 
BatchCreateFeaturesOperationMetadata +from .types.featurestore_service import BatchCreateFeaturesRequest +from .types.featurestore_service import BatchCreateFeaturesResponse +from .types.featurestore_service import BatchReadFeatureValuesOperationMetadata +from .types.featurestore_service import BatchReadFeatureValuesRequest +from .types.featurestore_service import BatchReadFeatureValuesResponse +from .types.featurestore_service import CreateEntityTypeOperationMetadata +from .types.featurestore_service import CreateEntityTypeRequest +from .types.featurestore_service import CreateFeatureOperationMetadata +from .types.featurestore_service import CreateFeatureRequest +from .types.featurestore_service import CreateFeaturestoreOperationMetadata +from .types.featurestore_service import CreateFeaturestoreRequest +from .types.featurestore_service import DeleteEntityTypeRequest +from .types.featurestore_service import DeleteFeatureRequest +from .types.featurestore_service import DeleteFeaturestoreRequest +from .types.featurestore_service import DestinationFeatureSetting +from .types.featurestore_service import ExportFeatureValuesOperationMetadata +from .types.featurestore_service import ExportFeatureValuesRequest +from .types.featurestore_service import ExportFeatureValuesResponse +from .types.featurestore_service import FeatureValueDestination +from .types.featurestore_service import GetEntityTypeRequest +from .types.featurestore_service import GetFeatureRequest +from .types.featurestore_service import GetFeaturestoreRequest +from .types.featurestore_service import ImportFeatureValuesOperationMetadata +from .types.featurestore_service import ImportFeatureValuesRequest +from .types.featurestore_service import ImportFeatureValuesResponse +from .types.featurestore_service import ListEntityTypesRequest +from .types.featurestore_service import ListEntityTypesResponse +from .types.featurestore_service import ListFeaturesRequest +from .types.featurestore_service import ListFeaturesResponse +from .types.featurestore_service import ListFeaturestoresRequest +from .types.featurestore_service import ListFeaturestoresResponse +from .types.featurestore_service import SearchFeaturesRequest +from .types.featurestore_service import SearchFeaturesResponse +from .types.featurestore_service import UpdateEntityTypeRequest +from .types.featurestore_service import UpdateFeatureRequest +from .types.featurestore_service import UpdateFeaturestoreOperationMetadata +from .types.featurestore_service import UpdateFeaturestoreRequest +from .types.hyperparameter_tuning_job import HyperparameterTuningJob +from .types.index import Index +from .types.index_endpoint import DeployedIndex +from .types.index_endpoint import DeployedIndexAuthConfig +from .types.index_endpoint import IndexEndpoint +from .types.index_endpoint import IndexPrivateEndpoints +from .types.index_endpoint_service import CreateIndexEndpointOperationMetadata +from .types.index_endpoint_service import CreateIndexEndpointRequest +from .types.index_endpoint_service import DeleteIndexEndpointRequest +from .types.index_endpoint_service import DeployIndexOperationMetadata +from .types.index_endpoint_service import DeployIndexRequest +from .types.index_endpoint_service import DeployIndexResponse +from .types.index_endpoint_service import GetIndexEndpointRequest +from .types.index_endpoint_service import ListIndexEndpointsRequest +from .types.index_endpoint_service import ListIndexEndpointsResponse +from .types.index_endpoint_service import UndeployIndexOperationMetadata 
+from .types.index_endpoint_service import UndeployIndexRequest +from .types.index_endpoint_service import UndeployIndexResponse +from .types.index_endpoint_service import UpdateIndexEndpointRequest +from .types.index_service import CreateIndexOperationMetadata +from .types.index_service import CreateIndexRequest +from .types.index_service import DeleteIndexRequest +from .types.index_service import GetIndexRequest +from .types.index_service import ListIndexesRequest +from .types.index_service import ListIndexesResponse +from .types.index_service import NearestNeighborSearchOperationMetadata +from .types.index_service import UpdateIndexOperationMetadata +from .types.index_service import UpdateIndexRequest +from .types.io import AvroSource +from .types.io import BigQueryDestination +from .types.io import BigQuerySource +from .types.io import ContainerRegistryDestination +from .types.io import CsvDestination +from .types.io import CsvSource +from .types.io import GcsDestination +from .types.io import GcsSource +from .types.io import TFRecordDestination +from .types.job_service import CancelBatchPredictionJobRequest +from .types.job_service import CancelCustomJobRequest +from .types.job_service import CancelDataLabelingJobRequest +from .types.job_service import CancelHyperparameterTuningJobRequest +from .types.job_service import CreateBatchPredictionJobRequest +from .types.job_service import CreateCustomJobRequest +from .types.job_service import CreateDataLabelingJobRequest +from .types.job_service import CreateHyperparameterTuningJobRequest +from .types.job_service import CreateModelDeploymentMonitoringJobRequest +from .types.job_service import DeleteBatchPredictionJobRequest +from .types.job_service import DeleteCustomJobRequest +from .types.job_service import DeleteDataLabelingJobRequest +from .types.job_service import DeleteHyperparameterTuningJobRequest +from .types.job_service import DeleteModelDeploymentMonitoringJobRequest +from .types.job_service import GetBatchPredictionJobRequest +from .types.job_service import GetCustomJobRequest +from .types.job_service import GetDataLabelingJobRequest +from .types.job_service import GetHyperparameterTuningJobRequest +from .types.job_service import GetModelDeploymentMonitoringJobRequest +from .types.job_service import ListBatchPredictionJobsRequest +from .types.job_service import ListBatchPredictionJobsResponse +from .types.job_service import ListCustomJobsRequest +from .types.job_service import ListCustomJobsResponse +from .types.job_service import ListDataLabelingJobsRequest +from .types.job_service import ListDataLabelingJobsResponse +from .types.job_service import ListHyperparameterTuningJobsRequest +from .types.job_service import ListHyperparameterTuningJobsResponse +from .types.job_service import ListModelDeploymentMonitoringJobsRequest +from .types.job_service import ListModelDeploymentMonitoringJobsResponse +from .types.job_service import PauseModelDeploymentMonitoringJobRequest +from .types.job_service import ResumeModelDeploymentMonitoringJobRequest +from .types.job_service import SearchModelDeploymentMonitoringStatsAnomaliesRequest +from .types.job_service import SearchModelDeploymentMonitoringStatsAnomaliesResponse +from .types.job_service import UpdateModelDeploymentMonitoringJobOperationMetadata +from .types.job_service import UpdateModelDeploymentMonitoringJobRequest +from .types.job_state import JobState +from .types.lineage_subgraph import LineageSubgraph +from .types.machine_resources import AutomaticResources +from 
.types.machine_resources import AutoscalingMetricSpec +from .types.machine_resources import BatchDedicatedResources +from .types.machine_resources import DedicatedResources +from .types.machine_resources import DiskSpec +from .types.machine_resources import MachineSpec +from .types.machine_resources import ResourcesConsumed +from .types.manual_batch_tuning_parameters import ManualBatchTuningParameters +from .types.metadata_schema import MetadataSchema +from .types.metadata_service import AddContextArtifactsAndExecutionsRequest +from .types.metadata_service import AddContextArtifactsAndExecutionsResponse +from .types.metadata_service import AddContextChildrenRequest +from .types.metadata_service import AddContextChildrenResponse +from .types.metadata_service import AddExecutionEventsRequest +from .types.metadata_service import AddExecutionEventsResponse +from .types.metadata_service import CreateArtifactRequest +from .types.metadata_service import CreateContextRequest +from .types.metadata_service import CreateExecutionRequest +from .types.metadata_service import CreateMetadataSchemaRequest +from .types.metadata_service import CreateMetadataStoreOperationMetadata +from .types.metadata_service import CreateMetadataStoreRequest +from .types.metadata_service import DeleteContextRequest +from .types.metadata_service import DeleteMetadataStoreOperationMetadata +from .types.metadata_service import DeleteMetadataStoreRequest +from .types.metadata_service import GetArtifactRequest +from .types.metadata_service import GetContextRequest +from .types.metadata_service import GetExecutionRequest +from .types.metadata_service import GetMetadataSchemaRequest +from .types.metadata_service import GetMetadataStoreRequest +from .types.metadata_service import ListArtifactsRequest +from .types.metadata_service import ListArtifactsResponse +from .types.metadata_service import ListContextsRequest +from .types.metadata_service import ListContextsResponse +from .types.metadata_service import ListExecutionsRequest +from .types.metadata_service import ListExecutionsResponse +from .types.metadata_service import ListMetadataSchemasRequest +from .types.metadata_service import ListMetadataSchemasResponse +from .types.metadata_service import ListMetadataStoresRequest +from .types.metadata_service import ListMetadataStoresResponse +from .types.metadata_service import QueryArtifactLineageSubgraphRequest +from .types.metadata_service import QueryContextLineageSubgraphRequest +from .types.metadata_service import QueryExecutionInputsAndOutputsRequest +from .types.metadata_service import UpdateArtifactRequest +from .types.metadata_service import UpdateContextRequest +from .types.metadata_service import UpdateExecutionRequest +from .types.metadata_store import MetadataStore +from .types.migratable_resource import MigratableResource +from .types.migration_service import BatchMigrateResourcesOperationMetadata +from .types.migration_service import BatchMigrateResourcesRequest +from .types.migration_service import BatchMigrateResourcesResponse +from .types.migration_service import MigrateResourceRequest +from .types.migration_service import MigrateResourceResponse +from .types.migration_service import SearchMigratableResourcesRequest +from .types.migration_service import SearchMigratableResourcesResponse +from .types.model import Model +from .types.model import ModelContainerSpec +from .types.model import Port +from .types.model import PredictSchemata +from .types.model_deployment_monitoring_job import 
ModelDeploymentMonitoringBigQueryTable +from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringJob +from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringObjectiveConfig +from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringScheduleConfig +from .types.model_deployment_monitoring_job import ModelMonitoringStatsAnomalies +from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringObjectiveType +from .types.model_evaluation import ModelEvaluation +from .types.model_evaluation_slice import ModelEvaluationSlice +from .types.model_monitoring import ModelMonitoringAlertConfig +from .types.model_monitoring import ModelMonitoringObjectiveConfig +from .types.model_monitoring import SamplingStrategy +from .types.model_monitoring import ThresholdConfig +from .types.model_service import DeleteModelRequest +from .types.model_service import ExportModelOperationMetadata +from .types.model_service import ExportModelRequest +from .types.model_service import ExportModelResponse +from .types.model_service import GetModelEvaluationRequest +from .types.model_service import GetModelEvaluationSliceRequest +from .types.model_service import GetModelRequest +from .types.model_service import ListModelEvaluationSlicesRequest +from .types.model_service import ListModelEvaluationSlicesResponse +from .types.model_service import ListModelEvaluationsRequest +from .types.model_service import ListModelEvaluationsResponse +from .types.model_service import ListModelsRequest +from .types.model_service import ListModelsResponse +from .types.model_service import UpdateModelRequest +from .types.model_service import UploadModelOperationMetadata +from .types.model_service import UploadModelRequest +from .types.model_service import UploadModelResponse +from .types.operation import DeleteOperationMetadata +from .types.operation import GenericOperationMetadata +from .types.pipeline_job import PipelineJob +from .types.pipeline_job import PipelineJobDetail +from .types.pipeline_job import PipelineTaskDetail +from .types.pipeline_job import PipelineTaskExecutorDetail +from .types.pipeline_service import CancelPipelineJobRequest +from .types.pipeline_service import CancelTrainingPipelineRequest +from .types.pipeline_service import CreatePipelineJobRequest +from .types.pipeline_service import CreateTrainingPipelineRequest +from .types.pipeline_service import DeletePipelineJobRequest +from .types.pipeline_service import DeleteTrainingPipelineRequest +from .types.pipeline_service import GetPipelineJobRequest +from .types.pipeline_service import GetTrainingPipelineRequest +from .types.pipeline_service import ListPipelineJobsRequest +from .types.pipeline_service import ListPipelineJobsResponse +from .types.pipeline_service import ListTrainingPipelinesRequest +from .types.pipeline_service import ListTrainingPipelinesResponse +from .types.pipeline_state import PipelineState +from .types.prediction_service import ExplainRequest +from .types.prediction_service import ExplainResponse +from .types.prediction_service import PredictRequest +from .types.prediction_service import PredictResponse +from .types.specialist_pool import SpecialistPool +from .types.specialist_pool_service import CreateSpecialistPoolOperationMetadata +from .types.specialist_pool_service import CreateSpecialistPoolRequest +from .types.specialist_pool_service import DeleteSpecialistPoolRequest +from .types.specialist_pool_service import GetSpecialistPoolRequest +from .types.specialist_pool_service 
import ListSpecialistPoolsRequest +from .types.specialist_pool_service import ListSpecialistPoolsResponse +from .types.specialist_pool_service import UpdateSpecialistPoolOperationMetadata +from .types.specialist_pool_service import UpdateSpecialistPoolRequest +from .types.study import Measurement +from .types.study import Study +from .types.study import StudySpec +from .types.study import Trial +from .types.tensorboard import Tensorboard +from .types.tensorboard_data import Scalar +from .types.tensorboard_data import TensorboardBlob +from .types.tensorboard_data import TensorboardBlobSequence +from .types.tensorboard_data import TensorboardTensor +from .types.tensorboard_data import TimeSeriesData +from .types.tensorboard_data import TimeSeriesDataPoint +from .types.tensorboard_experiment import TensorboardExperiment +from .types.tensorboard_run import TensorboardRun +from .types.tensorboard_service import CreateTensorboardExperimentRequest +from .types.tensorboard_service import CreateTensorboardOperationMetadata +from .types.tensorboard_service import CreateTensorboardRequest +from .types.tensorboard_service import CreateTensorboardRunRequest +from .types.tensorboard_service import CreateTensorboardTimeSeriesRequest +from .types.tensorboard_service import DeleteTensorboardExperimentRequest +from .types.tensorboard_service import DeleteTensorboardRequest +from .types.tensorboard_service import DeleteTensorboardRunRequest +from .types.tensorboard_service import DeleteTensorboardTimeSeriesRequest +from .types.tensorboard_service import ExportTensorboardTimeSeriesDataRequest +from .types.tensorboard_service import ExportTensorboardTimeSeriesDataResponse +from .types.tensorboard_service import GetTensorboardExperimentRequest +from .types.tensorboard_service import GetTensorboardRequest +from .types.tensorboard_service import GetTensorboardRunRequest +from .types.tensorboard_service import GetTensorboardTimeSeriesRequest +from .types.tensorboard_service import ListTensorboardExperimentsRequest +from .types.tensorboard_service import ListTensorboardExperimentsResponse +from .types.tensorboard_service import ListTensorboardRunsRequest +from .types.tensorboard_service import ListTensorboardRunsResponse +from .types.tensorboard_service import ListTensorboardsRequest +from .types.tensorboard_service import ListTensorboardsResponse +from .types.tensorboard_service import ListTensorboardTimeSeriesRequest +from .types.tensorboard_service import ListTensorboardTimeSeriesResponse +from .types.tensorboard_service import ReadTensorboardBlobDataRequest +from .types.tensorboard_service import ReadTensorboardBlobDataResponse +from .types.tensorboard_service import ReadTensorboardTimeSeriesDataRequest +from .types.tensorboard_service import ReadTensorboardTimeSeriesDataResponse +from .types.tensorboard_service import UpdateTensorboardExperimentRequest +from .types.tensorboard_service import UpdateTensorboardOperationMetadata +from .types.tensorboard_service import UpdateTensorboardRequest +from .types.tensorboard_service import UpdateTensorboardRunRequest +from .types.tensorboard_service import UpdateTensorboardTimeSeriesRequest +from .types.tensorboard_service import WriteTensorboardRunDataRequest +from .types.tensorboard_service import WriteTensorboardRunDataResponse +from .types.tensorboard_time_series import TensorboardTimeSeries +from .types.training_pipeline import FilterSplit +from .types.training_pipeline import FractionSplit +from .types.training_pipeline import InputDataConfig +from 
.types.training_pipeline import PredefinedSplit +from .types.training_pipeline import TimestampSplit +from .types.training_pipeline import TrainingPipeline +from .types.types import BoolArray +from .types.types import DoubleArray +from .types.types import Int64Array +from .types.types import StringArray +from .types.user_action_reference import UserActionReference +from .types.value import Value +from .types.vizier_service import AddTrialMeasurementRequest +from .types.vizier_service import CheckTrialEarlyStoppingStateMetatdata +from .types.vizier_service import CheckTrialEarlyStoppingStateRequest +from .types.vizier_service import CheckTrialEarlyStoppingStateResponse +from .types.vizier_service import CompleteTrialRequest +from .types.vizier_service import CreateStudyRequest +from .types.vizier_service import CreateTrialRequest +from .types.vizier_service import DeleteStudyRequest +from .types.vizier_service import DeleteTrialRequest +from .types.vizier_service import GetStudyRequest +from .types.vizier_service import GetTrialRequest +from .types.vizier_service import ListOptimalTrialsRequest +from .types.vizier_service import ListOptimalTrialsResponse +from .types.vizier_service import ListStudiesRequest +from .types.vizier_service import ListStudiesResponse +from .types.vizier_service import ListTrialsRequest +from .types.vizier_service import ListTrialsResponse +from .types.vizier_service import LookupStudyRequest +from .types.vizier_service import StopTrialRequest +from .types.vizier_service import SuggestTrialsMetadata +from .types.vizier_service import SuggestTrialsRequest +from .types.vizier_service import SuggestTrialsResponse + +__all__ = ( + 'DatasetServiceAsyncClient', + 'EndpointServiceAsyncClient', + 'FeaturestoreOnlineServingServiceAsyncClient', + 'FeaturestoreServiceAsyncClient', + 'IndexEndpointServiceAsyncClient', + 'IndexServiceAsyncClient', + 'JobServiceAsyncClient', + 'MetadataServiceAsyncClient', + 'MigrationServiceAsyncClient', + 'ModelServiceAsyncClient', + 'PipelineServiceAsyncClient', + 'PredictionServiceAsyncClient', + 'SpecialistPoolServiceAsyncClient', + 'TensorboardServiceAsyncClient', + 'VizierServiceAsyncClient', +'AcceleratorType', +'ActiveLearningConfig', +'AddContextArtifactsAndExecutionsRequest', +'AddContextArtifactsAndExecutionsResponse', +'AddContextChildrenRequest', +'AddContextChildrenResponse', +'AddExecutionEventsRequest', +'AddExecutionEventsResponse', +'AddTrialMeasurementRequest', +'Annotation', +'AnnotationSpec', +'Artifact', +'Attribution', +'AutomaticResources', +'AutoscalingMetricSpec', +'AvroSource', +'BatchCreateFeaturesOperationMetadata', +'BatchCreateFeaturesRequest', +'BatchCreateFeaturesResponse', +'BatchDedicatedResources', +'BatchMigrateResourcesOperationMetadata', +'BatchMigrateResourcesRequest', +'BatchMigrateResourcesResponse', +'BatchPredictionJob', +'BatchReadFeatureValuesOperationMetadata', +'BatchReadFeatureValuesRequest', +'BatchReadFeatureValuesResponse', +'BigQueryDestination', +'BigQuerySource', +'BoolArray', +'CancelBatchPredictionJobRequest', +'CancelCustomJobRequest', +'CancelDataLabelingJobRequest', +'CancelHyperparameterTuningJobRequest', +'CancelPipelineJobRequest', +'CancelTrainingPipelineRequest', +'CheckTrialEarlyStoppingStateMetatdata', +'CheckTrialEarlyStoppingStateRequest', +'CheckTrialEarlyStoppingStateResponse', +'CompleteTrialRequest', +'CompletionStats', +'ContainerRegistryDestination', +'ContainerSpec', +'Context', +'CreateArtifactRequest', +'CreateBatchPredictionJobRequest', +'CreateContextRequest', 
+'CreateCustomJobRequest', +'CreateDataLabelingJobRequest', +'CreateDatasetOperationMetadata', +'CreateDatasetRequest', +'CreateEndpointOperationMetadata', +'CreateEndpointRequest', +'CreateEntityTypeOperationMetadata', +'CreateEntityTypeRequest', +'CreateExecutionRequest', +'CreateFeatureOperationMetadata', +'CreateFeatureRequest', +'CreateFeaturestoreOperationMetadata', +'CreateFeaturestoreRequest', +'CreateHyperparameterTuningJobRequest', +'CreateIndexEndpointOperationMetadata', +'CreateIndexEndpointRequest', +'CreateIndexOperationMetadata', +'CreateIndexRequest', +'CreateMetadataSchemaRequest', +'CreateMetadataStoreOperationMetadata', +'CreateMetadataStoreRequest', +'CreateModelDeploymentMonitoringJobRequest', +'CreatePipelineJobRequest', +'CreateSpecialistPoolOperationMetadata', +'CreateSpecialistPoolRequest', +'CreateStudyRequest', +'CreateTensorboardExperimentRequest', +'CreateTensorboardOperationMetadata', +'CreateTensorboardRequest', +'CreateTensorboardRunRequest', +'CreateTensorboardTimeSeriesRequest', +'CreateTrainingPipelineRequest', +'CreateTrialRequest', +'CsvDestination', +'CsvSource', +'CustomJob', +'CustomJobSpec', +'DataItem', +'DataLabelingJob', +'Dataset', +'DatasetServiceClient', +'DedicatedResources', +'DeleteBatchPredictionJobRequest', +'DeleteContextRequest', +'DeleteCustomJobRequest', +'DeleteDataLabelingJobRequest', +'DeleteDatasetRequest', +'DeleteEndpointRequest', +'DeleteEntityTypeRequest', +'DeleteFeatureRequest', +'DeleteFeaturestoreRequest', +'DeleteHyperparameterTuningJobRequest', +'DeleteIndexEndpointRequest', +'DeleteIndexRequest', +'DeleteMetadataStoreOperationMetadata', +'DeleteMetadataStoreRequest', +'DeleteModelDeploymentMonitoringJobRequest', +'DeleteModelRequest', +'DeleteOperationMetadata', +'DeletePipelineJobRequest', +'DeleteSpecialistPoolRequest', +'DeleteStudyRequest', +'DeleteTensorboardExperimentRequest', +'DeleteTensorboardRequest', +'DeleteTensorboardRunRequest', +'DeleteTensorboardTimeSeriesRequest', +'DeleteTrainingPipelineRequest', +'DeleteTrialRequest', +'DeployIndexOperationMetadata', +'DeployIndexRequest', +'DeployIndexResponse', +'DeployModelOperationMetadata', +'DeployModelRequest', +'DeployModelResponse', +'DeployedIndex', +'DeployedIndexAuthConfig', +'DeployedIndexRef', +'DeployedModel', +'DeployedModelRef', +'DestinationFeatureSetting', +'DiskSpec', +'DoubleArray', +'EncryptionSpec', +'Endpoint', +'EndpointServiceClient', +'EntityType', +'EnvVar', +'Event', +'Execution', +'ExplainRequest', +'ExplainResponse', +'Explanation', +'ExplanationMetadata', +'ExplanationMetadataOverride', +'ExplanationParameters', +'ExplanationSpec', +'ExplanationSpecOverride', +'ExportDataConfig', +'ExportDataOperationMetadata', +'ExportDataRequest', +'ExportDataResponse', +'ExportFeatureValuesOperationMetadata', +'ExportFeatureValuesRequest', +'ExportFeatureValuesResponse', +'ExportModelOperationMetadata', +'ExportModelRequest', +'ExportModelResponse', +'ExportTensorboardTimeSeriesDataRequest', +'ExportTensorboardTimeSeriesDataResponse', +'Feature', +'FeatureNoiseSigma', +'FeatureSelector', +'FeatureStatsAnomaly', +'FeatureValue', +'FeatureValueDestination', +'FeatureValueList', +'Featurestore', +'FeaturestoreMonitoringConfig', +'FeaturestoreOnlineServingServiceClient', +'FeaturestoreServiceClient', +'FilterSplit', +'FractionSplit', +'GcsDestination', +'GcsSource', +'GenericOperationMetadata', +'GetAnnotationSpecRequest', +'GetArtifactRequest', +'GetBatchPredictionJobRequest', +'GetContextRequest', +'GetCustomJobRequest', +'GetDataLabelingJobRequest', 
+'GetDatasetRequest', +'GetEndpointRequest', +'GetEntityTypeRequest', +'GetExecutionRequest', +'GetFeatureRequest', +'GetFeaturestoreRequest', +'GetHyperparameterTuningJobRequest', +'GetIndexEndpointRequest', +'GetIndexRequest', +'GetMetadataSchemaRequest', +'GetMetadataStoreRequest', +'GetModelDeploymentMonitoringJobRequest', +'GetModelEvaluationRequest', +'GetModelEvaluationSliceRequest', +'GetModelRequest', +'GetPipelineJobRequest', +'GetSpecialistPoolRequest', +'GetStudyRequest', +'GetTensorboardExperimentRequest', +'GetTensorboardRequest', +'GetTensorboardRunRequest', +'GetTensorboardTimeSeriesRequest', +'GetTrainingPipelineRequest', +'GetTrialRequest', +'HyperparameterTuningJob', +'IdMatcher', +'ImportDataConfig', +'ImportDataOperationMetadata', +'ImportDataRequest', +'ImportDataResponse', +'ImportFeatureValuesOperationMetadata', +'ImportFeatureValuesRequest', +'ImportFeatureValuesResponse', +'Index', +'IndexEndpoint', +'IndexEndpointServiceClient', +'IndexPrivateEndpoints', +'IndexServiceClient', +'InputDataConfig', +'Int64Array', +'IntegratedGradientsAttribution', +'JobServiceClient', +'JobState', +'LineageSubgraph', +'ListAnnotationsRequest', +'ListAnnotationsResponse', +'ListArtifactsRequest', +'ListArtifactsResponse', +'ListBatchPredictionJobsRequest', +'ListBatchPredictionJobsResponse', +'ListContextsRequest', +'ListContextsResponse', +'ListCustomJobsRequest', +'ListCustomJobsResponse', +'ListDataItemsRequest', +'ListDataItemsResponse', +'ListDataLabelingJobsRequest', +'ListDataLabelingJobsResponse', +'ListDatasetsRequest', +'ListDatasetsResponse', +'ListEndpointsRequest', +'ListEndpointsResponse', +'ListEntityTypesRequest', +'ListEntityTypesResponse', +'ListExecutionsRequest', +'ListExecutionsResponse', +'ListFeaturesRequest', +'ListFeaturesResponse', +'ListFeaturestoresRequest', +'ListFeaturestoresResponse', +'ListHyperparameterTuningJobsRequest', +'ListHyperparameterTuningJobsResponse', +'ListIndexEndpointsRequest', +'ListIndexEndpointsResponse', +'ListIndexesRequest', +'ListIndexesResponse', +'ListMetadataSchemasRequest', +'ListMetadataSchemasResponse', +'ListMetadataStoresRequest', +'ListMetadataStoresResponse', +'ListModelDeploymentMonitoringJobsRequest', +'ListModelDeploymentMonitoringJobsResponse', +'ListModelEvaluationSlicesRequest', +'ListModelEvaluationSlicesResponse', +'ListModelEvaluationsRequest', +'ListModelEvaluationsResponse', +'ListModelsRequest', +'ListModelsResponse', +'ListOptimalTrialsRequest', +'ListOptimalTrialsResponse', +'ListPipelineJobsRequest', +'ListPipelineJobsResponse', +'ListSpecialistPoolsRequest', +'ListSpecialistPoolsResponse', +'ListStudiesRequest', +'ListStudiesResponse', +'ListTensorboardExperimentsRequest', +'ListTensorboardExperimentsResponse', +'ListTensorboardRunsRequest', +'ListTensorboardRunsResponse', +'ListTensorboardTimeSeriesRequest', +'ListTensorboardTimeSeriesResponse', +'ListTensorboardsRequest', +'ListTensorboardsResponse', +'ListTrainingPipelinesRequest', +'ListTrainingPipelinesResponse', +'ListTrialsRequest', +'ListTrialsResponse', +'LookupStudyRequest', +'MachineSpec', +'ManualBatchTuningParameters', +'Measurement', +'MetadataSchema', +'MetadataServiceClient', +'MetadataStore', +'MigratableResource', +'MigrateResourceRequest', +'MigrateResourceResponse', +'MigrationServiceClient', +'Model', +'ModelContainerSpec', +'ModelDeploymentMonitoringBigQueryTable', +'ModelDeploymentMonitoringJob', +'ModelDeploymentMonitoringObjectiveConfig', +'ModelDeploymentMonitoringObjectiveType', +'ModelDeploymentMonitoringScheduleConfig', 
+'ModelEvaluation', +'ModelEvaluationSlice', +'ModelExplanation', +'ModelMonitoringAlertConfig', +'ModelMonitoringObjectiveConfig', +'ModelMonitoringStatsAnomalies', +'ModelServiceClient', +'NearestNeighborSearchOperationMetadata', +'PauseModelDeploymentMonitoringJobRequest', +'PipelineJob', +'PipelineJobDetail', +'PipelineServiceClient', +'PipelineState', +'PipelineTaskDetail', +'PipelineTaskExecutorDetail', +'Port', +'PredefinedSplit', +'PredictRequest', +'PredictResponse', +'PredictSchemata', +'PredictionServiceClient', +'PythonPackageSpec', +'QueryArtifactLineageSubgraphRequest', +'QueryContextLineageSubgraphRequest', +'QueryExecutionInputsAndOutputsRequest', +'ReadFeatureValuesRequest', +'ReadFeatureValuesResponse', +'ReadTensorboardBlobDataRequest', +'ReadTensorboardBlobDataResponse', +'ReadTensorboardTimeSeriesDataRequest', +'ReadTensorboardTimeSeriesDataResponse', +'ResourcesConsumed', +'ResumeModelDeploymentMonitoringJobRequest', +'SampleConfig', +'SampledShapleyAttribution', +'SamplingStrategy', +'Scalar', +'Scheduling', +'SearchFeaturesRequest', +'SearchFeaturesResponse', +'SearchMigratableResourcesRequest', +'SearchMigratableResourcesResponse', +'SearchModelDeploymentMonitoringStatsAnomaliesRequest', +'SearchModelDeploymentMonitoringStatsAnomaliesResponse', +'SmoothGradConfig', +'SpecialistPool', +'SpecialistPoolServiceClient', +'StopTrialRequest', +'StreamingReadFeatureValuesRequest', +'StringArray', +'Study', +'StudySpec', +'SuggestTrialsMetadata', +'SuggestTrialsRequest', +'SuggestTrialsResponse', +'TFRecordDestination', +'Tensorboard', +'TensorboardBlob', +'TensorboardBlobSequence', +'TensorboardExperiment', +'TensorboardRun', +'TensorboardServiceClient', +'TensorboardTensor', +'TensorboardTimeSeries', +'ThresholdConfig', +'TimeSeriesData', +'TimeSeriesDataPoint', +'TimestampSplit', +'TrainingConfig', +'TrainingPipeline', +'Trial', +'UndeployIndexOperationMetadata', +'UndeployIndexRequest', +'UndeployIndexResponse', +'UndeployModelOperationMetadata', +'UndeployModelRequest', +'UndeployModelResponse', +'UpdateArtifactRequest', +'UpdateContextRequest', +'UpdateDatasetRequest', +'UpdateEndpointRequest', +'UpdateEntityTypeRequest', +'UpdateExecutionRequest', +'UpdateFeatureRequest', +'UpdateFeaturestoreOperationMetadata', +'UpdateFeaturestoreRequest', +'UpdateIndexEndpointRequest', +'UpdateIndexOperationMetadata', +'UpdateIndexRequest', +'UpdateModelDeploymentMonitoringJobOperationMetadata', +'UpdateModelDeploymentMonitoringJobRequest', +'UpdateModelRequest', +'UpdateSpecialistPoolOperationMetadata', +'UpdateSpecialistPoolRequest', +'UpdateTensorboardExperimentRequest', +'UpdateTensorboardOperationMetadata', +'UpdateTensorboardRequest', +'UpdateTensorboardRunRequest', +'UpdateTensorboardTimeSeriesRequest', +'UploadModelOperationMetadata', +'UploadModelRequest', +'UploadModelResponse', +'UserActionReference', +'Value', +'VizierServiceClient', +'WorkerPoolSpec', +'WriteTensorboardRunDataRequest', +'WriteTensorboardRunDataResponse', +'XraiAttribution', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/gapic_metadata.json new file mode 100644 index 0000000000..605e95582d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/gapic_metadata.json @@ -0,0 +1,1949 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform_v1beta1", + "protoPackage": 
"google.cloud.aiplatform.v1beta1", + "schema": "1.0", + "services": { + "DatasetService": { + "clients": { + "grpc": { + "libraryClient": "DatasetServiceClient", + "rpcs": { + "CreateDataset": { + "methods": [ + "create_dataset" + ] + }, + "DeleteDataset": { + "methods": [ + "delete_dataset" + ] + }, + "ExportData": { + "methods": [ + "export_data" + ] + }, + "GetAnnotationSpec": { + "methods": [ + "get_annotation_spec" + ] + }, + "GetDataset": { + "methods": [ + "get_dataset" + ] + }, + "ImportData": { + "methods": [ + "import_data" + ] + }, + "ListAnnotations": { + "methods": [ + "list_annotations" + ] + }, + "ListDataItems": { + "methods": [ + "list_data_items" + ] + }, + "ListDatasets": { + "methods": [ + "list_datasets" + ] + }, + "UpdateDataset": { + "methods": [ + "update_dataset" + ] + } + } + }, + "grpc-async": { + "libraryClient": "DatasetServiceAsyncClient", + "rpcs": { + "CreateDataset": { + "methods": [ + "create_dataset" + ] + }, + "DeleteDataset": { + "methods": [ + "delete_dataset" + ] + }, + "ExportData": { + "methods": [ + "export_data" + ] + }, + "GetAnnotationSpec": { + "methods": [ + "get_annotation_spec" + ] + }, + "GetDataset": { + "methods": [ + "get_dataset" + ] + }, + "ImportData": { + "methods": [ + "import_data" + ] + }, + "ListAnnotations": { + "methods": [ + "list_annotations" + ] + }, + "ListDataItems": { + "methods": [ + "list_data_items" + ] + }, + "ListDatasets": { + "methods": [ + "list_datasets" + ] + }, + "UpdateDataset": { + "methods": [ + "update_dataset" + ] + } + } + } + } + }, + "EndpointService": { + "clients": { + "grpc": { + "libraryClient": "EndpointServiceClient", + "rpcs": { + "CreateEndpoint": { + "methods": [ + "create_endpoint" + ] + }, + "DeleteEndpoint": { + "methods": [ + "delete_endpoint" + ] + }, + "DeployModel": { + "methods": [ + "deploy_model" + ] + }, + "GetEndpoint": { + "methods": [ + "get_endpoint" + ] + }, + "ListEndpoints": { + "methods": [ + "list_endpoints" + ] + }, + "UndeployModel": { + "methods": [ + "undeploy_model" + ] + }, + "UpdateEndpoint": { + "methods": [ + "update_endpoint" + ] + } + } + }, + "grpc-async": { + "libraryClient": "EndpointServiceAsyncClient", + "rpcs": { + "CreateEndpoint": { + "methods": [ + "create_endpoint" + ] + }, + "DeleteEndpoint": { + "methods": [ + "delete_endpoint" + ] + }, + "DeployModel": { + "methods": [ + "deploy_model" + ] + }, + "GetEndpoint": { + "methods": [ + "get_endpoint" + ] + }, + "ListEndpoints": { + "methods": [ + "list_endpoints" + ] + }, + "UndeployModel": { + "methods": [ + "undeploy_model" + ] + }, + "UpdateEndpoint": { + "methods": [ + "update_endpoint" + ] + } + } + } + } + }, + "FeaturestoreOnlineServingService": { + "clients": { + "grpc": { + "libraryClient": "FeaturestoreOnlineServingServiceClient", + "rpcs": { + "ReadFeatureValues": { + "methods": [ + "read_feature_values" + ] + }, + "StreamingReadFeatureValues": { + "methods": [ + "streaming_read_feature_values" + ] + } + } + }, + "grpc-async": { + "libraryClient": "FeaturestoreOnlineServingServiceAsyncClient", + "rpcs": { + "ReadFeatureValues": { + "methods": [ + "read_feature_values" + ] + }, + "StreamingReadFeatureValues": { + "methods": [ + "streaming_read_feature_values" + ] + } + } + } + } + }, + "FeaturestoreService": { + "clients": { + "grpc": { + "libraryClient": "FeaturestoreServiceClient", + "rpcs": { + "BatchCreateFeatures": { + "methods": [ + "batch_create_features" + ] + }, + "BatchReadFeatureValues": { + "methods": [ + "batch_read_feature_values" + ] + }, + "CreateEntityType": { + "methods": [ + 
"create_entity_type" + ] + }, + "CreateFeature": { + "methods": [ + "create_feature" + ] + }, + "CreateFeaturestore": { + "methods": [ + "create_featurestore" + ] + }, + "DeleteEntityType": { + "methods": [ + "delete_entity_type" + ] + }, + "DeleteFeature": { + "methods": [ + "delete_feature" + ] + }, + "DeleteFeaturestore": { + "methods": [ + "delete_featurestore" + ] + }, + "ExportFeatureValues": { + "methods": [ + "export_feature_values" + ] + }, + "GetEntityType": { + "methods": [ + "get_entity_type" + ] + }, + "GetFeature": { + "methods": [ + "get_feature" + ] + }, + "GetFeaturestore": { + "methods": [ + "get_featurestore" + ] + }, + "ImportFeatureValues": { + "methods": [ + "import_feature_values" + ] + }, + "ListEntityTypes": { + "methods": [ + "list_entity_types" + ] + }, + "ListFeatures": { + "methods": [ + "list_features" + ] + }, + "ListFeaturestores": { + "methods": [ + "list_featurestores" + ] + }, + "SearchFeatures": { + "methods": [ + "search_features" + ] + }, + "UpdateEntityType": { + "methods": [ + "update_entity_type" + ] + }, + "UpdateFeature": { + "methods": [ + "update_feature" + ] + }, + "UpdateFeaturestore": { + "methods": [ + "update_featurestore" + ] + } + } + }, + "grpc-async": { + "libraryClient": "FeaturestoreServiceAsyncClient", + "rpcs": { + "BatchCreateFeatures": { + "methods": [ + "batch_create_features" + ] + }, + "BatchReadFeatureValues": { + "methods": [ + "batch_read_feature_values" + ] + }, + "CreateEntityType": { + "methods": [ + "create_entity_type" + ] + }, + "CreateFeature": { + "methods": [ + "create_feature" + ] + }, + "CreateFeaturestore": { + "methods": [ + "create_featurestore" + ] + }, + "DeleteEntityType": { + "methods": [ + "delete_entity_type" + ] + }, + "DeleteFeature": { + "methods": [ + "delete_feature" + ] + }, + "DeleteFeaturestore": { + "methods": [ + "delete_featurestore" + ] + }, + "ExportFeatureValues": { + "methods": [ + "export_feature_values" + ] + }, + "GetEntityType": { + "methods": [ + "get_entity_type" + ] + }, + "GetFeature": { + "methods": [ + "get_feature" + ] + }, + "GetFeaturestore": { + "methods": [ + "get_featurestore" + ] + }, + "ImportFeatureValues": { + "methods": [ + "import_feature_values" + ] + }, + "ListEntityTypes": { + "methods": [ + "list_entity_types" + ] + }, + "ListFeatures": { + "methods": [ + "list_features" + ] + }, + "ListFeaturestores": { + "methods": [ + "list_featurestores" + ] + }, + "SearchFeatures": { + "methods": [ + "search_features" + ] + }, + "UpdateEntityType": { + "methods": [ + "update_entity_type" + ] + }, + "UpdateFeature": { + "methods": [ + "update_feature" + ] + }, + "UpdateFeaturestore": { + "methods": [ + "update_featurestore" + ] + } + } + } + } + }, + "IndexEndpointService": { + "clients": { + "grpc": { + "libraryClient": "IndexEndpointServiceClient", + "rpcs": { + "CreateIndexEndpoint": { + "methods": [ + "create_index_endpoint" + ] + }, + "DeleteIndexEndpoint": { + "methods": [ + "delete_index_endpoint" + ] + }, + "DeployIndex": { + "methods": [ + "deploy_index" + ] + }, + "GetIndexEndpoint": { + "methods": [ + "get_index_endpoint" + ] + }, + "ListIndexEndpoints": { + "methods": [ + "list_index_endpoints" + ] + }, + "UndeployIndex": { + "methods": [ + "undeploy_index" + ] + }, + "UpdateIndexEndpoint": { + "methods": [ + "update_index_endpoint" + ] + } + } + }, + "grpc-async": { + "libraryClient": "IndexEndpointServiceAsyncClient", + "rpcs": { + "CreateIndexEndpoint": { + "methods": [ + "create_index_endpoint" + ] + }, + "DeleteIndexEndpoint": { + "methods": [ + 
"delete_index_endpoint" + ] + }, + "DeployIndex": { + "methods": [ + "deploy_index" + ] + }, + "GetIndexEndpoint": { + "methods": [ + "get_index_endpoint" + ] + }, + "ListIndexEndpoints": { + "methods": [ + "list_index_endpoints" + ] + }, + "UndeployIndex": { + "methods": [ + "undeploy_index" + ] + }, + "UpdateIndexEndpoint": { + "methods": [ + "update_index_endpoint" + ] + } + } + } + } + }, + "IndexService": { + "clients": { + "grpc": { + "libraryClient": "IndexServiceClient", + "rpcs": { + "CreateIndex": { + "methods": [ + "create_index" + ] + }, + "DeleteIndex": { + "methods": [ + "delete_index" + ] + }, + "GetIndex": { + "methods": [ + "get_index" + ] + }, + "ListIndexes": { + "methods": [ + "list_indexes" + ] + }, + "UpdateIndex": { + "methods": [ + "update_index" + ] + } + } + }, + "grpc-async": { + "libraryClient": "IndexServiceAsyncClient", + "rpcs": { + "CreateIndex": { + "methods": [ + "create_index" + ] + }, + "DeleteIndex": { + "methods": [ + "delete_index" + ] + }, + "GetIndex": { + "methods": [ + "get_index" + ] + }, + "ListIndexes": { + "methods": [ + "list_indexes" + ] + }, + "UpdateIndex": { + "methods": [ + "update_index" + ] + } + } + } + } + }, + "JobService": { + "clients": { + "grpc": { + "libraryClient": "JobServiceClient", + "rpcs": { + "CancelBatchPredictionJob": { + "methods": [ + "cancel_batch_prediction_job" + ] + }, + "CancelCustomJob": { + "methods": [ + "cancel_custom_job" + ] + }, + "CancelDataLabelingJob": { + "methods": [ + "cancel_data_labeling_job" + ] + }, + "CancelHyperparameterTuningJob": { + "methods": [ + "cancel_hyperparameter_tuning_job" + ] + }, + "CreateBatchPredictionJob": { + "methods": [ + "create_batch_prediction_job" + ] + }, + "CreateCustomJob": { + "methods": [ + "create_custom_job" + ] + }, + "CreateDataLabelingJob": { + "methods": [ + "create_data_labeling_job" + ] + }, + "CreateHyperparameterTuningJob": { + "methods": [ + "create_hyperparameter_tuning_job" + ] + }, + "CreateModelDeploymentMonitoringJob": { + "methods": [ + "create_model_deployment_monitoring_job" + ] + }, + "DeleteBatchPredictionJob": { + "methods": [ + "delete_batch_prediction_job" + ] + }, + "DeleteCustomJob": { + "methods": [ + "delete_custom_job" + ] + }, + "DeleteDataLabelingJob": { + "methods": [ + "delete_data_labeling_job" + ] + }, + "DeleteHyperparameterTuningJob": { + "methods": [ + "delete_hyperparameter_tuning_job" + ] + }, + "DeleteModelDeploymentMonitoringJob": { + "methods": [ + "delete_model_deployment_monitoring_job" + ] + }, + "GetBatchPredictionJob": { + "methods": [ + "get_batch_prediction_job" + ] + }, + "GetCustomJob": { + "methods": [ + "get_custom_job" + ] + }, + "GetDataLabelingJob": { + "methods": [ + "get_data_labeling_job" + ] + }, + "GetHyperparameterTuningJob": { + "methods": [ + "get_hyperparameter_tuning_job" + ] + }, + "GetModelDeploymentMonitoringJob": { + "methods": [ + "get_model_deployment_monitoring_job" + ] + }, + "ListBatchPredictionJobs": { + "methods": [ + "list_batch_prediction_jobs" + ] + }, + "ListCustomJobs": { + "methods": [ + "list_custom_jobs" + ] + }, + "ListDataLabelingJobs": { + "methods": [ + "list_data_labeling_jobs" + ] + }, + "ListHyperparameterTuningJobs": { + "methods": [ + "list_hyperparameter_tuning_jobs" + ] + }, + "ListModelDeploymentMonitoringJobs": { + "methods": [ + "list_model_deployment_monitoring_jobs" + ] + }, + "PauseModelDeploymentMonitoringJob": { + "methods": [ + "pause_model_deployment_monitoring_job" + ] + }, + "ResumeModelDeploymentMonitoringJob": { + "methods": [ + 
"resume_model_deployment_monitoring_job" + ] + }, + "SearchModelDeploymentMonitoringStatsAnomalies": { + "methods": [ + "search_model_deployment_monitoring_stats_anomalies" + ] + }, + "UpdateModelDeploymentMonitoringJob": { + "methods": [ + "update_model_deployment_monitoring_job" + ] + } + } + }, + "grpc-async": { + "libraryClient": "JobServiceAsyncClient", + "rpcs": { + "CancelBatchPredictionJob": { + "methods": [ + "cancel_batch_prediction_job" + ] + }, + "CancelCustomJob": { + "methods": [ + "cancel_custom_job" + ] + }, + "CancelDataLabelingJob": { + "methods": [ + "cancel_data_labeling_job" + ] + }, + "CancelHyperparameterTuningJob": { + "methods": [ + "cancel_hyperparameter_tuning_job" + ] + }, + "CreateBatchPredictionJob": { + "methods": [ + "create_batch_prediction_job" + ] + }, + "CreateCustomJob": { + "methods": [ + "create_custom_job" + ] + }, + "CreateDataLabelingJob": { + "methods": [ + "create_data_labeling_job" + ] + }, + "CreateHyperparameterTuningJob": { + "methods": [ + "create_hyperparameter_tuning_job" + ] + }, + "CreateModelDeploymentMonitoringJob": { + "methods": [ + "create_model_deployment_monitoring_job" + ] + }, + "DeleteBatchPredictionJob": { + "methods": [ + "delete_batch_prediction_job" + ] + }, + "DeleteCustomJob": { + "methods": [ + "delete_custom_job" + ] + }, + "DeleteDataLabelingJob": { + "methods": [ + "delete_data_labeling_job" + ] + }, + "DeleteHyperparameterTuningJob": { + "methods": [ + "delete_hyperparameter_tuning_job" + ] + }, + "DeleteModelDeploymentMonitoringJob": { + "methods": [ + "delete_model_deployment_monitoring_job" + ] + }, + "GetBatchPredictionJob": { + "methods": [ + "get_batch_prediction_job" + ] + }, + "GetCustomJob": { + "methods": [ + "get_custom_job" + ] + }, + "GetDataLabelingJob": { + "methods": [ + "get_data_labeling_job" + ] + }, + "GetHyperparameterTuningJob": { + "methods": [ + "get_hyperparameter_tuning_job" + ] + }, + "GetModelDeploymentMonitoringJob": { + "methods": [ + "get_model_deployment_monitoring_job" + ] + }, + "ListBatchPredictionJobs": { + "methods": [ + "list_batch_prediction_jobs" + ] + }, + "ListCustomJobs": { + "methods": [ + "list_custom_jobs" + ] + }, + "ListDataLabelingJobs": { + "methods": [ + "list_data_labeling_jobs" + ] + }, + "ListHyperparameterTuningJobs": { + "methods": [ + "list_hyperparameter_tuning_jobs" + ] + }, + "ListModelDeploymentMonitoringJobs": { + "methods": [ + "list_model_deployment_monitoring_jobs" + ] + }, + "PauseModelDeploymentMonitoringJob": { + "methods": [ + "pause_model_deployment_monitoring_job" + ] + }, + "ResumeModelDeploymentMonitoringJob": { + "methods": [ + "resume_model_deployment_monitoring_job" + ] + }, + "SearchModelDeploymentMonitoringStatsAnomalies": { + "methods": [ + "search_model_deployment_monitoring_stats_anomalies" + ] + }, + "UpdateModelDeploymentMonitoringJob": { + "methods": [ + "update_model_deployment_monitoring_job" + ] + } + } + } + } + }, + "MetadataService": { + "clients": { + "grpc": { + "libraryClient": "MetadataServiceClient", + "rpcs": { + "AddContextArtifactsAndExecutions": { + "methods": [ + "add_context_artifacts_and_executions" + ] + }, + "AddContextChildren": { + "methods": [ + "add_context_children" + ] + }, + "AddExecutionEvents": { + "methods": [ + "add_execution_events" + ] + }, + "CreateArtifact": { + "methods": [ + "create_artifact" + ] + }, + "CreateContext": { + "methods": [ + "create_context" + ] + }, + "CreateExecution": { + "methods": [ + "create_execution" + ] + }, + "CreateMetadataSchema": { + "methods": [ + 
"create_metadata_schema" + ] + }, + "CreateMetadataStore": { + "methods": [ + "create_metadata_store" + ] + }, + "DeleteContext": { + "methods": [ + "delete_context" + ] + }, + "DeleteMetadataStore": { + "methods": [ + "delete_metadata_store" + ] + }, + "GetArtifact": { + "methods": [ + "get_artifact" + ] + }, + "GetContext": { + "methods": [ + "get_context" + ] + }, + "GetExecution": { + "methods": [ + "get_execution" + ] + }, + "GetMetadataSchema": { + "methods": [ + "get_metadata_schema" + ] + }, + "GetMetadataStore": { + "methods": [ + "get_metadata_store" + ] + }, + "ListArtifacts": { + "methods": [ + "list_artifacts" + ] + }, + "ListContexts": { + "methods": [ + "list_contexts" + ] + }, + "ListExecutions": { + "methods": [ + "list_executions" + ] + }, + "ListMetadataSchemas": { + "methods": [ + "list_metadata_schemas" + ] + }, + "ListMetadataStores": { + "methods": [ + "list_metadata_stores" + ] + }, + "QueryArtifactLineageSubgraph": { + "methods": [ + "query_artifact_lineage_subgraph" + ] + }, + "QueryContextLineageSubgraph": { + "methods": [ + "query_context_lineage_subgraph" + ] + }, + "QueryExecutionInputsAndOutputs": { + "methods": [ + "query_execution_inputs_and_outputs" + ] + }, + "UpdateArtifact": { + "methods": [ + "update_artifact" + ] + }, + "UpdateContext": { + "methods": [ + "update_context" + ] + }, + "UpdateExecution": { + "methods": [ + "update_execution" + ] + } + } + }, + "grpc-async": { + "libraryClient": "MetadataServiceAsyncClient", + "rpcs": { + "AddContextArtifactsAndExecutions": { + "methods": [ + "add_context_artifacts_and_executions" + ] + }, + "AddContextChildren": { + "methods": [ + "add_context_children" + ] + }, + "AddExecutionEvents": { + "methods": [ + "add_execution_events" + ] + }, + "CreateArtifact": { + "methods": [ + "create_artifact" + ] + }, + "CreateContext": { + "methods": [ + "create_context" + ] + }, + "CreateExecution": { + "methods": [ + "create_execution" + ] + }, + "CreateMetadataSchema": { + "methods": [ + "create_metadata_schema" + ] + }, + "CreateMetadataStore": { + "methods": [ + "create_metadata_store" + ] + }, + "DeleteContext": { + "methods": [ + "delete_context" + ] + }, + "DeleteMetadataStore": { + "methods": [ + "delete_metadata_store" + ] + }, + "GetArtifact": { + "methods": [ + "get_artifact" + ] + }, + "GetContext": { + "methods": [ + "get_context" + ] + }, + "GetExecution": { + "methods": [ + "get_execution" + ] + }, + "GetMetadataSchema": { + "methods": [ + "get_metadata_schema" + ] + }, + "GetMetadataStore": { + "methods": [ + "get_metadata_store" + ] + }, + "ListArtifacts": { + "methods": [ + "list_artifacts" + ] + }, + "ListContexts": { + "methods": [ + "list_contexts" + ] + }, + "ListExecutions": { + "methods": [ + "list_executions" + ] + }, + "ListMetadataSchemas": { + "methods": [ + "list_metadata_schemas" + ] + }, + "ListMetadataStores": { + "methods": [ + "list_metadata_stores" + ] + }, + "QueryArtifactLineageSubgraph": { + "methods": [ + "query_artifact_lineage_subgraph" + ] + }, + "QueryContextLineageSubgraph": { + "methods": [ + "query_context_lineage_subgraph" + ] + }, + "QueryExecutionInputsAndOutputs": { + "methods": [ + "query_execution_inputs_and_outputs" + ] + }, + "UpdateArtifact": { + "methods": [ + "update_artifact" + ] + }, + "UpdateContext": { + "methods": [ + "update_context" + ] + }, + "UpdateExecution": { + "methods": [ + "update_execution" + ] + } + } + } + } + }, + "MigrationService": { + "clients": { + "grpc": { + "libraryClient": "MigrationServiceClient", + "rpcs": { + "BatchMigrateResources": 
{ + "methods": [ + "batch_migrate_resources" + ] + }, + "SearchMigratableResources": { + "methods": [ + "search_migratable_resources" + ] + } + } + }, + "grpc-async": { + "libraryClient": "MigrationServiceAsyncClient", + "rpcs": { + "BatchMigrateResources": { + "methods": [ + "batch_migrate_resources" + ] + }, + "SearchMigratableResources": { + "methods": [ + "search_migratable_resources" + ] + } + } + } + } + }, + "ModelService": { + "clients": { + "grpc": { + "libraryClient": "ModelServiceClient", + "rpcs": { + "DeleteModel": { + "methods": [ + "delete_model" + ] + }, + "ExportModel": { + "methods": [ + "export_model" + ] + }, + "GetModel": { + "methods": [ + "get_model" + ] + }, + "GetModelEvaluation": { + "methods": [ + "get_model_evaluation" + ] + }, + "GetModelEvaluationSlice": { + "methods": [ + "get_model_evaluation_slice" + ] + }, + "ListModelEvaluationSlices": { + "methods": [ + "list_model_evaluation_slices" + ] + }, + "ListModelEvaluations": { + "methods": [ + "list_model_evaluations" + ] + }, + "ListModels": { + "methods": [ + "list_models" + ] + }, + "UpdateModel": { + "methods": [ + "update_model" + ] + }, + "UploadModel": { + "methods": [ + "upload_model" + ] + } + } + }, + "grpc-async": { + "libraryClient": "ModelServiceAsyncClient", + "rpcs": { + "DeleteModel": { + "methods": [ + "delete_model" + ] + }, + "ExportModel": { + "methods": [ + "export_model" + ] + }, + "GetModel": { + "methods": [ + "get_model" + ] + }, + "GetModelEvaluation": { + "methods": [ + "get_model_evaluation" + ] + }, + "GetModelEvaluationSlice": { + "methods": [ + "get_model_evaluation_slice" + ] + }, + "ListModelEvaluationSlices": { + "methods": [ + "list_model_evaluation_slices" + ] + }, + "ListModelEvaluations": { + "methods": [ + "list_model_evaluations" + ] + }, + "ListModels": { + "methods": [ + "list_models" + ] + }, + "UpdateModel": { + "methods": [ + "update_model" + ] + }, + "UploadModel": { + "methods": [ + "upload_model" + ] + } + } + } + } + }, + "PipelineService": { + "clients": { + "grpc": { + "libraryClient": "PipelineServiceClient", + "rpcs": { + "CancelPipelineJob": { + "methods": [ + "cancel_pipeline_job" + ] + }, + "CancelTrainingPipeline": { + "methods": [ + "cancel_training_pipeline" + ] + }, + "CreatePipelineJob": { + "methods": [ + "create_pipeline_job" + ] + }, + "CreateTrainingPipeline": { + "methods": [ + "create_training_pipeline" + ] + }, + "DeletePipelineJob": { + "methods": [ + "delete_pipeline_job" + ] + }, + "DeleteTrainingPipeline": { + "methods": [ + "delete_training_pipeline" + ] + }, + "GetPipelineJob": { + "methods": [ + "get_pipeline_job" + ] + }, + "GetTrainingPipeline": { + "methods": [ + "get_training_pipeline" + ] + }, + "ListPipelineJobs": { + "methods": [ + "list_pipeline_jobs" + ] + }, + "ListTrainingPipelines": { + "methods": [ + "list_training_pipelines" + ] + } + } + }, + "grpc-async": { + "libraryClient": "PipelineServiceAsyncClient", + "rpcs": { + "CancelPipelineJob": { + "methods": [ + "cancel_pipeline_job" + ] + }, + "CancelTrainingPipeline": { + "methods": [ + "cancel_training_pipeline" + ] + }, + "CreatePipelineJob": { + "methods": [ + "create_pipeline_job" + ] + }, + "CreateTrainingPipeline": { + "methods": [ + "create_training_pipeline" + ] + }, + "DeletePipelineJob": { + "methods": [ + "delete_pipeline_job" + ] + }, + "DeleteTrainingPipeline": { + "methods": [ + "delete_training_pipeline" + ] + }, + "GetPipelineJob": { + "methods": [ + "get_pipeline_job" + ] + }, + "GetTrainingPipeline": { + "methods": [ + "get_training_pipeline" + ] + }, + 
"ListPipelineJobs": { + "methods": [ + "list_pipeline_jobs" + ] + }, + "ListTrainingPipelines": { + "methods": [ + "list_training_pipelines" + ] + } + } + } + } + }, + "PredictionService": { + "clients": { + "grpc": { + "libraryClient": "PredictionServiceClient", + "rpcs": { + "Explain": { + "methods": [ + "explain" + ] + }, + "Predict": { + "methods": [ + "predict" + ] + } + } + }, + "grpc-async": { + "libraryClient": "PredictionServiceAsyncClient", + "rpcs": { + "Explain": { + "methods": [ + "explain" + ] + }, + "Predict": { + "methods": [ + "predict" + ] + } + } + } + } + }, + "SpecialistPoolService": { + "clients": { + "grpc": { + "libraryClient": "SpecialistPoolServiceClient", + "rpcs": { + "CreateSpecialistPool": { + "methods": [ + "create_specialist_pool" + ] + }, + "DeleteSpecialistPool": { + "methods": [ + "delete_specialist_pool" + ] + }, + "GetSpecialistPool": { + "methods": [ + "get_specialist_pool" + ] + }, + "ListSpecialistPools": { + "methods": [ + "list_specialist_pools" + ] + }, + "UpdateSpecialistPool": { + "methods": [ + "update_specialist_pool" + ] + } + } + }, + "grpc-async": { + "libraryClient": "SpecialistPoolServiceAsyncClient", + "rpcs": { + "CreateSpecialistPool": { + "methods": [ + "create_specialist_pool" + ] + }, + "DeleteSpecialistPool": { + "methods": [ + "delete_specialist_pool" + ] + }, + "GetSpecialistPool": { + "methods": [ + "get_specialist_pool" + ] + }, + "ListSpecialistPools": { + "methods": [ + "list_specialist_pools" + ] + }, + "UpdateSpecialistPool": { + "methods": [ + "update_specialist_pool" + ] + } + } + } + } + }, + "TensorboardService": { + "clients": { + "grpc": { + "libraryClient": "TensorboardServiceClient", + "rpcs": { + "CreateTensorboard": { + "methods": [ + "create_tensorboard" + ] + }, + "CreateTensorboardExperiment": { + "methods": [ + "create_tensorboard_experiment" + ] + }, + "CreateTensorboardRun": { + "methods": [ + "create_tensorboard_run" + ] + }, + "CreateTensorboardTimeSeries": { + "methods": [ + "create_tensorboard_time_series" + ] + }, + "DeleteTensorboard": { + "methods": [ + "delete_tensorboard" + ] + }, + "DeleteTensorboardExperiment": { + "methods": [ + "delete_tensorboard_experiment" + ] + }, + "DeleteTensorboardRun": { + "methods": [ + "delete_tensorboard_run" + ] + }, + "DeleteTensorboardTimeSeries": { + "methods": [ + "delete_tensorboard_time_series" + ] + }, + "ExportTensorboardTimeSeriesData": { + "methods": [ + "export_tensorboard_time_series_data" + ] + }, + "GetTensorboard": { + "methods": [ + "get_tensorboard" + ] + }, + "GetTensorboardExperiment": { + "methods": [ + "get_tensorboard_experiment" + ] + }, + "GetTensorboardRun": { + "methods": [ + "get_tensorboard_run" + ] + }, + "GetTensorboardTimeSeries": { + "methods": [ + "get_tensorboard_time_series" + ] + }, + "ListTensorboardExperiments": { + "methods": [ + "list_tensorboard_experiments" + ] + }, + "ListTensorboardRuns": { + "methods": [ + "list_tensorboard_runs" + ] + }, + "ListTensorboardTimeSeries": { + "methods": [ + "list_tensorboard_time_series" + ] + }, + "ListTensorboards": { + "methods": [ + "list_tensorboards" + ] + }, + "ReadTensorboardBlobData": { + "methods": [ + "read_tensorboard_blob_data" + ] + }, + "ReadTensorboardTimeSeriesData": { + "methods": [ + "read_tensorboard_time_series_data" + ] + }, + "UpdateTensorboard": { + "methods": [ + "update_tensorboard" + ] + }, + "UpdateTensorboardExperiment": { + "methods": [ + "update_tensorboard_experiment" + ] + }, + "UpdateTensorboardRun": { + "methods": [ + "update_tensorboard_run" + ] + }, + 
"UpdateTensorboardTimeSeries": { + "methods": [ + "update_tensorboard_time_series" + ] + }, + "WriteTensorboardRunData": { + "methods": [ + "write_tensorboard_run_data" + ] + } + } + }, + "grpc-async": { + "libraryClient": "TensorboardServiceAsyncClient", + "rpcs": { + "CreateTensorboard": { + "methods": [ + "create_tensorboard" + ] + }, + "CreateTensorboardExperiment": { + "methods": [ + "create_tensorboard_experiment" + ] + }, + "CreateTensorboardRun": { + "methods": [ + "create_tensorboard_run" + ] + }, + "CreateTensorboardTimeSeries": { + "methods": [ + "create_tensorboard_time_series" + ] + }, + "DeleteTensorboard": { + "methods": [ + "delete_tensorboard" + ] + }, + "DeleteTensorboardExperiment": { + "methods": [ + "delete_tensorboard_experiment" + ] + }, + "DeleteTensorboardRun": { + "methods": [ + "delete_tensorboard_run" + ] + }, + "DeleteTensorboardTimeSeries": { + "methods": [ + "delete_tensorboard_time_series" + ] + }, + "ExportTensorboardTimeSeriesData": { + "methods": [ + "export_tensorboard_time_series_data" + ] + }, + "GetTensorboard": { + "methods": [ + "get_tensorboard" + ] + }, + "GetTensorboardExperiment": { + "methods": [ + "get_tensorboard_experiment" + ] + }, + "GetTensorboardRun": { + "methods": [ + "get_tensorboard_run" + ] + }, + "GetTensorboardTimeSeries": { + "methods": [ + "get_tensorboard_time_series" + ] + }, + "ListTensorboardExperiments": { + "methods": [ + "list_tensorboard_experiments" + ] + }, + "ListTensorboardRuns": { + "methods": [ + "list_tensorboard_runs" + ] + }, + "ListTensorboardTimeSeries": { + "methods": [ + "list_tensorboard_time_series" + ] + }, + "ListTensorboards": { + "methods": [ + "list_tensorboards" + ] + }, + "ReadTensorboardBlobData": { + "methods": [ + "read_tensorboard_blob_data" + ] + }, + "ReadTensorboardTimeSeriesData": { + "methods": [ + "read_tensorboard_time_series_data" + ] + }, + "UpdateTensorboard": { + "methods": [ + "update_tensorboard" + ] + }, + "UpdateTensorboardExperiment": { + "methods": [ + "update_tensorboard_experiment" + ] + }, + "UpdateTensorboardRun": { + "methods": [ + "update_tensorboard_run" + ] + }, + "UpdateTensorboardTimeSeries": { + "methods": [ + "update_tensorboard_time_series" + ] + }, + "WriteTensorboardRunData": { + "methods": [ + "write_tensorboard_run_data" + ] + } + } + } + } + }, + "VizierService": { + "clients": { + "grpc": { + "libraryClient": "VizierServiceClient", + "rpcs": { + "AddTrialMeasurement": { + "methods": [ + "add_trial_measurement" + ] + }, + "CheckTrialEarlyStoppingState": { + "methods": [ + "check_trial_early_stopping_state" + ] + }, + "CompleteTrial": { + "methods": [ + "complete_trial" + ] + }, + "CreateStudy": { + "methods": [ + "create_study" + ] + }, + "CreateTrial": { + "methods": [ + "create_trial" + ] + }, + "DeleteStudy": { + "methods": [ + "delete_study" + ] + }, + "DeleteTrial": { + "methods": [ + "delete_trial" + ] + }, + "GetStudy": { + "methods": [ + "get_study" + ] + }, + "GetTrial": { + "methods": [ + "get_trial" + ] + }, + "ListOptimalTrials": { + "methods": [ + "list_optimal_trials" + ] + }, + "ListStudies": { + "methods": [ + "list_studies" + ] + }, + "ListTrials": { + "methods": [ + "list_trials" + ] + }, + "LookupStudy": { + "methods": [ + "lookup_study" + ] + }, + "StopTrial": { + "methods": [ + "stop_trial" + ] + }, + "SuggestTrials": { + "methods": [ + "suggest_trials" + ] + } + } + }, + "grpc-async": { + "libraryClient": "VizierServiceAsyncClient", + "rpcs": { + "AddTrialMeasurement": { + "methods": [ + "add_trial_measurement" + ] + }, + 
"CheckTrialEarlyStoppingState": { + "methods": [ + "check_trial_early_stopping_state" + ] + }, + "CompleteTrial": { + "methods": [ + "complete_trial" + ] + }, + "CreateStudy": { + "methods": [ + "create_study" + ] + }, + "CreateTrial": { + "methods": [ + "create_trial" + ] + }, + "DeleteStudy": { + "methods": [ + "delete_study" + ] + }, + "DeleteTrial": { + "methods": [ + "delete_trial" + ] + }, + "GetStudy": { + "methods": [ + "get_study" + ] + }, + "GetTrial": { + "methods": [ + "get_trial" + ] + }, + "ListOptimalTrials": { + "methods": [ + "list_optimal_trials" + ] + }, + "ListStudies": { + "methods": [ + "list_studies" + ] + }, + "ListTrials": { + "methods": [ + "list_trials" + ] + }, + "LookupStudy": { + "methods": [ + "lookup_study" + ] + }, + "StopTrial": { + "methods": [ + "stop_trial" + ] + }, + "SuggestTrials": { + "methods": [ + "suggest_trials" + ] + } + } + } + } + } + } +} diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/py.typed new file mode 100644 index 0000000000..228f1c51c6 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/__init__.py new file mode 100644 index 0000000000..4de65971c2 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/__init__.py new file mode 100644 index 0000000000..44e8fb2115 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import DatasetServiceClient +from .async_client import DatasetServiceAsyncClient + +__all__ = ( + 'DatasetServiceClient', + 'DatasetServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py new file mode 100644 index 0000000000..8255259240 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py @@ -0,0 +1,1074 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.dataset_service import pagers +from google.cloud.aiplatform_v1beta1.types import annotation +from google.cloud.aiplatform_v1beta1.types import annotation_spec +from google.cloud.aiplatform_v1beta1.types import data_item +from google.cloud.aiplatform_v1beta1.types import dataset +from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset +from google.cloud.aiplatform_v1beta1.types import dataset_service +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import DatasetServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import DatasetServiceGrpcAsyncIOTransport +from .client import DatasetServiceClient + + +class DatasetServiceAsyncClient: + """""" + + _client: DatasetServiceClient + + DEFAULT_ENDPOINT = DatasetServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = DatasetServiceClient.DEFAULT_MTLS_ENDPOINT + + annotation_path = staticmethod(DatasetServiceClient.annotation_path) + parse_annotation_path = staticmethod(DatasetServiceClient.parse_annotation_path) + annotation_spec_path = staticmethod(DatasetServiceClient.annotation_spec_path) + parse_annotation_spec_path = staticmethod(DatasetServiceClient.parse_annotation_spec_path) + data_item_path = staticmethod(DatasetServiceClient.data_item_path) + parse_data_item_path = 
staticmethod(DatasetServiceClient.parse_data_item_path) + dataset_path = staticmethod(DatasetServiceClient.dataset_path) + parse_dataset_path = staticmethod(DatasetServiceClient.parse_dataset_path) + common_billing_account_path = staticmethod(DatasetServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(DatasetServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(DatasetServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(DatasetServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(DatasetServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(DatasetServiceClient.parse_common_organization_path) + common_project_path = staticmethod(DatasetServiceClient.common_project_path) + parse_common_project_path = staticmethod(DatasetServiceClient.parse_common_project_path) + common_location_path = staticmethod(DatasetServiceClient.common_location_path) + parse_common_location_path = staticmethod(DatasetServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DatasetServiceAsyncClient: The constructed client. + """ + return DatasetServiceClient.from_service_account_info.__func__(DatasetServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DatasetServiceAsyncClient: The constructed client. + """ + return DatasetServiceClient.from_service_account_file.__func__(DatasetServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> DatasetServiceTransport: + """Returns the transport used by the client instance. + + Returns: + DatasetServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(DatasetServiceClient).get_transport_class, type(DatasetServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, DatasetServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the dataset service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.DatasetServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. 
+ (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = DatasetServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_dataset(self, + request: dataset_service.CreateDatasetRequest = None, + *, + parent: str = None, + dataset: gca_dataset.Dataset = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a Dataset. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateDatasetRequest`): + The request object. Request message for + [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset]. + parent (:class:`str`): + Required. The resource name of the Location to create + the Dataset in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + dataset (:class:`google.cloud.aiplatform_v1beta1.types.Dataset`): + Required. The Dataset to create. + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.Dataset` A + collection of DataItems and Annotations on them. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, dataset]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.CreateDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if dataset is not None: + request.dataset = dataset + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
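+        # NOTE (illustrative comment, not generated output): wrap_method binds
+        # the transport stub to the 5-second default timeout and the client
+        # info configured below; the per-call ``retry`` and ``timeout``
+        # arguments passed to ``rpc(...)`` override these defaults.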
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_dataset, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_dataset.Dataset, + metadata_type=dataset_service.CreateDatasetOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_dataset(self, + request: dataset_service.GetDatasetRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> dataset.Dataset: + r"""Gets a Dataset. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetDatasetRequest`): + The request object. Request message for + [DatasetService.GetDataset][google.cloud.aiplatform.v1beta1.DatasetService.GetDataset]. + name (:class:`str`): + Required. The name of the Dataset + resource. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Dataset: + A collection of DataItems and + Annotations on them. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.GetDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_dataset, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_dataset(self, + request: dataset_service.UpdateDatasetRequest = None, + *, + dataset: gca_dataset.Dataset = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_dataset.Dataset: + r"""Updates a Dataset. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateDatasetRequest`): + The request object. 
Request message for + [DatasetService.UpdateDataset][google.cloud.aiplatform.v1beta1.DatasetService.UpdateDataset]. + dataset (:class:`google.cloud.aiplatform_v1beta1.types.Dataset`): + Required. The Dataset which replaces + the resource on the server. + + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The update mask applies to the resource. For + the ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + Updatable fields: + + - ``display_name`` + - ``description`` + - ``labels`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Dataset: + A collection of DataItems and + Annotations on them. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([dataset, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.UpdateDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if dataset is not None: + request.dataset = dataset + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_dataset, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("dataset.name", request.dataset.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_datasets(self, + request: dataset_service.ListDatasetsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDatasetsAsyncPager: + r"""Lists Datasets in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListDatasetsRequest`): + The request object. Request message for + [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. + parent (:class:`str`): + Required. The name of the Dataset's parent resource. + Format: ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListDatasetsAsyncPager: + Response message for + [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.ListDatasetsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_datasets, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListDatasetsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_dataset(self, + request: dataset_service.DeleteDatasetRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a Dataset. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteDatasetRequest`): + The request object. Request message for + [DatasetService.DeleteDataset][google.cloud.aiplatform.v1beta1.DatasetService.DeleteDataset]. + name (:class:`str`): + Required. The resource name of the Dataset to delete. + Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. 
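+        # NOTE (illustrative comment, not generated output): proto-plus request
+        # constructors such as DeleteDatasetRequest also accept a plain dict
+        # with the same field names, so ``request={"name": ...}`` works too.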
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.DeleteDatasetRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_dataset, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def import_data(self, + request: dataset_service.ImportDataRequest = None, + *, + name: str = None, + import_configs: Sequence[dataset.ImportDataConfig] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Imports data into a Dataset. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ImportDataRequest`): + The request object. Request message for + [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. + name (:class:`str`): + Required. The name of the Dataset resource. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + import_configs (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.ImportDataConfig]`): + Required. The desired input + locations. The contents of all input + locations will be imported in one batch. + + This corresponds to the ``import_configs`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.ImportDataResponse` + Response message for + [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
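+        # NOTE (illustrative comment, not generated output): the "flattened"
+        # arguments (``name``, ``import_configs``) are a convenience layered on
+        # top of the request object; the check below rejects calls that mix a
+        # full ``request`` with individual field arguments.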
+ has_flattened_params = any([name, import_configs]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.ImportDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if import_configs: + request.import_configs.extend(import_configs) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.import_data, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + dataset_service.ImportDataResponse, + metadata_type=dataset_service.ImportDataOperationMetadata, + ) + + # Done; return the response. + return response + + async def export_data(self, + request: dataset_service.ExportDataRequest = None, + *, + name: str = None, + export_config: dataset.ExportDataConfig = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Exports data from a Dataset. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ExportDataRequest`): + The request object. Request message for + [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. + name (:class:`str`): + Required. The name of the Dataset resource. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + export_config (:class:`google.cloud.aiplatform_v1beta1.types.ExportDataConfig`): + Required. The desired output + location. + + This corresponds to the ``export_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.ExportDataResponse` + Response message for + [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name, export_config]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.ExportDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if export_config is not None: + request.export_config = export_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.export_data, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + dataset_service.ExportDataResponse, + metadata_type=dataset_service.ExportDataOperationMetadata, + ) + + # Done; return the response. + return response + + async def list_data_items(self, + request: dataset_service.ListDataItemsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDataItemsAsyncPager: + r"""Lists DataItems in a Dataset. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListDataItemsRequest`): + The request object. Request message for + [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. + parent (:class:`str`): + Required. The resource name of the Dataset to list + DataItems from. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListDataItemsAsyncPager: + Response message for + [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.ListDataItemsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_data_items, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListDataItemsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_annotation_spec(self, + request: dataset_service.GetAnnotationSpecRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> annotation_spec.AnnotationSpec: + r"""Gets an AnnotationSpec. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetAnnotationSpecRequest`): + The request object. Request message for + [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1beta1.DatasetService.GetAnnotationSpec]. + name (:class:`str`): + Required. The name of the AnnotationSpec resource. + Format: + ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.AnnotationSpec: + Identifies a concept with which + DataItems may be annotated with. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = dataset_service.GetAnnotationSpecRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_annotation_spec, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+        return response
+
+    async def list_annotations(self,
+            request: dataset_service.ListAnnotationsRequest = None,
+            *,
+            parent: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> pagers.ListAnnotationsAsyncPager:
+        r"""Lists Annotations belonging to a DataItem.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.ListAnnotationsRequest`):
+                The request object. Request message for
+                [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations].
+            parent (:class:`str`):
+                Required. The resource name of the DataItem to list
+                Annotations from. Format:
+                ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}``
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListAnnotationsAsyncPager:
+                Response message for
+                [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations].
+
+                Iterating over this object will yield results and
+                resolve additional pages automatically.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = dataset_service.ListAnnotationsRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if parent is not None:
+            request.parent = parent
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.list_annotations,
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("parent", request.parent),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # This method is paged; wrap the response in a pager, which provides
+        # an `__aiter__` convenience method.
+        response = pagers.ListAnnotationsAsyncPager(
+            method=rpc,
+            request=request,
+            response=response,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+ return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "DatasetServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py new file mode 100644 index 0000000000..d7d6fab3c3 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py @@ -0,0 +1,1288 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.dataset_service import pagers +from google.cloud.aiplatform_v1beta1.types import annotation +from google.cloud.aiplatform_v1beta1.types import annotation_spec +from google.cloud.aiplatform_v1beta1.types import data_item +from google.cloud.aiplatform_v1beta1.types import dataset +from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset +from google.cloud.aiplatform_v1beta1.types import dataset_service +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import DatasetServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import DatasetServiceGrpcTransport +from .transports.grpc_asyncio import DatasetServiceGrpcAsyncIOTransport + + +class DatasetServiceClientMeta(type): + """Metaclass for the DatasetService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+    """
+    _transport_registry = OrderedDict()  # type: Dict[str, Type[DatasetServiceTransport]]
+    _transport_registry["grpc"] = DatasetServiceGrpcTransport
+    _transport_registry["grpc_asyncio"] = DatasetServiceGrpcAsyncIOTransport
+
+    def get_transport_class(cls,
+            label: str = None,
+        ) -> Type[DatasetServiceTransport]:
+        """Returns an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class DatasetServiceClient(metaclass=DatasetServiceClientMeta):
+    """The service that handles the CRUD of Vertex AI Dataset and its
+    child resources.
+    """
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts an api endpoint to an mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            DatasetServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            DatasetServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(
+            filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> DatasetServiceTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            DatasetServiceTransport: The transport used by the client
+                instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def annotation_path(project: str,location: str,dataset: str,data_item: str,annotation: str,) -> str:
+        """Returns a fully-qualified annotation string."""
+        return "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}/annotations/{annotation}".format(project=project, location=location, dataset=dataset, data_item=data_item, annotation=annotation, )
+
+    @staticmethod
+    def parse_annotation_path(path: str) -> Dict[str,str]:
+        """Parses an annotation path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)/dataItems/(?P<data_item>.+?)/annotations/(?P<annotation>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def annotation_spec_path(project: str,location: str,dataset: str,annotation_spec: str,) -> str:
+        """Returns a fully-qualified annotation_spec string."""
+        return "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(project=project, location=location, dataset=dataset, annotation_spec=annotation_spec, )
+
+    @staticmethod
+    def parse_annotation_spec_path(path: str) -> Dict[str,str]:
+        """Parses an annotation_spec path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)/annotationSpecs/(?P<annotation_spec>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def data_item_path(project: str,location: str,dataset: str,data_item: str,) -> str:
+        """Returns a fully-qualified data_item string."""
+        return "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}".format(project=project, location=location, dataset=dataset, data_item=data_item, )
+
+    @staticmethod
+    def parse_data_item_path(path: str) -> Dict[str,str]:
+        """Parses a data_item path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)/dataItems/(?P<data_item>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def dataset_path(project: str,location: str,dataset: str,) -> str:
+        """Returns a fully-qualified dataset string."""
+        return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, )
+
+    @staticmethod
+    def parse_dataset_path(path: str) -> Dict[str,str]:
+        """Parses a dataset path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse an organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, DatasetServiceTransport, None] = None,
+            client_options: Optional[client_options_lib.ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the dataset service client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, DatasetServiceTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+                set, no client certificate will be used.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+        """
+        if isinstance(client_options, dict):
+            client_options = client_options_lib.from_dict(client_options)
+        if client_options is None:
+            client_options = client_options_lib.ClientOptions()
+
+        # Create SSL credentials for mutual TLS if needed.
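+        # For example (illustrative), with the environment defaults --
+        # GOOGLE_API_USE_CLIENT_CERTIFICATE unset or "false" and
+        # GOOGLE_API_USE_MTLS_ENDPOINT unset (i.e. "auto") -- no client
+        # certificate is configured below and, because is_mtls stays False,
+        # the endpoint selection falls through to DEFAULT_ENDPOINT.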
+ use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, DatasetServiceTransport): + # transport is a DatasetServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_dataset(self, + request: dataset_service.CreateDatasetRequest = None, + *, + parent: str = None, + dataset: gca_dataset.Dataset = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a Dataset. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateDatasetRequest): + The request object. Request message for + [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset]. + parent (str): + Required. The resource name of the Location to create + the Dataset in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + dataset (google.cloud.aiplatform_v1beta1.types.Dataset): + Required. The Dataset to create. + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.Dataset` A + collection of DataItems and Annotations on them. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, dataset]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.CreateDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.CreateDatasetRequest): + request = dataset_service.CreateDatasetRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if dataset is not None: + request.dataset = dataset + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_dataset.Dataset, + metadata_type=dataset_service.CreateDatasetOperationMetadata, + ) + + # Done; return the response. + return response + + def get_dataset(self, + request: dataset_service.GetDatasetRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> dataset.Dataset: + r"""Gets a Dataset. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetDatasetRequest): + The request object. Request message for + [DatasetService.GetDataset][google.cloud.aiplatform.v1beta1.DatasetService.GetDataset]. + name (str): + Required. The name of the Dataset + resource. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Dataset: + A collection of DataItems and + Annotations on them. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.GetDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
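+        # For example (an illustrative sketch with placeholder resource IDs),
+        # the flattened and request forms accepted here are equivalent:
+        #
+        #   name = DatasetServiceClient.dataset_path("my-project", "us-central1", "123")
+        #   client.get_dataset(name=name)
+        #   client.get_dataset(request=dataset_service.GetDatasetRequest(name=name))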
+ if not isinstance(request, dataset_service.GetDatasetRequest): + request = dataset_service.GetDatasetRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_dataset(self, + request: dataset_service.UpdateDatasetRequest = None, + *, + dataset: gca_dataset.Dataset = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_dataset.Dataset: + r"""Updates a Dataset. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UpdateDatasetRequest): + The request object. Request message for + [DatasetService.UpdateDataset][google.cloud.aiplatform.v1beta1.DatasetService.UpdateDataset]. + dataset (google.cloud.aiplatform_v1beta1.types.Dataset): + Required. The Dataset which replaces + the resource on the server. + + This corresponds to the ``dataset`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the resource. For + the ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + Updatable fields: + + - ``display_name`` + - ``description`` + - ``labels`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Dataset: + A collection of DataItems and + Annotations on them. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([dataset, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.UpdateDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.UpdateDatasetRequest): + request = dataset_service.UpdateDatasetRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if dataset is not None: + request.dataset = dataset + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
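+        # For example (illustrative; ``existing_dataset`` is a placeholder
+        # Dataset message), updating only the display name:
+        #
+        #   client.update_dataset(
+        #       dataset=existing_dataset,
+        #       update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
+        #   )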
+ rpc = self._transport._wrapped_methods[self._transport.update_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("dataset.name", request.dataset.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_datasets(self, + request: dataset_service.ListDatasetsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDatasetsPager: + r"""Lists Datasets in a Location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListDatasetsRequest): + The request object. Request message for + [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. + parent (str): + Required. The name of the Dataset's parent resource. + Format: ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListDatasetsPager: + Response message for + [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.ListDatasetsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.ListDatasetsRequest): + request = dataset_service.ListDatasetsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_datasets] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListDatasetsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete_dataset(self, + request: dataset_service.DeleteDatasetRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a Dataset. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteDatasetRequest): + The request object. Request message for + [DatasetService.DeleteDataset][google.cloud.aiplatform.v1beta1.DatasetService.DeleteDataset]. + name (str): + Required. The resource name of the Dataset to delete. + Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.DeleteDatasetRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.DeleteDatasetRequest): + request = dataset_service.DeleteDatasetRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_dataset] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def import_data(self, + request: dataset_service.ImportDataRequest = None, + *, + name: str = None, + import_configs: Sequence[dataset.ImportDataConfig] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Imports data into a Dataset. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ImportDataRequest): + The request object. Request message for + [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. + name (str): + Required. The name of the Dataset resource. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + import_configs (Sequence[google.cloud.aiplatform_v1beta1.types.ImportDataConfig]): + Required. The desired input + locations. The contents of all input + locations will be imported in one batch. + + This corresponds to the ``import_configs`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.ImportDataResponse` + Response message for + [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, import_configs]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.ImportDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.ImportDataRequest): + request = dataset_service.ImportDataRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if import_configs is not None: + request.import_configs = import_configs + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.import_data] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + dataset_service.ImportDataResponse, + metadata_type=dataset_service.ImportDataOperationMetadata, + ) + + # Done; return the response. 
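+        # For example (illustrative; ``dataset_name`` and ``configs`` are
+        # placeholders), callers typically block on the returned future:
+        #
+        #   operation = client.import_data(name=dataset_name, import_configs=configs)
+        #   operation.result()  # waits for the ImportData LRO to complete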
+ return response + + def export_data(self, + request: dataset_service.ExportDataRequest = None, + *, + name: str = None, + export_config: dataset.ExportDataConfig = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Exports data from a Dataset. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ExportDataRequest): + The request object. Request message for + [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. + name (str): + Required. The name of the Dataset resource. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + export_config (google.cloud.aiplatform_v1beta1.types.ExportDataConfig): + Required. The desired output + location. + + This corresponds to the ``export_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.ExportDataResponse` + Response message for + [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, export_config]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.ExportDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.ExportDataRequest): + request = dataset_service.ExportDataRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if export_config is not None: + request.export_config = export_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.export_data] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + dataset_service.ExportDataResponse, + metadata_type=dataset_service.ExportDataOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def list_data_items(self, + request: dataset_service.ListDataItemsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDataItemsPager: + r"""Lists DataItems in a Dataset. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListDataItemsRequest): + The request object. Request message for + [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. + parent (str): + Required. The resource name of the Dataset to list + DataItems from. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListDataItemsPager: + Response message for + [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.ListDataItemsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.ListDataItemsRequest): + request = dataset_service.ListDataItemsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_data_items] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListDataItemsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_annotation_spec(self, + request: dataset_service.GetAnnotationSpecRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> annotation_spec.AnnotationSpec: + r"""Gets an AnnotationSpec. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetAnnotationSpecRequest): + The request object. 
Request message for
+                [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1beta1.DatasetService.GetAnnotationSpec].
+            name (str):
+                Required. The name of the AnnotationSpec resource.
+                Format:
+                ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.types.AnnotationSpec:
+                Identifies a concept with which
+                DataItems may be annotated.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a dataset_service.GetAnnotationSpecRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, dataset_service.GetAnnotationSpecRequest):
+            request = dataset_service.GetAnnotationSpecRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.get_annotation_spec]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def list_annotations(self,
+            request: dataset_service.ListAnnotationsRequest = None,
+            *,
+            parent: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> pagers.ListAnnotationsPager:
+        r"""Lists Annotations belonging to a DataItem.
+
+        Args:
+            request (google.cloud.aiplatform_v1beta1.types.ListAnnotationsRequest):
+                The request object. Request message for
+                [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations].
+            parent (str):
+                Required. The resource name of the DataItem to list
+                Annotations from. Format:
+                ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}``
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+ + Returns: + google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListAnnotationsPager: + Response message for + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a dataset_service.ListAnnotationsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, dataset_service.ListAnnotationsRequest): + request = dataset_service.ListAnnotationsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_annotations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListAnnotationsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "DatasetServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/pagers.py new file mode 100644 index 0000000000..35216020c7 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/pagers.py @@ -0,0 +1,387 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
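+#
+# Usage sketch (illustrative, with placeholder resource names): the pagers
+# defined below are returned by the corresponding DatasetServiceClient.list_*
+# and DatasetServiceAsyncClient.list_* methods, and transparently fetch
+# further pages during iteration:
+#
+#   for ds in client.list_datasets(parent="projects/my-project/locations/us-central1"):
+#       print(ds.display_name)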
+# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.cloud.aiplatform_v1beta1.types import annotation +from google.cloud.aiplatform_v1beta1.types import data_item +from google.cloud.aiplatform_v1beta1.types import dataset +from google.cloud.aiplatform_v1beta1.types import dataset_service + + +class ListDatasetsPager: + """A pager for iterating through ``list_datasets`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListDatasetsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``datasets`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListDatasets`` requests and continue to iterate + through the ``datasets`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListDatasetsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., dataset_service.ListDatasetsResponse], + request: dataset_service.ListDatasetsRequest, + response: dataset_service.ListDatasetsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListDatasetsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListDatasetsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = dataset_service.ListDatasetsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[dataset_service.ListDatasetsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[dataset.Dataset]: + for page in self.pages: + yield from page.datasets + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListDatasetsAsyncPager: + """A pager for iterating through ``list_datasets`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListDatasetsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``datasets`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListDatasets`` requests and continue to iterate + through the ``datasets`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListDatasetsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[dataset_service.ListDatasetsResponse]], + request: dataset_service.ListDatasetsRequest, + response: dataset_service.ListDatasetsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListDatasetsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListDatasetsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = dataset_service.ListDatasetsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[dataset_service.ListDatasetsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[dataset.Dataset]: + async def async_generator(): + async for page in self.pages: + for response in page.datasets: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListDataItemsPager: + """A pager for iterating through ``list_data_items`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListDataItemsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``data_items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListDataItems`` requests and continue to iterate + through the ``data_items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListDataItemsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., dataset_service.ListDataItemsResponse], + request: dataset_service.ListDataItemsRequest, + response: dataset_service.ListDataItemsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListDataItemsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListDataItemsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = dataset_service.ListDataItemsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[dataset_service.ListDataItemsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[data_item.DataItem]: + for page in self.pages: + yield from page.data_items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListDataItemsAsyncPager: + """A pager for iterating through ``list_data_items`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListDataItemsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``data_items`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListDataItems`` requests and continue to iterate + through the ``data_items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListDataItemsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[dataset_service.ListDataItemsResponse]], + request: dataset_service.ListDataItemsRequest, + response: dataset_service.ListDataItemsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListDataItemsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListDataItemsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = dataset_service.ListDataItemsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[dataset_service.ListDataItemsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[data_item.DataItem]: + async def async_generator(): + async for page in self.pages: + for response in page.data_items: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListAnnotationsPager: + """A pager for iterating through ``list_annotations`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListAnnotationsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``annotations`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListAnnotations`` requests and continue to iterate + through the ``annotations`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListAnnotationsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., dataset_service.ListAnnotationsResponse], + request: dataset_service.ListAnnotationsRequest, + response: dataset_service.ListAnnotationsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListAnnotationsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListAnnotationsResponse): + The initial response object. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = dataset_service.ListAnnotationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[dataset_service.ListAnnotationsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[annotation.Annotation]: + for page in self.pages: + yield from page.annotations + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListAnnotationsAsyncPager: + """A pager for iterating through ``list_annotations`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListAnnotationsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``annotations`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListAnnotations`` requests and continue to iterate + through the ``annotations`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListAnnotationsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[dataset_service.ListAnnotationsResponse]], + request: dataset_service.ListAnnotationsRequest, + response: dataset_service.ListAnnotationsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListAnnotationsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListAnnotationsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = dataset_service.ListAnnotationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[dataset_service.ListAnnotationsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[annotation.Annotation]: + async def async_generator(): + async for page in self.pages: + for response in page.annotations: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/__init__.py new file mode 100644 index 0000000000..561b0c5cfd --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import DatasetServiceTransport +from .grpc import DatasetServiceGrpcTransport +from .grpc_asyncio import DatasetServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[DatasetServiceTransport]] +_transport_registry['grpc'] = DatasetServiceGrpcTransport +_transport_registry['grpc_asyncio'] = DatasetServiceGrpcAsyncIOTransport + +__all__ = ( + 'DatasetServiceTransport', + 'DatasetServiceGrpcTransport', + 'DatasetServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py new file mode 100644 index 0000000000..c5e42b084d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py @@ -0,0 +1,304 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import annotation_spec +from google.cloud.aiplatform_v1beta1.types import dataset +from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset +from google.cloud.aiplatform_v1beta1.types import dataset_service +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class DatasetServiceTransport(abc.ABC): + """Abstract transport class for DatasetService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + + # Save the scopes. 
+ self._scopes = scopes
+
+ # If no credentials are provided, then determine the appropriate
+ # defaults.
+ if credentials and credentials_file:
+ raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
+
+ if credentials_file is not None:
+ credentials, _ = google.auth.load_credentials_from_file(
+ credentials_file,
+ **scopes_kwargs,
+ quota_project_id=quota_project_id
+ )
+
+ elif credentials is None:
+ credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
+
+ # If the credentials are service account credentials, then always try to use self signed JWT.
+ if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
+ credentials = credentials.with_always_use_jwt_access(True)
+
+ # Save the credentials.
+ self._credentials = credentials
+
+ # TODO(busunkim): This method is in the base transport
+ # to avoid duplicating code across the transport classes. These functions
+ # should be deleted once the minimum required version of google-auth is increased.
+
+ # TODO: Remove this function once google-auth >= 1.25.0 is required
+ @classmethod
+ def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
+ """Returns the scopes kwargs to pass to google-auth methods, depending on the installed google-auth version."""
+
+ scopes_kwargs = {}
+
+ if _GOOGLE_AUTH_VERSION and (
+ packaging.version.parse(_GOOGLE_AUTH_VERSION)
+ >= packaging.version.parse("1.25.0")
+ ):
+ scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
+ else:
+ scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
+
+ return scopes_kwargs
+
+ def _prep_wrapped_messages(self, client_info):
+ # Precompute the wrapped methods.
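+ # Each RPC below is wrapped with gapic_v1.method.wrap_method, which
+ # layers the default timeout and the client_info user-agent metadata
+ # onto whichever concrete stub the transport subclass exposes.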
+ self._wrapped_methods = {
+ self.create_dataset: gapic_v1.method.wrap_method(
+ self.create_dataset,
+ default_timeout=5.0,
+ client_info=client_info,
+ ),
+ self.get_dataset: gapic_v1.method.wrap_method(
+ self.get_dataset,
+ default_timeout=5.0,
+ client_info=client_info,
+ ),
+ self.update_dataset: gapic_v1.method.wrap_method(
+ self.update_dataset,
+ default_timeout=5.0,
+ client_info=client_info,
+ ),
+ self.list_datasets: gapic_v1.method.wrap_method(
+ self.list_datasets,
+ default_timeout=5.0,
+ client_info=client_info,
+ ),
+ self.delete_dataset: gapic_v1.method.wrap_method(
+ self.delete_dataset,
+ default_timeout=5.0,
+ client_info=client_info,
+ ),
+ self.import_data: gapic_v1.method.wrap_method(
+ self.import_data,
+ default_timeout=5.0,
+ client_info=client_info,
+ ),
+ self.export_data: gapic_v1.method.wrap_method(
+ self.export_data,
+ default_timeout=5.0,
+ client_info=client_info,
+ ),
+ self.list_data_items: gapic_v1.method.wrap_method(
+ self.list_data_items,
+ default_timeout=5.0,
+ client_info=client_info,
+ ),
+ self.get_annotation_spec: gapic_v1.method.wrap_method(
+ self.get_annotation_spec,
+ default_timeout=5.0,
+ client_info=client_info,
+ ),
+ self.list_annotations: gapic_v1.method.wrap_method(
+ self.list_annotations,
+ default_timeout=5.0,
+ client_info=client_info,
+ ),
+ }
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsClient:
+ """Return the client designed to process long-running operations."""
+ raise NotImplementedError()
+
+ @property
+ def create_dataset(self) -> Callable[
+ [dataset_service.CreateDatasetRequest],
+ Union[
+ operations_pb2.Operation,
+ Awaitable[operations_pb2.Operation]
+ ]]:
+ raise NotImplementedError()
+
+ @property
+ def get_dataset(self) -> Callable[
+ [dataset_service.GetDatasetRequest],
+ Union[
+ dataset.Dataset,
+ Awaitable[dataset.Dataset]
+ ]]:
+ raise NotImplementedError()
+
+ @property
+ def update_dataset(self) -> Callable[
+ [dataset_service.UpdateDatasetRequest],
+ Union[
+ gca_dataset.Dataset,
+ Awaitable[gca_dataset.Dataset]
+ ]]:
+ raise NotImplementedError()
+
+ @property
+ def list_datasets(self) -> Callable[
+ [dataset_service.ListDatasetsRequest],
+ Union[
+ dataset_service.ListDatasetsResponse,
+ Awaitable[dataset_service.ListDatasetsResponse]
+ ]]:
+ raise NotImplementedError()
+
+ @property
+ def delete_dataset(self) -> Callable[
+ [dataset_service.DeleteDatasetRequest],
+ Union[
+ operations_pb2.Operation,
+ Awaitable[operations_pb2.Operation]
+ ]]:
+ raise NotImplementedError()
+
+ @property
+ def import_data(self) -> Callable[
+ [dataset_service.ImportDataRequest],
+ Union[
+ operations_pb2.Operation,
+ Awaitable[operations_pb2.Operation]
+ ]]:
+ raise NotImplementedError()
+
+ @property
+ def export_data(self) -> Callable[
+ [dataset_service.ExportDataRequest],
+ Union[
+ operations_pb2.Operation,
+ Awaitable[operations_pb2.Operation]
+ ]]:
+ raise NotImplementedError()
+
+ @property
+ def list_data_items(self) -> Callable[
+ [dataset_service.ListDataItemsRequest],
+ Union[
+ dataset_service.ListDataItemsResponse,
+ Awaitable[dataset_service.ListDataItemsResponse]
+ ]]:
+ raise NotImplementedError()
+
+ @property
+ def get_annotation_spec(self) -> Callable[
+ [dataset_service.GetAnnotationSpecRequest],
+ Union[
+ annotation_spec.AnnotationSpec,
+ Awaitable[annotation_spec.AnnotationSpec]
+ ]]:
+ raise NotImplementedError()
+
+ @property
+ def list_annotations(self) -> Callable[
+ [dataset_service.ListAnnotationsRequest],
+ Union[
+ dataset_service.ListAnnotationsResponse,
+ Awaitable[dataset_service.ListAnnotationsResponse]
+ ]]:
+ raise NotImplementedError()
+
+
+__all__ = (
+ 'DatasetServiceTransport',
+)
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py
new file mode 100644
index 0000000000..9091949fe5
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py
@@ -0,0 +1,506 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import warnings
+from typing import Callable, Dict, Optional, Sequence, Tuple, Union

+from google.api_core import grpc_helpers # type: ignore
+from google.api_core import operations_v1 # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+import google.auth # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+
+import grpc # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import annotation_spec
+from google.cloud.aiplatform_v1beta1.types import dataset
+from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset
+from google.cloud.aiplatform_v1beta1.types import dataset_service
+from google.longrunning import operations_pb2 # type: ignore
+from .base import DatasetServiceTransport, DEFAULT_CLIENT_INFO
+
+
+class DatasetServiceGrpcTransport(DatasetServiceTransport):
+ """gRPC backend transport for DatasetService.
+
+ This class defines the same methods as the primary client, so the
+ primary client can load the underlying transport implementation
+ and call it.
+
+ It sends protocol buffers over the wire using gRPC (which is built on
+ top of HTTP/2); the ``grpcio`` package must be installed.
+ """
+ _stubs: Dict[str, Callable]
+
+ def __init__(self, *,
+ host: str = 'aiplatform.googleapis.com',
+ credentials: ga_credentials.Credentials = None,
+ credentials_file: str = None,
+ scopes: Sequence[str] = None,
+ channel: grpc.Channel = None,
+ api_mtls_endpoint: str = None,
+ client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+ ssl_channel_credentials: grpc.ChannelCredentials = None,
+ client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ quota_project_id: Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]):
+ The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+ scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+ ignored if ``channel`` is provided.
+ channel (Optional[grpc.Channel]): A ``Channel`` instance through
+ which to make calls.
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+ If provided, it overrides the ``host`` argument and tries to create
+ a mutual TLS channel with client SSL credentials from
+ ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated. A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+ for the grpc channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ A callback to provide client certificate bytes and private key bytes,
+ both in PEM format. It is used to configure a mutual TLS channel. It is
+ ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+ be used for service account credentials.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ self._grpc_channel = None
+ self._ssl_channel_credentials = ssl_channel_credentials
+ self._stubs: Dict[str, Callable] = {}
+ self._operations_client = None
+
+ if api_mtls_endpoint:
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+ if client_cert_source:
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+ if channel:
+ # Ignore credentials if a channel was passed.
+ credentials = False
+ # If a channel was explicitly provided, set it.
+ self._grpc_channel = channel
+ self._ssl_channel_credentials = None
+
+ else:
+ if api_mtls_endpoint:
+ host = api_mtls_endpoint
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
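+ # (Whichever SSL credentials are built here are later handed to
+ # create_channel below as ssl_credentials when no explicit channel
+ # was supplied.)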
+ if client_cert_source:
+ cert, key = client_cert_source()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+ else:
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
+ cert, key = client_cert_source_for_mtls()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+
+ # The base transport sets the host, credentials and scopes
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ always_use_jwt_access=always_use_jwt_access,
+ )
+
+ if not self._grpc_channel:
+ self._grpc_channel = type(self).create_channel(
+ self._host,
+ credentials=self._credentials,
+ credentials_file=credentials_file,
+ scopes=self._scopes,
+ ssl_credentials=self._ssl_channel_credentials,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Wrap messages. This must be done after self._grpc_channel exists
+ self._prep_wrapped_messages(client_info)
+
+ @classmethod
+ def create_channel(cls,
+ host: str = 'aiplatform.googleapis.com',
+ credentials: ga_credentials.Credentials = None,
+ credentials_file: str = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ **kwargs) -> grpc.Channel:
+ """Create and return a gRPC channel object.
+ Args:
+ host (Optional[str]): The host for the channel to use.
+ credentials (Optional[~.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify this application to the service. If
+ none are specified, the client will attempt to ascertain
+ the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+ scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
+ channel creation.
+ Returns:
+ grpc.Channel: A gRPC channel object.
+
+ Raises:
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+
+ return grpc_helpers.create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ quota_project_id=quota_project_id,
+ default_scopes=cls.AUTH_SCOPES,
+ scopes=scopes,
+ default_host=cls.DEFAULT_HOST,
+ **kwargs
+ )
+
+ @property
+ def grpc_channel(self) -> grpc.Channel:
+ """Return the channel designed to connect to this service.
+ """
+ return self._grpc_channel
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsClient:
+ """Create the client designed to process long-running operations.
+
+ This property caches on the instance; repeated calls return the same
+ client.
+ """
+ # Sanity check: Only create a new client if we do not already have one.
+ if self._operations_client is None:
+ self._operations_client = operations_v1.OperationsClient(
+ self.grpc_channel
+ )
+
+ # Return the client from cache.
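+ # (This OperationsClient shares the transport's gRPC channel; the
+ # GAPIC surface uses it to poll long-running operations such as
+ # CreateDataset and DeleteDataset.)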
+ return self._operations_client + + @property + def create_dataset(self) -> Callable[ + [dataset_service.CreateDatasetRequest], + operations_pb2.Operation]: + r"""Return a callable for the create dataset method over gRPC. + + Creates a Dataset. + + Returns: + Callable[[~.CreateDatasetRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_dataset' not in self._stubs: + self._stubs['create_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/CreateDataset', + request_serializer=dataset_service.CreateDatasetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_dataset'] + + @property + def get_dataset(self) -> Callable[ + [dataset_service.GetDatasetRequest], + dataset.Dataset]: + r"""Return a callable for the get dataset method over gRPC. + + Gets a Dataset. + + Returns: + Callable[[~.GetDatasetRequest], + ~.Dataset]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_dataset' not in self._stubs: + self._stubs['get_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/GetDataset', + request_serializer=dataset_service.GetDatasetRequest.serialize, + response_deserializer=dataset.Dataset.deserialize, + ) + return self._stubs['get_dataset'] + + @property + def update_dataset(self) -> Callable[ + [dataset_service.UpdateDatasetRequest], + gca_dataset.Dataset]: + r"""Return a callable for the update dataset method over gRPC. + + Updates a Dataset. + + Returns: + Callable[[~.UpdateDatasetRequest], + ~.Dataset]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_dataset' not in self._stubs: + self._stubs['update_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/UpdateDataset', + request_serializer=dataset_service.UpdateDatasetRequest.serialize, + response_deserializer=gca_dataset.Dataset.deserialize, + ) + return self._stubs['update_dataset'] + + @property + def list_datasets(self) -> Callable[ + [dataset_service.ListDatasetsRequest], + dataset_service.ListDatasetsResponse]: + r"""Return a callable for the list datasets method over gRPC. + + Lists Datasets in a Location. + + Returns: + Callable[[~.ListDatasetsRequest], + ~.ListDatasetsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
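+ # (Stubs are memoized in self._stubs, so the unary_unary callable
+ # for each RPC is built at most once per transport instance.)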
+ if 'list_datasets' not in self._stubs: + self._stubs['list_datasets'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/ListDatasets', + request_serializer=dataset_service.ListDatasetsRequest.serialize, + response_deserializer=dataset_service.ListDatasetsResponse.deserialize, + ) + return self._stubs['list_datasets'] + + @property + def delete_dataset(self) -> Callable[ + [dataset_service.DeleteDatasetRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete dataset method over gRPC. + + Deletes a Dataset. + + Returns: + Callable[[~.DeleteDatasetRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_dataset' not in self._stubs: + self._stubs['delete_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/DeleteDataset', + request_serializer=dataset_service.DeleteDatasetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_dataset'] + + @property + def import_data(self) -> Callable[ + [dataset_service.ImportDataRequest], + operations_pb2.Operation]: + r"""Return a callable for the import data method over gRPC. + + Imports data into a Dataset. + + Returns: + Callable[[~.ImportDataRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'import_data' not in self._stubs: + self._stubs['import_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/ImportData', + request_serializer=dataset_service.ImportDataRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['import_data'] + + @property + def export_data(self) -> Callable[ + [dataset_service.ExportDataRequest], + operations_pb2.Operation]: + r"""Return a callable for the export data method over gRPC. + + Exports data from a Dataset. + + Returns: + Callable[[~.ExportDataRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'export_data' not in self._stubs: + self._stubs['export_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/ExportData', + request_serializer=dataset_service.ExportDataRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_data'] + + @property + def list_data_items(self) -> Callable[ + [dataset_service.ListDataItemsRequest], + dataset_service.ListDataItemsResponse]: + r"""Return a callable for the list data items method over gRPC. + + Lists DataItems in a Dataset. + + Returns: + Callable[[~.ListDataItemsRequest], + ~.ListDataItemsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if 'list_data_items' not in self._stubs:
+ self._stubs['list_data_items'] = self.grpc_channel.unary_unary(
+ '/google.cloud.aiplatform.v1beta1.DatasetService/ListDataItems',
+ request_serializer=dataset_service.ListDataItemsRequest.serialize,
+ response_deserializer=dataset_service.ListDataItemsResponse.deserialize,
+ )
+ return self._stubs['list_data_items']
+
+ @property
+ def get_annotation_spec(self) -> Callable[
+ [dataset_service.GetAnnotationSpecRequest],
+ annotation_spec.AnnotationSpec]:
+ r"""Return a callable for the get annotation spec method over gRPC.
+
+ Gets an AnnotationSpec.
+
+ Returns:
+ Callable[[~.GetAnnotationSpecRequest],
+ ~.AnnotationSpec]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if 'get_annotation_spec' not in self._stubs:
+ self._stubs['get_annotation_spec'] = self.grpc_channel.unary_unary(
+ '/google.cloud.aiplatform.v1beta1.DatasetService/GetAnnotationSpec',
+ request_serializer=dataset_service.GetAnnotationSpecRequest.serialize,
+ response_deserializer=annotation_spec.AnnotationSpec.deserialize,
+ )
+ return self._stubs['get_annotation_spec']
+
+ @property
+ def list_annotations(self) -> Callable[
+ [dataset_service.ListAnnotationsRequest],
+ dataset_service.ListAnnotationsResponse]:
+ r"""Return a callable for the list annotations method over gRPC.
+
+ Lists Annotations that belong to a DataItem.
+
+ Returns:
+ Callable[[~.ListAnnotationsRequest],
+ ~.ListAnnotationsResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if 'list_annotations' not in self._stubs:
+ self._stubs['list_annotations'] = self.grpc_channel.unary_unary(
+ '/google.cloud.aiplatform.v1beta1.DatasetService/ListAnnotations',
+ request_serializer=dataset_service.ListAnnotationsRequest.serialize,
+ response_deserializer=dataset_service.ListAnnotationsResponse.deserialize,
+ )
+ return self._stubs['list_annotations']
+
+
+__all__ = (
+ 'DatasetServiceGrpcTransport',
+)
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py
new file mode 100644
index 0000000000..d8c7f0a444
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py
@@ -0,0 +1,510 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import grpc_helpers_async # type: ignore
+from google.api_core import operations_v1 # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+import packaging.version
+
+import grpc # type: ignore
+from grpc.experimental import aio # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import annotation_spec
+from google.cloud.aiplatform_v1beta1.types import dataset
+from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset
+from google.cloud.aiplatform_v1beta1.types import dataset_service
+from google.longrunning import operations_pb2 # type: ignore
+from .base import DatasetServiceTransport, DEFAULT_CLIENT_INFO
+from .grpc import DatasetServiceGrpcTransport
+
+
+class DatasetServiceGrpcAsyncIOTransport(DatasetServiceTransport):
+ """gRPC AsyncIO backend transport for DatasetService.
+
+ This class defines the same methods as the primary client, so the
+ primary client can load the underlying transport implementation
+ and call it.
+
+ It sends protocol buffers over the wire using gRPC (which is built on
+ top of HTTP/2); the ``grpcio`` package must be installed.
+ """
+
+ _grpc_channel: aio.Channel
+ _stubs: Dict[str, Callable] = {}
+
+ @classmethod
+ def create_channel(cls,
+ host: str = 'aiplatform.googleapis.com',
+ credentials: ga_credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ **kwargs) -> aio.Channel:
+ """Create and return a gRPC AsyncIO channel object.
+ Args:
+ host (Optional[str]): The host for the channel to use.
+ credentials (Optional[~.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify this application to the service. If
+ none are specified, the client will attempt to ascertain
+ the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+ scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
+ channel creation.
+ Returns:
+ aio.Channel: A gRPC AsyncIO channel object.
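+
+ Raises:
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.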
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_dataset(self) -> Callable[ + [dataset_service.CreateDatasetRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create dataset method over gRPC. + + Creates a Dataset. + + Returns: + Callable[[~.CreateDatasetRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_dataset' not in self._stubs: + self._stubs['create_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/CreateDataset', + request_serializer=dataset_service.CreateDatasetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_dataset'] + + @property + def get_dataset(self) -> Callable[ + [dataset_service.GetDatasetRequest], + Awaitable[dataset.Dataset]]: + r"""Return a callable for the get dataset method over gRPC. + + Gets a Dataset. + + Returns: + Callable[[~.GetDatasetRequest], + Awaitable[~.Dataset]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_dataset' not in self._stubs: + self._stubs['get_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/GetDataset', + request_serializer=dataset_service.GetDatasetRequest.serialize, + response_deserializer=dataset.Dataset.deserialize, + ) + return self._stubs['get_dataset'] + + @property + def update_dataset(self) -> Callable[ + [dataset_service.UpdateDatasetRequest], + Awaitable[gca_dataset.Dataset]]: + r"""Return a callable for the update dataset method over gRPC. + + Updates a Dataset. + + Returns: + Callable[[~.UpdateDatasetRequest], + Awaitable[~.Dataset]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_dataset' not in self._stubs: + self._stubs['update_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/UpdateDataset', + request_serializer=dataset_service.UpdateDatasetRequest.serialize, + response_deserializer=gca_dataset.Dataset.deserialize, + ) + return self._stubs['update_dataset'] + + @property + def list_datasets(self) -> Callable[ + [dataset_service.ListDatasetsRequest], + Awaitable[dataset_service.ListDatasetsResponse]]: + r"""Return a callable for the list datasets method over gRPC. + + Lists Datasets in a Location. + + Returns: + Callable[[~.ListDatasetsRequest], + Awaitable[~.ListDatasetsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_datasets' not in self._stubs: + self._stubs['list_datasets'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/ListDatasets', + request_serializer=dataset_service.ListDatasetsRequest.serialize, + response_deserializer=dataset_service.ListDatasetsResponse.deserialize, + ) + return self._stubs['list_datasets'] + + @property + def delete_dataset(self) -> Callable[ + [dataset_service.DeleteDatasetRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete dataset method over gRPC. + + Deletes a Dataset. + + Returns: + Callable[[~.DeleteDatasetRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_dataset' not in self._stubs: + self._stubs['delete_dataset'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/DeleteDataset', + request_serializer=dataset_service.DeleteDatasetRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_dataset'] + + @property + def import_data(self) -> Callable[ + [dataset_service.ImportDataRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the import data method over gRPC. + + Imports data into a Dataset. + + Returns: + Callable[[~.ImportDataRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'import_data' not in self._stubs: + self._stubs['import_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/ImportData', + request_serializer=dataset_service.ImportDataRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['import_data'] + + @property + def export_data(self) -> Callable[ + [dataset_service.ExportDataRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the export data method over gRPC. + + Exports data from a Dataset. + + Returns: + Callable[[~.ExportDataRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'export_data' not in self._stubs: + self._stubs['export_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/ExportData', + request_serializer=dataset_service.ExportDataRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_data'] + + @property + def list_data_items(self) -> Callable[ + [dataset_service.ListDataItemsRequest], + Awaitable[dataset_service.ListDataItemsResponse]]: + r"""Return a callable for the list data items method over gRPC. + + Lists DataItems in a Dataset. + + Returns: + Callable[[~.ListDataItemsRequest], + Awaitable[~.ListDataItemsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_data_items' not in self._stubs: + self._stubs['list_data_items'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.DatasetService/ListDataItems', + request_serializer=dataset_service.ListDataItemsRequest.serialize, + response_deserializer=dataset_service.ListDataItemsResponse.deserialize, + ) + return self._stubs['list_data_items'] + + @property + def get_annotation_spec(self) -> Callable[ + [dataset_service.GetAnnotationSpecRequest], + Awaitable[annotation_spec.AnnotationSpec]]: + r"""Return a callable for the get annotation spec method over gRPC. 
+
+ Gets an AnnotationSpec.
+
+ Returns:
+ Callable[[~.GetAnnotationSpecRequest],
+ Awaitable[~.AnnotationSpec]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if 'get_annotation_spec' not in self._stubs:
+ self._stubs['get_annotation_spec'] = self.grpc_channel.unary_unary(
+ '/google.cloud.aiplatform.v1beta1.DatasetService/GetAnnotationSpec',
+ request_serializer=dataset_service.GetAnnotationSpecRequest.serialize,
+ response_deserializer=annotation_spec.AnnotationSpec.deserialize,
+ )
+ return self._stubs['get_annotation_spec']
+
+ @property
+ def list_annotations(self) -> Callable[
+ [dataset_service.ListAnnotationsRequest],
+ Awaitable[dataset_service.ListAnnotationsResponse]]:
+ r"""Return a callable for the list annotations method over gRPC.
+
+ Lists Annotations that belong to a DataItem.
+
+ Returns:
+ Callable[[~.ListAnnotationsRequest],
+ Awaitable[~.ListAnnotationsResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if 'list_annotations' not in self._stubs:
+ self._stubs['list_annotations'] = self.grpc_channel.unary_unary(
+ '/google.cloud.aiplatform.v1beta1.DatasetService/ListAnnotations',
+ request_serializer=dataset_service.ListAnnotationsRequest.serialize,
+ response_deserializer=dataset_service.ListAnnotationsResponse.deserialize,
+ )
+ return self._stubs['list_annotations']
+
+
+__all__ = (
+ 'DatasetServiceGrpcAsyncIOTransport',
+)
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/__init__.py
new file mode 100644
index 0000000000..7db43e768e
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/__init__.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +from .client import EndpointServiceClient +from .async_client import EndpointServiceAsyncClient + +__all__ = ( + 'EndpointServiceClient', + 'EndpointServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py new file mode 100644 index 0000000000..0d22bf8ce6 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py @@ -0,0 +1,860 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.endpoint_service import pagers +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import endpoint +from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint +from google.cloud.aiplatform_v1beta1.types import endpoint_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import EndpointServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import EndpointServiceGrpcAsyncIOTransport +from .client import EndpointServiceClient + + +class EndpointServiceAsyncClient: + """""" + + _client: EndpointServiceClient + + DEFAULT_ENDPOINT = EndpointServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = EndpointServiceClient.DEFAULT_MTLS_ENDPOINT + + endpoint_path = staticmethod(EndpointServiceClient.endpoint_path) + parse_endpoint_path = staticmethod(EndpointServiceClient.parse_endpoint_path) + model_path = staticmethod(EndpointServiceClient.model_path) + parse_model_path = staticmethod(EndpointServiceClient.parse_model_path) + common_billing_account_path = staticmethod(EndpointServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(EndpointServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(EndpointServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(EndpointServiceClient.parse_common_folder_path) + common_organization_path = 
staticmethod(EndpointServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(EndpointServiceClient.parse_common_organization_path) + common_project_path = staticmethod(EndpointServiceClient.common_project_path) + parse_common_project_path = staticmethod(EndpointServiceClient.parse_common_project_path) + common_location_path = staticmethod(EndpointServiceClient.common_location_path) + parse_common_location_path = staticmethod(EndpointServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + EndpointServiceAsyncClient: The constructed client. + """ + return EndpointServiceClient.from_service_account_info.__func__(EndpointServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + EndpointServiceAsyncClient: The constructed client. + """ + return EndpointServiceClient.from_service_account_file.__func__(EndpointServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> EndpointServiceTransport: + """Returns the transport used by the client instance. + + Returns: + EndpointServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(EndpointServiceClient).get_transport_class, type(EndpointServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, EndpointServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the endpoint service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.EndpointServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. 
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = EndpointServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_endpoint(self, + request: endpoint_service.CreateEndpointRequest = None, + *, + parent: str = None, + endpoint: gca_endpoint.Endpoint = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates an Endpoint. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateEndpointRequest`): + The request object. Request message for + [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint]. + parent (:class:`str`): + Required. The resource name of the Location to create + the Endpoint in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + endpoint (:class:`google.cloud.aiplatform_v1beta1.types.Endpoint`): + Required. The Endpoint to create. + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Endpoint` Models are deployed into it, and afterwards Endpoint is called to obtain + predictions and explanations. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, endpoint]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = endpoint_service.CreateEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if endpoint is not None: + request.endpoint = endpoint + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_endpoint, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
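+        # (Illustrative note: `rpc` resolves to the transport's create_endpoint
+        # callable; awaiting it returns the raw longrunning Operation proto,
+        # which is wrapped into an AsyncOperation below. A hypothetical caller
+        # would then do:
+        #
+        #     lro = await client.create_endpoint(parent=parent, endpoint=endpoint)
+        #     created = await lro.result()  # resolves to an Endpoint
+        #
+        # where `client`, `parent`, and `endpoint` are assumed to be defined.)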
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_endpoint.Endpoint, + metadata_type=endpoint_service.CreateEndpointOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_endpoint(self, + request: endpoint_service.GetEndpointRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> endpoint.Endpoint: + r"""Gets an Endpoint. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetEndpointRequest`): + The request object. Request message for + [EndpointService.GetEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.GetEndpoint] + name (:class:`str`): + Required. The name of the Endpoint resource. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Endpoint: + Models are deployed into it, and + afterwards Endpoint is called to obtain + predictions and explanations. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = endpoint_service.GetEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_endpoint, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_endpoints(self, + request: endpoint_service.ListEndpointsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListEndpointsAsyncPager: + r"""Lists Endpoints in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListEndpointsRequest`): + The request object. Request message for + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. + parent (:class:`str`): + Required. The resource name of the Location from which + to list the Endpoints. 
Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.endpoint_service.pagers.ListEndpointsAsyncPager: + Response message for + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = endpoint_service.ListEndpointsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_endpoints, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListEndpointsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_endpoint(self, + request: endpoint_service.UpdateEndpointRequest = None, + *, + endpoint: gca_endpoint.Endpoint = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_endpoint.Endpoint: + r"""Updates an Endpoint. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateEndpointRequest`): + The request object. Request message for + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. + endpoint (:class:`google.cloud.aiplatform_v1beta1.types.Endpoint`): + Required. The Endpoint which replaces + the resource on the server. + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The update mask applies to the resource. See + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Endpoint: + Models are deployed into it, and + afterwards Endpoint is called to obtain + predictions and explanations. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([endpoint, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = endpoint_service.UpdateEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_endpoint, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint.name", request.endpoint.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_endpoint(self, + request: endpoint_service.DeleteEndpointRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes an Endpoint. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteEndpointRequest`): + The request object. Request message for + [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeleteEndpoint]. + name (:class:`str`): + Required. The name of the Endpoint resource to be + deleted. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
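+        # (Illustrative call shapes, with hypothetical values: both
+        # delete_endpoint(name="...") and delete_endpoint(request=request) are
+        # accepted, but delete_endpoint(request=request, name="...") fails the
+        # check below.)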
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = endpoint_service.DeleteEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_endpoint, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def deploy_model(self, + request: endpoint_service.DeployModelRequest = None, + *, + endpoint: str = None, + deployed_model: gca_endpoint.DeployedModel = None, + traffic_split: Sequence[endpoint_service.DeployModelRequest.TrafficSplitEntry] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deploys a Model into this Endpoint, creating a + DeployedModel within it. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeployModelRequest`): + The request object. Request message for + [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. + endpoint (:class:`str`): + Required. The name of the Endpoint resource into which + to deploy a Model. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_model (:class:`google.cloud.aiplatform_v1beta1.types.DeployedModel`): + Required. The DeployedModel to be created within the + Endpoint. Note that + [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + must be updated for the DeployedModel to start receiving + traffic, either as part of this call, or via + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. + + This corresponds to the ``deployed_model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + traffic_split (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.DeployModelRequest.TrafficSplitEntry]`): + A map from a DeployedModel's ID to the percentage of + this Endpoint's traffic that should be forwarded to that + DeployedModel. + + If this field is non-empty, then the Endpoint's + [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + will be overwritten with it. To refer to the ID of the + just being deployed Model, a "0" should be used, and the + actual ID of the new DeployedModel will be filled in its + place by this method. The traffic percentage values must + add up to 100. 
+ + If this field is empty, then the Endpoint's + [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + is not updated. + + This corresponds to the ``traffic_split`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.DeployModelResponse` + Response message for + [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([endpoint, deployed_model, traffic_split]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = endpoint_service.DeployModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if deployed_model is not None: + request.deployed_model = deployed_model + + if traffic_split: + request.traffic_split.update(traffic_split) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.deploy_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + endpoint_service.DeployModelResponse, + metadata_type=endpoint_service.DeployModelOperationMetadata, + ) + + # Done; return the response. + return response + + async def undeploy_model(self, + request: endpoint_service.UndeployModelRequest = None, + *, + endpoint: str = None, + deployed_model_id: str = None, + traffic_split: Sequence[endpoint_service.UndeployModelRequest.TrafficSplitEntry] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Undeploys a Model from an Endpoint, removing a + DeployedModel from it, and freeing all resources it's + using. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UndeployModelRequest`): + The request object. Request message for + [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. + endpoint (:class:`str`): + Required. The name of the Endpoint resource from which + to undeploy a Model. 
Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_model_id (:class:`str`): + Required. The ID of the DeployedModel + to be undeployed from the Endpoint. + + This corresponds to the ``deployed_model_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + traffic_split (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.UndeployModelRequest.TrafficSplitEntry]`): + If this field is provided, then the Endpoint's + [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + will be overwritten with it. If last DeployedModel is + being undeployed from the Endpoint, the + [Endpoint.traffic_split] will always end up empty when + this call returns. A DeployedModel will be successfully + undeployed only if it doesn't have any traffic assigned + to it when this method executes, or if this field + unassigns any traffic to it. + + This corresponds to the ``traffic_split`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.UndeployModelResponse` + Response message for + [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([endpoint, deployed_model_id, traffic_split]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = endpoint_service.UndeployModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if deployed_model_id is not None: + request.deployed_model_id = deployed_model_id + + if traffic_split: + request.traffic_split.update(traffic_split) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.undeploy_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + endpoint_service.UndeployModelResponse, + metadata_type=endpoint_service.UndeployModelOperationMetadata, + ) + + # Done; return the response. 
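+        # (Illustrative follow-up, names hypothetical: a caller would typically
+        # `await response.result()` on the returned AsyncOperation to block
+        # until the UndeployModelResponse is available.)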
+ return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "EndpointServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py new file mode 100644 index 0000000000..9ad6408d14 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py @@ -0,0 +1,1054 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.endpoint_service import pagers +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import endpoint +from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint +from google.cloud.aiplatform_v1beta1.types import endpoint_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import EndpointServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import EndpointServiceGrpcTransport +from .transports.grpc_asyncio import EndpointServiceGrpcAsyncIOTransport + + +class EndpointServiceClientMeta(type): + """Metaclass for the EndpointService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
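+
+    For example (illustrative), calling
+    ``EndpointServiceClient.get_transport_class("grpc_asyncio")`` resolves to
+    ``EndpointServiceGrpcAsyncIOTransport`` from the registry defined below,
+    without instantiating a client.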
+ """ + _transport_registry = OrderedDict() # type: Dict[str, Type[EndpointServiceTransport]] + _transport_registry["grpc"] = EndpointServiceGrpcTransport + _transport_registry["grpc_asyncio"] = EndpointServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[EndpointServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class EndpointServiceClient(metaclass=EndpointServiceClientMeta): + """""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + EndpointServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + EndpointServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> EndpointServiceTransport: + """Returns the transport used by the client instance. + + Returns: + EndpointServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def endpoint_path(project: str,location: str,endpoint: str,) -> str: + """Returns a fully-qualified endpoint string.""" + return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + + @staticmethod + def parse_endpoint_path(path: str) -> Dict[str,str]: + """Parses a endpoint path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def model_path(project: str,location: str,model: str,) -> str: + """Returns a fully-qualified model string.""" + return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + + @staticmethod + def parse_model_path(path: str) -> Dict[str,str]: + """Parses a model path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, EndpointServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the endpoint service client. 
+ + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, EndpointServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, EndpointServiceTransport): + # transport is a EndpointServiceTransport instance. 
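+            # (Illustrative: a transport instance already carries credentials
+            # and scopes, so a call like EndpointServiceClient(
+            #     credentials=creds, transport=my_transport), with hypothetical
+            # `creds` and `my_transport`, is rejected below with a ValueError.)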
+ if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_endpoint(self, + request: endpoint_service.CreateEndpointRequest = None, + *, + parent: str = None, + endpoint: gca_endpoint.Endpoint = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates an Endpoint. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateEndpointRequest): + The request object. Request message for + [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint]. + parent (str): + Required. The resource name of the Location to create + the Endpoint in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + endpoint (google.cloud.aiplatform_v1beta1.types.Endpoint): + Required. The Endpoint to create. + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Endpoint` Models are deployed into it, and afterwards Endpoint is called to obtain + predictions and explanations. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, endpoint]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a endpoint_service.CreateEndpointRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, endpoint_service.CreateEndpointRequest): + request = endpoint_service.CreateEndpointRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if endpoint is not None: + request.endpoint = endpoint + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_endpoint] + + # Certain fields should be provided within the metadata header; + # add these here. 
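+        # (Illustrative: for parent="projects/my-proj/locations/us-central1",
+        # to_grpc_metadata yields an "x-goog-request-params" metadata entry
+        # whose URL-encoded value tells the backend which resource the request
+        # should be routed to.)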
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_endpoint.Endpoint, + metadata_type=endpoint_service.CreateEndpointOperationMetadata, + ) + + # Done; return the response. + return response + + def get_endpoint(self, + request: endpoint_service.GetEndpointRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> endpoint.Endpoint: + r"""Gets an Endpoint. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetEndpointRequest): + The request object. Request message for + [EndpointService.GetEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.GetEndpoint] + name (str): + Required. The name of the Endpoint resource. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Endpoint: + Models are deployed into it, and + afterwards Endpoint is called to obtain + predictions and explanations. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a endpoint_service.GetEndpointRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, endpoint_service.GetEndpointRequest): + request = endpoint_service.GetEndpointRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_endpoint] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_endpoints(self, + request: endpoint_service.ListEndpointsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListEndpointsPager: + r"""Lists Endpoints in a Location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListEndpointsRequest): + The request object. 
Request message for + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. + parent (str): + Required. The resource name of the Location from which + to list the Endpoints. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.endpoint_service.pagers.ListEndpointsPager: + Response message for + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a endpoint_service.ListEndpointsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, endpoint_service.ListEndpointsRequest): + request = endpoint_service.ListEndpointsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_endpoints] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListEndpointsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_endpoint(self, + request: endpoint_service.UpdateEndpointRequest = None, + *, + endpoint: gca_endpoint.Endpoint = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_endpoint.Endpoint: + r"""Updates an Endpoint. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UpdateEndpointRequest): + The request object. Request message for + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. + endpoint (google.cloud.aiplatform_v1beta1.types.Endpoint): + Required. The Endpoint which replaces + the resource on the server. + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. 
The update mask applies to the resource. See + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Endpoint: + Models are deployed into it, and + afterwards Endpoint is called to obtain + predictions and explanations. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([endpoint, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a endpoint_service.UpdateEndpointRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, endpoint_service.UpdateEndpointRequest): + request = endpoint_service.UpdateEndpointRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_endpoint] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint.name", request.endpoint.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_endpoint(self, + request: endpoint_service.DeleteEndpointRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes an Endpoint. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteEndpointRequest): + The request object. Request message for + [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeleteEndpoint]. + name (str): + Required. The name of the Endpoint resource to be + deleted. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. 
A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is an empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in an endpoint_service.DeleteEndpointRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, endpoint_service.DeleteEndpointRequest): + request = endpoint_service.DeleteEndpointRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_endpoint] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def deploy_model(self, + request: endpoint_service.DeployModelRequest = None, + *, + endpoint: str = None, + deployed_model: gca_endpoint.DeployedModel = None, + traffic_split: Sequence[endpoint_service.DeployModelRequest.TrafficSplitEntry] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deploys a Model into this Endpoint, creating a + DeployedModel within it. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeployModelRequest): + The request object. Request message for + [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. + endpoint (str): + Required. The name of the Endpoint resource into which + to deploy a Model. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_model (google.cloud.aiplatform_v1beta1.types.DeployedModel): + Required. The DeployedModel to be created within the + Endpoint. Note that + [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + must be updated for the DeployedModel to start receiving + traffic, either as part of this call, or via + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. + + This corresponds to the ``deployed_model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set.
+ traffic_split (Sequence[google.cloud.aiplatform_v1beta1.types.DeployModelRequest.TrafficSplitEntry]): + A map from a DeployedModel's ID to the percentage of + this Endpoint's traffic that should be forwarded to that + DeployedModel. + + If this field is non-empty, then the Endpoint's + [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + will be overwritten with it. To refer to the ID of the + Model that is being deployed by this call, use "0"; the + actual ID of the new DeployedModel will be filled in its + place by this method. The traffic percentage values must + add up to 100. + + If this field is empty, then the Endpoint's + [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + is not updated. + + This corresponds to the ``traffic_split`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.DeployModelResponse` + Response message for + [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([endpoint, deployed_model, traffic_split]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in an endpoint_service.DeployModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, endpoint_service.DeployModelRequest): + request = endpoint_service.DeployModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if deployed_model is not None: + request.deployed_model = deployed_model + if traffic_split is not None: + request.traffic_split = traffic_split + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.deploy_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + endpoint_service.DeployModelResponse, + metadata_type=endpoint_service.DeployModelOperationMetadata, + ) + + # Done; return the response.
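+        # (Editor's sketch, not generated code.) The object returned below is a
+        # google.api_core.operation.Operation future; a typical caller blocks on
+        # it, e.g.
+        #     deploy_response = operation.result(timeout=7200)  # timeout value is illustrative
+        # which yields a DeployModelResponse once the long-running deploy finishes.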
+ return response + + def undeploy_model(self, + request: endpoint_service.UndeployModelRequest = None, + *, + endpoint: str = None, + deployed_model_id: str = None, + traffic_split: Sequence[endpoint_service.UndeployModelRequest.TrafficSplitEntry] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Undeploys a Model from an Endpoint, removing a + DeployedModel from it, and freeing all resources it's + using. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UndeployModelRequest): + The request object. Request message for + [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. + endpoint (str): + Required. The name of the Endpoint resource from which + to undeploy a Model. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_model_id (str): + Required. The ID of the DeployedModel + to be undeployed from the Endpoint. + + This corresponds to the ``deployed_model_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + traffic_split (Sequence[google.cloud.aiplatform_v1beta1.types.UndeployModelRequest.TrafficSplitEntry]): + If this field is provided, then the Endpoint's + [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + will be overwritten with it. If the last DeployedModel is + being undeployed from the Endpoint, the + [Endpoint.traffic_split] will always end up empty when + this call returns. A DeployedModel will be successfully + undeployed only if it doesn't have any traffic assigned + to it when this method executes, or if this field + unassigns all traffic from it. + + This corresponds to the ``traffic_split`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.UndeployModelResponse` + Response message for + [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([endpoint, deployed_model_id, traffic_split]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in an endpoint_service.UndeployModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, endpoint_service.UndeployModelRequest): + request = endpoint_service.UndeployModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these.
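+        # (Editor's sketch; the resource IDs below are hypothetical.) The
+        # flattened signature lets a caller skip building the request proto:
+        #     client.undeploy_model(
+        #         endpoint="projects/my-project/locations/us-central1/endpoints/123",
+        #         deployed_model_id="456",
+        #     )
+        # and the assignments below copy those arguments onto the request.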
+ if endpoint is not None: + request.endpoint = endpoint + if deployed_model_id is not None: + request.deployed_model_id = deployed_model_id + if traffic_split is not None: + request.traffic_split = traffic_split + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.undeploy_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + endpoint_service.UndeployModelResponse, + metadata_type=endpoint_service.UndeployModelOperationMetadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "EndpointServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/pagers.py new file mode 100644 index 0000000000..80c94eacf9 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/pagers.py @@ -0,0 +1,141 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.cloud.aiplatform_v1beta1.types import endpoint +from google.cloud.aiplatform_v1beta1.types import endpoint_service + + +class ListEndpointsPager: + """A pager for iterating through ``list_endpoints`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListEndpointsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``endpoints`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListEndpoints`` requests and continue to iterate + through the ``endpoints`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListEndpointsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., endpoint_service.ListEndpointsResponse], + request: endpoint_service.ListEndpointsRequest, + response: endpoint_service.ListEndpointsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListEndpointsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListEndpointsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = endpoint_service.ListEndpointsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[endpoint_service.ListEndpointsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[endpoint.Endpoint]: + for page in self.pages: + yield from page.endpoints + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListEndpointsAsyncPager: + """A pager for iterating through ``list_endpoints`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListEndpointsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``endpoints`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListEndpoints`` requests and continue to iterate + through the ``endpoints`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListEndpointsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[endpoint_service.ListEndpointsResponse]], + request: endpoint_service.ListEndpointsRequest, + response: endpoint_service.ListEndpointsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListEndpointsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListEndpointsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = endpoint_service.ListEndpointsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[endpoint_service.ListEndpointsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[endpoint.Endpoint]: + async def async_generator(): + async for page in self.pages: + for response in page.endpoints: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/__init__.py new file mode 100644 index 0000000000..a062fc074c --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import EndpointServiceTransport +from .grpc import EndpointServiceGrpcTransport +from .grpc_asyncio import EndpointServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[EndpointServiceTransport]] +_transport_registry['grpc'] = EndpointServiceGrpcTransport +_transport_registry['grpc_asyncio'] = EndpointServiceGrpcAsyncIOTransport + +__all__ = ( + 'EndpointServiceTransport', + 'EndpointServiceGrpcTransport', + 'EndpointServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py new file mode 100644 index 0000000000..f41b7dab50 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py @@ -0,0 +1,261 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import endpoint +from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint +from google.cloud.aiplatform_v1beta1.types import endpoint_service +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class EndpointServiceTransport(abc.ABC): + """Abstract transport class for EndpointService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. 
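+        # (Editor's note on the logic below.) Resolution order: an explicit
+        # credentials_file is loaded first; otherwise explicit credentials are
+        # used as-is; otherwise google.auth.default() supplies Application
+        # Default Credentials. Passing both raises DuplicateCredentialArgs.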
+ if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # TODO(busunkim): These helpers are in the base transport + # to avoid duplicating code across the transport classes. They + # should be deleted once the minimum required version of google-auth is increased. + + # TODO: Remove this function once google-auth >= 1.25.0 is required + @classmethod + def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: + """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" + + scopes_kwargs = {} + + if _GOOGLE_AUTH_VERSION and ( + packaging.version.parse(_GOOGLE_AUTH_VERSION) + >= packaging.version.parse("1.25.0") + ): + scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} + else: + scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} + + return scopes_kwargs + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_endpoint: gapic_v1.method.wrap_method( + self.create_endpoint, + default_timeout=5.0, + client_info=client_info, + ), + self.get_endpoint: gapic_v1.method.wrap_method( + self.get_endpoint, + default_timeout=5.0, + client_info=client_info, + ), + self.list_endpoints: gapic_v1.method.wrap_method( + self.list_endpoints, + default_timeout=5.0, + client_info=client_info, + ), + self.update_endpoint: gapic_v1.method.wrap_method( + self.update_endpoint, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_endpoint: gapic_v1.method.wrap_method( + self.delete_endpoint, + default_timeout=5.0, + client_info=client_info, + ), + self.deploy_model: gapic_v1.method.wrap_method( + self.deploy_model, + default_timeout=5.0, + client_info=client_info, + ), + self.undeploy_model: gapic_v1.method.wrap_method( + self.undeploy_model, + default_timeout=5.0, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_endpoint(self) -> Callable[ + [endpoint_service.CreateEndpointRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_endpoint(self) -> Callable[ + [endpoint_service.GetEndpointRequest], + Union[ + endpoint.Endpoint, + Awaitable[endpoint.Endpoint] + ]]: + raise NotImplementedError() + + @property + def list_endpoints(self) -> Callable[ + [endpoint_service.ListEndpointsRequest], + Union[ + endpoint_service.ListEndpointsResponse, + Awaitable[endpoint_service.ListEndpointsResponse] + ]]: + raise NotImplementedError() + + @property + def update_endpoint(self) -> Callable[ +
[endpoint_service.UpdateEndpointRequest], + Union[ + gca_endpoint.Endpoint, + Awaitable[gca_endpoint.Endpoint] + ]]: + raise NotImplementedError() + + @property + def delete_endpoint(self) -> Callable[ + [endpoint_service.DeleteEndpointRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def deploy_model(self) -> Callable[ + [endpoint_service.DeployModelRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def undeploy_model(self) -> Callable[ + [endpoint_service.UndeployModelRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'EndpointServiceTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py new file mode 100644 index 0000000000..03282d4be0 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py @@ -0,0 +1,430 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import endpoint +from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint +from google.cloud.aiplatform_v1beta1.types import endpoint_service +from google.longrunning import operations_pb2 # type: ignore +from .base import EndpointServiceTransport, DEFAULT_CLIENT_INFO + + +class EndpointServiceGrpcTransport(EndpointServiceTransport): + """gRPC backend transport for EndpointService. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. 
+ credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): An optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations.
+ + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_endpoint(self) -> Callable[ + [endpoint_service.CreateEndpointRequest], + operations_pb2.Operation]: + r"""Return a callable for the create endpoint method over gRPC. + + Creates an Endpoint. + + Returns: + Callable[[~.CreateEndpointRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_endpoint' not in self._stubs: + self._stubs['create_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.EndpointService/CreateEndpoint', + request_serializer=endpoint_service.CreateEndpointRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_endpoint'] + + @property + def get_endpoint(self) -> Callable[ + [endpoint_service.GetEndpointRequest], + endpoint.Endpoint]: + r"""Return a callable for the get endpoint method over gRPC. + + Gets an Endpoint. + + Returns: + Callable[[~.GetEndpointRequest], + ~.Endpoint]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_endpoint' not in self._stubs: + self._stubs['get_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.EndpointService/GetEndpoint', + request_serializer=endpoint_service.GetEndpointRequest.serialize, + response_deserializer=endpoint.Endpoint.deserialize, + ) + return self._stubs['get_endpoint'] + + @property + def list_endpoints(self) -> Callable[ + [endpoint_service.ListEndpointsRequest], + endpoint_service.ListEndpointsResponse]: + r"""Return a callable for the list endpoints method over gRPC. + + Lists Endpoints in a Location. + + Returns: + Callable[[~.ListEndpointsRequest], + ~.ListEndpointsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_endpoints' not in self._stubs: + self._stubs['list_endpoints'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.EndpointService/ListEndpoints', + request_serializer=endpoint_service.ListEndpointsRequest.serialize, + response_deserializer=endpoint_service.ListEndpointsResponse.deserialize, + ) + return self._stubs['list_endpoints'] + + @property + def update_endpoint(self) -> Callable[ + [endpoint_service.UpdateEndpointRequest], + gca_endpoint.Endpoint]: + r"""Return a callable for the update endpoint method over gRPC. + + Updates an Endpoint. + + Returns: + Callable[[~.UpdateEndpointRequest], + ~.Endpoint]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_endpoint' not in self._stubs: + self._stubs['update_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.EndpointService/UpdateEndpoint', + request_serializer=endpoint_service.UpdateEndpointRequest.serialize, + response_deserializer=gca_endpoint.Endpoint.deserialize, + ) + return self._stubs['update_endpoint'] + + @property + def delete_endpoint(self) -> Callable[ + [endpoint_service.DeleteEndpointRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete endpoint method over gRPC. + + Deletes an Endpoint. + + Returns: + Callable[[~.DeleteEndpointRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_endpoint' not in self._stubs: + self._stubs['delete_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.EndpointService/DeleteEndpoint', + request_serializer=endpoint_service.DeleteEndpointRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_endpoint'] + + @property + def deploy_model(self) -> Callable[ + [endpoint_service.DeployModelRequest], + operations_pb2.Operation]: + r"""Return a callable for the deploy model method over gRPC. + + Deploys a Model into this Endpoint, creating a + DeployedModel within it. + + Returns: + Callable[[~.DeployModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'deploy_model' not in self._stubs: + self._stubs['deploy_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.EndpointService/DeployModel', + request_serializer=endpoint_service.DeployModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['deploy_model'] + + @property + def undeploy_model(self) -> Callable[ + [endpoint_service.UndeployModelRequest], + operations_pb2.Operation]: + r"""Return a callable for the undeploy model method over gRPC. + + Undeploys a Model from an Endpoint, removing a + DeployedModel from it, and freeing all resources it's + using. + + Returns: + Callable[[~.UndeployModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
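+        # (Editor's note.) Stubs are created lazily on first property access
+        # and memoized in self._stubs, so repeated lookups reuse one callable.
+        # A hypothetical direct use (most callers go through the client layer):
+        #     op = transport.undeploy_model(request)  # returns operations_pb2.Operation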
+ if 'undeploy_model' not in self._stubs: + self._stubs['undeploy_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.EndpointService/UndeployModel', + request_serializer=endpoint_service.UndeployModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['undeploy_model'] + + +__all__ = ( + 'EndpointServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..f5ac9f5a71 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py @@ -0,0 +1,434 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +import packaging.version + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import endpoint +from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint +from google.cloud.aiplatform_v1beta1.types import endpoint_service +from google.longrunning import operations_pb2 # type: ignore +from .base import EndpointServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import EndpointServiceGrpcTransport + + +class EndpointServiceGrpcAsyncIOTransport(EndpointServiceTransport): + """gRPC AsyncIO backend transport for EndpointService. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): An optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): An optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests.
If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_endpoint(self) -> Callable[ + [endpoint_service.CreateEndpointRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create endpoint method over gRPC. + + Creates an Endpoint.
+ + Returns: + Callable[[~.CreateEndpointRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_endpoint' not in self._stubs: + self._stubs['create_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.EndpointService/CreateEndpoint', + request_serializer=endpoint_service.CreateEndpointRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_endpoint'] + + @property + def get_endpoint(self) -> Callable[ + [endpoint_service.GetEndpointRequest], + Awaitable[endpoint.Endpoint]]: + r"""Return a callable for the get endpoint method over gRPC. + + Gets an Endpoint. + + Returns: + Callable[[~.GetEndpointRequest], + Awaitable[~.Endpoint]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_endpoint' not in self._stubs: + self._stubs['get_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.EndpointService/GetEndpoint', + request_serializer=endpoint_service.GetEndpointRequest.serialize, + response_deserializer=endpoint.Endpoint.deserialize, + ) + return self._stubs['get_endpoint'] + + @property + def list_endpoints(self) -> Callable[ + [endpoint_service.ListEndpointsRequest], + Awaitable[endpoint_service.ListEndpointsResponse]]: + r"""Return a callable for the list endpoints method over gRPC. + + Lists Endpoints in a Location. + + Returns: + Callable[[~.ListEndpointsRequest], + Awaitable[~.ListEndpointsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_endpoints' not in self._stubs: + self._stubs['list_endpoints'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.EndpointService/ListEndpoints', + request_serializer=endpoint_service.ListEndpointsRequest.serialize, + response_deserializer=endpoint_service.ListEndpointsResponse.deserialize, + ) + return self._stubs['list_endpoints'] + + @property + def update_endpoint(self) -> Callable[ + [endpoint_service.UpdateEndpointRequest], + Awaitable[gca_endpoint.Endpoint]]: + r"""Return a callable for the update endpoint method over gRPC. + + Updates an Endpoint. + + Returns: + Callable[[~.UpdateEndpointRequest], + Awaitable[~.Endpoint]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'update_endpoint' not in self._stubs: + self._stubs['update_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.EndpointService/UpdateEndpoint', + request_serializer=endpoint_service.UpdateEndpointRequest.serialize, + response_deserializer=gca_endpoint.Endpoint.deserialize, + ) + return self._stubs['update_endpoint'] + + @property + def delete_endpoint(self) -> Callable[ + [endpoint_service.DeleteEndpointRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete endpoint method over gRPC. + + Deletes an Endpoint. + + Returns: + Callable[[~.DeleteEndpointRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_endpoint' not in self._stubs: + self._stubs['delete_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.EndpointService/DeleteEndpoint', + request_serializer=endpoint_service.DeleteEndpointRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_endpoint'] + + @property + def deploy_model(self) -> Callable[ + [endpoint_service.DeployModelRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the deploy model method over gRPC. + + Deploys a Model into this Endpoint, creating a + DeployedModel within it. + + Returns: + Callable[[~.DeployModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'deploy_model' not in self._stubs: + self._stubs['deploy_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.EndpointService/DeployModel', + request_serializer=endpoint_service.DeployModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['deploy_model'] + + @property + def undeploy_model(self) -> Callable[ + [endpoint_service.UndeployModelRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the undeploy model method over gRPC. + + Undeploys a Model from an Endpoint, removing a + DeployedModel from it, and freeing all resources it's + using. + + Returns: + Callable[[~.UndeployModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
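+        # Note (illustrative): like DeployModel and DeleteEndpoint, this raw
+        # stub resolves to a long-running operations_pb2.Operation; callers
+        # normally go through the client, which wraps it in an operation
+        # future that can be awaited for the final result.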
+ if 'undeploy_model' not in self._stubs: + self._stubs['undeploy_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.EndpointService/UndeployModel', + request_serializer=endpoint_service.UndeployModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['undeploy_model'] + + +__all__ = ( + 'EndpointServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/__init__.py new file mode 100644 index 0000000000..e009ebaec2 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import FeaturestoreOnlineServingServiceClient +from .async_client import FeaturestoreOnlineServingServiceAsyncClient + +__all__ = ( + 'FeaturestoreOnlineServingServiceClient', + 'FeaturestoreOnlineServingServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py new file mode 100644 index 0000000000..3f762760bc --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py @@ -0,0 +1,325 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, AsyncIterable, Awaitable, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import featurestore_online_service +from .transports.base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import FeaturestoreOnlineServingServiceGrpcAsyncIOTransport +from .client import FeaturestoreOnlineServingServiceClient + + +class FeaturestoreOnlineServingServiceAsyncClient: + """A service for serving online feature values.""" + + _client: FeaturestoreOnlineServingServiceClient + + DEFAULT_ENDPOINT = FeaturestoreOnlineServingServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = FeaturestoreOnlineServingServiceClient.DEFAULT_MTLS_ENDPOINT + + entity_type_path = staticmethod(FeaturestoreOnlineServingServiceClient.entity_type_path) + parse_entity_type_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_entity_type_path) + common_billing_account_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_organization_path) + common_project_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_project_path) + parse_common_project_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_project_path) + common_location_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_location_path) + parse_common_location_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreOnlineServingServiceAsyncClient: The constructed client. + """ + return FeaturestoreOnlineServingServiceClient.from_service_account_info.__func__(FeaturestoreOnlineServingServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreOnlineServingServiceAsyncClient: The constructed client. 
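+
+        Example:
+            A minimal, illustrative sketch (the key file path below is
+            hypothetical)::
+
+                client = FeaturestoreOnlineServingServiceAsyncClient.from_service_account_file(
+                    "/path/to/service-account.json",
+                )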
+ """ + return FeaturestoreOnlineServingServiceClient.from_service_account_file.__func__(FeaturestoreOnlineServingServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> FeaturestoreOnlineServingServiceTransport: + """Returns the transport used by the client instance. + + Returns: + FeaturestoreOnlineServingServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(FeaturestoreOnlineServingServiceClient).get_transport_class, type(FeaturestoreOnlineServingServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, FeaturestoreOnlineServingServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the featurestore online serving service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.FeaturestoreOnlineServingServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = FeaturestoreOnlineServingServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def read_feature_values(self, + request: featurestore_online_service.ReadFeatureValuesRequest = None, + *, + entity_type: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> featurestore_online_service.ReadFeatureValuesResponse: + r"""Reads Feature values of a specific entity of an + EntityType. For reading feature values of multiple + entities of an EntityType, please use + StreamingReadFeatureValues. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesRequest`): + The request object. Request message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues]. 
+            entity_type (:class:`str`):
+                Required. The resource name of the EntityType for the
+                entity being read. Value format:
+                ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``.
+                For example, for a machine learning model predicting
+                user clicks on a website, an EntityType ID could be
+                "user".
+
+                This corresponds to the ``entity_type`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse:
+                Response message for
+                [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues].
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([entity_type])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = featurestore_online_service.ReadFeatureValuesRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if entity_type is not None:
+            request.entity_type = entity_type
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.read_feature_values,
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("entity_type", request.entity_type),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def streaming_read_feature_values(self,
+            request: featurestore_online_service.StreamingReadFeatureValuesRequest = None,
+            *,
+            entity_type: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> Awaitable[AsyncIterable[featurestore_online_service.ReadFeatureValuesResponse]]:
+        r"""Reads Feature values for multiple entities. Depending
+        on their size, data for different entities may be broken
+        up across multiple responses.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.StreamingReadFeatureValuesRequest`):
+                The request object. Request message for
+                [FeaturestoreOnlineServingService.StreamingReadFeatureValues][].
+            entity_type (:class:`str`):
+                Required. The resource name of the entities' type. Value
+                format:
+                ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``.
+                For example, for a machine learning model predicting
+                user clicks on a website, an EntityType ID could be
+                "user".
+
+                This corresponds to the ``entity_type`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + AsyncIterable[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse]: + Response message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_online_service.StreamingReadFeatureValuesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.streaming_read_feature_values, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("entity_type", request.entity_type), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "FeaturestoreOnlineServingServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py new file mode 100644 index 0000000000..51db2f2c04 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py @@ -0,0 +1,512 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+from collections import OrderedDict
+from distutils import util
+import os
+import re
+from typing import Callable, Dict, Optional, Iterable, Sequence, Tuple, Type, Union
+import pkg_resources
+
+from google.api_core import client_options as client_options_lib # type: ignore
+from google.api_core import exceptions as core_exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
+from google.auth.transport import mtls # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+from google.auth.exceptions import MutualTLSChannelError # type: ignore
+from google.oauth2 import service_account # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import featurestore_online_service
+from .transports.base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc import FeaturestoreOnlineServingServiceGrpcTransport
+from .transports.grpc_asyncio import FeaturestoreOnlineServingServiceGrpcAsyncIOTransport
+
+
+class FeaturestoreOnlineServingServiceClientMeta(type):
+    """Metaclass for the FeaturestoreOnlineServingService client.
+
+    This provides class-level methods for building and retrieving
+    support objects (e.g. transport) without polluting the client instance
+    objects.
+    """
+    _transport_registry = OrderedDict() # type: Dict[str, Type[FeaturestoreOnlineServingServiceTransport]]
+    _transport_registry["grpc"] = FeaturestoreOnlineServingServiceGrpcTransport
+    _transport_registry["grpc_asyncio"] = FeaturestoreOnlineServingServiceGrpcAsyncIOTransport
+
+    def get_transport_class(cls,
+            label: str = None,
+        ) -> Type[FeaturestoreOnlineServingServiceTransport]:
+        """Returns an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class FeaturestoreOnlineServingServiceClient(metaclass=FeaturestoreOnlineServingServiceClientMeta):
+    """A service for serving online feature values."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreOnlineServingServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreOnlineServingServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> FeaturestoreOnlineServingServiceTransport: + """Returns the transport used by the client instance. + + Returns: + FeaturestoreOnlineServingServiceTransport: The transport used by the client + instance. 
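+
+        Example:
+            An illustrative check, assuming default (ADC) credentials are
+            available in the environment::
+
+                client = FeaturestoreOnlineServingServiceClient()
+                assert isinstance(client.transport, FeaturestoreOnlineServingServiceTransport)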
+        """
+        return self._transport
+
+    @staticmethod
+    def entity_type_path(project: str,location: str,featurestore: str,entity_type: str,) -> str:
+        """Returns a fully-qualified entity_type string."""
+        return "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, )
+
+    @staticmethod
+    def parse_entity_type_path(path: str) -> Dict[str,str]:
+        """Parses an entity_type path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/featurestores/(?P<featurestore>.+?)/entityTypes/(?P<entity_type>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse an organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, FeaturestoreOnlineServingServiceTransport, None] = None,
+            client_options: Optional[client_options_lib.ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the featurestore online serving service client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+ transport (Union[str, FeaturestoreOnlineServingServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, FeaturestoreOnlineServingServiceTransport): + # transport is a FeaturestoreOnlineServingServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+                )
+            self._transport = transport
+        else:
+            Transport = type(self).get_transport_class(transport)
+            self._transport = Transport(
+                credentials=credentials,
+                credentials_file=client_options.credentials_file,
+                host=api_endpoint,
+                scopes=client_options.scopes,
+                client_cert_source_for_mtls=client_cert_source_func,
+                quota_project_id=client_options.quota_project_id,
+                client_info=client_info,
+            )
+
+    def read_feature_values(self,
+            request: featurestore_online_service.ReadFeatureValuesRequest = None,
+            *,
+            entity_type: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> featurestore_online_service.ReadFeatureValuesResponse:
+        r"""Reads Feature values of a specific entity of an
+        EntityType. For reading feature values of multiple
+        entities of an EntityType, please use
+        StreamingReadFeatureValues.
+
+        Args:
+            request (google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesRequest):
+                The request object. Request message for
+                [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues].
+            entity_type (str):
+                Required. The resource name of the EntityType for the
+                entity being read. Value format:
+                ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``.
+                For example, for a machine learning model predicting
+                user clicks on a website, an EntityType ID could be
+                "user".
+
+                This corresponds to the ``entity_type`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse:
+                Response message for
+                [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues].
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([entity_type])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a featurestore_online_service.ReadFeatureValuesRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, featurestore_online_service.ReadFeatureValuesRequest):
+            request = featurestore_online_service.ReadFeatureValuesRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if entity_type is not None:
+            request.entity_type = entity_type
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.read_feature_values]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("entity_type", request.entity_type),
+            )),
+        )
+
+        # Send the request.
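+        # For callers, the equivalent flattened call is sketched below
+        # (illustrative only; the resource name is hypothetical):
+        #   response = client.read_feature_values(
+        #       entity_type="projects/p/locations/us-central1/featurestores/fs/entityTypes/user",
+        #   )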
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def streaming_read_feature_values(self,
+            request: featurestore_online_service.StreamingReadFeatureValuesRequest = None,
+            *,
+            entity_type: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> Iterable[featurestore_online_service.ReadFeatureValuesResponse]:
+        r"""Reads Feature values for multiple entities. Depending
+        on their size, data for different entities may be broken
+        up across multiple responses.
+
+        Args:
+            request (google.cloud.aiplatform_v1beta1.types.StreamingReadFeatureValuesRequest):
+                The request object. Request message for
+                [FeaturestoreOnlineServingService.StreamingReadFeatureValues][].
+            entity_type (str):
+                Required. The resource name of the entities' type. Value
+                format:
+                ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``.
+                For example, for a machine learning model predicting
+                user clicks on a website, an EntityType ID could be
+                "user".
+
+                This corresponds to the ``entity_type`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            Iterable[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse]:
+                Response message for
+                [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues].
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([entity_type])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a featurestore_online_service.StreamingReadFeatureValuesRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, featurestore_online_service.StreamingReadFeatureValuesRequest):
+            request = featurestore_online_service.StreamingReadFeatureValuesRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if entity_type is not None:
+            request.entity_type = entity_type
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.streaming_read_feature_values]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("entity_type", request.entity_type),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+ return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "FeaturestoreOnlineServingServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/__init__.py new file mode 100644 index 0000000000..d1abcd0c43 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import FeaturestoreOnlineServingServiceTransport +from .grpc import FeaturestoreOnlineServingServiceGrpcTransport +from .grpc_asyncio import FeaturestoreOnlineServingServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[FeaturestoreOnlineServingServiceTransport]] +_transport_registry['grpc'] = FeaturestoreOnlineServingServiceGrpcTransport +_transport_registry['grpc_asyncio'] = FeaturestoreOnlineServingServiceGrpcAsyncIOTransport + +__all__ = ( + 'FeaturestoreOnlineServingServiceTransport', + 'FeaturestoreOnlineServingServiceGrpcTransport', + 'FeaturestoreOnlineServingServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py new file mode 100644 index 0000000000..47ba2e7efb --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py @@ -0,0 +1,182 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import featurestore_online_service + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class FeaturestoreOnlineServingServiceTransport(abc.ABC): + """Abstract transport class for FeaturestoreOnlineServingService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. 
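+        # Resolution order implemented below (sketch): explicit ``credentials``
+        # win, then ``credentials_file`` (loaded via
+        # google.auth.load_credentials_from_file), then Application Default
+        # Credentials from google.auth.default().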
+        if credentials and credentials_file:
+            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
+
+        if credentials_file is not None:
+            credentials, _ = google.auth.load_credentials_from_file(
+                                credentials_file,
+                                **scopes_kwargs,
+                                quota_project_id=quota_project_id
+                            )
+
+        elif credentials is None:
+            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
+
+        # If the credentials are service account credentials, then always try to use a self-signed JWT.
+        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
+            credentials = credentials.with_always_use_jwt_access(True)
+
+        # Save the credentials.
+        self._credentials = credentials
+
+    # TODO(busunkim): This method is in the base transport
+    # to avoid duplicating code across the transport classes. These functions
+    # should be deleted once the minimum required version of google-auth is increased.
+
+    # TODO: Remove this function once google-auth >= 1.25.0 is required
+    @classmethod
+    def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
+        """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
+
+        scopes_kwargs = {}
+
+        if _GOOGLE_AUTH_VERSION and (
+            packaging.version.parse(_GOOGLE_AUTH_VERSION)
+            >= packaging.version.parse("1.25.0")
+        ):
+            scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
+        else:
+            scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
+
+        return scopes_kwargs
+
+    def _prep_wrapped_messages(self, client_info):
+        # Precompute the wrapped methods.
+        self._wrapped_methods = {
+            self.read_feature_values: gapic_v1.method.wrap_method(
+                self.read_feature_values,
+                default_timeout=5.0,
+                client_info=client_info,
+            ),
+            self.streaming_read_feature_values: gapic_v1.method.wrap_method(
+                self.streaming_read_feature_values,
+                default_timeout=5.0,
+                client_info=client_info,
+            ),
+        }
+
+    @property
+    def read_feature_values(self) -> Callable[
+            [featurestore_online_service.ReadFeatureValuesRequest],
+            Union[
+                featurestore_online_service.ReadFeatureValuesResponse,
+                Awaitable[featurestore_online_service.ReadFeatureValuesResponse]
+            ]]:
+        raise NotImplementedError()
+
+    @property
+    def streaming_read_feature_values(self) -> Callable[
+            [featurestore_online_service.StreamingReadFeatureValuesRequest],
+            Union[
+                featurestore_online_service.ReadFeatureValuesResponse,
+                Awaitable[featurestore_online_service.ReadFeatureValuesResponse]
+            ]]:
+        raise NotImplementedError()
+
+
+__all__ = (
+    'FeaturestoreOnlineServingServiceTransport',
+)
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py
new file mode 100644
index 0000000000..83868dcafc
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py
@@ -0,0 +1,283 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import warnings
+from typing import Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import grpc_helpers # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+import google.auth # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+
+import grpc # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import featurestore_online_service
+from .base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO
+
+
+class FeaturestoreOnlineServingServiceGrpcTransport(FeaturestoreOnlineServingServiceTransport):
+    """gRPC backend transport for FeaturestoreOnlineServingService.
+
+    A service for serving online feature values.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+    _stubs: Dict[str, Callable]
+
+    def __init__(self, *,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: str = None,
+            scopes: Sequence[str] = None,
+            channel: grpc.Channel = None,
+            api_mtls_endpoint: str = None,
+            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+            ssl_channel_credentials: grpc.ChannelCredentials = None,
+            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+            quota_project_id: Optional[str] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel.
It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service.
+        """
+        return self._grpc_channel
+
+    @property
+    def read_feature_values(self) -> Callable[
+            [featurestore_online_service.ReadFeatureValuesRequest],
+            featurestore_online_service.ReadFeatureValuesResponse]:
+        r"""Return a callable for the read feature values method over gRPC.
+
+        Reads Feature values of a specific entity of an
+        EntityType. For reading feature values of multiple
+        entities of an EntityType, please use
+        StreamingReadFeatureValues.
+
+        Returns:
+            Callable[[~.ReadFeatureValuesRequest],
+                    ~.ReadFeatureValuesResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'read_feature_values' not in self._stubs:
+            self._stubs['read_feature_values'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService/ReadFeatureValues',
+                request_serializer=featurestore_online_service.ReadFeatureValuesRequest.serialize,
+                response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize,
+            )
+        return self._stubs['read_feature_values']
+
+    @property
+    def streaming_read_feature_values(self) -> Callable[
+            [featurestore_online_service.StreamingReadFeatureValuesRequest],
+            featurestore_online_service.ReadFeatureValuesResponse]:
+        r"""Return a callable for the streaming read feature values method over gRPC.
+
+        Reads Feature values for multiple entities. Depending
+        on their size, data for different entities may be broken
+        up across multiple responses.
+
+        Returns:
+            Callable[[~.StreamingReadFeatureValuesRequest],
+                    ~.ReadFeatureValuesResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
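+        # Unlike the unary methods above, this stub is created with
+        # ``unary_stream``: one request in, a stream of
+        # ReadFeatureValuesResponse messages back. Illustrative use with a
+        # hypothetical request object:
+        #   for response in transport.streaming_read_feature_values(request):
+        #       ...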
+ if 'streaming_read_feature_values' not in self._stubs: + self._stubs['streaming_read_feature_values'] = self.grpc_channel.unary_stream( + '/google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService/StreamingReadFeatureValues', + request_serializer=featurestore_online_service.StreamingReadFeatureValuesRequest.serialize, + response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, + ) + return self._stubs['streaming_read_feature_values'] + + +__all__ = ( + 'FeaturestoreOnlineServingServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..e040cb7a76 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc_asyncio.py @@ -0,0 +1,287 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +import packaging.version + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import featurestore_online_service +from .base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import FeaturestoreOnlineServingServiceGrpcTransport + + +class FeaturestoreOnlineServingServiceGrpcAsyncIOTransport(FeaturestoreOnlineServingServiceTransport): + """gRPC AsyncIO backend transport for FeaturestoreOnlineServingService. + + A service for serving online feature values. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with ``credentials``.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    def __init__(self, *,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            channel: aio.Channel = None,
+            api_mtls_endpoint: str = None,
+            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+            ssl_channel_credentials: grpc.ChannelCredentials = None,
+            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+            quota_project_id=None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        self._grpc_channel = None
+        self._ssl_channel_credentials = ssl_channel_credentials
+        self._stubs: Dict[str, Callable] = {}
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Ignore credentials if a channel was passed.
+            credentials = False
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+        else:
+            if api_mtls_endpoint:
+                host = api_mtls_endpoint
+
+                # Create SSL credentials with client_cert_source or application
+                # default SSL credentials.
+                if client_cert_source:
+                    cert, key = client_cert_source()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+                else:
+                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+            else:
+                if client_cert_source_for_mtls and not ssl_channel_credentials:
+                    cert, key = client_cert_source_for_mtls()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+
+        # The base transport sets the host, credentials and scopes
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+        )
+
+        if not self._grpc_channel:
+            self._grpc_channel = type(self).create_channel(
+                self._host,
+                credentials=self._credentials,
+                credentials_file=credentials_file,
+                scopes=self._scopes,
+                ssl_credentials=self._ssl_channel_credentials,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        # Wrap messages. This must be done after self._grpc_channel exists
+        self._prep_wrapped_messages(client_info)
+
+    @property
+    def grpc_channel(self) -> aio.Channel:
+        """Return the channel designed to connect to this service.
+
+        This property caches on the instance; repeated calls return
+        the same channel.
+        """
+        # Return the channel from cache.
+        return self._grpc_channel
+
+    @property
+    def read_feature_values(self) -> Callable[
+            [featurestore_online_service.ReadFeatureValuesRequest],
+            Awaitable[featurestore_online_service.ReadFeatureValuesResponse]]:
+        r"""Return a callable for the read feature values method over gRPC.
+
+        Reads Feature values of a specific entity of an
+        EntityType. For reading feature values of multiple
+        entities of an EntityType, please use
+        StreamingReadFeatureValues.
+
+        Returns:
+            Callable[[~.ReadFeatureValuesRequest],
+                    Awaitable[~.ReadFeatureValuesResponse]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'read_feature_values' not in self._stubs: + self._stubs['read_feature_values'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService/ReadFeatureValues', + request_serializer=featurestore_online_service.ReadFeatureValuesRequest.serialize, + response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, + ) + return self._stubs['read_feature_values'] + + @property + def streaming_read_feature_values(self) -> Callable[ + [featurestore_online_service.StreamingReadFeatureValuesRequest], + Awaitable[featurestore_online_service.ReadFeatureValuesResponse]]: + r"""Return a callable for the streaming read feature values method over gRPC. + + Reads Feature values for multiple entities. Depending + on their size, data for different entities may be broken + up across multiple responses. + + Returns: + Callable[[~.StreamingReadFeatureValuesRequest], + Awaitable[~.ReadFeatureValuesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'streaming_read_feature_values' not in self._stubs: + self._stubs['streaming_read_feature_values'] = self.grpc_channel.unary_stream( + '/google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService/StreamingReadFeatureValues', + request_serializer=featurestore_online_service.StreamingReadFeatureValuesRequest.serialize, + response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, + ) + return self._stubs['streaming_read_feature_values'] + + +__all__ = ( + 'FeaturestoreOnlineServingServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/__init__.py new file mode 100644 index 0000000000..81716ce8fe --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import FeaturestoreServiceClient +from .async_client import FeaturestoreServiceAsyncClient + +__all__ = ( + 'FeaturestoreServiceClient', + 'FeaturestoreServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py new file mode 100644 index 0000000000..52057b0876 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py @@ -0,0 +1,2060 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.featurestore_service import pagers +from google.cloud.aiplatform_v1beta1.types import entity_type +from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1beta1.types import feature +from google.cloud.aiplatform_v1beta1.types import feature as gca_feature +from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats +from google.cloud.aiplatform_v1beta1.types import featurestore +from google.cloud.aiplatform_v1beta1.types import featurestore as gca_featurestore +from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring +from google.cloud.aiplatform_v1beta1.types import featurestore_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import FeaturestoreServiceGrpcAsyncIOTransport +from .client import FeaturestoreServiceClient + + +class FeaturestoreServiceAsyncClient: + """The service that handles CRUD and List for resources for + Featurestore. 
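+
+    An illustrative sketch of typical use (resource names below are
+    placeholders; assumes application default credentials are available)::
+
+        client = FeaturestoreServiceAsyncClient()
+        pager = await client.list_featurestores(
+            parent="projects/my-project/locations/us-central1")
+        async for featurestore in pager:
+            print(featurestore.name)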
+ """ + + _client: FeaturestoreServiceClient + + DEFAULT_ENDPOINT = FeaturestoreServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = FeaturestoreServiceClient.DEFAULT_MTLS_ENDPOINT + + entity_type_path = staticmethod(FeaturestoreServiceClient.entity_type_path) + parse_entity_type_path = staticmethod(FeaturestoreServiceClient.parse_entity_type_path) + feature_path = staticmethod(FeaturestoreServiceClient.feature_path) + parse_feature_path = staticmethod(FeaturestoreServiceClient.parse_feature_path) + featurestore_path = staticmethod(FeaturestoreServiceClient.featurestore_path) + parse_featurestore_path = staticmethod(FeaturestoreServiceClient.parse_featurestore_path) + common_billing_account_path = staticmethod(FeaturestoreServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(FeaturestoreServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(FeaturestoreServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(FeaturestoreServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(FeaturestoreServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(FeaturestoreServiceClient.parse_common_organization_path) + common_project_path = staticmethod(FeaturestoreServiceClient.common_project_path) + parse_common_project_path = staticmethod(FeaturestoreServiceClient.parse_common_project_path) + common_location_path = staticmethod(FeaturestoreServiceClient.common_location_path) + parse_common_location_path = staticmethod(FeaturestoreServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreServiceAsyncClient: The constructed client. + """ + return FeaturestoreServiceClient.from_service_account_info.__func__(FeaturestoreServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreServiceAsyncClient: The constructed client. + """ + return FeaturestoreServiceClient.from_service_account_file.__func__(FeaturestoreServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> FeaturestoreServiceTransport: + """Returns the transport used by the client instance. + + Returns: + FeaturestoreServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(FeaturestoreServiceClient).get_transport_class, type(FeaturestoreServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, FeaturestoreServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the featurestore service client. 
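+
+        For example, a minimal construction (illustrative only; the endpoint
+        shown is the service's public default, passed merely to demonstrate
+        ``client_options``)::
+
+            from google.api_core.client_options import ClientOptions
+
+            client = FeaturestoreServiceAsyncClient(
+                client_options=ClientOptions(
+                    api_endpoint="aiplatform.googleapis.com"))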
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, ~.FeaturestoreServiceTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (ClientOptions): Custom options for the client. It
+                won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+                set, no client certificate will be used.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+        """
+        self._client = FeaturestoreServiceClient(
+            credentials=credentials,
+            transport=transport,
+            client_options=client_options,
+            client_info=client_info,
+
+        )
+
+    async def create_featurestore(self,
+            request: featurestore_service.CreateFeaturestoreRequest = None,
+            *,
+            parent: str = None,
+            featurestore: gca_featurestore.Featurestore = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> operation_async.AsyncOperation:
+        r"""Creates a new Featurestore in a given project and
+        location.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.CreateFeaturestoreRequest`):
+                The request object. Request message for
+                [FeaturestoreService.CreateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeaturestore].
+            parent (:class:`str`):
+                Required. The resource name of the Location to create
+                Featurestores. Format:
+                ``projects/{project}/locations/{location}``
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            featurestore (:class:`google.cloud.aiplatform_v1beta1.types.Featurestore`):
+                Required. The Featurestore to create.
+                This corresponds to the ``featurestore`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation_async.AsyncOperation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`google.cloud.aiplatform_v1beta1.types.Featurestore`
+                Featurestore configuration information on how the
+                Featurestore is configured.
+
+        """
+        # Create or coerce a protobuf request object.
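+        # (Illustrative call shapes, not generated logic; resource names are
+        #  placeholders. Callers either pass a fully formed
+        #  CreateFeaturestoreRequest as `request`, or the flattened `parent`
+        #  and `featurestore` arguments, never both; the check below
+        #  enforces this.)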
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, featurestore]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.CreateFeaturestoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if featurestore is not None: + request.featurestore = featurestore + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_featurestore, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_featurestore.Featurestore, + metadata_type=featurestore_service.CreateFeaturestoreOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_featurestore(self, + request: featurestore_service.GetFeaturestoreRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> featurestore.Featurestore: + r"""Gets details of a single Featurestore. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetFeaturestoreRequest`): + The request object. Request message for + [FeaturestoreService.GetFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeaturestore]. + name (:class:`str`): + Required. The name of the + Featurestore resource. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Featurestore: + Featurestore configuration + information on how the Featurestore is + configured. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.GetFeaturestoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_featurestore, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_featurestores(self, + request: featurestore_service.ListFeaturestoresRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListFeaturestoresAsyncPager: + r"""Lists Featurestores in a given project and location. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest`): + The request object. Request message for + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]. + parent (:class:`str`): + Required. The resource name of the Location to list + Featurestores. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturestoresAsyncPager: + Response message for + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.ListFeaturestoresRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_featurestores, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListFeaturestoresAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def update_featurestore(self, + request: featurestore_service.UpdateFeaturestoreRequest = None, + *, + featurestore: gca_featurestore.Featurestore = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates the parameters of a single Featurestore. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateFeaturestoreRequest`): + The request object. Request message for + [FeaturestoreService.UpdateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeaturestore]. + featurestore (:class:`google.cloud.aiplatform_v1beta1.types.Featurestore`): + Required. The Featurestore's ``name`` field is used to + identify the Featurestore to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``featurestore`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Field mask is used to specify the fields to be + overwritten in the Featurestore resource by the update. + The fields specified in the update_mask are relative to + the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then only the non-empty fields present in + the request will be overwritten. Set the update_mask to + ``*`` to override all fields. + + Updatable fields: + + - ``labels`` + - ``online_serving_config.fixed_node_count`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` + Featurestore configuration information on how the + Featurestore is configured. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([featurestore, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.UpdateFeaturestoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if featurestore is not None: + request.featurestore = featurestore + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_featurestore, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
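+        # (Illustrative note: api_core's routing_header serializes these pairs
+        #  into the x-goog-request-params request metadata entry, e.g.
+        #    x-goog-request-params: featurestore.name=<resource name>
+        #  where <resource name> is a placeholder for the actual value.)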
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("featurestore.name", request.featurestore.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_featurestore.Featurestore, + metadata_type=featurestore_service.UpdateFeaturestoreOperationMetadata, + ) + + # Done; return the response. + return response + + async def delete_featurestore(self, + request: featurestore_service.DeleteFeaturestoreRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a single Featurestore. The Featurestore must not contain + any EntityTypes or ``force`` must be set to true for the request + to succeed. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteFeaturestoreRequest`): + The request object. Request message for + [FeaturestoreService.DeleteFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeaturestore]. + name (:class:`str`): + Required. The name of the Featurestore to be deleted. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.DeleteFeaturestoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_featurestore, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def create_entity_type(self, + request: featurestore_service.CreateEntityTypeRequest = None, + *, + parent: str = None, + entity_type: gca_entity_type.EntityType = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new EntityType in a given Featurestore. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateEntityTypeRequest`): + The request object. Request message for + [FeaturestoreService.CreateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateEntityType]. + parent (:class:`str`): + Required. The resource name of the Featurestore to + create EntityTypes. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + entity_type (:class:`google.cloud.aiplatform_v1beta1.types.EntityType`): + The EntityType to create. + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.EntityType` An entity type is a type of object in a system that needs to be modeled and + have stored information about. For example, driver is + an entity type, and driver0 is an instance of an + entity type driver. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, entity_type]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.CreateEntityTypeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_entity_type, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
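+        # (Illustrative usage, not generated code; arguments are elided:
+        #    operation = await client.create_entity_type(...)
+        #    entity_type = await operation.result()
+        #  The future resolves to the created EntityType.)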
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_entity_type.EntityType, + metadata_type=featurestore_service.CreateEntityTypeOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_entity_type(self, + request: featurestore_service.GetEntityTypeRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> entity_type.EntityType: + r"""Gets details of a single EntityType. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetEntityTypeRequest`): + The request object. Request message for + [FeaturestoreService.GetEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetEntityType]. + name (:class:`str`): + Required. The name of the EntityType resource. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.EntityType: + An entity type is a type of object in + a system that needs to be modeled and + have stored information about. For + example, driver is an entity type, and + driver0 is an instance of an entity type + driver. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.GetEntityTypeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_entity_type, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_entity_types(self, + request: featurestore_service.ListEntityTypesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListEntityTypesAsyncPager: + r"""Lists EntityTypes in a given Featurestore. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest`): + The request object. Request message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. + parent (:class:`str`): + Required. The resource name of the Featurestore to list + EntityTypes. 
Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListEntityTypesAsyncPager: + Response message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.ListEntityTypesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_entity_types, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListEntityTypesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_entity_type(self, + request: featurestore_service.UpdateEntityTypeRequest = None, + *, + entity_type: gca_entity_type.EntityType = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_entity_type.EntityType: + r"""Updates the parameters of a single EntityType. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateEntityTypeRequest`): + The request object. Request message for + [FeaturestoreService.UpdateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateEntityType]. + entity_type (:class:`google.cloud.aiplatform_v1beta1.types.EntityType`): + Required. The EntityType's ``name`` field is used to + identify the EntityType to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Field mask is used to specify the fields to be + overwritten in the EntityType resource by the update. 
+
+                The fields specified in the update_mask are relative to
+                the resource, not the full request. A field will be
+                overwritten if it is in the mask. If the user does not
+                provide a mask then only the non-empty fields present in
+                the request will be overwritten. Set the update_mask to
+                ``*`` to override all fields.
+
+                Updatable fields:
+
+                -  ``description``
+                -  ``labels``
+                -  ``monitoring_config.snapshot_analysis.disabled``
+                -  ``monitoring_config.snapshot_analysis.monitoring_interval``
+
+                This corresponds to the ``update_mask`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.types.EntityType:
+                An entity type is a type of object in
+                a system that needs to be modeled and
+                have stored information about. For
+                example, driver is an entity type, and
+                driver0 is an instance of an entity type
+                driver.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([entity_type, update_mask])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = featurestore_service.UpdateEntityTypeRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if entity_type is not None:
+            request.entity_type = entity_type
+        if update_mask is not None:
+            request.update_mask = update_mask
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.update_entity_type,
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("entity_type.name", request.entity_type.name),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def delete_entity_type(self,
+            request: featurestore_service.DeleteEntityTypeRequest = None,
+            *,
+            name: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> operation_async.AsyncOperation:
+        r"""Deletes a single EntityType. The EntityType must not have any
+        Features or ``force`` must be set to true for the request to
+        succeed.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteEntityTypeRequest`):
+                The request object. Request message for
+                [FeaturestoreService.DeleteEntityType][].
+            name (:class:`str`):
+                Required. The name of the EntityType to be deleted.
+                Format:
+                ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.DeleteEntityTypeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_entity_type, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def create_feature(self, + request: featurestore_service.CreateFeatureRequest = None, + *, + parent: str = None, + feature: gca_feature.Feature = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new Feature in a given EntityType. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest`): + The request object. Request message for + [FeaturestoreService.CreateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeature]. + parent (:class:`str`): + Required. The resource name of the EntityType to create + a Feature. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + feature (:class:`google.cloud.aiplatform_v1beta1.types.Feature`): + Required. The Feature to create. + This corresponds to the ``feature`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Feature` Feature Metadata information that describes an attribute of an entity type. + For example, apple is an entity type, and color is a + feature that describes apple. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, feature]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.CreateFeatureRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if feature is not None: + request.feature = feature + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_feature, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_feature.Feature, + metadata_type=featurestore_service.CreateFeatureOperationMetadata, + ) + + # Done; return the response. + return response + + async def batch_create_features(self, + request: featurestore_service.BatchCreateFeaturesRequest = None, + *, + parent: str = None, + requests: Sequence[featurestore_service.CreateFeatureRequest] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a batch of Features in a given EntityType. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesRequest`): + The request object. Request message for + [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures]. + parent (:class:`str`): + Required. The resource name of the EntityType to create + the batch of Features under. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + requests (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest]`): + Required. The request message specifying the Features to + create. All Features must be created under the same + parent EntityType. The ``parent`` field in each child + request message can be omitted. If ``parent`` is set in + a child request, then the value must match the + ``parent`` value in this request message. 
+ + This corresponds to the ``requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesResponse` + Response message for + [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, requests]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.BatchCreateFeaturesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if requests: + request.requests.extend(requests) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_create_features, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + featurestore_service.BatchCreateFeaturesResponse, + metadata_type=featurestore_service.BatchCreateFeaturesOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_feature(self, + request: featurestore_service.GetFeatureRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> feature.Feature: + r"""Gets details of a single Feature. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetFeatureRequest`): + The request object. Request message for + [FeaturestoreService.GetFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeature]. + name (:class:`str`): + Required. The name of the Feature resource. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata.
+ + Returns: + google.cloud.aiplatform_v1beta1.types.Feature: + Feature Metadata information that + describes an attribute of an entity + type. For example, apple is an entity + type, and color is a feature that + describes apple. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.GetFeatureRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_feature, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_features(self, + request: featurestore_service.ListFeaturesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListFeaturesAsyncPager: + r"""Lists Features in a given EntityType. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest`): + The request object. Request message for + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]. + parent (:class:`str`): + Required. The resource name of the EntityType to list + Features from. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturesAsyncPager: + Response message for + [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.ListFeaturesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these.
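+        # Illustrative usage sketch, not generated code (hypothetical names;
+        # assumes an instantiated FeaturestoreServiceAsyncClient ``client``):
+        #
+        #   pager = await client.list_features(parent=entity_type_name)
+        #   async for feature in pager:
+        #       print(feature.name)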
+ if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_features, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListFeaturesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_feature(self, + request: featurestore_service.UpdateFeatureRequest = None, + *, + feature: gca_feature.Feature = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_feature.Feature: + r"""Updates the parameters of a single Feature. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateFeatureRequest`): + The request object. Request message for + [FeaturestoreService.UpdateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeature]. + feature (:class:`google.cloud.aiplatform_v1beta1.types.Feature`): + Required. The Feature's ``name`` field is used to + identify the Feature to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + + This corresponds to the ``feature`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Field mask is used to specify the fields to be + overwritten in the Features resource by the update. The + fields specified in the update_mask are relative to the + resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then only the non-empty fields present in + the request will be overwritten. Set the update_mask to + ``*`` to override all fields. + + Updatable fields: + + - ``description`` + - ``labels`` + - ``monitoring_config.snapshot_analysis.disabled`` + - ``monitoring_config.snapshot_analysis.monitoring_interval`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Feature: + Feature Metadata information that + describes an attribute of an entity + type. For example, apple is an entity + type, and color is a feature that + describes apple. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
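+        # Illustrative usage sketch, not generated code (hypothetical names;
+        # assumes ``client`` and an existing ``my_feature``): update only the
+        # description, leaving every other field untouched:
+        #
+        #   from google.protobuf import field_mask_pb2
+        #   updated = await client.update_feature(
+        #       feature=my_feature,  # my_feature.name identifies the resource
+        #       update_mask=field_mask_pb2.FieldMask(paths=["description"]),
+        #   )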
+ has_flattened_params = any([feature, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.UpdateFeatureRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if feature is not None: + request.feature = feature + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_feature, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("feature.name", request.feature.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_feature(self, + request: featurestore_service.DeleteFeatureRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a single Feature. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteFeatureRequest`): + The request object. Request message for + [FeaturestoreService.DeleteFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeature]. + name (:class:`str`): + Required. The name of the Feature to be deleted. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.DeleteFeatureRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling.
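+        # Illustrative usage sketch, not generated code (hypothetical resource
+        # names; assumes ``client``):
+        #
+        #   lro = await client.delete_feature(
+        #       name=client.feature_path(
+        #           "my-project", "us-central1", "my-store", "users", "age"))
+        #   await lro.result()  # resolves to empty_pb2.Empty on success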
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_feature, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def import_feature_values(self, + request: featurestore_service.ImportFeatureValuesRequest = None, + *, + entity_type: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Imports Feature values into the Featurestore from a + source storage location. + The progress of the import is tracked by the returned + operation. The imported features are guaranteed to be + visible to subsequent read operations after the + operation is marked as successfully done. + If an import operation fails, the Feature values + returned from reads and exports may be inconsistent. If + consistency is required, the caller must retry the same + import request again and wait until the new operation + returned is marked as successfully done. + There are also scenarios where the caller can cause + inconsistency. + - Source data for import contains multiple distinct + Feature values for the same entity ID and timestamp. + - Source is modified during an import. This includes + adding, updating, or removing source data and/or + metadata. Examples of updating metadata include but are + not limited to changing storage location, storage class, + or retention policy. + - The online serving cluster is under-provisioned. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesRequest`): + The request object. Request message for + [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. + entity_type (:class:`str`): + Required. The resource name of the EntityType grouping + the Features for which values are being imported. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesResponse` + Response message for + [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request.
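+        # Illustrative note (hypothetical names): ``request`` and the
+        # flattened ``entity_type`` argument are mutually exclusive:
+        #
+        #   await client.import_feature_values(entity_type=entity_type_name)
+        #   await client.import_feature_values(request=full_request)
+        #
+        # Passing both raises the ValueError below.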
+ has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.ImportFeatureValuesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.import_feature_values, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("entity_type", request.entity_type), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + featurestore_service.ImportFeatureValuesResponse, + metadata_type=featurestore_service.ImportFeatureValuesOperationMetadata, + ) + + # Done; return the response. + return response + + async def batch_read_feature_values(self, + request: featurestore_service.BatchReadFeatureValuesRequest = None, + *, + featurestore: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Batch reads Feature values from a Featurestore. + This API enables batch reading Feature values, where + each read instance in the batch may read Feature values + of entities from one or more EntityTypes. Point-in-time + correctness is guaranteed for Feature values of each + read instance as of each instance's read timestamp. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest`): + The request object. Request message for + [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. + featurestore (:class:`str`): + Required. The resource name of the Featurestore from + which to query Feature values. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``featurestore`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesResponse` + Response message for + [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request.
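+        # Illustrative note (hypothetical names; the full read configuration
+        # must be supplied via ``request``): the returned operation resolves
+        # to a BatchReadFeatureValuesResponse once the batch read completes:
+        #
+        #   lro = await client.batch_read_feature_values(request=batch_request)
+        #   response = await lro.result()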
+ has_flattened_params = any([featurestore]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.BatchReadFeatureValuesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if featurestore is not None: + request.featurestore = featurestore + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_read_feature_values, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("featurestore", request.featurestore), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + featurestore_service.BatchReadFeatureValuesResponse, + metadata_type=featurestore_service.BatchReadFeatureValuesOperationMetadata, + ) + + # Done; return the response. + return response + + async def export_feature_values(self, + request: featurestore_service.ExportFeatureValuesRequest = None, + *, + entity_type: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Exports Feature values from all the entities of a + target EntityType. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest`): + The request object. Request message for + [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. + entity_type (:class:`str`): + Required. The resource name of the EntityType from which + to export Feature values. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesResponse` + Response message for + [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.ExportFeatureValuesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.export_feature_values, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("entity_type", request.entity_type), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + featurestore_service.ExportFeatureValuesResponse, + metadata_type=featurestore_service.ExportFeatureValuesOperationMetadata, + ) + + # Done; return the response. + return response + + async def search_features(self, + request: featurestore_service.SearchFeaturesRequest = None, + *, + location: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.SearchFeaturesAsyncPager: + r"""Searches Features matching a query in a given + project. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest`): + The request object. Request message for + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. + location (:class:`str`): + Required. The resource name of the Location to search + Features. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``location`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.SearchFeaturesAsyncPager: + Response message for + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([location]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = featurestore_service.SearchFeaturesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if location is not None: + request.location = location + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
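+        # Illustrative usage sketch, not generated code (hypothetical
+        # project and region; assumes ``client``):
+        #
+        #   pager = await client.search_features(
+        #       location="projects/my-project/locations/us-central1")
+        #   async for feature in pager:
+        #       ...  # Features across all Featurestores in the Location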
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.search_features, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("location", request.location), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.SearchFeaturesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "FeaturestoreServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py new file mode 100644 index 0000000000..7fdfc4238d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py @@ -0,0 +1,2265 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.featurestore_service import pagers +from google.cloud.aiplatform_v1beta1.types import entity_type +from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1beta1.types import feature +from google.cloud.aiplatform_v1beta1.types import feature as gca_feature +from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats +from google.cloud.aiplatform_v1beta1.types import featurestore +from google.cloud.aiplatform_v1beta1.types import featurestore as gca_featurestore +from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring +from google.cloud.aiplatform_v1beta1.types import featurestore_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import FeaturestoreServiceGrpcTransport +from .transports.grpc_asyncio import FeaturestoreServiceGrpcAsyncIOTransport + + +class FeaturestoreServiceClientMeta(type): + """Metaclass for the FeaturestoreService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[FeaturestoreServiceTransport]] + _transport_registry["grpc"] = FeaturestoreServiceGrpcTransport + _transport_registry["grpc_asyncio"] = FeaturestoreServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[FeaturestoreServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class FeaturestoreServiceClient(metaclass=FeaturestoreServiceClientMeta): + """The service that handles CRUD and List for resources for + Featurestore. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. 
+ + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FeaturestoreServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> FeaturestoreServiceTransport: + """Returns the transport used by the client instance. + + Returns: + FeaturestoreServiceTransport: The transport used by the client + instance.
+ """ + return self._transport + + @staticmethod + def entity_type_path(project: str,location: str,featurestore: str,entity_type: str,) -> str: + """Returns a fully-qualified entity_type string.""" + return "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, ) + + @staticmethod + def parse_entity_type_path(path: str) -> Dict[str,str]: + """Parses a entity_type path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)/entityTypes/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def feature_path(project: str,location: str,featurestore: str,entity_type: str,feature: str,) -> str: + """Returns a fully-qualified feature string.""" + return "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, feature=feature, ) + + @staticmethod + def parse_feature_path(path: str) -> Dict[str,str]: + """Parses a feature path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)/entityTypes/(?P.+?)/features/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def featurestore_path(project: str,location: str,featurestore: str,) -> str: + """Returns a fully-qualified featurestore string.""" + return "projects/{project}/locations/{location}/featurestores/{featurestore}".format(project=project, location=location, featurestore=featurestore, ) + + @staticmethod + def parse_featurestore_path(path: str) -> Dict[str,str]: + """Parses a featurestore path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = 
re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, FeaturestoreServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the featurestore service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, FeaturestoreServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. 
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, FeaturestoreServiceTransport): + # transport is a FeaturestoreServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_featurestore(self, + request: featurestore_service.CreateFeaturestoreRequest = None, + *, + parent: str = None, + featurestore: gca_featurestore.Featurestore = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a new Featurestore in a given project and + location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateFeaturestoreRequest): + The request object. Request message for + [FeaturestoreService.CreateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeaturestore]. + parent (str): + Required. The resource name of the Location to create + Featurestores. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + featurestore (google.cloud.aiplatform_v1beta1.types.Featurestore): + Required. The Featurestore to create. + This corresponds to the ``featurestore`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` + Featurestore configuration information on how the + Featurestore is configured. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, featurestore]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.CreateFeaturestoreRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.CreateFeaturestoreRequest): + request = featurestore_service.CreateFeaturestoreRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if featurestore is not None: + request.featurestore = featurestore + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_featurestore] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_featurestore.Featurestore, + metadata_type=featurestore_service.CreateFeaturestoreOperationMetadata, + ) + + # Done; return the response. + return response + + def get_featurestore(self, + request: featurestore_service.GetFeaturestoreRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> featurestore.Featurestore: + r"""Gets details of a single Featurestore. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetFeaturestoreRequest): + The request object. Request message for + [FeaturestoreService.GetFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeaturestore]. + name (str): + Required. The name of the + Featurestore resource. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Featurestore: + Featurestore configuration + information on how the Featurestore is + configured. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.GetFeaturestoreRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, featurestore_service.GetFeaturestoreRequest): + request = featurestore_service.GetFeaturestoreRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_featurestore] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_featurestores(self, + request: featurestore_service.ListFeaturestoresRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListFeaturestoresPager: + r"""Lists Featurestores in a given project and location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest): + The request object. Request message for + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]. + parent (str): + Required. The resource name of the Location to list + Featurestores. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturestoresPager: + Response message for + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.ListFeaturestoresRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.ListFeaturestoresRequest): + request = featurestore_service.ListFeaturestoresRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_featurestores] + + # Certain fields should be provided within the metadata header; + # add these here. 
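+        # Illustrative usage sketch, not generated code (hypothetical names;
+        # assumes an instantiated FeaturestoreServiceClient ``client``): the
+        # synchronous pager returned by this method supports plain iteration:
+        #
+        #   for fs in client.list_featurestores(parent=location_name):
+        #       print(fs.name)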
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListFeaturestoresPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_featurestore(self, + request: featurestore_service.UpdateFeaturestoreRequest = None, + *, + featurestore: gca_featurestore.Featurestore = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Updates the parameters of a single Featurestore. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UpdateFeaturestoreRequest): + The request object. Request message for + [FeaturestoreService.UpdateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeaturestore]. + featurestore (google.cloud.aiplatform_v1beta1.types.Featurestore): + Required. The Featurestore's ``name`` field is used to + identify the Featurestore to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``featurestore`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Field mask is used to specify the fields to be + overwritten in the Featurestore resource by the update. + The fields specified in the update_mask are relative to + the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then only the non-empty fields present in + the request will be overwritten. Set the update_mask to + ``*`` to override all fields. + + Updatable fields: + + - ``labels`` + - ``online_serving_config.fixed_node_count`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` + Featurestore configuration information on how the + Featurestore is configured. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([featurestore, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.UpdateFeaturestoreRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, featurestore_service.UpdateFeaturestoreRequest): + request = featurestore_service.UpdateFeaturestoreRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if featurestore is not None: + request.featurestore = featurestore + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_featurestore] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("featurestore.name", request.featurestore.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_featurestore.Featurestore, + metadata_type=featurestore_service.UpdateFeaturestoreOperationMetadata, + ) + + # Done; return the response. + return response + + def delete_featurestore(self, + request: featurestore_service.DeleteFeaturestoreRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a single Featurestore. The Featurestore must not contain + any EntityTypes or ``force`` must be set to true for the request + to succeed. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteFeaturestoreRequest): + The request object. Request message for + [FeaturestoreService.DeleteFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeaturestore]. + name (str): + Required. The name of the Featurestore to be deleted. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.DeleteFeaturestoreRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.DeleteFeaturestoreRequest): + request = featurestore_service.DeleteFeaturestoreRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_featurestore] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def create_entity_type(self, + request: featurestore_service.CreateEntityTypeRequest = None, + *, + parent: str = None, + entity_type: gca_entity_type.EntityType = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a new EntityType in a given Featurestore. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateEntityTypeRequest): + The request object. Request message for + [FeaturestoreService.CreateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateEntityType]. + parent (str): + Required. The resource name of the Featurestore to + create EntityTypes. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + entity_type (google.cloud.aiplatform_v1beta1.types.EntityType): + The EntityType to create. + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.EntityType` An entity type is a type of object in a system that needs to be modeled and + have stored information about. For example, driver is + an entity type, and driver0 is an instance of an + entity type driver. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, entity_type]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.CreateEntityTypeRequest. 
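Delete methods such as `delete_featurestore` above return a long-running operation whose result type is `google.protobuf.empty_pb2.Empty`, so success is signaled by the operation completing rather than by a payload. A sketch under the same assumptions (hypothetical `client` and resource names); the `force` field comes from the v1beta1 request message:

```python
from google.cloud.aiplatform_v1beta1.types import featurestore_service

name = "projects/my-project/locations/us-central1/featurestores/my_featurestore"

operation = client.delete_featurestore(name=name)
operation.result()  # returns Empty on success, raises on failure

# A Featurestore that still contains EntityTypes can only be deleted by
# setting force=True, which is not a flattened argument:
request = featurestore_service.DeleteFeaturestoreRequest(name=name, force=True)
client.delete_featurestore(request=request).result()
```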
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.CreateEntityTypeRequest): + request = featurestore_service.CreateEntityTypeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_entity_type] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_entity_type.EntityType, + metadata_type=featurestore_service.CreateEntityTypeOperationMetadata, + ) + + # Done; return the response. + return response + + def get_entity_type(self, + request: featurestore_service.GetEntityTypeRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> entity_type.EntityType: + r"""Gets details of a single EntityType. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetEntityTypeRequest): + The request object. Request message for + [FeaturestoreService.GetEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetEntityType]. + name (str): + Required. The name of the EntityType resource. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.EntityType: + An entity type is a type of object in + a system that needs to be modeled and + have stored information about. For + example, driver is an entity type, and + driver0 is an instance of an entity type + driver. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.GetEntityTypeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.GetEntityTypeRequest): + request = featurestore_service.GetEntityTypeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
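The `retry` and `timeout` parameters on each method accept standard `google.api_core` values, so a caller can override the defaults baked into `_wrapped_methods` on a per-call basis. A sketch for `get_entity_type`, with a hypothetical resource name:

```python
from google.api_core import retry as retries

entity_type = client.get_entity_type(
    name=(
        "projects/my-project/locations/us-central1/"
        "featurestores/my_featurestore/entityTypes/users"
    ),
    retry=retries.Retry(deadline=60.0),  # stop retrying after 60 seconds
    timeout=30.0,                        # RPC timeout in seconds
)
print(entity_type.name)
```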
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_entity_type] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_entity_types(self, + request: featurestore_service.ListEntityTypesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListEntityTypesPager: + r"""Lists EntityTypes in a given Featurestore. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest): + The request object. Request message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. + parent (str): + Required. The resource name of the Featurestore to list + EntityTypes. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListEntityTypesPager: + Response message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.ListEntityTypesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.ListEntityTypesRequest): + request = featurestore_service.ListEntityTypesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_entity_types] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListEntityTypesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_entity_type(self, + request: featurestore_service.UpdateEntityTypeRequest = None, + *, + entity_type: gca_entity_type.EntityType = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_entity_type.EntityType: + r"""Updates the parameters of a single EntityType. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UpdateEntityTypeRequest): + The request object. Request message for + [FeaturestoreService.UpdateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateEntityType]. + entity_type (google.cloud.aiplatform_v1beta1.types.EntityType): + Required. The EntityType's ``name`` field is used to + identify the EntityType to be updated. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Field mask is used to specify the fields to be + overwritten in the EntityType resource by the update. + The fields specified in the update_mask are relative to + the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then only the non-empty fields present in + the request will be overwritten. Set the update_mask to + ``*`` to override all fields. + + Updatable fields: + + - ``description`` + - ``labels`` + - ``monitoring_config.snapshot_analysis.disabled`` + - ``monitoring_config.snapshot_analysis.monitoring_interval`` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.EntityType: + An entity type is a type of object in + a system that needs to be modeled and + have stored information about. For + example, driver is an entity type, and + driver0 is an instance of an entity type + driver. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.UpdateEntityTypeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.UpdateEntityTypeRequest): + request = featurestore_service.UpdateEntityTypeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
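The pager returned by `list_entity_types` hides pagination: iterating it yields `EntityType` messages and issues follow-up `ListEntityTypes` calls as each `next_page_token` is consumed, while the `pages` property exposes the raw responses. A sketch with a hypothetical parent:

```python
parent = "projects/my-project/locations/us-central1/featurestores/my_featurestore"

# Item-level iteration; additional pages are fetched lazily.
for entity_type in client.list_entity_types(parent=parent):
    print(entity_type.name)

# Page-level iteration, e.g. to inspect each raw ListEntityTypesResponse.
pager = client.list_entity_types(parent=parent)
for page in pager.pages:
    print(len(page.entity_types), page.next_page_token)
```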
+ if entity_type is not None:
+ request.entity_type = entity_type
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.update_entity_type]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("entity_type.name", request.entity_type.name),
+ )),
+ )
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def delete_entity_type(self,
+ request: featurestore_service.DeleteEntityTypeRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gac_operation.Operation:
+ r"""Deletes a single EntityType. The EntityType must not have any
+ Features or ``force`` must be set to true for the request to
+ succeed.
+
+ Args:
+ request (google.cloud.aiplatform_v1beta1.types.DeleteEntityTypeRequest):
+ The request object. Request message for
+ [FeaturestoreService.DeleteEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteEntityType].
+ name (str):
+ Required. The name of the EntityType to be deleted.
+ Format:
+ ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
+ empty messages in your APIs. A typical example is to
+ use it as the request or the response type of an API
+ method. For instance:
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns
+ (google.protobuf.Empty);
+
+ }
+
+ The JSON representation for Empty is empty JSON
+ object {}.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a featurestore_service.DeleteEntityTypeRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, featurestore_service.DeleteEntityTypeRequest):
+ request = featurestore_service.DeleteEntityTypeRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.

+ rpc = self._transport._wrapped_methods[self._transport.delete_entity_type] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def create_feature(self, + request: featurestore_service.CreateFeatureRequest = None, + *, + parent: str = None, + feature: gca_feature.Feature = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a new Feature in a given EntityType. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest): + The request object. Request message for + [FeaturestoreService.CreateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeature]. + parent (str): + Required. The resource name of the EntityType to create + a Feature. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + feature (google.cloud.aiplatform_v1beta1.types.Feature): + Required. The Feature to create. + This corresponds to the ``feature`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Feature` Feature Metadata information that describes an attribute of an entity type. + For example, apple is an entity type, and color is a + feature that describes apple. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, feature]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.CreateFeatureRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.CreateFeatureRequest): + request = featurestore_service.CreateFeatureRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if feature is not None: + request.feature = feature + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
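The `gapic_v1.routing_header.to_grpc_metadata` helper used throughout these methods folds the routing parameters into the single `x-goog-request-params` metadata entry that lets Google's frontend route a call by resource. A standalone sketch; the exact percent-encoding of the value depends on the installed `google-api-core` version:

```python
from google.api_core import gapic_v1

# One (key, value) pair, ready to append to the gRPC metadata sequence.
header = gapic_v1.routing_header.to_grpc_metadata(
    (("parent", "projects/my-project/locations/us-central1"),)
)
print(header)
# ('x-goog-request-params', 'parent=projects%2Fmy-project%2Flocations%2Fus-central1')
```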
+ rpc = self._transport._wrapped_methods[self._transport.create_feature] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_feature.Feature, + metadata_type=featurestore_service.CreateFeatureOperationMetadata, + ) + + # Done; return the response. + return response + + def batch_create_features(self, + request: featurestore_service.BatchCreateFeaturesRequest = None, + *, + parent: str = None, + requests: Sequence[featurestore_service.CreateFeatureRequest] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a batch of Features in a given EntityType. + + Args: + request (google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesRequest): + The request object. Request message for + [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures]. + parent (str): + Required. The resource name of the EntityType to create + the batch of Features under. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + requests (Sequence[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest]): + Required. The request message specifying the Features to + create. All Features must be created under the same + parent EntityType. The ``parent`` field in each child + request message can be omitted. If ``parent`` is set in + a child request, then the value must match the + ``parent`` value in this request message. + + This corresponds to the ``requests`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesResponse` + Response message for + [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, requests]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.BatchCreateFeaturesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
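Because each child `CreateFeatureRequest` inherits the batch's ``parent``, only the per-Feature fields need to be set on the children. A sketch using the v1beta1 message shapes; resource names and feature IDs are hypothetical:

```python
from google.cloud.aiplatform_v1beta1 import types

parent = (
    "projects/my-project/locations/us-central1/"
    "featurestores/my_featurestore/entityTypes/users"
)

operation = client.batch_create_features(
    parent=parent,
    requests=[
        # `parent` is omitted on each child and inherited from this call.
        types.CreateFeatureRequest(
            feature=types.Feature(value_type=types.Feature.ValueType.DOUBLE),
            feature_id="age",
        ),
        types.CreateFeatureRequest(
            feature=types.Feature(value_type=types.Feature.ValueType.STRING),
            feature_id="country",
        ),
    ],
)
response = operation.result()  # BatchCreateFeaturesResponse
```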
+ if not isinstance(request, featurestore_service.BatchCreateFeaturesRequest):
+ request = featurestore_service.BatchCreateFeaturesRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if requests is not None:
+ request.requests = requests
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.batch_create_features]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("parent", request.parent),
+ )),
+ )
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = gac_operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ featurestore_service.BatchCreateFeaturesResponse,
+ metadata_type=featurestore_service.BatchCreateFeaturesOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def get_feature(self,
+ request: featurestore_service.GetFeatureRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> feature.Feature:
+ r"""Gets details of a single Feature.
+
+ Args:
+ request (google.cloud.aiplatform_v1beta1.types.GetFeatureRequest):
+ The request object. Request message for
+ [FeaturestoreService.GetFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeature].
+ name (str):
+ Required. The name of the Feature resource. Format:
+ ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1beta1.types.Feature:
+ Feature Metadata information that
+ describes an attribute of an entity
+ type. For example, apple is an entity
+ type, and color is a feature that
+ describes apple.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a featurestore_service.GetFeatureRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, featurestore_service.GetFeatureRequest):
+ request = featurestore_service.GetFeatureRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.get_feature]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("name", request.name),
+ )),
+ )
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def list_features(self,
+ request: featurestore_service.ListFeaturesRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListFeaturesPager:
+ r"""Lists Features in a given EntityType.
+
+ Args:
+ request (google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest):
+ The request object. Request message for
+ [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures].
+ parent (str):
+ Required. The resource name of the EntityType to list
+ Features. Format:
+ ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturesPager:
+ Response message for
+ [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures].
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a featurestore_service.ListFeaturesRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, featurestore_service.ListFeaturesRequest):
+ request = featurestore_service.ListFeaturesRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.list_features]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("parent", request.parent),
+ )),
+ )
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__iter__` convenience method.
+ response = pagers.ListFeaturesPager(
+ method=rpc,
+ request=request,
+ response=response,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def update_feature(self,
+ request: featurestore_service.UpdateFeatureRequest = None,
+ *,
+ feature: gca_feature.Feature = None,
+ update_mask: field_mask_pb2.FieldMask = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gca_feature.Feature:
+ r"""Updates the parameters of a single Feature.
+
+ Args:
+ request (google.cloud.aiplatform_v1beta1.types.UpdateFeatureRequest):
+ The request object. Request message for
+ [FeaturestoreService.UpdateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeature].
+ feature (google.cloud.aiplatform_v1beta1.types.Feature):
+ Required. The Feature's ``name`` field is used to
+ identify the Feature to be updated. Format:
+ ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}``
+
+ This corresponds to the ``feature`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Field mask is used to specify the fields to be
+ overwritten in the Feature resource by the update. The
+ fields specified in the update_mask are relative to the
+ resource, not the full request. A field will be
+ overwritten if it is in the mask. If the user does not
+ provide a mask then only the non-empty fields present in
+ the request will be overwritten. Set the update_mask to
+ ``*`` to override all fields.
+
+ Updatable fields:
+
+ - ``description``
+ - ``labels``
+ - ``monitoring_config.snapshot_analysis.disabled``
+ - ``monitoring_config.snapshot_analysis.monitoring_interval``
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1beta1.types.Feature:
+ Feature Metadata information that
+ describes an attribute of an entity
+ type. For example, apple is an entity
+ type, and color is a feature that
+ describes apple.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([feature, update_mask])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a featurestore_service.UpdateFeatureRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, featurestore_service.UpdateFeatureRequest):
+ request = featurestore_service.UpdateFeatureRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if feature is not None:
+ request.feature = feature
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.update_feature]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("feature.name", request.feature.name),
+ )),
+ )
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def delete_feature(self,
+ request: featurestore_service.DeleteFeatureRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gac_operation.Operation:
+ r"""Deletes a single Feature.
+
+ Args:
+ request (google.cloud.aiplatform_v1beta1.types.DeleteFeatureRequest):
+ The request object. Request message for
+ [FeaturestoreService.DeleteFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeature].
+ name (str):
+ Required. The name of the Feature to be deleted.
+ Format:
+ ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
+ empty messages in your APIs. A typical example is to
+ use it as the request or the response type of an API
+ method. For instance:
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns
+ (google.protobuf.Empty);
+
+ }
+
+ The JSON representation for Empty is empty JSON
+ object {}.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a featurestore_service.DeleteFeatureRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, featurestore_service.DeleteFeatureRequest):
+ request = featurestore_service.DeleteFeatureRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.delete_feature]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("name", request.name),
+ )),
+ )
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = gac_operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ empty_pb2.Empty,
+ metadata_type=gca_operation.DeleteOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def import_feature_values(self,
+ request: featurestore_service.ImportFeatureValuesRequest = None,
+ *,
+ entity_type: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gac_operation.Operation:
+ r"""Imports Feature values into the Featurestore from a
+ source storage location.
+
+ The progress of the import is tracked by the returned
+ operation. The imported features are guaranteed to be
+ visible to subsequent read operations after the
+ operation is marked as successfully done.
+
+ If an import operation fails, the Feature values
+ returned from reads and exports may be inconsistent. If
+ consistency is required, the caller must retry the same
+ import request and wait until the newly returned
+ operation is marked as successfully done.
+
+ There are also scenarios in which the caller can cause
+ inconsistency:
+
+ - Source data for the import contains multiple distinct
+ Feature values for the same entity ID and timestamp.
+ - The source is modified during an import. This includes
+ adding, updating, or removing source data and/or
+ metadata. Examples of updating metadata include, but are
+ not limited to, changing storage location, storage
+ class, or retention policy.
+ - The online serving cluster is under-provisioned.
+
+ Args:
+ request (google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesRequest):
+ The request object. Request message for
+ [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues].
+ entity_type (str):
+ Required. The resource name of the EntityType grouping
+ the Features for which values are being imported.
+ Format:
+ ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``
+
+ This corresponds to the ``entity_type`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesResponse`
+ Response message for
+ [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues].
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([entity_type])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a featurestore_service.ImportFeatureValuesRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
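A request sketch for `import_feature_values`, using the CSV source shape from the v1beta1 protos; field names follow the published `ImportFeatureValuesRequest` message, and all resource names and column names are hypothetical:

```python
from google.cloud.aiplatform_v1beta1 import types

request = types.ImportFeatureValuesRequest(
    entity_type=(
        "projects/my-project/locations/us-central1/"
        "featurestores/my_featurestore/entityTypes/users"
    ),
    csv_source=types.CsvSource(
        gcs_source=types.GcsSource(uris=["gs://my-bucket/users.csv"]),
    ),
    entity_id_field="user_id",         # column holding the entity ID
    feature_time_field="update_time",  # column holding the feature timestamp
    feature_specs=[
        types.ImportFeatureValuesRequest.FeatureSpec(id="age"),
        types.ImportFeatureValuesRequest.FeatureSpec(id="country"),
    ],
)
operation = client.import_feature_values(request=request)
operation.result()  # ImportFeatureValuesResponse once the import completes
```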
+ if not isinstance(request, featurestore_service.ImportFeatureValuesRequest):
+ request = featurestore_service.ImportFeatureValuesRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if entity_type is not None:
+ request.entity_type = entity_type
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.import_feature_values]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("entity_type", request.entity_type),
+ )),
+ )
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = gac_operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ featurestore_service.ImportFeatureValuesResponse,
+ metadata_type=featurestore_service.ImportFeatureValuesOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def batch_read_feature_values(self,
+ request: featurestore_service.BatchReadFeatureValuesRequest = None,
+ *,
+ featurestore: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gac_operation.Operation:
+ r"""Batch reads Feature values from a Featurestore.
+ This API enables batch reading Feature values, where
+ each read instance in the batch may read Feature values
+ of entities from one or more EntityTypes. Point-in-time
+ correctness is guaranteed for Feature values of each
+ read instance as of each instance's read timestamp.
+
+ Args:
+ request (google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest):
+ The request object. Request message for
+ [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues].
+ featurestore (str):
+ Required. The resource name of the Featurestore from
+ which to query Feature values. Format:
+ ``projects/{project}/locations/{location}/featurestores/{featurestore}``
+
+ This corresponds to the ``featurestore`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesResponse`
+ Response message for
+ [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues].
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([featurestore])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a featurestore_service.BatchReadFeatureValuesRequest.
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.BatchReadFeatureValuesRequest): + request = featurestore_service.BatchReadFeatureValuesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if featurestore is not None: + request.featurestore = featurestore + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_read_feature_values] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("featurestore", request.featurestore), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + featurestore_service.BatchReadFeatureValuesResponse, + metadata_type=featurestore_service.BatchReadFeatureValuesOperationMetadata, + ) + + # Done; return the response. + return response + + def export_feature_values(self, + request: featurestore_service.ExportFeatureValuesRequest = None, + *, + entity_type: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Exports Feature values from all the entities of a + target EntityType. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest): + The request object. Request message for + [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. + entity_type (str): + Required. The resource name of the EntityType from which + to export Feature values. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + + This corresponds to the ``entity_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesResponse` + Response message for + [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([entity_type]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.ExportFeatureValuesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
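For `export_feature_values`, only ``entity_type`` is flattened; the destination and the set of Features to export live on the request. A sketch assuming the v1beta1 `FeatureValueDestination` and `FeatureSelector` shapes, with hypothetical names:

```python
from google.cloud.aiplatform_v1beta1 import types

request = types.ExportFeatureValuesRequest(
    entity_type=(
        "projects/my-project/locations/us-central1/"
        "featurestores/my_featurestore/entityTypes/users"
    ),
    destination=types.FeatureValueDestination(
        bigquery_destination=types.BigQueryDestination(
            output_uri="bq://my-project.my_dataset.users_export",
        ),
    ),
    feature_selector=types.FeatureSelector(
        id_matcher=types.IdMatcher(ids=["age", "country"]),
    ),
)
operation = client.export_feature_values(request=request)
operation.result()  # ExportFeatureValuesResponse when the export finishes
```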
+ if not isinstance(request, featurestore_service.ExportFeatureValuesRequest): + request = featurestore_service.ExportFeatureValuesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if entity_type is not None: + request.entity_type = entity_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.export_feature_values] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("entity_type", request.entity_type), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + featurestore_service.ExportFeatureValuesResponse, + metadata_type=featurestore_service.ExportFeatureValuesOperationMetadata, + ) + + # Done; return the response. + return response + + def search_features(self, + request: featurestore_service.SearchFeaturesRequest = None, + *, + location: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.SearchFeaturesPager: + r"""Searches Features matching a query in a given + project. + + Args: + request (google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest): + The request object. Request message for + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. + location (str): + Required. The resource name of the Location to search + Features. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``location`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.SearchFeaturesPager: + Response message for + [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([location]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a featurestore_service.SearchFeaturesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, featurestore_service.SearchFeaturesRequest): + request = featurestore_service.SearchFeaturesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
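`search_features` is also paged, and its filtering is expressed through a ``query`` string on the request rather than a flattened argument. A sketch; the query syntax shown is an assumption, so consult the service documentation for the supported grammar:

```python
from google.cloud.aiplatform_v1beta1 import types

request = types.SearchFeaturesRequest(
    location="projects/my-project/locations/us-central1",
    query='feature_id: "age"',  # assumed syntax; see the SearchFeatures docs
)
for feature in client.search_features(request=request):
    print(feature.name)
```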
+ if location is not None: + request.location = location + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.search_features] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("location", request.location), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.SearchFeaturesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "FeaturestoreServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/pagers.py new file mode 100644 index 0000000000..6a272c4602 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/pagers.py @@ -0,0 +1,509 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.cloud.aiplatform_v1beta1.types import entity_type +from google.cloud.aiplatform_v1beta1.types import feature +from google.cloud.aiplatform_v1beta1.types import featurestore +from google.cloud.aiplatform_v1beta1.types import featurestore_service + + +class ListFeaturestoresPager: + """A pager for iterating through ``list_featurestores`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse` object, and + provides an ``__iter__`` method to iterate through its + ``featurestores`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListFeaturestores`` requests and continue to iterate + through the ``featurestores`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + def __init__(self, + method: Callable[..., featurestore_service.ListFeaturestoresResponse], + request: featurestore_service.ListFeaturestoresRequest, + response: featurestore_service.ListFeaturestoresResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = featurestore_service.ListFeaturestoresRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[featurestore_service.ListFeaturestoresResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[featurestore.Featurestore]: + for page in self.pages: + yield from page.featurestores + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListFeaturestoresAsyncPager: + """A pager for iterating through ``list_featurestores`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``featurestores`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListFeaturestores`` requests and continue to iterate + through the ``featurestores`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[featurestore_service.ListFeaturestoresResponse]], + request: featurestore_service.ListFeaturestoresRequest, + response: featurestore_service.ListFeaturestoresResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = featurestore_service.ListFeaturestoresRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[featurestore_service.ListFeaturestoresResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[featurestore.Featurestore]: + async def async_generator(): + async for page in self.pages: + for response in page.featurestores: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListEntityTypesPager: + """A pager for iterating through ``list_entity_types`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``entity_types`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListEntityTypes`` requests and continue to iterate + through the ``entity_types`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., featurestore_service.ListEntityTypesResponse], + request: featurestore_service.ListEntityTypesRequest, + response: featurestore_service.ListEntityTypesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = featurestore_service.ListEntityTypesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[featurestore_service.ListEntityTypesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[entity_type.EntityType]: + for page in self.pages: + yield from page.entity_types + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListEntityTypesAsyncPager: + """A pager for iterating through ``list_entity_types`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``entity_types`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListEntityTypes`` requests and continue to iterate + through the ``entity_types`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[featurestore_service.ListEntityTypesResponse]], + request: featurestore_service.ListEntityTypesRequest, + response: featurestore_service.ListEntityTypesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = featurestore_service.ListEntityTypesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[featurestore_service.ListEntityTypesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[entity_type.EntityType]: + async def async_generator(): + async for page in self.pages: + for response in page.entity_types: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListFeaturesPager: + """A pager for iterating through ``list_features`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``features`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListFeatures`` requests and continue to iterate + through the ``features`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., featurestore_service.ListFeaturesResponse], + request: featurestore_service.ListFeaturesRequest, + response: featurestore_service.ListFeaturesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = featurestore_service.ListFeaturesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[featurestore_service.ListFeaturesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[feature.Feature]: + for page in self.pages: + yield from page.features + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListFeaturesAsyncPager: + """A pager for iterating through ``list_features`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``features`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListFeatures`` requests and continue to iterate + through the ``features`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[featurestore_service.ListFeaturesResponse]], + request: featurestore_service.ListFeaturesRequest, + response: featurestore_service.ListFeaturesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = featurestore_service.ListFeaturesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[featurestore_service.ListFeaturesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[feature.Feature]: + async def async_generator(): + async for page in self.pages: + for response in page.features: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class SearchFeaturesPager: + """A pager for iterating through ``search_features`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.SearchFeaturesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``features`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``SearchFeatures`` requests and continue to iterate + through the ``features`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchFeaturesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., featurestore_service.SearchFeaturesResponse], + request: featurestore_service.SearchFeaturesRequest, + response: featurestore_service.SearchFeaturesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.SearchFeaturesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = featurestore_service.SearchFeaturesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[featurestore_service.SearchFeaturesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[feature.Feature]: + for page in self.pages: + yield from page.features + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class SearchFeaturesAsyncPager: + """A pager for iterating through ``search_features`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.SearchFeaturesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``features`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``SearchFeatures`` requests and continue to iterate + through the ``features`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchFeaturesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[featurestore_service.SearchFeaturesResponse]], + request: featurestore_service.SearchFeaturesRequest, + response: featurestore_service.SearchFeaturesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.SearchFeaturesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = featurestore_service.SearchFeaturesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[featurestore_service.SearchFeaturesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[feature.Feature]: + async def async_generator(): + async for page in self.pages: + for response in page.features: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/__init__.py new file mode 100644 index 0000000000..e8a1ff1b03 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import FeaturestoreServiceTransport +from .grpc import FeaturestoreServiceGrpcTransport +from .grpc_asyncio import FeaturestoreServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[FeaturestoreServiceTransport]] +_transport_registry['grpc'] = FeaturestoreServiceGrpcTransport +_transport_registry['grpc_asyncio'] = FeaturestoreServiceGrpcAsyncIOTransport + +__all__ = ( + 'FeaturestoreServiceTransport', + 'FeaturestoreServiceGrpcTransport', + 'FeaturestoreServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py new file mode 100644 index 0000000000..9dca966c09 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py @@ -0,0 +1,446 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import entity_type +from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1beta1.types import feature +from google.cloud.aiplatform_v1beta1.types import feature as gca_feature +from google.cloud.aiplatform_v1beta1.types import featurestore +from google.cloud.aiplatform_v1beta1.types import featurestore_service +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class FeaturestoreServiceTransport(abc.ABC): + """Abstract transport class for FeaturestoreService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. 
Default to port 443 (HTTPS) if none is specified.
+        if ':' not in host:
+            host += ':443'
+        self._host = host
+
+        scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
+
+        # Save the scopes.
+        self._scopes = scopes
+
+        # If no credentials are provided, then determine the appropriate
+        # defaults.
+        if credentials and credentials_file:
+            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
+
+        if credentials_file is not None:
+            credentials, _ = google.auth.load_credentials_from_file(
+                                credentials_file,
+                                **scopes_kwargs,
+                                quota_project_id=quota_project_id
+                            )
+
+        elif credentials is None:
+            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
+
+        # If the credentials are service account credentials, then always try to use self-signed JWT.
+        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
+            credentials = credentials.with_always_use_jwt_access(True)
+
+        # Save the credentials.
+        self._credentials = credentials
+
+    # TODO(busunkim): This method is in the base transport
+    # to avoid duplicating code across the transport classes. These functions
+    # should be deleted once the minimum required version of google-auth is increased.
+
+    # TODO: Remove this function once google-auth >= 1.25.0 is required
+    @classmethod
+    def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
+        """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version."""
+
+        scopes_kwargs = {}
+
+        if _GOOGLE_AUTH_VERSION and (
+            packaging.version.parse(_GOOGLE_AUTH_VERSION)
+            >= packaging.version.parse("1.25.0")
+        ):
+            scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
+        else:
+            scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
+
+        return scopes_kwargs
+
+    def _prep_wrapped_messages(self, client_info):
+        # Precompute the wrapped methods.
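+        # Each transport RPC is paired with gapic_v1.method.wrap_method, which
+        # layers the default timeout and the client_info user-agent metadata
+        # over the raw callable; client methods look these wrappers up here.
+        # A usage sketch (illustrative only): a client method fetches
+        # rpc = transport._wrapped_methods[transport.search_features] and then
+        # calls rpc(request, retry=retry, timeout=timeout, metadata=metadata).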
+ self._wrapped_methods = { + self.create_featurestore: gapic_v1.method.wrap_method( + self.create_featurestore, + default_timeout=5.0, + client_info=client_info, + ), + self.get_featurestore: gapic_v1.method.wrap_method( + self.get_featurestore, + default_timeout=5.0, + client_info=client_info, + ), + self.list_featurestores: gapic_v1.method.wrap_method( + self.list_featurestores, + default_timeout=5.0, + client_info=client_info, + ), + self.update_featurestore: gapic_v1.method.wrap_method( + self.update_featurestore, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_featurestore: gapic_v1.method.wrap_method( + self.delete_featurestore, + default_timeout=5.0, + client_info=client_info, + ), + self.create_entity_type: gapic_v1.method.wrap_method( + self.create_entity_type, + default_timeout=5.0, + client_info=client_info, + ), + self.get_entity_type: gapic_v1.method.wrap_method( + self.get_entity_type, + default_timeout=5.0, + client_info=client_info, + ), + self.list_entity_types: gapic_v1.method.wrap_method( + self.list_entity_types, + default_timeout=5.0, + client_info=client_info, + ), + self.update_entity_type: gapic_v1.method.wrap_method( + self.update_entity_type, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_entity_type: gapic_v1.method.wrap_method( + self.delete_entity_type, + default_timeout=5.0, + client_info=client_info, + ), + self.create_feature: gapic_v1.method.wrap_method( + self.create_feature, + default_timeout=5.0, + client_info=client_info, + ), + self.batch_create_features: gapic_v1.method.wrap_method( + self.batch_create_features, + default_timeout=5.0, + client_info=client_info, + ), + self.get_feature: gapic_v1.method.wrap_method( + self.get_feature, + default_timeout=5.0, + client_info=client_info, + ), + self.list_features: gapic_v1.method.wrap_method( + self.list_features, + default_timeout=5.0, + client_info=client_info, + ), + self.update_feature: gapic_v1.method.wrap_method( + self.update_feature, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_feature: gapic_v1.method.wrap_method( + self.delete_feature, + default_timeout=5.0, + client_info=client_info, + ), + self.import_feature_values: gapic_v1.method.wrap_method( + self.import_feature_values, + default_timeout=5.0, + client_info=client_info, + ), + self.batch_read_feature_values: gapic_v1.method.wrap_method( + self.batch_read_feature_values, + default_timeout=5.0, + client_info=client_info, + ), + self.export_feature_values: gapic_v1.method.wrap_method( + self.export_feature_values, + default_timeout=None, + client_info=client_info, + ), + self.search_features: gapic_v1.method.wrap_method( + self.search_features, + default_timeout=5.0, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_featurestore(self) -> Callable[ + [featurestore_service.CreateFeaturestoreRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_featurestore(self) -> Callable[ + [featurestore_service.GetFeaturestoreRequest], + Union[ + featurestore.Featurestore, + Awaitable[featurestore.Featurestore] + ]]: + raise NotImplementedError() + + @property + def list_featurestores(self) -> Callable[ + [featurestore_service.ListFeaturestoresRequest], + Union[ + featurestore_service.ListFeaturestoresResponse, + 
Awaitable[featurestore_service.ListFeaturestoresResponse] + ]]: + raise NotImplementedError() + + @property + def update_featurestore(self) -> Callable[ + [featurestore_service.UpdateFeaturestoreRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def delete_featurestore(self) -> Callable[ + [featurestore_service.DeleteFeaturestoreRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def create_entity_type(self) -> Callable[ + [featurestore_service.CreateEntityTypeRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_entity_type(self) -> Callable[ + [featurestore_service.GetEntityTypeRequest], + Union[ + entity_type.EntityType, + Awaitable[entity_type.EntityType] + ]]: + raise NotImplementedError() + + @property + def list_entity_types(self) -> Callable[ + [featurestore_service.ListEntityTypesRequest], + Union[ + featurestore_service.ListEntityTypesResponse, + Awaitable[featurestore_service.ListEntityTypesResponse] + ]]: + raise NotImplementedError() + + @property + def update_entity_type(self) -> Callable[ + [featurestore_service.UpdateEntityTypeRequest], + Union[ + gca_entity_type.EntityType, + Awaitable[gca_entity_type.EntityType] + ]]: + raise NotImplementedError() + + @property + def delete_entity_type(self) -> Callable[ + [featurestore_service.DeleteEntityTypeRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def create_feature(self) -> Callable[ + [featurestore_service.CreateFeatureRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def batch_create_features(self) -> Callable[ + [featurestore_service.BatchCreateFeaturesRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_feature(self) -> Callable[ + [featurestore_service.GetFeatureRequest], + Union[ + feature.Feature, + Awaitable[feature.Feature] + ]]: + raise NotImplementedError() + + @property + def list_features(self) -> Callable[ + [featurestore_service.ListFeaturesRequest], + Union[ + featurestore_service.ListFeaturesResponse, + Awaitable[featurestore_service.ListFeaturesResponse] + ]]: + raise NotImplementedError() + + @property + def update_feature(self) -> Callable[ + [featurestore_service.UpdateFeatureRequest], + Union[ + gca_feature.Feature, + Awaitable[gca_feature.Feature] + ]]: + raise NotImplementedError() + + @property + def delete_feature(self) -> Callable[ + [featurestore_service.DeleteFeatureRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def import_feature_values(self) -> Callable[ + [featurestore_service.ImportFeatureValuesRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def batch_read_feature_values(self) -> Callable[ + [featurestore_service.BatchReadFeatureValuesRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def export_feature_values(self) -> Callable[ + [featurestore_service.ExportFeatureValuesRequest], + Union[ + 
operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def search_features(self) -> Callable[ + [featurestore_service.SearchFeaturesRequest], + Union[ + featurestore_service.SearchFeaturesResponse, + Awaitable[featurestore_service.SearchFeaturesResponse] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'FeaturestoreServiceTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py new file mode 100644 index 0000000000..771cbf2737 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py @@ -0,0 +1,803 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import entity_type +from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1beta1.types import feature +from google.cloud.aiplatform_v1beta1.types import feature as gca_feature +from google.cloud.aiplatform_v1beta1.types import featurestore +from google.cloud.aiplatform_v1beta1.types import featurestore_service +from google.longrunning import operations_pb2 # type: ignore +from .base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO + + +class FeaturestoreServiceGrpcTransport(FeaturestoreServiceTransport): + """gRPC backend transport for FeaturestoreService. + + The service that handles CRUD and List for resources for + Featurestore. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+    """
+    _stubs: Dict[str, Callable]
+
+    def __init__(self, *,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: str = None,
+            scopes: Sequence[str] = None,
+            channel: grpc.Channel = None,
+            api_mtls_endpoint: str = None,
+            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+            ssl_channel_credentials: grpc.ChannelCredentials = None,
+            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+            quota_project_id: Optional[str] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure a mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self-signed JWT should
+                be used for service account credentials.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        self._grpc_channel = None
+        self._ssl_channel_credentials = ssl_channel_credentials
+        self._stubs: Dict[str, Callable] = {}
+        self._operations_client = None
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Ignore credentials if a channel was passed.
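+            # Setting credentials to False (rather than leaving it None) keeps
+            # the base transport from falling back to application default
+            # credentials; the caller-supplied channel already carries its own.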
+            credentials = False
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+
+        else:
+            if api_mtls_endpoint:
+                host = api_mtls_endpoint
+
+                # Create SSL credentials with client_cert_source or application
+                # default SSL credentials.
+                if client_cert_source:
+                    cert, key = client_cert_source()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+                else:
+                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+            else:
+                if client_cert_source_for_mtls and not ssl_channel_credentials:
+                    cert, key = client_cert_source_for_mtls()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+
+        # The base transport sets the host, credentials, and scopes.
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+        )
+
+        if not self._grpc_channel:
+            self._grpc_channel = type(self).create_channel(
+                self._host,
+                credentials=self._credentials,
+                credentials_file=credentials_file,
+                scopes=self._scopes,
+                ssl_credentials=self._ssl_channel_credentials,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        # Wrap messages. This must be done after self._grpc_channel exists.
+        self._prep_wrapped_messages(client_info)
+
+    @classmethod
+    def create_channel(cls,
+                       host: str = 'aiplatform.googleapis.com',
+                       credentials: ga_credentials.Credentials = None,
+                       credentials_file: str = None,
+                       scopes: Optional[Sequence[str]] = None,
+                       quota_project_id: Optional[str] = None,
+                       **kwargs) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service.
+        """
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsClient:
+        """Create the client designed to process long-running operations.
+ + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_featurestore(self) -> Callable[ + [featurestore_service.CreateFeaturestoreRequest], + operations_pb2.Operation]: + r"""Return a callable for the create featurestore method over gRPC. + + Creates a new Featurestore in a given project and + location. + + Returns: + Callable[[~.CreateFeaturestoreRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_featurestore' not in self._stubs: + self._stubs['create_featurestore'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateFeaturestore', + request_serializer=featurestore_service.CreateFeaturestoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_featurestore'] + + @property + def get_featurestore(self) -> Callable[ + [featurestore_service.GetFeaturestoreRequest], + featurestore.Featurestore]: + r"""Return a callable for the get featurestore method over gRPC. + + Gets details of a single Featurestore. + + Returns: + Callable[[~.GetFeaturestoreRequest], + ~.Featurestore]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_featurestore' not in self._stubs: + self._stubs['get_featurestore'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetFeaturestore', + request_serializer=featurestore_service.GetFeaturestoreRequest.serialize, + response_deserializer=featurestore.Featurestore.deserialize, + ) + return self._stubs['get_featurestore'] + + @property + def list_featurestores(self) -> Callable[ + [featurestore_service.ListFeaturestoresRequest], + featurestore_service.ListFeaturestoresResponse]: + r"""Return a callable for the list featurestores method over gRPC. + + Lists Featurestores in a given project and location. + + Returns: + Callable[[~.ListFeaturestoresRequest], + ~.ListFeaturestoresResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
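+        # The stub is created lazily on first property access and cached in
+        # self._stubs, so repeated reads reuse the same gRPC callable.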
+ if 'list_featurestores' not in self._stubs: + self._stubs['list_featurestores'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListFeaturestores', + request_serializer=featurestore_service.ListFeaturestoresRequest.serialize, + response_deserializer=featurestore_service.ListFeaturestoresResponse.deserialize, + ) + return self._stubs['list_featurestores'] + + @property + def update_featurestore(self) -> Callable[ + [featurestore_service.UpdateFeaturestoreRequest], + operations_pb2.Operation]: + r"""Return a callable for the update featurestore method over gRPC. + + Updates the parameters of a single Featurestore. + + Returns: + Callable[[~.UpdateFeaturestoreRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_featurestore' not in self._stubs: + self._stubs['update_featurestore'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateFeaturestore', + request_serializer=featurestore_service.UpdateFeaturestoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_featurestore'] + + @property + def delete_featurestore(self) -> Callable[ + [featurestore_service.DeleteFeaturestoreRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete featurestore method over gRPC. + + Deletes a single Featurestore. The Featurestore must not contain + any EntityTypes or ``force`` must be set to true for the request + to succeed. + + Returns: + Callable[[~.DeleteFeaturestoreRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_featurestore' not in self._stubs: + self._stubs['delete_featurestore'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteFeaturestore', + request_serializer=featurestore_service.DeleteFeaturestoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_featurestore'] + + @property + def create_entity_type(self) -> Callable[ + [featurestore_service.CreateEntityTypeRequest], + operations_pb2.Operation]: + r"""Return a callable for the create entity type method over gRPC. + + Creates a new EntityType in a given Featurestore. + + Returns: + Callable[[~.CreateEntityTypeRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
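+        # The stub cache is keyed by method name; the channel registers the
+        # fully qualified gRPC method path, e.g.
+        # /google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateEntityType.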
+ if 'create_entity_type' not in self._stubs: + self._stubs['create_entity_type'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateEntityType', + request_serializer=featurestore_service.CreateEntityTypeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_entity_type'] + + @property + def get_entity_type(self) -> Callable[ + [featurestore_service.GetEntityTypeRequest], + entity_type.EntityType]: + r"""Return a callable for the get entity type method over gRPC. + + Gets details of a single EntityType. + + Returns: + Callable[[~.GetEntityTypeRequest], + ~.EntityType]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_entity_type' not in self._stubs: + self._stubs['get_entity_type'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetEntityType', + request_serializer=featurestore_service.GetEntityTypeRequest.serialize, + response_deserializer=entity_type.EntityType.deserialize, + ) + return self._stubs['get_entity_type'] + + @property + def list_entity_types(self) -> Callable[ + [featurestore_service.ListEntityTypesRequest], + featurestore_service.ListEntityTypesResponse]: + r"""Return a callable for the list entity types method over gRPC. + + Lists EntityTypes in a given Featurestore. + + Returns: + Callable[[~.ListEntityTypesRequest], + ~.ListEntityTypesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_entity_types' not in self._stubs: + self._stubs['list_entity_types'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListEntityTypes', + request_serializer=featurestore_service.ListEntityTypesRequest.serialize, + response_deserializer=featurestore_service.ListEntityTypesResponse.deserialize, + ) + return self._stubs['list_entity_types'] + + @property + def update_entity_type(self) -> Callable[ + [featurestore_service.UpdateEntityTypeRequest], + gca_entity_type.EntityType]: + r"""Return a callable for the update entity type method over gRPC. + + Updates the parameters of a single EntityType. + + Returns: + Callable[[~.UpdateEntityTypeRequest], + ~.EntityType]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_entity_type' not in self._stubs: + self._stubs['update_entity_type'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateEntityType', + request_serializer=featurestore_service.UpdateEntityTypeRequest.serialize, + response_deserializer=gca_entity_type.EntityType.deserialize, + ) + return self._stubs['update_entity_type'] + + @property + def delete_entity_type(self) -> Callable[ + [featurestore_service.DeleteEntityTypeRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete entity type method over gRPC. + + Deletes a single EntityType. 
The EntityType must not have any + Features or ``force`` must be set to true for the request to + succeed. + + Returns: + Callable[[~.DeleteEntityTypeRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_entity_type' not in self._stubs: + self._stubs['delete_entity_type'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteEntityType', + request_serializer=featurestore_service.DeleteEntityTypeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_entity_type'] + + @property + def create_feature(self) -> Callable[ + [featurestore_service.CreateFeatureRequest], + operations_pb2.Operation]: + r"""Return a callable for the create feature method over gRPC. + + Creates a new Feature in a given EntityType. + + Returns: + Callable[[~.CreateFeatureRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_feature' not in self._stubs: + self._stubs['create_feature'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateFeature', + request_serializer=featurestore_service.CreateFeatureRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_feature'] + + @property + def batch_create_features(self) -> Callable[ + [featurestore_service.BatchCreateFeaturesRequest], + operations_pb2.Operation]: + r"""Return a callable for the batch create features method over gRPC. + + Creates a batch of Features in a given EntityType. + + Returns: + Callable[[~.BatchCreateFeaturesRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'batch_create_features' not in self._stubs: + self._stubs['batch_create_features'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/BatchCreateFeatures', + request_serializer=featurestore_service.BatchCreateFeaturesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['batch_create_features'] + + @property + def get_feature(self) -> Callable[ + [featurestore_service.GetFeatureRequest], + feature.Feature]: + r"""Return a callable for the get feature method over gRPC. + + Gets details of a single Feature. + + Returns: + Callable[[~.GetFeatureRequest], + ~.Feature]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
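+        # unary_unary registers a single-request/single-response callable; the
+        # serializer/deserializer hooks convert proto-plus messages to and from
+        # the wire format.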
+ if 'get_feature' not in self._stubs: + self._stubs['get_feature'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetFeature', + request_serializer=featurestore_service.GetFeatureRequest.serialize, + response_deserializer=feature.Feature.deserialize, + ) + return self._stubs['get_feature'] + + @property + def list_features(self) -> Callable[ + [featurestore_service.ListFeaturesRequest], + featurestore_service.ListFeaturesResponse]: + r"""Return a callable for the list features method over gRPC. + + Lists Features in a given EntityType. + + Returns: + Callable[[~.ListFeaturesRequest], + ~.ListFeaturesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_features' not in self._stubs: + self._stubs['list_features'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListFeatures', + request_serializer=featurestore_service.ListFeaturesRequest.serialize, + response_deserializer=featurestore_service.ListFeaturesResponse.deserialize, + ) + return self._stubs['list_features'] + + @property + def update_feature(self) -> Callable[ + [featurestore_service.UpdateFeatureRequest], + gca_feature.Feature]: + r"""Return a callable for the update feature method over gRPC. + + Updates the parameters of a single Feature. + + Returns: + Callable[[~.UpdateFeatureRequest], + ~.Feature]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_feature' not in self._stubs: + self._stubs['update_feature'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateFeature', + request_serializer=featurestore_service.UpdateFeatureRequest.serialize, + response_deserializer=gca_feature.Feature.deserialize, + ) + return self._stubs['update_feature'] + + @property + def delete_feature(self) -> Callable[ + [featurestore_service.DeleteFeatureRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete feature method over gRPC. + + Deletes a single Feature. + + Returns: + Callable[[~.DeleteFeatureRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_feature' not in self._stubs: + self._stubs['delete_feature'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteFeature', + request_serializer=featurestore_service.DeleteFeatureRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_feature'] + + @property + def import_feature_values(self) -> Callable[ + [featurestore_service.ImportFeatureValuesRequest], + operations_pb2.Operation]: + r"""Return a callable for the import feature values method over gRPC. + + Imports Feature values into the Featurestore from a + source storage. + The progress of the import is tracked by the returned + operation. 
The imported features are guaranteed to be
+        visible to subsequent read operations after the
+        operation is marked as successfully done.
+        If an import operation fails, the Feature values
+        returned from reads and exports may be inconsistent. If
+        consistency is required, the caller must retry the same
+        import request again and wait until the new operation
+        returned is marked as successfully done.
+        There are also scenarios where the caller can cause
+        inconsistency.
+
+        -  Source data for import contains multiple distinct
+           Feature values for the same entity ID and timestamp.
+        -  Source is modified during an import. This includes
+           adding, updating, or removing source data and/or
+           metadata. Examples of updating metadata include but are
+           not limited to changing storage location, storage class,
+           or retention policy.
+        -  Online serving cluster is under-provisioned.
+
+        Returns:
+            Callable[[~.ImportFeatureValuesRequest],
+                    ~.Operation]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'import_feature_values' not in self._stubs:
+            self._stubs['import_feature_values'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ImportFeatureValues',
+                request_serializer=featurestore_service.ImportFeatureValuesRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['import_feature_values']
+
+    @property
+    def batch_read_feature_values(self) -> Callable[
+            [featurestore_service.BatchReadFeatureValuesRequest],
+            operations_pb2.Operation]:
+        r"""Return a callable for the batch read feature values method over gRPC.
+
+        Batch reads Feature values from a Featurestore.
+        This API enables batch reading Feature values, where
+        each read instance in the batch may read Feature values
+        of entities from one or more EntityTypes. Point-in-time
+        correctness is guaranteed for Feature values of each
+        read instance as of each instance's read timestamp.
+
+        Returns:
+            Callable[[~.BatchReadFeatureValuesRequest],
+                    ~.Operation]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'batch_read_feature_values' not in self._stubs:
+            self._stubs['batch_read_feature_values'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.FeaturestoreService/BatchReadFeatureValues',
+                request_serializer=featurestore_service.BatchReadFeatureValuesRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['batch_read_feature_values']
+
+    @property
+    def export_feature_values(self) -> Callable[
+            [featurestore_service.ExportFeatureValuesRequest],
+            operations_pb2.Operation]:
+        r"""Return a callable for the export feature values method over gRPC.
+
+        Exports Feature values from all the entities of a
+        target EntityType.
+
+        Returns:
+            Callable[[~.ExportFeatureValuesRequest],
+                    ~.Operation]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
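+        # (Usage sketch, illustrative only: the long-running Operation this
+        # callable returns can be polled through the transport's
+        # operations_client property, e.g.
+        # transport.operations_client.get_operation(op.name) for a started
+        # export operation `op`.)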
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'export_feature_values' not in self._stubs: + self._stubs['export_feature_values'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ExportFeatureValues', + request_serializer=featurestore_service.ExportFeatureValuesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_feature_values'] + + @property + def search_features(self) -> Callable[ + [featurestore_service.SearchFeaturesRequest], + featurestore_service.SearchFeaturesResponse]: + r"""Return a callable for the search features method over gRPC. + + Searches Features matching a query in a given + project. + + Returns: + Callable[[~.SearchFeaturesRequest], + ~.SearchFeaturesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'search_features' not in self._stubs: + self._stubs['search_features'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/SearchFeatures', + request_serializer=featurestore_service.SearchFeaturesRequest.serialize, + response_deserializer=featurestore_service.SearchFeaturesResponse.deserialize, + ) + return self._stubs['search_features'] + + +__all__ = ( + 'FeaturestoreServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..a73c769f4d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py @@ -0,0 +1,807 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import grpc_helpers_async  # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+import packaging.version
+
+import grpc  # type: ignore
+from grpc.experimental import aio  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import entity_type
+from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type
+from google.cloud.aiplatform_v1beta1.types import feature
+from google.cloud.aiplatform_v1beta1.types import feature as gca_feature
+from google.cloud.aiplatform_v1beta1.types import featurestore
+from google.cloud.aiplatform_v1beta1.types import featurestore_service
+from google.longrunning import operations_pb2  # type: ignore
+from .base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO
+from .grpc import FeaturestoreServiceGrpcTransport
+
+
+class FeaturestoreServiceGrpcAsyncIOTransport(FeaturestoreServiceTransport):
+    """gRPC AsyncIO backend transport for FeaturestoreService.
+
+    The service that handles CRUD and List operations for
+    Featurestore resources.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(cls,
+                       host: str = 'aiplatform.googleapis.com',
+                       credentials: ga_credentials.Credentials = None,
+                       credentials_file: Optional[str] = None,
+                       scopes: Optional[Sequence[str]] = None,
+                       quota_project_id: Optional[str] = None,
+                       **kwargs) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_featurestore(self) -> Callable[ + [featurestore_service.CreateFeaturestoreRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create featurestore method over gRPC. + + Creates a new Featurestore in a given project and + location. + + Returns: + Callable[[~.CreateFeaturestoreRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_featurestore' not in self._stubs: + self._stubs['create_featurestore'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateFeaturestore', + request_serializer=featurestore_service.CreateFeaturestoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_featurestore'] + + @property + def get_featurestore(self) -> Callable[ + [featurestore_service.GetFeaturestoreRequest], + Awaitable[featurestore.Featurestore]]: + r"""Return a callable for the get featurestore method over gRPC. + + Gets details of a single Featurestore. + + Returns: + Callable[[~.GetFeaturestoreRequest], + Awaitable[~.Featurestore]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_featurestore' not in self._stubs: + self._stubs['get_featurestore'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetFeaturestore', + request_serializer=featurestore_service.GetFeaturestoreRequest.serialize, + response_deserializer=featurestore.Featurestore.deserialize, + ) + return self._stubs['get_featurestore'] + + @property + def list_featurestores(self) -> Callable[ + [featurestore_service.ListFeaturestoresRequest], + Awaitable[featurestore_service.ListFeaturestoresResponse]]: + r"""Return a callable for the list featurestores method over gRPC. + + Lists Featurestores in a given project and location. + + Returns: + Callable[[~.ListFeaturestoresRequest], + Awaitable[~.ListFeaturestoresResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_featurestores' not in self._stubs: + self._stubs['list_featurestores'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListFeaturestores', + request_serializer=featurestore_service.ListFeaturestoresRequest.serialize, + response_deserializer=featurestore_service.ListFeaturestoresResponse.deserialize, + ) + return self._stubs['list_featurestores'] + + @property + def update_featurestore(self) -> Callable[ + [featurestore_service.UpdateFeaturestoreRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the update featurestore method over gRPC. + + Updates the parameters of a single Featurestore. + + Returns: + Callable[[~.UpdateFeaturestoreRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
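+        # As in the synchronous transport, the stub is built once and cached;
+        # the difference here is that the returned callable produces an
+        # ``Awaitable[operations_pb2.Operation]`` rather than a blocking result.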
+ if 'update_featurestore' not in self._stubs: + self._stubs['update_featurestore'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateFeaturestore', + request_serializer=featurestore_service.UpdateFeaturestoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_featurestore'] + + @property + def delete_featurestore(self) -> Callable[ + [featurestore_service.DeleteFeaturestoreRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete featurestore method over gRPC. + + Deletes a single Featurestore. The Featurestore must not contain + any EntityTypes or ``force`` must be set to true for the request + to succeed. + + Returns: + Callable[[~.DeleteFeaturestoreRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_featurestore' not in self._stubs: + self._stubs['delete_featurestore'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteFeaturestore', + request_serializer=featurestore_service.DeleteFeaturestoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_featurestore'] + + @property + def create_entity_type(self) -> Callable[ + [featurestore_service.CreateEntityTypeRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create entity type method over gRPC. + + Creates a new EntityType in a given Featurestore. + + Returns: + Callable[[~.CreateEntityTypeRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_entity_type' not in self._stubs: + self._stubs['create_entity_type'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateEntityType', + request_serializer=featurestore_service.CreateEntityTypeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_entity_type'] + + @property + def get_entity_type(self) -> Callable[ + [featurestore_service.GetEntityTypeRequest], + Awaitable[entity_type.EntityType]]: + r"""Return a callable for the get entity type method over gRPC. + + Gets details of a single EntityType. + + Returns: + Callable[[~.GetEntityTypeRequest], + Awaitable[~.EntityType]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
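+        # A note on the module imports above: ``entity_type`` and
+        # ``gca_entity_type`` alias the same types module; the ``gca_`` alias
+        # appears to exist so generated code can still reference the message
+        # class where a same-named local (such as a flattened ``entity_type``
+        # argument in the client) would otherwise shadow the module.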
+ if 'get_entity_type' not in self._stubs: + self._stubs['get_entity_type'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetEntityType', + request_serializer=featurestore_service.GetEntityTypeRequest.serialize, + response_deserializer=entity_type.EntityType.deserialize, + ) + return self._stubs['get_entity_type'] + + @property + def list_entity_types(self) -> Callable[ + [featurestore_service.ListEntityTypesRequest], + Awaitable[featurestore_service.ListEntityTypesResponse]]: + r"""Return a callable for the list entity types method over gRPC. + + Lists EntityTypes in a given Featurestore. + + Returns: + Callable[[~.ListEntityTypesRequest], + Awaitable[~.ListEntityTypesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_entity_types' not in self._stubs: + self._stubs['list_entity_types'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListEntityTypes', + request_serializer=featurestore_service.ListEntityTypesRequest.serialize, + response_deserializer=featurestore_service.ListEntityTypesResponse.deserialize, + ) + return self._stubs['list_entity_types'] + + @property + def update_entity_type(self) -> Callable[ + [featurestore_service.UpdateEntityTypeRequest], + Awaitable[gca_entity_type.EntityType]]: + r"""Return a callable for the update entity type method over gRPC. + + Updates the parameters of a single EntityType. + + Returns: + Callable[[~.UpdateEntityTypeRequest], + Awaitable[~.EntityType]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_entity_type' not in self._stubs: + self._stubs['update_entity_type'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateEntityType', + request_serializer=featurestore_service.UpdateEntityTypeRequest.serialize, + response_deserializer=gca_entity_type.EntityType.deserialize, + ) + return self._stubs['update_entity_type'] + + @property + def delete_entity_type(self) -> Callable[ + [featurestore_service.DeleteEntityTypeRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete entity type method over gRPC. + + Deletes a single EntityType. The EntityType must not have any + Features or ``force`` must be set to true for the request to + succeed. + + Returns: + Callable[[~.DeleteEntityTypeRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_entity_type' not in self._stubs: + self._stubs['delete_entity_type'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteEntityType', + request_serializer=featurestore_service.DeleteEntityTypeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_entity_type'] + + @property + def create_feature(self) -> Callable[ + [featurestore_service.CreateFeatureRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create feature method over gRPC. + + Creates a new Feature in a given EntityType. + + Returns: + Callable[[~.CreateFeatureRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_feature' not in self._stubs: + self._stubs['create_feature'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateFeature', + request_serializer=featurestore_service.CreateFeatureRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_feature'] + + @property + def batch_create_features(self) -> Callable[ + [featurestore_service.BatchCreateFeaturesRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the batch create features method over gRPC. + + Creates a batch of Features in a given EntityType. + + Returns: + Callable[[~.BatchCreateFeaturesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'batch_create_features' not in self._stubs: + self._stubs['batch_create_features'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/BatchCreateFeatures', + request_serializer=featurestore_service.BatchCreateFeaturesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['batch_create_features'] + + @property + def get_feature(self) -> Callable[ + [featurestore_service.GetFeatureRequest], + Awaitable[feature.Feature]]: + r"""Return a callable for the get feature method over gRPC. + + Gets details of a single Feature. + + Returns: + Callable[[~.GetFeatureRequest], + Awaitable[~.Feature]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_feature' not in self._stubs: + self._stubs['get_feature'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetFeature', + request_serializer=featurestore_service.GetFeatureRequest.serialize, + response_deserializer=feature.Feature.deserialize, + ) + return self._stubs['get_feature'] + + @property + def list_features(self) -> Callable[ + [featurestore_service.ListFeaturesRequest], + Awaitable[featurestore_service.ListFeaturesResponse]]: + r"""Return a callable for the list features method over gRPC. + + Lists Features in a given EntityType. 
+ + Returns: + Callable[[~.ListFeaturesRequest], + Awaitable[~.ListFeaturesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_features' not in self._stubs: + self._stubs['list_features'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListFeatures', + request_serializer=featurestore_service.ListFeaturesRequest.serialize, + response_deserializer=featurestore_service.ListFeaturesResponse.deserialize, + ) + return self._stubs['list_features'] + + @property + def update_feature(self) -> Callable[ + [featurestore_service.UpdateFeatureRequest], + Awaitable[gca_feature.Feature]]: + r"""Return a callable for the update feature method over gRPC. + + Updates the parameters of a single Feature. + + Returns: + Callable[[~.UpdateFeatureRequest], + Awaitable[~.Feature]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_feature' not in self._stubs: + self._stubs['update_feature'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateFeature', + request_serializer=featurestore_service.UpdateFeatureRequest.serialize, + response_deserializer=gca_feature.Feature.deserialize, + ) + return self._stubs['update_feature'] + + @property + def delete_feature(self) -> Callable[ + [featurestore_service.DeleteFeatureRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete feature method over gRPC. + + Deletes a single Feature. + + Returns: + Callable[[~.DeleteFeatureRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_feature' not in self._stubs: + self._stubs['delete_feature'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteFeature', + request_serializer=featurestore_service.DeleteFeatureRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_feature'] + + @property + def import_feature_values(self) -> Callable[ + [featurestore_service.ImportFeatureValuesRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the import feature values method over gRPC. + + Imports Feature values into the Featurestore from a + source storage. + The progress of the import is tracked by the returned + operation. The imported features are guaranteed to be + visible to subsequent read operations after the + operation is marked as successfully done. + If an import operation fails, the Feature values + returned from reads and exports may be inconsistent. If + consistency is required, the caller must retry the same + import request again and wait till the new operation + returned is marked as successfully done. + There are also scenarios where the caller can cause + inconsistency. 
+ - Source data for import contains multiple distinct + Feature values for the same entity ID and timestamp. + - Source is modified during an import. This includes + adding, updating, or removing source data and/or + metadata. Examples of updating metadata include but are + not limited to changing storage location, storage class, + or retention policy. + - Online serving cluster is under-provisioned. + + Returns: + Callable[[~.ImportFeatureValuesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'import_feature_values' not in self._stubs: + self._stubs['import_feature_values'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ImportFeatureValues', + request_serializer=featurestore_service.ImportFeatureValuesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['import_feature_values'] + + @property + def batch_read_feature_values(self) -> Callable[ + [featurestore_service.BatchReadFeatureValuesRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the batch read feature values method over gRPC. + + Batch reads Feature values from a Featurestore. + This API enables batch reading Feature values, where + each read instance in the batch may read Feature values + of entities from one or more EntityTypes. Point-in-time + correctness is guaranteed for Feature values of each + read instance as of each instance's read timestamp. + + Returns: + Callable[[~.BatchReadFeatureValuesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'batch_read_feature_values' not in self._stubs: + self._stubs['batch_read_feature_values'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/BatchReadFeatureValues', + request_serializer=featurestore_service.BatchReadFeatureValuesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['batch_read_feature_values'] + + @property + def export_feature_values(self) -> Callable[ + [featurestore_service.ExportFeatureValuesRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the export feature values method over gRPC. + + Exports Feature values from all the entities of a + target EntityType. + + Returns: + Callable[[~.ExportFeatureValuesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
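+        # Note: ``operations_pb2.Operation`` is a plain protobuf message, so it
+        # is deserialized with ``Operation.FromString`` below, whereas the
+        # proto-plus request types use their generated ``serialize`` hooks.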
+ if 'export_feature_values' not in self._stubs: + self._stubs['export_feature_values'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ExportFeatureValues', + request_serializer=featurestore_service.ExportFeatureValuesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_feature_values'] + + @property + def search_features(self) -> Callable[ + [featurestore_service.SearchFeaturesRequest], + Awaitable[featurestore_service.SearchFeaturesResponse]]: + r"""Return a callable for the search features method over gRPC. + + Searches Features matching a query in a given + project. + + Returns: + Callable[[~.SearchFeaturesRequest], + Awaitable[~.SearchFeaturesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'search_features' not in self._stubs: + self._stubs['search_features'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.FeaturestoreService/SearchFeatures', + request_serializer=featurestore_service.SearchFeaturesRequest.serialize, + response_deserializer=featurestore_service.SearchFeaturesResponse.deserialize, + ) + return self._stubs['search_features'] + + +__all__ = ( + 'FeaturestoreServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/__init__.py new file mode 100644 index 0000000000..fb5d596b18 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import IndexEndpointServiceClient +from .async_client import IndexEndpointServiceAsyncClient + +__all__ = ( + 'IndexEndpointServiceClient', + 'IndexEndpointServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py new file mode 100644 index 0000000000..c7f921df76 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py @@ -0,0 +1,817 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import pagers +from google.cloud.aiplatform_v1beta1.types import index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import IndexEndpointServiceGrpcAsyncIOTransport +from .client import IndexEndpointServiceClient + + +class IndexEndpointServiceAsyncClient: + """A service for managing Vertex AI's IndexEndpoints.""" + + _client: IndexEndpointServiceClient + + DEFAULT_ENDPOINT = IndexEndpointServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = IndexEndpointServiceClient.DEFAULT_MTLS_ENDPOINT + + index_path = staticmethod(IndexEndpointServiceClient.index_path) + parse_index_path = staticmethod(IndexEndpointServiceClient.parse_index_path) + index_endpoint_path = staticmethod(IndexEndpointServiceClient.index_endpoint_path) + parse_index_endpoint_path = staticmethod(IndexEndpointServiceClient.parse_index_endpoint_path) + common_billing_account_path = staticmethod(IndexEndpointServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(IndexEndpointServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(IndexEndpointServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(IndexEndpointServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(IndexEndpointServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(IndexEndpointServiceClient.parse_common_organization_path) + common_project_path = staticmethod(IndexEndpointServiceClient.common_project_path) + parse_common_project_path = staticmethod(IndexEndpointServiceClient.parse_common_project_path) + common_location_path = staticmethod(IndexEndpointServiceClient.common_location_path) + parse_common_location_path = staticmethod(IndexEndpointServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client 
using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + IndexEndpointServiceAsyncClient: The constructed client. + """ + return IndexEndpointServiceClient.from_service_account_info.__func__(IndexEndpointServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + IndexEndpointServiceAsyncClient: The constructed client. + """ + return IndexEndpointServiceClient.from_service_account_file.__func__(IndexEndpointServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> IndexEndpointServiceTransport: + """Returns the transport used by the client instance. + + Returns: + IndexEndpointServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(IndexEndpointServiceClient).get_transport_class, type(IndexEndpointServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, IndexEndpointServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the index endpoint service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.IndexEndpointServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + self._client = IndexEndpointServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_index_endpoint(self, + request: index_endpoint_service.CreateIndexEndpointRequest = None, + *, + parent: str = None, + index_endpoint: gca_index_endpoint.IndexEndpoint = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates an IndexEndpoint. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateIndexEndpointRequest`): + The request object. Request message for + [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint]. + parent (:class:`str`): + Required. The resource name of the Location to create + the IndexEndpoint in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + index_endpoint (:class:`google.cloud.aiplatform_v1beta1.types.IndexEndpoint`): + Required. The IndexEndpoint to + create. + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.IndexEndpoint` Indexes are deployed into it. An IndexEndpoint can have multiple + DeployedIndexes. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, index_endpoint]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_endpoint_service.CreateIndexEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if index_endpoint is not None: + request.index_endpoint = index_endpoint + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_index_endpoint, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_index_endpoint.IndexEndpoint, + metadata_type=index_endpoint_service.CreateIndexEndpointOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def get_index_endpoint(self, + request: index_endpoint_service.GetIndexEndpointRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> index_endpoint.IndexEndpoint: + r"""Gets an IndexEndpoint. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetIndexEndpointRequest`): + The request object. Request message for + [IndexEndpointService.GetIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.GetIndexEndpoint] + name (:class:`str`): + Required. The name of the IndexEndpoint resource. + Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.IndexEndpoint: + Indexes are deployed into it. An + IndexEndpoint can have multiple + DeployedIndexes. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_endpoint_service.GetIndexEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_index_endpoint, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_index_endpoints(self, + request: index_endpoint_service.ListIndexEndpointsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListIndexEndpointsAsyncPager: + r"""Lists IndexEndpoints in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest`): + The request object. Request message for + [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]. + parent (:class:`str`): + Required. The resource name of the Location from which + to list the IndexEndpoints. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.index_endpoint_service.pagers.ListIndexEndpointsAsyncPager: + Response message for + [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_endpoint_service.ListIndexEndpointsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_index_endpoints, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListIndexEndpointsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_index_endpoint(self, + request: index_endpoint_service.UpdateIndexEndpointRequest = None, + *, + index_endpoint: gca_index_endpoint.IndexEndpoint = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_index_endpoint.IndexEndpoint: + r"""Updates an IndexEndpoint. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateIndexEndpointRequest`): + The request object. Request message for + [IndexEndpointService.UpdateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.UpdateIndexEndpoint]. + index_endpoint (:class:`google.cloud.aiplatform_v1beta1.types.IndexEndpoint`): + Required. The IndexEndpoint which + replaces the resource on the server. + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The update mask applies to the resource. See + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.types.IndexEndpoint: + Indexes are deployed into it. An + IndexEndpoint can have multiple + DeployedIndexes. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_endpoint_service.UpdateIndexEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_index_endpoint, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("index_endpoint.name", request.index_endpoint.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_index_endpoint(self, + request: index_endpoint_service.DeleteIndexEndpointRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes an IndexEndpoint. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteIndexEndpointRequest`): + The request object. Request message for + [IndexEndpointService.DeleteIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeleteIndexEndpoint]. + name (:class:`str`): + Required. The name of the IndexEndpoint resource to be + deleted. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
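+        # For example (hypothetical call): passing both ``request=...`` and
+        # ``name=...`` raises the ValueError below; callers must use either the
+        # request object or the flattened argument, never both.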
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_endpoint_service.DeleteIndexEndpointRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_index_endpoint, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def deploy_index(self, + request: index_endpoint_service.DeployIndexRequest = None, + *, + index_endpoint: str = None, + deployed_index: gca_index_endpoint.DeployedIndex = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deploys an Index into this IndexEndpoint, creating a + DeployedIndex within it. + Only non-empty Indexes can be deployed. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeployIndexRequest`): + The request object. Request message for + [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. + index_endpoint (:class:`str`): + Required. The name of the IndexEndpoint resource into + which to deploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_index (:class:`google.cloud.aiplatform_v1beta1.types.DeployedIndex`): + Required. The DeployedIndex to be + created within the IndexEndpoint. + + This corresponds to the ``deployed_index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.DeployIndexResponse` + Response message for + [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
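+        # A rough sketch (hypothetical values): deployment continues
+        # server-side after this call returns, so a caller would generally run
+        #
+        #     op = await client.deploy_index(index_endpoint=..., deployed_index=...)
+        #     deploy_response = await op.result()
+        #
+        # to block until the DeployedIndex is ready.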
+ has_flattened_params = any([index_endpoint, deployed_index]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_endpoint_service.DeployIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if deployed_index is not None: + request.deployed_index = deployed_index + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.deploy_index, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("index_endpoint", request.index_endpoint), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + index_endpoint_service.DeployIndexResponse, + metadata_type=index_endpoint_service.DeployIndexOperationMetadata, + ) + + # Done; return the response. + return response + + async def undeploy_index(self, + request: index_endpoint_service.UndeployIndexRequest = None, + *, + index_endpoint: str = None, + deployed_index_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Undeploys an Index from an IndexEndpoint, removing a + DeployedIndex from it, and freeing all resources it's + using. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UndeployIndexRequest`): + The request object. Request message for + [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. + index_endpoint (:class:`str`): + Required. The name of the IndexEndpoint resource from + which to undeploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_index_id (:class:`str`): + Required. The ID of the DeployedIndex + to be undeployed from the IndexEndpoint. + + This corresponds to the ``deployed_index_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.UndeployIndexResponse` + Response message for + [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([index_endpoint, deployed_index_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_endpoint_service.UndeployIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if deployed_index_id is not None: + request.deployed_index_id = deployed_index_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.undeploy_index, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("index_endpoint", request.index_endpoint), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + index_endpoint_service.UndeployIndexResponse, + metadata_type=index_endpoint_service.UndeployIndexOperationMetadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "IndexEndpointServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py new file mode 100644 index 0000000000..c3c79243f2 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py @@ -0,0 +1,1013 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
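For orientation, here is a minimal sketch of driving the async client defined above. It is illustrative only and not part of this patch; the project, location, and resource IDs are placeholders.

import asyncio

from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import (
    IndexEndpointServiceAsyncClient,
)
from google.cloud.aiplatform_v1beta1.types import DeployedIndex


async def main():
    client = IndexEndpointServiceAsyncClient()

    # Deploy a (non-empty) Index into an existing IndexEndpoint.
    operation = await client.deploy_index(
        index_endpoint=(
            "projects/my-project/locations/us-central1/indexEndpoints/456"
        ),
        deployed_index=DeployedIndex(
            id="my_deployed_index",  # placeholder deployed-index ID
            index="projects/my-project/locations/us-central1/indexes/123",
        ),
    )

    # Block until the long-running operation resolves to a
    # DeployIndexResponse.
    response = await operation.result()
    print(response)


asyncio.run(main())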
+#
+from collections import OrderedDict
+from distutils import util
+import os
+import re
+from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
+import pkg_resources
+
+from google.api_core import client_options as client_options_lib # type: ignore
+from google.api_core import exceptions as core_exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
+from google.auth.transport import mtls # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+from google.auth.exceptions import MutualTLSChannelError # type: ignore
+from google.oauth2 import service_account # type: ignore
+
+from google.api_core import operation as gac_operation # type: ignore
+from google.api_core import operation_async # type: ignore
+from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import pagers
+from google.cloud.aiplatform_v1beta1.types import index_endpoint
+from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint
+from google.cloud.aiplatform_v1beta1.types import index_endpoint_service
+from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
+from google.protobuf import empty_pb2 # type: ignore
+from google.protobuf import field_mask_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
+from .transports.base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc import IndexEndpointServiceGrpcTransport
+from .transports.grpc_asyncio import IndexEndpointServiceGrpcAsyncIOTransport
+
+
+class IndexEndpointServiceClientMeta(type):
+ """Metaclass for the IndexEndpointService client.
+
+ This provides class-level methods for building and retrieving
+ support objects (e.g. transport) without polluting the client instance
+ objects.
+ """
+ _transport_registry = OrderedDict() # type: Dict[str, Type[IndexEndpointServiceTransport]]
+ _transport_registry["grpc"] = IndexEndpointServiceGrpcTransport
+ _transport_registry["grpc_asyncio"] = IndexEndpointServiceGrpcAsyncIOTransport
+
+ def get_transport_class(cls,
+ label: str = None,
+ ) -> Type[IndexEndpointServiceTransport]:
+ """Returns an appropriate transport class.
+
+ Args:
+ label: The name of the desired transport. If none is
+ provided, then the first transport in the registry is used.
+
+ Returns:
+ The transport class to use.
+ """
+ # If a specific transport is requested, return that one.
+ if label:
+ return cls._transport_registry[label]
+
+ # No transport is requested; return the default (that is, the first one
+ # in the dictionary).
+ return next(iter(cls._transport_registry.values()))
+
+
+class IndexEndpointServiceClient(metaclass=IndexEndpointServiceClientMeta):
+ """A service for managing Vertex AI's IndexEndpoints."""
+
+ @staticmethod
+ def _get_default_mtls_endpoint(api_endpoint):
+ """Converts api endpoint to mTLS endpoint.
+
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+ Args:
+ api_endpoint (Optional[str]): the api endpoint to convert.
+ Returns:
+ str: converted mTLS api endpoint.
+ """
+ if not api_endpoint:
+ return api_endpoint
+
+ mtls_endpoint_re = re.compile(
+ r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + IndexEndpointServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + IndexEndpointServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> IndexEndpointServiceTransport: + """Returns the transport used by the client instance. + + Returns: + IndexEndpointServiceTransport: The transport used by the client + instance. 
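+
+ Unless a specific transport was requested at construction time,
+ this is typically the first registered transport,
+ ``IndexEndpointServiceGrpcTransport``.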
+ """ + return self._transport + + @staticmethod + def index_path(project: str,location: str,index: str,) -> str: + """Returns a fully-qualified index string.""" + return "projects/{project}/locations/{location}/indexes/{index}".format(project=project, location=location, index=index, ) + + @staticmethod + def parse_index_path(path: str) -> Dict[str,str]: + """Parses a index path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/indexes/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def index_endpoint_path(project: str,location: str,index_endpoint: str,) -> str: + """Returns a fully-qualified index_endpoint string.""" + return "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format(project=project, location=location, index_endpoint=index_endpoint, ) + + @staticmethod + def parse_index_endpoint_path(path: str) -> Dict[str,str]: + """Parses a index_endpoint path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/indexEndpoints/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, IndexEndpointServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, 
+ ) -> None: + """Instantiates the index endpoint service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, IndexEndpointServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, IndexEndpointServiceTransport): + # transport is a IndexEndpointServiceTransport instance. 
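+ # A pre-built transport instance already carries its own credentials
+ # and scopes, so supplying them separately here would be ambiguous;
+ # reject those combinations below.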
+ if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_index_endpoint(self, + request: index_endpoint_service.CreateIndexEndpointRequest = None, + *, + parent: str = None, + index_endpoint: gca_index_endpoint.IndexEndpoint = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates an IndexEndpoint. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateIndexEndpointRequest): + The request object. Request message for + [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint]. + parent (str): + Required. The resource name of the Location to create + the IndexEndpoint in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + index_endpoint (google.cloud.aiplatform_v1beta1.types.IndexEndpoint): + Required. The IndexEndpoint to + create. + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.IndexEndpoint` Indexes are deployed into it. An IndexEndpoint can have multiple + DeployedIndexes. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, index_endpoint]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.CreateIndexEndpointRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.CreateIndexEndpointRequest): + request = index_endpoint_service.CreateIndexEndpointRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if index_endpoint is not None: + request.index_endpoint = index_endpoint + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
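+ # Unlike the async client, which wraps each method at call time, the
+ # sync client looks the wrapper up from `_wrapped_methods`, which the
+ # transport's `_prep_wrapped_messages` precomputes with each method's
+ # default timeout and the shared client info.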
+ rpc = self._transport._wrapped_methods[self._transport.create_index_endpoint] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_index_endpoint.IndexEndpoint, + metadata_type=index_endpoint_service.CreateIndexEndpointOperationMetadata, + ) + + # Done; return the response. + return response + + def get_index_endpoint(self, + request: index_endpoint_service.GetIndexEndpointRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> index_endpoint.IndexEndpoint: + r"""Gets an IndexEndpoint. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetIndexEndpointRequest): + The request object. Request message for + [IndexEndpointService.GetIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.GetIndexEndpoint] + name (str): + Required. The name of the IndexEndpoint resource. + Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.IndexEndpoint: + Indexes are deployed into it. An + IndexEndpoint can have multiple + DeployedIndexes. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.GetIndexEndpointRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.GetIndexEndpointRequest): + request = index_endpoint_service.GetIndexEndpointRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_index_endpoint] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_index_endpoints(self, + request: index_endpoint_service.ListIndexEndpointsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListIndexEndpointsPager: + r"""Lists IndexEndpoints in a Location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest): + The request object. Request message for + [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]. + parent (str): + Required. The resource name of the Location from which + to list the IndexEndpoints. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.index_endpoint_service.pagers.ListIndexEndpointsPager: + Response message for + [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.ListIndexEndpointsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.ListIndexEndpointsRequest): + request = index_endpoint_service.ListIndexEndpointsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_index_endpoints] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListIndexEndpointsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
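+ # Illustrative iteration (the parent below is a placeholder); the
+ # pager fetches further pages lazily as iteration crosses a page
+ # boundary:
+ #
+ # for ie in client.list_index_endpoints(
+ # parent="projects/my-project/locations/us-central1"):
+ # print(ie.name)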
+ return response + + def update_index_endpoint(self, + request: index_endpoint_service.UpdateIndexEndpointRequest = None, + *, + index_endpoint: gca_index_endpoint.IndexEndpoint = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_index_endpoint.IndexEndpoint: + r"""Updates an IndexEndpoint. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UpdateIndexEndpointRequest): + The request object. Request message for + [IndexEndpointService.UpdateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.UpdateIndexEndpoint]. + index_endpoint (google.cloud.aiplatform_v1beta1.types.IndexEndpoint): + Required. The IndexEndpoint which + replaces the resource on the server. + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the resource. See + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.IndexEndpoint: + Indexes are deployed into it. An + IndexEndpoint can have multiple + DeployedIndexes. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.UpdateIndexEndpointRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.UpdateIndexEndpointRequest): + request = index_endpoint_service.UpdateIndexEndpointRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_index_endpoint] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("index_endpoint.name", request.index_endpoint.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
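+ # Caller-side note (illustrative): pass a FieldMask such as
+ # field_mask_pb2.FieldMask(paths=["display_name"]) to limit which
+ # fields of the IndexEndpoint the server overwrites.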
+ return response + + def delete_index_endpoint(self, + request: index_endpoint_service.DeleteIndexEndpointRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes an IndexEndpoint. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteIndexEndpointRequest): + The request object. Request message for + [IndexEndpointService.DeleteIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeleteIndexEndpoint]. + name (str): + Required. The name of the IndexEndpoint resource to be + deleted. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.DeleteIndexEndpointRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.DeleteIndexEndpointRequest): + request = index_endpoint_service.DeleteIndexEndpointRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_index_endpoint] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
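+ # Callers can block on completion with `response.result()`, which
+ # returns the Empty message once the deletion has finished.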
+ return response + + def deploy_index(self, + request: index_endpoint_service.DeployIndexRequest = None, + *, + index_endpoint: str = None, + deployed_index: gca_index_endpoint.DeployedIndex = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deploys an Index into this IndexEndpoint, creating a + DeployedIndex within it. + Only non-empty Indexes can be deployed. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeployIndexRequest): + The request object. Request message for + [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. + index_endpoint (str): + Required. The name of the IndexEndpoint resource into + which to deploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex): + Required. The DeployedIndex to be + created within the IndexEndpoint. + + This corresponds to the ``deployed_index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.DeployIndexResponse` + Response message for + [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, deployed_index]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.DeployIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.DeployIndexRequest): + request = index_endpoint_service.DeployIndexRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if deployed_index is not None: + request.deployed_index = deployed_index + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.deploy_index] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("index_endpoint", request.index_endpoint), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
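+ # `from_gapic` converts the raw longrunning Operation into a future
+ # whose `result()` yields a DeployIndexResponse and whose metadata
+ # deserializes as DeployIndexOperationMetadata.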
+ response = gac_operation.from_gapic( + response, + self._transport.operations_client, + index_endpoint_service.DeployIndexResponse, + metadata_type=index_endpoint_service.DeployIndexOperationMetadata, + ) + + # Done; return the response. + return response + + def undeploy_index(self, + request: index_endpoint_service.UndeployIndexRequest = None, + *, + index_endpoint: str = None, + deployed_index_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Undeploys an Index from an IndexEndpoint, removing a + DeployedIndex from it, and freeing all resources it's + using. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UndeployIndexRequest): + The request object. Request message for + [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. + index_endpoint (str): + Required. The name of the IndexEndpoint resource from + which to undeploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_index_id (str): + Required. The ID of the DeployedIndex + to be undeployed from the IndexEndpoint. + + This corresponds to the ``deployed_index_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.UndeployIndexResponse` + Response message for + [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, deployed_index_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.UndeployIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.UndeployIndexRequest): + request = index_endpoint_service.UndeployIndexRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if deployed_index_id is not None: + request.deployed_index_id = deployed_index_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.undeploy_index] + + # Certain fields should be provided within the metadata header; + # add these here. 
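+ # The routing header (`x-goog-request-params`) lets the backend route
+ # the call on the resource name without inspecting the payload.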
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("index_endpoint", request.index_endpoint), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + index_endpoint_service.UndeployIndexResponse, + metadata_type=index_endpoint_service.UndeployIndexOperationMetadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "IndexEndpointServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/pagers.py new file mode 100644 index 0000000000..203d843765 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/pagers.py @@ -0,0 +1,141 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.cloud.aiplatform_v1beta1.types import index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint_service + + +class ListIndexEndpointsPager: + """A pager for iterating through ``list_index_endpoints`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``index_endpoints`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListIndexEndpoints`` requests and continue to iterate + through the ``index_endpoints`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., index_endpoint_service.ListIndexEndpointsResponse], + request: index_endpoint_service.ListIndexEndpointsRequest, + response: index_endpoint_service.ListIndexEndpointsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse): + The initial response object. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = index_endpoint_service.ListIndexEndpointsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[index_endpoint_service.ListIndexEndpointsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[index_endpoint.IndexEndpoint]: + for page in self.pages: + yield from page.index_endpoints + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListIndexEndpointsAsyncPager: + """A pager for iterating through ``list_index_endpoints`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``index_endpoints`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListIndexEndpoints`` requests and continue to iterate + through the ``index_endpoints`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[index_endpoint_service.ListIndexEndpointsResponse]], + request: index_endpoint_service.ListIndexEndpointsRequest, + response: index_endpoint_service.ListIndexEndpointsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = index_endpoint_service.ListIndexEndpointsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[index_endpoint_service.ListIndexEndpointsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[index_endpoint.IndexEndpoint]: + async def async_generator(): + async for page in self.pages: + for response in page.index_endpoints: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/__init__.py new file mode 100644 index 0000000000..42d3519efd --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import IndexEndpointServiceTransport +from .grpc import IndexEndpointServiceGrpcTransport +from .grpc_asyncio import IndexEndpointServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[IndexEndpointServiceTransport]] +_transport_registry['grpc'] = IndexEndpointServiceGrpcTransport +_transport_registry['grpc_asyncio'] = IndexEndpointServiceGrpcAsyncIOTransport + +__all__ = ( + 'IndexEndpointServiceTransport', + 'IndexEndpointServiceGrpcTransport', + 'IndexEndpointServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py new file mode 100644 index 0000000000..53f8a3e391 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py @@ -0,0 +1,261 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint_service +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class IndexEndpointServiceTransport(abc.ABC): + """Abstract transport class for IndexEndpointService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + + # Save the scopes. 
+ self._scopes = scopes
+
+ # If no credentials are provided, then determine the appropriate
+ # defaults.
+ if credentials and credentials_file:
+ raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
+
+ if credentials_file is not None:
+ credentials, _ = google.auth.load_credentials_from_file(
+ credentials_file,
+ **scopes_kwargs,
+ quota_project_id=quota_project_id
+ )
+
+ elif credentials is None:
+ credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
+
+ # If the credentials are service account credentials, then always try to use self-signed JWT.
+ if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
+ credentials = credentials.with_always_use_jwt_access(True)
+
+ # Save the credentials.
+ self._credentials = credentials
+
+ # TODO(busunkim): This method is in the base transport
+ # to avoid duplicating code across the transport classes. These functions
+ # should be deleted once the minimum required version of google-auth is increased.
+
+ # TODO: Remove this function once google-auth >= 1.25.0 is required
+ @classmethod
+ def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
+ """Returns scopes kwargs to pass to google-auth methods, depending on the google-auth version."""
+
+ scopes_kwargs = {}
+
+ if _GOOGLE_AUTH_VERSION and (
+ packaging.version.parse(_GOOGLE_AUTH_VERSION)
+ >= packaging.version.parse("1.25.0")
+ ):
+ scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
+ else:
+ scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
+
+ return scopes_kwargs
+
+ def _prep_wrapped_messages(self, client_info):
+ # Precompute the wrapped methods.
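+ # Wrapping once at construction keeps per-call overhead to a dict
+ # lookup; each entry binds the bound method to its default timeout
+ # and the shared client_info.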
+ self._wrapped_methods = { + self.create_index_endpoint: gapic_v1.method.wrap_method( + self.create_index_endpoint, + default_timeout=5.0, + client_info=client_info, + ), + self.get_index_endpoint: gapic_v1.method.wrap_method( + self.get_index_endpoint, + default_timeout=5.0, + client_info=client_info, + ), + self.list_index_endpoints: gapic_v1.method.wrap_method( + self.list_index_endpoints, + default_timeout=5.0, + client_info=client_info, + ), + self.update_index_endpoint: gapic_v1.method.wrap_method( + self.update_index_endpoint, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_index_endpoint: gapic_v1.method.wrap_method( + self.delete_index_endpoint, + default_timeout=5.0, + client_info=client_info, + ), + self.deploy_index: gapic_v1.method.wrap_method( + self.deploy_index, + default_timeout=5.0, + client_info=client_info, + ), + self.undeploy_index: gapic_v1.method.wrap_method( + self.undeploy_index, + default_timeout=5.0, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_index_endpoint(self) -> Callable[ + [index_endpoint_service.CreateIndexEndpointRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_index_endpoint(self) -> Callable[ + [index_endpoint_service.GetIndexEndpointRequest], + Union[ + index_endpoint.IndexEndpoint, + Awaitable[index_endpoint.IndexEndpoint] + ]]: + raise NotImplementedError() + + @property + def list_index_endpoints(self) -> Callable[ + [index_endpoint_service.ListIndexEndpointsRequest], + Union[ + index_endpoint_service.ListIndexEndpointsResponse, + Awaitable[index_endpoint_service.ListIndexEndpointsResponse] + ]]: + raise NotImplementedError() + + @property + def update_index_endpoint(self) -> Callable[ + [index_endpoint_service.UpdateIndexEndpointRequest], + Union[ + gca_index_endpoint.IndexEndpoint, + Awaitable[gca_index_endpoint.IndexEndpoint] + ]]: + raise NotImplementedError() + + @property + def delete_index_endpoint(self) -> Callable[ + [index_endpoint_service.DeleteIndexEndpointRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def deploy_index(self) -> Callable[ + [index_endpoint_service.DeployIndexRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def undeploy_index(self) -> Callable[ + [index_endpoint_service.UndeployIndexRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'IndexEndpointServiceTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py new file mode 100644 index 0000000000..714fe8073c --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py @@ -0,0 +1,433 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import warnings
+from typing import Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import grpc_helpers   # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google.api_core import gapic_v1       # type: ignore
+import google.auth                         # type: ignore
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+
+import grpc  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import index_endpoint
+from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint
+from google.cloud.aiplatform_v1beta1.types import index_endpoint_service
+from google.longrunning import operations_pb2  # type: ignore
+from .base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO
+
+
+class IndexEndpointServiceGrpcTransport(IndexEndpointServiceTransport):
+    """gRPC backend transport for IndexEndpointService.
+
+    A service for managing Vertex AI's IndexEndpoints.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+    _stubs: Dict[str, Callable]
+
+    def __init__(self, *,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: str = None,
+            scopes: Sequence[str] = None,
+            channel: grpc.Channel = None,
+            api_mtls_endpoint: str = None,
+            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+            ssl_channel_credentials: grpc.ChannelCredentials = None,
+            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+            quota_project_id: Optional[str] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated.
A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. 
This must be done after self._grpc_channel exists
+        self._prep_wrapped_messages(client_info)
+
+    @classmethod
+    def create_channel(cls,
+                       host: str = 'aiplatform.googleapis.com',
+                       credentials: ga_credentials.Credentials = None,
+                       credentials_file: str = None,
+                       scopes: Optional[Sequence[str]] = None,
+                       quota_project_id: Optional[str] = None,
+                       **kwargs) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service.
+        """
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Sanity check: Only create a new client if we do not already have one.
+        if self._operations_client is None:
+            self._operations_client = operations_v1.OperationsClient(
+                self.grpc_channel
+            )
+
+        # Return the client from cache.
+        return self._operations_client
+
+    @property
+    def create_index_endpoint(self) -> Callable[
+            [index_endpoint_service.CreateIndexEndpointRequest],
+            operations_pb2.Operation]:
+        r"""Return a callable for the create index endpoint method over gRPC.
+
+        Creates an IndexEndpoint.
+
+        Returns:
+            Callable[[~.CreateIndexEndpointRequest],
+                    ~.Operation]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
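+        # Stubs are memoized in self._stubs, keyed by method name, so repeated
+        # property accesses reuse the same callable on this channel.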
+ if 'create_index_endpoint' not in self._stubs: + self._stubs['create_index_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/CreateIndexEndpoint', + request_serializer=index_endpoint_service.CreateIndexEndpointRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_index_endpoint'] + + @property + def get_index_endpoint(self) -> Callable[ + [index_endpoint_service.GetIndexEndpointRequest], + index_endpoint.IndexEndpoint]: + r"""Return a callable for the get index endpoint method over gRPC. + + Gets an IndexEndpoint. + + Returns: + Callable[[~.GetIndexEndpointRequest], + ~.IndexEndpoint]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_index_endpoint' not in self._stubs: + self._stubs['get_index_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/GetIndexEndpoint', + request_serializer=index_endpoint_service.GetIndexEndpointRequest.serialize, + response_deserializer=index_endpoint.IndexEndpoint.deserialize, + ) + return self._stubs['get_index_endpoint'] + + @property + def list_index_endpoints(self) -> Callable[ + [index_endpoint_service.ListIndexEndpointsRequest], + index_endpoint_service.ListIndexEndpointsResponse]: + r"""Return a callable for the list index endpoints method over gRPC. + + Lists IndexEndpoints in a Location. + + Returns: + Callable[[~.ListIndexEndpointsRequest], + ~.ListIndexEndpointsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_index_endpoints' not in self._stubs: + self._stubs['list_index_endpoints'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/ListIndexEndpoints', + request_serializer=index_endpoint_service.ListIndexEndpointsRequest.serialize, + response_deserializer=index_endpoint_service.ListIndexEndpointsResponse.deserialize, + ) + return self._stubs['list_index_endpoints'] + + @property + def update_index_endpoint(self) -> Callable[ + [index_endpoint_service.UpdateIndexEndpointRequest], + gca_index_endpoint.IndexEndpoint]: + r"""Return a callable for the update index endpoint method over gRPC. + + Updates an IndexEndpoint. + + Returns: + Callable[[~.UpdateIndexEndpointRequest], + ~.IndexEndpoint]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'update_index_endpoint' not in self._stubs: + self._stubs['update_index_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/UpdateIndexEndpoint', + request_serializer=index_endpoint_service.UpdateIndexEndpointRequest.serialize, + response_deserializer=gca_index_endpoint.IndexEndpoint.deserialize, + ) + return self._stubs['update_index_endpoint'] + + @property + def delete_index_endpoint(self) -> Callable[ + [index_endpoint_service.DeleteIndexEndpointRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete index endpoint method over gRPC. + + Deletes an IndexEndpoint. + + Returns: + Callable[[~.DeleteIndexEndpointRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_index_endpoint' not in self._stubs: + self._stubs['delete_index_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/DeleteIndexEndpoint', + request_serializer=index_endpoint_service.DeleteIndexEndpointRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_index_endpoint'] + + @property + def deploy_index(self) -> Callable[ + [index_endpoint_service.DeployIndexRequest], + operations_pb2.Operation]: + r"""Return a callable for the deploy index method over gRPC. + + Deploys an Index into this IndexEndpoint, creating a + DeployedIndex within it. + Only non-empty Indexes can be deployed. + + Returns: + Callable[[~.DeployIndexRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'deploy_index' not in self._stubs: + self._stubs['deploy_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/DeployIndex', + request_serializer=index_endpoint_service.DeployIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['deploy_index'] + + @property + def undeploy_index(self) -> Callable[ + [index_endpoint_service.UndeployIndexRequest], + operations_pb2.Operation]: + r"""Return a callable for the undeploy index method over gRPC. + + Undeploys an Index from an IndexEndpoint, removing a + DeployedIndex from it, and freeing all resources it's + using. + + Returns: + Callable[[~.UndeployIndexRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
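+        # As with the other LRO-returning methods, the response is a raw
+        # google.longrunning Operation parsed with Operation.FromString;
+        # resource responses use the proto-plus ``deserialize`` instead.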
+ if 'undeploy_index' not in self._stubs: + self._stubs['undeploy_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/UndeployIndex', + request_serializer=index_endpoint_service.UndeployIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['undeploy_index'] + + +__all__ = ( + 'IndexEndpointServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..ba6a67a554 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py @@ -0,0 +1,437 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +import packaging.version + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint_service +from google.longrunning import operations_pb2 # type: ignore +from .base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import IndexEndpointServiceGrpcTransport + + +class IndexEndpointServiceGrpcAsyncIOTransport(IndexEndpointServiceTransport): + """gRPC AsyncIO backend transport for IndexEndpointService. + + A service for managing Vertex AI's IndexEndpoints. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    def __init__(self, *,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            channel: aio.Channel = None,
+            api_mtls_endpoint: str = None,
+            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+            ssl_channel_credentials: grpc.ChannelCredentials = None,
+            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+            quota_project_id=None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        self._grpc_channel = None
+        self._ssl_channel_credentials = ssl_channel_credentials
+        self._stubs: Dict[str, Callable] = {}
+        self._operations_client = None
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Ignore credentials if a channel was passed.
+            credentials = False
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+        else:
+            if api_mtls_endpoint:
+                host = api_mtls_endpoint
+
+                # Create SSL credentials with client_cert_source or application
+                # default SSL credentials.
+                if client_cert_source:
+                    cert, key = client_cert_source()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+                else:
+                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+            else:
+                if client_cert_source_for_mtls and not ssl_channel_credentials:
+                    cert, key = client_cert_source_for_mtls()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+
+        # The base transport sets the host, credentials and scopes
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+        )
+
+        if not self._grpc_channel:
+            self._grpc_channel = type(self).create_channel(
+                self._host,
+                credentials=self._credentials,
+                credentials_file=credentials_file,
+                scopes=self._scopes,
+                ssl_credentials=self._ssl_channel_credentials,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        # Wrap messages. This must be done after self._grpc_channel exists
+        self._prep_wrapped_messages(client_info)
+
+    @property
+    def grpc_channel(self) -> aio.Channel:
+        """Create the channel designed to connect to this service.
+
+        This property caches on the instance; repeated calls return
+        the same channel.
+        """
+        # Return the channel from cache.
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsAsyncClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Sanity check: Only create a new client if we do not already have one.
+        if self._operations_client is None:
+            self._operations_client = operations_v1.OperationsAsyncClient(
+                self.grpc_channel
+            )
+
+        # Return the client from cache.
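+        # The operations client is built on this transport's own channel, so
+        # LRO polling reuses the same connection as the primary RPCs.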
+ return self._operations_client + + @property + def create_index_endpoint(self) -> Callable[ + [index_endpoint_service.CreateIndexEndpointRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create index endpoint method over gRPC. + + Creates an IndexEndpoint. + + Returns: + Callable[[~.CreateIndexEndpointRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_index_endpoint' not in self._stubs: + self._stubs['create_index_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/CreateIndexEndpoint', + request_serializer=index_endpoint_service.CreateIndexEndpointRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_index_endpoint'] + + @property + def get_index_endpoint(self) -> Callable[ + [index_endpoint_service.GetIndexEndpointRequest], + Awaitable[index_endpoint.IndexEndpoint]]: + r"""Return a callable for the get index endpoint method over gRPC. + + Gets an IndexEndpoint. + + Returns: + Callable[[~.GetIndexEndpointRequest], + Awaitable[~.IndexEndpoint]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_index_endpoint' not in self._stubs: + self._stubs['get_index_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/GetIndexEndpoint', + request_serializer=index_endpoint_service.GetIndexEndpointRequest.serialize, + response_deserializer=index_endpoint.IndexEndpoint.deserialize, + ) + return self._stubs['get_index_endpoint'] + + @property + def list_index_endpoints(self) -> Callable[ + [index_endpoint_service.ListIndexEndpointsRequest], + Awaitable[index_endpoint_service.ListIndexEndpointsResponse]]: + r"""Return a callable for the list index endpoints method over gRPC. + + Lists IndexEndpoints in a Location. + + Returns: + Callable[[~.ListIndexEndpointsRequest], + Awaitable[~.ListIndexEndpointsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_index_endpoints' not in self._stubs: + self._stubs['list_index_endpoints'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/ListIndexEndpoints', + request_serializer=index_endpoint_service.ListIndexEndpointsRequest.serialize, + response_deserializer=index_endpoint_service.ListIndexEndpointsResponse.deserialize, + ) + return self._stubs['list_index_endpoints'] + + @property + def update_index_endpoint(self) -> Callable[ + [index_endpoint_service.UpdateIndexEndpointRequest], + Awaitable[gca_index_endpoint.IndexEndpoint]]: + r"""Return a callable for the update index endpoint method over gRPC. + + Updates an IndexEndpoint. + + Returns: + Callable[[~.UpdateIndexEndpointRequest], + Awaitable[~.IndexEndpoint]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_index_endpoint' not in self._stubs: + self._stubs['update_index_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/UpdateIndexEndpoint', + request_serializer=index_endpoint_service.UpdateIndexEndpointRequest.serialize, + response_deserializer=gca_index_endpoint.IndexEndpoint.deserialize, + ) + return self._stubs['update_index_endpoint'] + + @property + def delete_index_endpoint(self) -> Callable[ + [index_endpoint_service.DeleteIndexEndpointRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete index endpoint method over gRPC. + + Deletes an IndexEndpoint. + + Returns: + Callable[[~.DeleteIndexEndpointRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_index_endpoint' not in self._stubs: + self._stubs['delete_index_endpoint'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/DeleteIndexEndpoint', + request_serializer=index_endpoint_service.DeleteIndexEndpointRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_index_endpoint'] + + @property + def deploy_index(self) -> Callable[ + [index_endpoint_service.DeployIndexRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the deploy index method over gRPC. + + Deploys an Index into this IndexEndpoint, creating a + DeployedIndex within it. + Only non-empty Indexes can be deployed. + + Returns: + Callable[[~.DeployIndexRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'deploy_index' not in self._stubs: + self._stubs['deploy_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/DeployIndex', + request_serializer=index_endpoint_service.DeployIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['deploy_index'] + + @property + def undeploy_index(self) -> Callable[ + [index_endpoint_service.UndeployIndexRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the undeploy index method over gRPC. + + Undeploys an Index from an IndexEndpoint, removing a + DeployedIndex from it, and freeing all resources it's + using. + + Returns: + Callable[[~.UndeployIndexRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'undeploy_index' not in self._stubs: + self._stubs['undeploy_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexEndpointService/UndeployIndex', + request_serializer=index_endpoint_service.UndeployIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['undeploy_index'] + + +__all__ = ( + 'IndexEndpointServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/__init__.py new file mode 100644 index 0000000000..d2a09db9f1 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import IndexServiceClient +from .async_client import IndexServiceAsyncClient + +__all__ = ( + 'IndexServiceClient', + 'IndexServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py new file mode 100644 index 0000000000..eabfafcc6e --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py @@ -0,0 +1,633 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.index_service import pagers +from google.cloud.aiplatform_v1beta1.types import deployed_index_ref +from google.cloud.aiplatform_v1beta1.types import index +from google.cloud.aiplatform_v1beta1.types import index as gca_index +from google.cloud.aiplatform_v1beta1.types import index_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import IndexServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import IndexServiceGrpcAsyncIOTransport +from .client import IndexServiceClient + + +class IndexServiceAsyncClient: + """A service for creating and managing Vertex AI's Index + resources. + """ + + _client: IndexServiceClient + + DEFAULT_ENDPOINT = IndexServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = IndexServiceClient.DEFAULT_MTLS_ENDPOINT + + index_path = staticmethod(IndexServiceClient.index_path) + parse_index_path = staticmethod(IndexServiceClient.parse_index_path) + index_endpoint_path = staticmethod(IndexServiceClient.index_endpoint_path) + parse_index_endpoint_path = staticmethod(IndexServiceClient.parse_index_endpoint_path) + common_billing_account_path = staticmethod(IndexServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(IndexServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(IndexServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(IndexServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(IndexServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(IndexServiceClient.parse_common_organization_path) + common_project_path = staticmethod(IndexServiceClient.common_project_path) + parse_common_project_path = staticmethod(IndexServiceClient.parse_common_project_path) + common_location_path = staticmethod(IndexServiceClient.common_location_path) + parse_common_location_path = staticmethod(IndexServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + IndexServiceAsyncClient: The constructed client. 
+        """
+        return IndexServiceClient.from_service_account_info.__func__(IndexServiceAsyncClient, info, *args, **kwargs)  # type: ignore
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            IndexServiceAsyncClient: The constructed client.
+        """
+        return IndexServiceClient.from_service_account_file.__func__(IndexServiceAsyncClient, filename, *args, **kwargs)  # type: ignore
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> IndexServiceTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            IndexServiceTransport: The transport used by the client instance.
+        """
+        return self._client.transport
+
+    get_transport_class = functools.partial(type(IndexServiceClient).get_transport_class, type(IndexServiceClient))
+
+    def __init__(self, *,
+            credentials: ga_credentials.Credentials = None,
+            transport: Union[str, IndexServiceTransport] = "grpc_asyncio",
+            client_options: ClientOptions = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the index service client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, ~.IndexServiceTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (ClientOptions): Custom options for the client. It
+                won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. The
+                GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be
+                used to override the endpoint: "always" (always use the default
+                mTLS endpoint), "never" (always use the default regular
+                endpoint) and "auto" (auto switch to the default mTLS endpoint
+                if client certificate is present, this is the default value).
+                However, the ``api_endpoint`` property takes precedence if
+                provided.
+                (2) If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment
+                variable is "true", then the ``client_cert_source`` property
+                can be used to provide a client certificate for mutual TLS
+                transport. If not provided, the default SSL client certificate
+                will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE
+                is "false" or not set, no client certificate will be used.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+        """
+        self._client = IndexServiceClient(
+            credentials=credentials,
+            transport=transport,
+            client_options=client_options,
+            client_info=client_info,
+        )
+
+    async def create_index(self,
+            request: index_service.CreateIndexRequest = None,
+            *,
+            parent: str = None,
+            index: gca_index.Index = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> operation_async.AsyncOperation:
+        r"""Creates an Index.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.CreateIndexRequest`):
+                The request object.
Request message for + [IndexService.CreateIndex][google.cloud.aiplatform.v1beta1.IndexService.CreateIndex]. + parent (:class:`str`): + Required. The resource name of the Location to create + the Index in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + index (:class:`google.cloud.aiplatform_v1beta1.types.Index`): + Required. The Index to create. + This corresponds to the ``index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Index` A representation of a collection of database items organized in a way that + allows for approximate nearest neighbor (a.k.a ANN) + algorithms search. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, index]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_service.CreateIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if index is not None: + request.index = index + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_index, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_index.Index, + metadata_type=index_service.CreateIndexOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_index(self, + request: index_service.GetIndexRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> index.Index: + r"""Gets an Index. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetIndexRequest`): + The request object. Request message for + [IndexService.GetIndex][google.cloud.aiplatform.v1beta1.IndexService.GetIndex] + name (:class:`str`): + Required. The name of the Index resource. Format: + ``projects/{project}/locations/{location}/indexes/{index}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Index: + A representation of a collection of + database items organized in a way that + allows for approximate nearest neighbor + (a.k.a ANN) algorithms search. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_service.GetIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_index, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_indexes(self, + request: index_service.ListIndexesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListIndexesAsyncPager: + r"""Lists Indexes in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListIndexesRequest`): + The request object. Request message for + [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]. + parent (:class:`str`): + Required. The resource name of the Location from which + to list the Indexes. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.index_service.pagers.ListIndexesAsyncPager: + Response message for + [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
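+        # Illustrative usage (resource names are placeholders): pass either
+        # flattened fields or a request object, never both:
+        #   await client.list_indexes(parent="projects/my-project/locations/us-central1")
+        #   await client.list_indexes(request=index_service.ListIndexesRequest(parent=parent))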
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_service.ListIndexesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_indexes, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListIndexesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_index(self, + request: index_service.UpdateIndexRequest = None, + *, + index: gca_index.Index = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates an Index. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateIndexRequest`): + The request object. Request message for + [IndexService.UpdateIndex][google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex]. + index (:class:`google.cloud.aiplatform_v1beta1.types.Index`): + Required. The Index which updates the + resource on the server. + + This corresponds to the ``index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + The update mask applies to the resource. For the + ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Index` A representation of a collection of database items organized in a way that + allows for approximate nearest neighbor (a.k.a ANN) + algorithms search. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
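+        # The update_mask narrows which fields are overwritten; for example
+        # (illustrative) field_mask_pb2.FieldMask(paths=["display_name"])
+        # limits the update to the Index's display name.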
+ has_flattened_params = any([index, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_service.UpdateIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index is not None: + request.index = index + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_index, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("index.name", request.index.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_index.Index, + metadata_type=index_service.UpdateIndexOperationMetadata, + ) + + # Done; return the response. + return response + + async def delete_index(self, + request: index_service.DeleteIndexRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes an Index. An Index can only be deleted when all its + [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes] + had been undeployed. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteIndexRequest`): + The request object. Request message for + [IndexService.DeleteIndex][google.cloud.aiplatform.v1beta1.IndexService.DeleteIndex]. + name (:class:`str`): + Required. The name of the Index resource to be deleted. + Format: + ``projects/{project}/locations/{location}/indexes/{index}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = index_service.DeleteIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_index, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "IndexServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/client.py new file mode 100644 index 0000000000..9e2e612c81 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/client.py @@ -0,0 +1,829 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+from collections import OrderedDict
+from distutils import util
+import os
+import re
+from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
+import pkg_resources
+
+from google.api_core import client_options as client_options_lib  # type: ignore
+from google.api_core import exceptions as core_exceptions  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import retry as retries  # type: ignore
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.transport import mtls  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+from google.auth.exceptions import MutualTLSChannelError  # type: ignore
+from google.oauth2 import service_account  # type: ignore
+
+from google.api_core import operation as gac_operation  # type: ignore
+from google.api_core import operation_async  # type: ignore
+from google.cloud.aiplatform_v1beta1.services.index_service import pagers
+from google.cloud.aiplatform_v1beta1.types import deployed_index_ref
+from google.cloud.aiplatform_v1beta1.types import index
+from google.cloud.aiplatform_v1beta1.types import index as gca_index
+from google.cloud.aiplatform_v1beta1.types import index_service
+from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
+from google.protobuf import empty_pb2  # type: ignore
+from google.protobuf import field_mask_pb2  # type: ignore
+from google.protobuf import struct_pb2  # type: ignore
+from google.protobuf import timestamp_pb2  # type: ignore
+from .transports.base import IndexServiceTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc import IndexServiceGrpcTransport
+from .transports.grpc_asyncio import IndexServiceGrpcAsyncIOTransport
+
+
+class IndexServiceClientMeta(type):
+    """Metaclass for the IndexService client.
+
+    This provides class-level methods for building and retrieving
+    support objects (e.g. transport) without polluting the client instance
+    objects.
+    """
+    _transport_registry = OrderedDict()  # type: Dict[str, Type[IndexServiceTransport]]
+    _transport_registry["grpc"] = IndexServiceGrpcTransport
+    _transport_registry["grpc_asyncio"] = IndexServiceGrpcAsyncIOTransport
+
+    def get_transport_class(cls,
+            label: str = None,
+        ) -> Type[IndexServiceTransport]:
+        """Returns an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class IndexServiceClient(metaclass=IndexServiceClientMeta):
+    """A service for creating and managing Vertex AI's Index
+    resources.
+    """
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + IndexServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + IndexServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> IndexServiceTransport: + """Returns the transport used by the client instance. + + Returns: + IndexServiceTransport: The transport used by the client + instance. 
+        """
+        return self._transport
+
+    @staticmethod
+    def index_path(project: str,location: str,index: str,) -> str:
+        """Returns a fully-qualified index string."""
+        return "projects/{project}/locations/{location}/indexes/{index}".format(project=project, location=location, index=index, )
+
+    @staticmethod
+    def parse_index_path(path: str) -> Dict[str,str]:
+        """Parses an index path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/indexes/(?P<index>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def index_endpoint_path(project: str,location: str,index_endpoint: str,) -> str:
+        """Returns a fully-qualified index_endpoint string."""
+        return "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format(project=project, location=location, index_endpoint=index_endpoint, )
+
+    @staticmethod
+    def parse_index_endpoint_path(path: str) -> Dict[str,str]:
+        """Parses an index_endpoint path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/indexEndpoints/(?P<index_endpoint>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse an organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, IndexServiceTransport, None] = None,
+            client_options: Optional[client_options_lib.ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the index service client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, IndexServiceTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+                set, no client certificate will be used.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+        """
+        if isinstance(client_options, dict):
+            client_options = client_options_lib.from_dict(client_options)
+        if client_options is None:
+            client_options = client_options_lib.ClientOptions()
+
+        # Create SSL credentials for mutual TLS if needed.
+        use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
+
+        client_cert_source_func = None
+        is_mtls = False
+        if use_client_cert:
+            if client_options.client_cert_source:
+                is_mtls = True
+                client_cert_source_func = client_options.client_cert_source
+            else:
+                is_mtls = mtls.has_default_client_cert_source()
+                if is_mtls:
+                    client_cert_source_func = mtls.default_client_cert_source()
+                else:
+                    client_cert_source_func = None
+
+        # Figure out which api endpoint to use.
+        if client_options.api_endpoint is not None:
+            api_endpoint = client_options.api_endpoint
+        else:
+            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+            if use_mtls_env == "never":
+                api_endpoint = self.DEFAULT_ENDPOINT
+            elif use_mtls_env == "always":
+                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
+            elif use_mtls_env == "auto":
+                if is_mtls:
+                    api_endpoint = self.DEFAULT_MTLS_ENDPOINT
+                else:
+                    api_endpoint = self.DEFAULT_ENDPOINT
+            else:
+                raise MutualTLSChannelError(
+                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
+                    "values: never, auto, always"
+                )
+
+        # Save or instantiate the transport.
+        # Ordinarily, we provide the transport, but allowing a custom transport
+        # instance provides an extensibility point for unusual situations.
+        if isinstance(transport, IndexServiceTransport):
+            # transport is an IndexServiceTransport instance.
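+            # Illustrative usage (a hedged sketch; ``creds`` is hypothetical):
+            # a pre-built transport carries its own channel, credentials, and
+            # endpoint, e.g.
+            #
+            #   >>> channel = IndexServiceGrpcTransport.create_channel(
+            #   ...     "aiplatform.googleapis.com:443", credentials=creds)
+            #   >>> client = IndexServiceClient(
+            #   ...     transport=IndexServiceGrpcTransport(channel=channel))
+            #
+            # which is why conflicting credential arguments are rejected below.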
+ if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_index(self, + request: index_service.CreateIndexRequest = None, + *, + parent: str = None, + index: gca_index.Index = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates an Index. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateIndexRequest): + The request object. Request message for + [IndexService.CreateIndex][google.cloud.aiplatform.v1beta1.IndexService.CreateIndex]. + parent (str): + Required. The resource name of the Location to create + the Index in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + index (google.cloud.aiplatform_v1beta1.types.Index): + Required. The Index to create. + This corresponds to the ``index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Index` A representation of a collection of database items organized in a way that + allows for approximate nearest neighbor (a.k.a ANN) + algorithms search. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, index]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_service.CreateIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_service.CreateIndexRequest): + request = index_service.CreateIndexRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if index is not None: + request.index = index + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_index] + + # Certain fields should be provided within the metadata header; + # add these here. 
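+        # Illustrative note (the header name is an assumption about the
+        # standard gapic routing convention, not taken from this file): for
+        # parent="projects/my-project/locations/us-central1", the lines below
+        # attach the metadata entry
+        # "x-goog-request-params: parent=projects/my-project/locations/us-central1".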
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_index.Index, + metadata_type=index_service.CreateIndexOperationMetadata, + ) + + # Done; return the response. + return response + + def get_index(self, + request: index_service.GetIndexRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> index.Index: + r"""Gets an Index. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetIndexRequest): + The request object. Request message for + [IndexService.GetIndex][google.cloud.aiplatform.v1beta1.IndexService.GetIndex] + name (str): + Required. The name of the Index resource. Format: + ``projects/{project}/locations/{location}/indexes/{index}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Index: + A representation of a collection of + database items organized in a way that + allows for approximate nearest neighbor + (a.k.a ANN) algorithms search. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_service.GetIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_service.GetIndexRequest): + request = index_service.GetIndexRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_index] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_indexes(self, + request: index_service.ListIndexesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListIndexesPager: + r"""Lists Indexes in a Location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListIndexesRequest): + The request object. 
Request message for + [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]. + parent (str): + Required. The resource name of the Location from which + to list the Indexes. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.index_service.pagers.ListIndexesPager: + Response message for + [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_service.ListIndexesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_service.ListIndexesRequest): + request = index_service.ListIndexesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_indexes] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListIndexesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_index(self, + request: index_service.UpdateIndexRequest = None, + *, + index: gca_index.Index = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Updates an Index. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UpdateIndexRequest): + The request object. Request message for + [IndexService.UpdateIndex][google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex]. + index (google.cloud.aiplatform_v1beta1.types.Index): + Required. The Index which updates the + resource on the server. + + This corresponds to the ``index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + The update mask applies to the resource. 
For the + ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Index` A representation of a collection of database items organized in a way that + allows for approximate nearest neighbor (a.k.a ANN) + algorithms search. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_service.UpdateIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_service.UpdateIndexRequest): + request = index_service.UpdateIndexRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index is not None: + request.index = index + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_index] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("index.name", request.index.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_index.Index, + metadata_type=index_service.UpdateIndexOperationMetadata, + ) + + # Done; return the response. + return response + + def delete_index(self, + request: index_service.DeleteIndexRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes an Index. An Index can only be deleted when all its + [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes] + had been undeployed. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteIndexRequest): + The request object. Request message for + [IndexService.DeleteIndex][google.cloud.aiplatform.v1beta1.IndexService.DeleteIndex]. + name (str): + Required. The name of the Index resource to be deleted. + Format: + ``projects/{project}/locations/{location}/indexes/{index}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a index_service.DeleteIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_service.DeleteIndexRequest): + request = index_service.DeleteIndexRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_index] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "IndexServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/pagers.py new file mode 100644 index 0000000000..e4e1d47ada --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/pagers.py @@ -0,0 +1,141 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.cloud.aiplatform_v1beta1.types import index +from google.cloud.aiplatform_v1beta1.types import index_service + + +class ListIndexesPager: + """A pager for iterating through ``list_indexes`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListIndexesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``indexes`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListIndexes`` requests and continue to iterate + through the ``indexes`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListIndexesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., index_service.ListIndexesResponse], + request: index_service.ListIndexesRequest, + response: index_service.ListIndexesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListIndexesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListIndexesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = index_service.ListIndexesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[index_service.ListIndexesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[index.Index]: + for page in self.pages: + yield from page.indexes + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListIndexesAsyncPager: + """A pager for iterating through ``list_indexes`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListIndexesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``indexes`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListIndexes`` requests and continue to iterate + through the ``indexes`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListIndexesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[index_service.ListIndexesResponse]], + request: index_service.ListIndexesRequest, + response: index_service.ListIndexesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.aiplatform_v1beta1.types.ListIndexesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListIndexesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = index_service.ListIndexesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[index_service.ListIndexesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[index.Index]: + async def async_generator(): + async for page in self.pages: + for response in page.indexes: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/__init__.py new file mode 100644 index 0000000000..2f263f2fb8 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import IndexServiceTransport +from .grpc import IndexServiceGrpcTransport +from .grpc_asyncio import IndexServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[IndexServiceTransport]] +_transport_registry['grpc'] = IndexServiceGrpcTransport +_transport_registry['grpc_asyncio'] = IndexServiceGrpcAsyncIOTransport + +__all__ = ( + 'IndexServiceTransport', + 'IndexServiceGrpcTransport', + 'IndexServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py new file mode 100644 index 0000000000..3d126acabe --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py @@ -0,0 +1,232 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import index +from google.cloud.aiplatform_v1beta1.types import index_service +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class IndexServiceTransport(abc.ABC): + """Abstract transport class for IndexService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. 
Default to port 443 (HTTPS) if none is specified.
+        if ':' not in host:
+            host += ':443'
+        self._host = host
+
+        scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
+
+        # Save the scopes.
+        self._scopes = scopes
+
+        # If no credentials are provided, then determine the appropriate
+        # defaults.
+        if credentials and credentials_file:
+            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
+
+        if credentials_file is not None:
+            credentials, _ = google.auth.load_credentials_from_file(
+                credentials_file,
+                **scopes_kwargs,
+                quota_project_id=quota_project_id
+            )
+
+        elif credentials is None:
+            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
+
+        # If the credentials are service account credentials, then always try to use self signed JWT.
+        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
+            credentials = credentials.with_always_use_jwt_access(True)
+
+        # Save the credentials.
+        self._credentials = credentials
+
+    # TODO(busunkim): This method is in the base transport
+    # to avoid duplicating code across the transport classes. These functions
+    # should be deleted once the minimum required version of google-auth is increased.
+
+    # TODO: Remove this function once google-auth >= 1.25.0 is required
+    @classmethod
+    def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
+        """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
+
+        scopes_kwargs = {}
+
+        if _GOOGLE_AUTH_VERSION and (
+            packaging.version.parse(_GOOGLE_AUTH_VERSION)
+            >= packaging.version.parse("1.25.0")
+        ):
+            scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
+        else:
+            scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
+
+        return scopes_kwargs
+
+    def _prep_wrapped_messages(self, client_info):
+        # Precompute the wrapped methods.
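+        # Each wrapper below bakes the default retry/timeout into the raw
+        # transport callable; clients dispatch through this table and may
+        # still override settings per call, e.g. (illustrative sketch only):
+        #
+        #   >>> rpc = transport._wrapped_methods[transport.get_index]
+        #   >>> response = rpc(request, timeout=30.0)  # overrides the 5.0s default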
+ self._wrapped_methods = { + self.create_index: gapic_v1.method.wrap_method( + self.create_index, + default_timeout=5.0, + client_info=client_info, + ), + self.get_index: gapic_v1.method.wrap_method( + self.get_index, + default_timeout=5.0, + client_info=client_info, + ), + self.list_indexes: gapic_v1.method.wrap_method( + self.list_indexes, + default_timeout=5.0, + client_info=client_info, + ), + self.update_index: gapic_v1.method.wrap_method( + self.update_index, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_index: gapic_v1.method.wrap_method( + self.delete_index, + default_timeout=5.0, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_index(self) -> Callable[ + [index_service.CreateIndexRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_index(self) -> Callable[ + [index_service.GetIndexRequest], + Union[ + index.Index, + Awaitable[index.Index] + ]]: + raise NotImplementedError() + + @property + def list_indexes(self) -> Callable[ + [index_service.ListIndexesRequest], + Union[ + index_service.ListIndexesResponse, + Awaitable[index_service.ListIndexesResponse] + ]]: + raise NotImplementedError() + + @property + def update_index(self) -> Callable[ + [index_service.UpdateIndexRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def delete_index(self) -> Callable[ + [index_service.DeleteIndexRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'IndexServiceTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py new file mode 100644 index 0000000000..adfe4346db --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py @@ -0,0 +1,379 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import warnings
+from typing import Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import grpc_helpers  # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+import google.auth  # type: ignore
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+
+import grpc  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import index
+from google.cloud.aiplatform_v1beta1.types import index_service
+from google.longrunning import operations_pb2  # type: ignore
+from .base import IndexServiceTransport, DEFAULT_CLIENT_INFO
+
+
+class IndexServiceGrpcTransport(IndexServiceTransport):
+    """gRPC backend transport for IndexService.
+
+    A service for creating and managing Vertex AI's Index
+    resources.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+    _stubs: Dict[str, Callable]
+
+    def __init__(self, *,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: str = None,
+            scopes: Sequence[str] = None,
+            channel: grpc.Channel = None,
+            api_mtls_endpoint: str = None,
+            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+            ssl_channel_credentials: grpc.ChannelCredentials = None,
+            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+            quota_project_id: Optional[str] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure mutual TLS channel.
It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. 
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service.
+        """
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Sanity check: Only create a new client if we do not already have one.
+        if self._operations_client is None:
+            self._operations_client = operations_v1.OperationsClient(
+                self.grpc_channel
+            )
+
+        # Return the client from cache.
+        return self._operations_client
+
+    @property
+    def create_index(self) -> Callable[
+            [index_service.CreateIndexRequest],
+            operations_pb2.Operation]:
+        r"""Return a callable for the create index method over gRPC.
+
+        Creates an Index.
+
+        Returns:
+            Callable[[~.CreateIndexRequest],
+                    ~.Operation]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'create_index' not in self._stubs:
+            self._stubs['create_index'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.IndexService/CreateIndex',
+                request_serializer=index_service.CreateIndexRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['create_index']
+
+    @property
+    def get_index(self) -> Callable[
+            [index_service.GetIndexRequest],
+            index.Index]:
+        r"""Return a callable for the get index method over gRPC.
+
+        Gets an Index.
+
+        Returns:
+            Callable[[~.GetIndexRequest],
+                    ~.Index]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'get_index' not in self._stubs:
+            self._stubs['get_index'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.IndexService/GetIndex',
+                request_serializer=index_service.GetIndexRequest.serialize,
+                response_deserializer=index.Index.deserialize,
+            )
+        return self._stubs['get_index']
+
+    @property
+    def list_indexes(self) -> Callable[
+            [index_service.ListIndexesRequest],
+            index_service.ListIndexesResponse]:
+        r"""Return a callable for the list indexes method over gRPC.
+
+        Lists Indexes in a Location.
+
+        Returns:
+            Callable[[~.ListIndexesRequest],
+                    ~.ListIndexesResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'list_indexes' not in self._stubs:
+            self._stubs['list_indexes'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.IndexService/ListIndexes',
+                request_serializer=index_service.ListIndexesRequest.serialize,
+                response_deserializer=index_service.ListIndexesResponse.deserialize,
+            )
+        return self._stubs['list_indexes']
+
+    @property
+    def update_index(self) -> Callable[
+            [index_service.UpdateIndexRequest],
+            operations_pb2.Operation]:
+        r"""Return a callable for the update index method over gRPC.
+
+        Updates an Index.
+
+        Returns:
+            Callable[[~.UpdateIndexRequest],
+                    ~.Operation]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'update_index' not in self._stubs:
+            self._stubs['update_index'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.IndexService/UpdateIndex',
+                request_serializer=index_service.UpdateIndexRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['update_index']
+
+    @property
+    def delete_index(self) -> Callable[
+            [index_service.DeleteIndexRequest],
+            operations_pb2.Operation]:
+        r"""Return a callable for the delete index method over gRPC.
+
+        Deletes an Index. An Index can only be deleted when all its
+        [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes]
+        have been undeployed.
+
+        Returns:
+            Callable[[~.DeleteIndexRequest],
+                    ~.Operation]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'delete_index' not in self._stubs:
+            self._stubs['delete_index'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.IndexService/DeleteIndex',
+                request_serializer=index_service.DeleteIndexRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['delete_index']
+
+
+__all__ = (
+    'IndexServiceGrpcTransport',
+)
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py
new file mode 100644
index 0000000000..1ead81529f
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py
@@ -0,0 +1,383 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import gapic_v1                 # type: ignore
+from google.api_core import grpc_helpers_async       # type: ignore
+from google.api_core import operations_v1            # type: ignore
+from google.auth import credentials as ga_credentials   # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+import packaging.version
+
+import grpc                        # type: ignore
+from grpc.experimental import aio  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import index
+from google.cloud.aiplatform_v1beta1.types import index_service
+from google.longrunning import operations_pb2  # type: ignore
+from .base import IndexServiceTransport, DEFAULT_CLIENT_INFO
+from .grpc import IndexServiceGrpcTransport
+
+
+class IndexServiceGrpcAsyncIOTransport(IndexServiceTransport):
+    """gRPC AsyncIO backend transport for IndexService.
+
+    A service for creating and managing Vertex AI's Index
+    resources.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(cls,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            quota_project_id: Optional[str] = None,
+            **kwargs) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
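+
+        Example (an illustrative sketch; it assumes application default
+        credentials are available in the environment)::
+
+            channel = IndexServiceGrpcAsyncIOTransport.create_channel(
+                'aiplatform.googleapis.com',
+            )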
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_index(self) -> Callable[ + [index_service.CreateIndexRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create index method over gRPC. + + Creates an Index. + + Returns: + Callable[[~.CreateIndexRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if 'create_index' not in self._stubs:
+            self._stubs['create_index'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.IndexService/CreateIndex',
+                request_serializer=index_service.CreateIndexRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['create_index']
+
+    @property
+    def get_index(self) -> Callable[
+            [index_service.GetIndexRequest],
+            Awaitable[index.Index]]:
+        r"""Return a callable for the get index method over gRPC.
+
+        Gets an Index.
+
+        Returns:
+            Callable[[~.GetIndexRequest],
+                    Awaitable[~.Index]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'get_index' not in self._stubs:
+            self._stubs['get_index'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.IndexService/GetIndex',
+                request_serializer=index_service.GetIndexRequest.serialize,
+                response_deserializer=index.Index.deserialize,
+            )
+        return self._stubs['get_index']
+
+    @property
+    def list_indexes(self) -> Callable[
+            [index_service.ListIndexesRequest],
+            Awaitable[index_service.ListIndexesResponse]]:
+        r"""Return a callable for the list indexes method over gRPC.
+
+        Lists Indexes in a Location.
+
+        Returns:
+            Callable[[~.ListIndexesRequest],
+                    Awaitable[~.ListIndexesResponse]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'list_indexes' not in self._stubs:
+            self._stubs['list_indexes'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.IndexService/ListIndexes',
+                request_serializer=index_service.ListIndexesRequest.serialize,
+                response_deserializer=index_service.ListIndexesResponse.deserialize,
+            )
+        return self._stubs['list_indexes']
+
+    @property
+    def update_index(self) -> Callable[
+            [index_service.UpdateIndexRequest],
+            Awaitable[operations_pb2.Operation]]:
+        r"""Return a callable for the update index method over gRPC.
+
+        Updates an Index.
+
+        Returns:
+            Callable[[~.UpdateIndexRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'update_index' not in self._stubs:
+            self._stubs['update_index'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.IndexService/UpdateIndex',
+                request_serializer=index_service.UpdateIndexRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['update_index']
+
+    @property
+    def delete_index(self) -> Callable[
+            [index_service.DeleteIndexRequest],
+            Awaitable[operations_pb2.Operation]]:
+        r"""Return a callable for the delete index method over gRPC.
+
+        Deletes an Index. An Index can only be deleted when all its
+        [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes]
+        have been undeployed.
+
+        Returns:
+            Callable[[~.DeleteIndexRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
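+
+        Note: awaiting this callable yields the raw
+        ``google.longrunning.Operation`` proto; completion is typically
+        tracked through the transport's ``operations_client``.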
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_index' not in self._stubs: + self._stubs['delete_index'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.IndexService/DeleteIndex', + request_serializer=index_service.DeleteIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_index'] + + +__all__ = ( + 'IndexServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/__init__.py new file mode 100644 index 0000000000..817e1b49e2 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import JobServiceClient +from .async_client import JobServiceAsyncClient + +__all__ = ( + 'JobServiceClient', + 'JobServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py new file mode 100644 index 0000000000..eb9907a8ca --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py @@ -0,0 +1,2615 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.job_service import pagers +from google.cloud.aiplatform_v1beta1.types import batch_prediction_job +from google.cloud.aiplatform_v1beta1.types import batch_prediction_job as gca_batch_prediction_job +from google.cloud.aiplatform_v1beta1.types import completion_stats +from google.cloud.aiplatform_v1beta1.types import custom_job +from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job +from google.cloud.aiplatform_v1beta1.types import data_labeling_job +from google.cloud.aiplatform_v1beta1.types import data_labeling_job as gca_data_labeling_job +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job +from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import job_service +from google.cloud.aiplatform_v1beta1.types import job_state +from google.cloud.aiplatform_v1beta1.types import machine_resources +from google.cloud.aiplatform_v1beta1.types import manual_batch_tuning_parameters +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job +from google.cloud.aiplatform_v1beta1.types import model_monitoring +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import study +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from google.type import money_pb2 # type: ignore +from .transports.base import JobServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport +from .client import JobServiceClient + + +class JobServiceAsyncClient: + """A service for creating and managing Vertex AI's jobs.""" + + _client: JobServiceClient + + DEFAULT_ENDPOINT = JobServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = JobServiceClient.DEFAULT_MTLS_ENDPOINT + + batch_prediction_job_path = staticmethod(JobServiceClient.batch_prediction_job_path) + parse_batch_prediction_job_path = staticmethod(JobServiceClient.parse_batch_prediction_job_path) + custom_job_path = staticmethod(JobServiceClient.custom_job_path) + parse_custom_job_path = staticmethod(JobServiceClient.parse_custom_job_path) + data_labeling_job_path = 
staticmethod(JobServiceClient.data_labeling_job_path) + parse_data_labeling_job_path = staticmethod(JobServiceClient.parse_data_labeling_job_path) + dataset_path = staticmethod(JobServiceClient.dataset_path) + parse_dataset_path = staticmethod(JobServiceClient.parse_dataset_path) + endpoint_path = staticmethod(JobServiceClient.endpoint_path) + parse_endpoint_path = staticmethod(JobServiceClient.parse_endpoint_path) + hyperparameter_tuning_job_path = staticmethod(JobServiceClient.hyperparameter_tuning_job_path) + parse_hyperparameter_tuning_job_path = staticmethod(JobServiceClient.parse_hyperparameter_tuning_job_path) + model_path = staticmethod(JobServiceClient.model_path) + parse_model_path = staticmethod(JobServiceClient.parse_model_path) + model_deployment_monitoring_job_path = staticmethod(JobServiceClient.model_deployment_monitoring_job_path) + parse_model_deployment_monitoring_job_path = staticmethod(JobServiceClient.parse_model_deployment_monitoring_job_path) + network_path = staticmethod(JobServiceClient.network_path) + parse_network_path = staticmethod(JobServiceClient.parse_network_path) + tensorboard_path = staticmethod(JobServiceClient.tensorboard_path) + parse_tensorboard_path = staticmethod(JobServiceClient.parse_tensorboard_path) + trial_path = staticmethod(JobServiceClient.trial_path) + parse_trial_path = staticmethod(JobServiceClient.parse_trial_path) + common_billing_account_path = staticmethod(JobServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(JobServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(JobServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(JobServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(JobServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(JobServiceClient.parse_common_organization_path) + common_project_path = staticmethod(JobServiceClient.common_project_path) + parse_common_project_path = staticmethod(JobServiceClient.parse_common_project_path) + common_location_path = staticmethod(JobServiceClient.common_location_path) + parse_common_location_path = staticmethod(JobServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + JobServiceAsyncClient: The constructed client. + """ + return JobServiceClient.from_service_account_info.__func__(JobServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + JobServiceAsyncClient: The constructed client. + """ + return JobServiceClient.from_service_account_file.__func__(JobServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> JobServiceTransport: + """Returns the transport used by the client instance. 
+
+        Returns:
+            JobServiceTransport: The transport used by the client instance.
+        """
+        return self._client.transport
+
+    get_transport_class = functools.partial(type(JobServiceClient).get_transport_class, type(JobServiceClient))
+
+    def __init__(self, *,
+            credentials: ga_credentials.Credentials = None,
+            transport: Union[str, JobServiceTransport] = "grpc_asyncio",
+            client_options: ClientOptions = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the job service client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, ~.JobServiceTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (ClientOptions): Custom options for the client.
+                These options will not take effect if a ``transport`` instance
+                is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. The
+                GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be
+                used to override the endpoint: "always" (always use the default
+                mTLS endpoint), "never" (always use the default regular
+                endpoint) and "auto" (switch to the default mTLS endpoint if a
+                client certificate is present; this is the default value).
+                However, the ``api_endpoint`` property takes precedence if
+                provided.
+                (2) If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment
+                variable is "true", then the ``client_cert_source`` property
+                can be used to provide a client certificate for mutual TLS
+                transport. If not provided, the default SSL client certificate
+                will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE
+                is "false" or not set, no client certificate will be used.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+        """
+        self._client = JobServiceClient(
+            credentials=credentials,
+            transport=transport,
+            client_options=client_options,
+            client_info=client_info,
+        )
+
+    async def create_custom_job(self,
+            request: job_service.CreateCustomJobRequest = None,
+            *,
+            parent: str = None,
+            custom_job: gca_custom_job.CustomJob = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> gca_custom_job.CustomJob:
+        r"""Creates a CustomJob. The service attempts to run a
+        newly created CustomJob right away.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.CreateCustomJobRequest`):
+                The request object. Request message for
+                [JobService.CreateCustomJob][google.cloud.aiplatform.v1beta1.JobService.CreateCustomJob].
+            parent (:class:`str`):
+                Required. The resource name of the Location to create
+                the CustomJob in. Format:
+                ``projects/{project}/locations/{location}``
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            custom_job (:class:`google.cloud.aiplatform_v1beta1.types.CustomJob`):
+                Required. The CustomJob to create.
+                This corresponds to the ``custom_job`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.CustomJob: + Represents a job that runs custom + workloads such as a Docker container or + a Python package. A CustomJob can have + multiple worker pools and each worker + pool can have its own machine and input + spec. A CustomJob will be cleaned up + once the job enters terminal state + (failed or succeeded). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, custom_job]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.CreateCustomJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if custom_job is not None: + request.custom_job = custom_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_custom_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_custom_job(self, + request: job_service.GetCustomJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> custom_job.CustomJob: + r"""Gets a CustomJob. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetCustomJobRequest`): + The request object. Request message for + [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob]. + name (:class:`str`): + Required. The name of the CustomJob resource. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.CustomJob: + Represents a job that runs custom + workloads such as a Docker container or + a Python package. A CustomJob can have + multiple worker pools and each worker + pool can have its own machine and input + spec. A CustomJob will be cleaned up + once the job enters terminal state + (failed or succeeded). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
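+        # Illustrative calling conventions (hypothetical values): either
+        #   await client.get_custom_job(name='projects/p/locations/l/customJobs/1')
+        # or
+        #   await client.get_custom_job(request=job_service.GetCustomJobRequest(...))
+        # Setting both `request` and `name` triggers the ValueError below.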
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.GetCustomJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_custom_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_custom_jobs(self, + request: job_service.ListCustomJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListCustomJobsAsyncPager: + r"""Lists CustomJobs in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListCustomJobsRequest`): + The request object. Request message for + [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs]. + parent (:class:`str`): + Required. The resource name of the Location to list the + CustomJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListCustomJobsAsyncPager: + Response message for + [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.ListCustomJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_custom_jobs, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
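+        # The raw RPC result is a single ListCustomJobsResponse page; the
+        # pager wrapper added below lets callers iterate across pages,
+        # e.g. (hypothetical):
+        #   async for job in await client.list_custom_jobs(parent=parent):
+        #       ...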
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListCustomJobsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_custom_job(self, + request: job_service.DeleteCustomJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a CustomJob. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteCustomJobRequest`): + The request object. Request message for + [JobService.DeleteCustomJob][google.cloud.aiplatform.v1beta1.JobService.DeleteCustomJob]. + name (:class:`str`): + Required. The name of the CustomJob resource to be + deleted. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.DeleteCustomJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_custom_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
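+        # Illustrative follow-up (hypothetical): callers usually block on the
+        # returned future with `await response.result()`, which resolves to
+        # empty_pb2.Empty once the server finishes the deletion.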
+ return response + + async def cancel_custom_job(self, + request: job_service.CancelCustomJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a CustomJob. Starts asynchronous cancellation on the + CustomJob. The server makes a best effort to cancel the job, but + success is not guaranteed. Clients can use + [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On successful + cancellation, the CustomJob is not deleted; instead it becomes a + job with a + [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] + is set to ``CANCELLED``. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CancelCustomJobRequest`): + The request object. Request message for + [JobService.CancelCustomJob][google.cloud.aiplatform.v1beta1.JobService.CancelCustomJob]. + name (:class:`str`): + Required. The name of the CustomJob to cancel. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.CancelCustomJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_custom_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def create_data_labeling_job(self, + request: job_service.CreateDataLabelingJobRequest = None, + *, + parent: str = None, + data_labeling_job: gca_data_labeling_job.DataLabelingJob = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_data_labeling_job.DataLabelingJob: + r"""Creates a DataLabelingJob. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateDataLabelingJobRequest`): + The request object. 
Request message for + [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CreateDataLabelingJob]. + parent (:class:`str`): + Required. The parent of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + data_labeling_job (:class:`google.cloud.aiplatform_v1beta1.types.DataLabelingJob`): + Required. The DataLabelingJob to + create. + + This corresponds to the ``data_labeling_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.DataLabelingJob: + DataLabelingJob is used to trigger a + human labeling job on unlabeled data + from the following Dataset: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, data_labeling_job]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.CreateDataLabelingJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if data_labeling_job is not None: + request.data_labeling_job = data_labeling_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_data_labeling_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_data_labeling_job(self, + request: job_service.GetDataLabelingJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> data_labeling_job.DataLabelingJob: + r"""Gets a DataLabelingJob. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetDataLabelingJobRequest`): + The request object. Request message for + [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.GetDataLabelingJob]. + name (:class:`str`): + Required. The name of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.types.DataLabelingJob: + DataLabelingJob is used to trigger a + human labeling job on unlabeled data + from the following Dataset: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.GetDataLabelingJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_data_labeling_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_data_labeling_jobs(self, + request: job_service.ListDataLabelingJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDataLabelingJobsAsyncPager: + r"""Lists DataLabelingJobs in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsRequest`): + The request object. Request message for + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. + parent (:class:`str`): + Required. The parent of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListDataLabelingJobsAsyncPager: + Response message for + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.ListDataLabelingJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
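+        # The 5-second default timeout applied below can still be overridden
+        # per call, e.g. (hypothetical):
+        #   await client.list_data_labeling_jobs(parent=parent, timeout=30.0)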
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_data_labeling_jobs, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListDataLabelingJobsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_data_labeling_job(self, + request: job_service.DeleteDataLabelingJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a DataLabelingJob. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteDataLabelingJobRequest`): + The request object. Request message for + [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.DeleteDataLabelingJob]. + name (:class:`str`): + Required. The name of the DataLabelingJob to be deleted. + Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.DeleteDataLabelingJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_data_labeling_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
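+        # The RPC below returns a raw long-running Operation proto; it is
+        # wrapped into an awaitable AsyncOperation a few lines further down.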
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def cancel_data_labeling_job(self, + request: job_service.CancelDataLabelingJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a DataLabelingJob. Success of cancellation is + not guaranteed. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CancelDataLabelingJobRequest`): + The request object. Request message for + [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CancelDataLabelingJob]. + name (:class:`str`): + Required. The name of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.CancelDataLabelingJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_data_labeling_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def create_hyperparameter_tuning_job(self, + request: job_service.CreateHyperparameterTuningJobRequest = None, + *, + parent: str = None, + hyperparameter_tuning_job: gca_hyperparameter_tuning_job.HyperparameterTuningJob = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_hyperparameter_tuning_job.HyperparameterTuningJob: + r"""Creates a HyperparameterTuningJob + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateHyperparameterTuningJobRequest`): + The request object. Request message for + [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CreateHyperparameterTuningJob]. + parent (:class:`str`): + Required. The resource name of the Location to create + the HyperparameterTuningJob in. 
Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + hyperparameter_tuning_job (:class:`google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob`): + Required. The HyperparameterTuningJob + to create. + + This corresponds to the ``hyperparameter_tuning_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob: + Represents a HyperparameterTuningJob. + A HyperparameterTuningJob has a Study + specification and multiple CustomJobs + with identical CustomJob specification. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, hyperparameter_tuning_job]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.CreateHyperparameterTuningJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if hyperparameter_tuning_job is not None: + request.hyperparameter_tuning_job = hyperparameter_tuning_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_hyperparameter_tuning_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_hyperparameter_tuning_job(self, + request: job_service.GetHyperparameterTuningJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> hyperparameter_tuning_job.HyperparameterTuningJob: + r"""Gets a HyperparameterTuningJob + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetHyperparameterTuningJobRequest`): + The request object. Request message for + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob]. + name (:class:`str`): + Required. The name of the HyperparameterTuningJob + resource. Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob: + Represents a HyperparameterTuningJob. + A HyperparameterTuningJob has a Study + specification and multiple CustomJobs + with identical CustomJob specification. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.GetHyperparameterTuningJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_hyperparameter_tuning_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_hyperparameter_tuning_jobs(self, + request: job_service.ListHyperparameterTuningJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListHyperparameterTuningJobsAsyncPager: + r"""Lists HyperparameterTuningJobs in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsRequest`): + The request object. Request message for + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs]. + parent (:class:`str`): + Required. The resource name of the Location to list the + HyperparameterTuningJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListHyperparameterTuningJobsAsyncPager: + Response message for + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
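+ # For illustration, the two mutually exclusive calling styles (a
+ # hedged sketch; `client` and the parent string are hypothetical):
+ #
+ #     pager = await client.list_hyperparameter_tuning_jobs(
+ #         parent="projects/my-project/locations/us-central1")
+ #
+ # or, equivalently, with an explicit request object and no flattened
+ # arguments:
+ #
+ #     req = job_service.ListHyperparameterTuningJobsRequest(
+ #         parent="projects/my-project/locations/us-central1")
+ #     pager = await client.list_hyperparameter_tuning_jobs(request=req)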
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.ListHyperparameterTuningJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_hyperparameter_tuning_jobs, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListHyperparameterTuningJobsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_hyperparameter_tuning_job(self, + request: job_service.DeleteHyperparameterTuningJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a HyperparameterTuningJob. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteHyperparameterTuningJobRequest`): + The request object. Request message for + [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.DeleteHyperparameterTuningJob]. + name (:class:`str`): + Required. The name of the HyperparameterTuningJob + resource to be deleted. Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
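+ # The value returned below is an AsyncOperation future whose result
+ # type is Empty; a minimal usage sketch (assuming a constructed
+ # `client`; `job_name` is hypothetical):
+ #
+ #     op = await client.delete_hyperparameter_tuning_job(name=job_name)
+ #     await op.result()  # resolves to google.protobuf.empty_pb2.Empty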
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.DeleteHyperparameterTuningJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_hyperparameter_tuning_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def cancel_hyperparameter_tuning_job(self, + request: job_service.CancelHyperparameterTuningJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a HyperparameterTuningJob. Starts asynchronous + cancellation on the HyperparameterTuningJob. The server makes a + best effort to cancel the job, but success is not guaranteed. + Clients can use + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On successful + cancellation, the HyperparameterTuningJob is not deleted; + instead it becomes a job with a + [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state] + is set to ``CANCELLED``. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CancelHyperparameterTuningJobRequest`): + The request object. Request message for + [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CancelHyperparameterTuningJob]. + name (:class:`str`): + Required. The name of the HyperparameterTuningJob to + cancel. Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
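+ # Cancellation is best-effort, so callers typically poll until the job
+ # reaches a terminal state. A hedged sketch (`client` and `job_name`
+ # are hypothetical; the enum lives in the v1beta1 job_state module):
+ #
+ #     await client.cancel_hyperparameter_tuning_job(name=job_name)
+ #     job = await client.get_hyperparameter_tuning_job(name=job_name)
+ #     cancelled = job.state == job_state.JobState.JOB_STATE_CANCELLED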
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError("If the `request` argument is set, then none of "
+ "the individual field arguments should be set.")
+
+ request = job_service.CancelHyperparameterTuningJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.cancel_hyperparameter_tuning_job,
+ default_timeout=5.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("name", request.name),
+ )),
+ )
+
+ # Send the request.
+ await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ async def create_batch_prediction_job(self,
+ request: job_service.CreateBatchPredictionJobRequest = None,
+ *,
+ parent: str = None,
+ batch_prediction_job: gca_batch_prediction_job.BatchPredictionJob = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gca_batch_prediction_job.BatchPredictionJob:
+ r"""Creates a BatchPredictionJob. Once created, the
+ BatchPredictionJob is started right away.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1beta1.types.CreateBatchPredictionJobRequest`):
+ The request object. Request message for
+ [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CreateBatchPredictionJob].
+ parent (:class:`str`):
+ Required. The resource name of the Location to create
+ the BatchPredictionJob in. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ batch_prediction_job (:class:`google.cloud.aiplatform_v1beta1.types.BatchPredictionJob`):
+ Required. The BatchPredictionJob to
+ create.
+
+ This corresponds to the ``batch_prediction_job`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1beta1.types.BatchPredictionJob:
+ A job that uses a [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to produce predictions
+ on multiple [input
+ instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config].
+ If predictions for a significant portion of the
+ instances fail, the job may finish without attempting
+ predictions for all remaining instances.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
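+ # A hedged sketch of the flattened calling style (all names and field
+ # values here are hypothetical; `display_name` and `model` are assumed
+ # fields of the v1beta1 BatchPredictionJob message):
+ #
+ #     job = gca_batch_prediction_job.BatchPredictionJob(
+ #         display_name="my-batch-job",
+ #         model="projects/p/locations/l/models/m")
+ #     created = await client.create_batch_prediction_job(
+ #         parent="projects/p/locations/l", batch_prediction_job=job)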
+ has_flattened_params = any([parent, batch_prediction_job])
+ if request is not None and has_flattened_params:
+ raise ValueError("If the `request` argument is set, then none of "
+ "the individual field arguments should be set.")
+
+ request = job_service.CreateBatchPredictionJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if batch_prediction_job is not None:
+ request.batch_prediction_job = batch_prediction_job
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.create_batch_prediction_job,
+ default_timeout=5.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("parent", request.parent),
+ )),
+ )
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def get_batch_prediction_job(self,
+ request: job_service.GetBatchPredictionJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> batch_prediction_job.BatchPredictionJob:
+ r"""Gets a BatchPredictionJob
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1beta1.types.GetBatchPredictionJobRequest`):
+ The request object. Request message for
+ [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob].
+ name (:class:`str`):
+ Required. The name of the BatchPredictionJob resource.
+ Format:
+ ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1beta1.types.BatchPredictionJob:
+ A job that uses a [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to produce predictions
+ on multiple [input
+ instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config].
+ If predictions for a significant portion of the
+ instances fail, the job may finish without attempting
+ predictions for all remaining instances.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError("If the `request` argument is set, then none of "
+ "the individual field arguments should be set.")
+
+ request = job_service.GetBatchPredictionJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
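+ # `wrap_method` layers the default timeout and client-info metadata
+ # onto the raw transport method; a caller-supplied `retry` takes
+ # precedence over the default. A minimal sketch of a custom policy
+ # (values are illustrative; `retries` is google.api_core.retry):
+ #
+ #     my_retry = retries.Retry(initial=0.1, maximum=60.0, multiplier=1.3)
+ #     job = await client.get_batch_prediction_job(
+ #         name=job_name, retry=my_retry)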
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_batch_prediction_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_batch_prediction_jobs(self, + request: job_service.ListBatchPredictionJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListBatchPredictionJobsAsyncPager: + r"""Lists BatchPredictionJobs in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsRequest`): + The request object. Request message for + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs]. + parent (:class:`str`): + Required. The resource name of the Location to list the + BatchPredictionJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListBatchPredictionJobsAsyncPager: + Response message for + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.ListBatchPredictionJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_batch_prediction_jobs, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListBatchPredictionJobsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
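+ # The pager fetches further pages lazily as it is iterated; a minimal
+ # usage sketch (assuming a constructed `client`; `parent` is
+ # hypothetical):
+ #
+ #     pager = await client.list_batch_prediction_jobs(parent=parent)
+ #     async for job in pager:  # yields BatchPredictionJob messages
+ #         ...
+ #     # or page by page via `async for page in pager.pages: ...`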
+ return response + + async def delete_batch_prediction_job(self, + request: job_service.DeleteBatchPredictionJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a BatchPredictionJob. Can only be called on + jobs that already finished. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteBatchPredictionJobRequest`): + The request object. Request message for + [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.DeleteBatchPredictionJob]. + name (:class:`str`): + Required. The name of the BatchPredictionJob resource to + be deleted. Format: + ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.DeleteBatchPredictionJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_batch_prediction_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def cancel_batch_prediction_job(self, + request: job_service.CancelBatchPredictionJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a BatchPredictionJob. 
+
+ Starts asynchronous cancellation on the BatchPredictionJob. The
+ server makes the best effort to cancel the job, but success is
+ not guaranteed. Clients can use
+ [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob]
+ or other methods to check whether the cancellation succeeded or
+ whether the job completed despite cancellation. On a successful
+ cancellation, the BatchPredictionJob is not deleted; instead its
+ [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state]
+ is set to ``CANCELLED``. Any files already outputted by the job
+ are not deleted.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1beta1.types.CancelBatchPredictionJobRequest`):
+ The request object. Request message for
+ [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CancelBatchPredictionJob].
+ name (:class:`str`):
+ Required. The name of the BatchPredictionJob to cancel.
+ Format:
+ ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError("If the `request` argument is set, then none of "
+ "the individual field arguments should be set.")
+
+ request = job_service.CancelBatchPredictionJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.cancel_batch_prediction_job,
+ default_timeout=5.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("name", request.name),
+ )),
+ )
+
+ # Send the request.
+ await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ async def create_model_deployment_monitoring_job(self,
+ request: job_service.CreateModelDeploymentMonitoringJobRequest = None,
+ *,
+ parent: str = None,
+ model_deployment_monitoring_job: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob:
+ r"""Creates a ModelDeploymentMonitoringJob. It will run
+ periodically on a configured interval.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1beta1.types.CreateModelDeploymentMonitoringJobRequest`):
+ The request object. Request message for
+ [JobService.CreateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.CreateModelDeploymentMonitoringJob].
+ parent (:class:`str`):
+ Required. The parent of the
+ ModelDeploymentMonitoringJob.
Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ model_deployment_monitoring_job (:class:`google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob`):
+ Required. The
+ ModelDeploymentMonitoringJob to create.
+
+ This corresponds to the ``model_deployment_monitoring_job`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob:
+ Represents a job that runs
+ periodically to monitor the deployed
+ models in an endpoint. It will analyze
+ the logged training & prediction data to
+ detect any abnormal behaviors.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, model_deployment_monitoring_job])
+ if request is not None and has_flattened_params:
+ raise ValueError("If the `request` argument is set, then none of "
+ "the individual field arguments should be set.")
+
+ request = job_service.CreateModelDeploymentMonitoringJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if model_deployment_monitoring_job is not None:
+ request.model_deployment_monitoring_job = model_deployment_monitoring_job
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.create_model_deployment_monitoring_job,
+ default_timeout=60.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("parent", request.parent),
+ )),
+ )
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def search_model_deployment_monitoring_stats_anomalies(self,
+ request: job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest = None,
+ *,
+ model_deployment_monitoring_job: str = None,
+ deployed_model_id: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager:
+ r"""Searches Model Monitoring Statistics generated within
+ a given time window.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest`):
+ The request object. Request message for
+ [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies].
+ model_deployment_monitoring_job (:class:`str`):
+ Required. ModelDeploymentMonitoringJob resource name.
+
+ Format:
+ ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}``
+
+ This corresponds to the ``model_deployment_monitoring_job`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ deployed_model_id (:class:`str`):
+ Required. The DeployedModel ID of the
+ [google.cloud.aiplatform.master.ModelDeploymentMonitoringObjectiveConfig.deployed_model_id].
+
+ This corresponds to the ``deployed_model_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1beta1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager:
+ Response message for
+ [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies].
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([model_deployment_monitoring_job, deployed_model_id])
+ if request is not None and has_flattened_params:
+ raise ValueError("If the `request` argument is set, then none of "
+ "the individual field arguments should be set.")
+
+ request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if model_deployment_monitoring_job is not None:
+ request.model_deployment_monitoring_job = model_deployment_monitoring_job
+ if deployed_model_id is not None:
+ request.deployed_model_id = deployed_model_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.search_model_deployment_monitoring_stats_anomalies,
+ default_timeout=5.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("model_deployment_monitoring_job", request.model_deployment_monitoring_job),
+ )),
+ )
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager(
+ method=rpc,
+ request=request,
+ response=response,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def get_model_deployment_monitoring_job(self,
+ request: job_service.GetModelDeploymentMonitoringJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> model_deployment_monitoring_job.ModelDeploymentMonitoringJob:
+ r"""Gets a ModelDeploymentMonitoringJob.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1beta1.types.GetModelDeploymentMonitoringJobRequest`):
+ The request object.
Request message for + [JobService.GetModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.GetModelDeploymentMonitoringJob]. + name (:class:`str`): + Required. The resource name of the + ModelDeploymentMonitoringJob. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob: + Represents a job that runs + periodically to monitor the deployed + models in an endpoint. It will analyze + the logged training & prediction data to + detect any abnormal behaviors. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.GetModelDeploymentMonitoringJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_model_deployment_monitoring_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_model_deployment_monitoring_jobs(self, + request: job_service.ListModelDeploymentMonitoringJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelDeploymentMonitoringJobsAsyncPager: + r"""Lists ModelDeploymentMonitoringJobs in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest`): + The request object. Request message for + [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. + parent (:class:`str`): + Required. The parent of the + ModelDeploymentMonitoringJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListModelDeploymentMonitoringJobsAsyncPager: + Response message for + [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.ListModelDeploymentMonitoringJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_model_deployment_monitoring_jobs, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelDeploymentMonitoringJobsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_model_deployment_monitoring_job(self, + request: job_service.UpdateModelDeploymentMonitoringJobRequest = None, + *, + model_deployment_monitoring_job: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates a ModelDeploymentMonitoringJob. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateModelDeploymentMonitoringJobRequest`): + The request object. Request message for + [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob]. + model_deployment_monitoring_job (:class:`google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob`): + Required. The model monitoring + configuration which replaces the + resource on the server. + + This corresponds to the ``model_deployment_monitoring_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The update mask applies to + the resource. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob` Represents a job that runs periodically to monitor the deployed models in an + endpoint. It will analyze the logged training & + prediction data to detect any abnormal behaviors. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model_deployment_monitoring_job, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = job_service.UpdateModelDeploymentMonitoringJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model_deployment_monitoring_job is not None: + request.model_deployment_monitoring_job = model_deployment_monitoring_job + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_model_deployment_monitoring_job, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("model_deployment_monitoring_job.name", request.model_deployment_monitoring_job.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + metadata_type=job_service.UpdateModelDeploymentMonitoringJobOperationMetadata, + ) + + # Done; return the response. + return response + + async def delete_model_deployment_monitoring_job(self, + request: job_service.DeleteModelDeploymentMonitoringJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a ModelDeploymentMonitoringJob. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteModelDeploymentMonitoringJobRequest`): + The request object. Request message for + [JobService.DeleteModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.DeleteModelDeploymentMonitoringJob]. + name (:class:`str`): + Required. The resource name of the model monitoring job + to delete. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
+ empty messages in your APIs. A typical example is to
+ use it as the request or the response type of an API
+ method. For instance:
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns
+ (google.protobuf.Empty);
+
+ }
+
+ The JSON representation for Empty is empty JSON
+ object {}.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError("If the `request` argument is set, then none of "
+ "the individual field arguments should be set.")
+
+ request = job_service.DeleteModelDeploymentMonitoringJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.delete_model_deployment_monitoring_job,
+ default_timeout=5.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("name", request.name),
+ )),
+ )
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ empty_pb2.Empty,
+ metadata_type=gca_operation.DeleteOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def pause_model_deployment_monitoring_job(self,
+ request: job_service.PauseModelDeploymentMonitoringJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> None:
+ r"""Pauses a ModelDeploymentMonitoringJob. If the job is running,
+ the server makes a best effort to cancel the job, and marks
+ [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state]
+ as 'PAUSED'.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1beta1.types.PauseModelDeploymentMonitoringJobRequest`):
+ The request object. Request message for
+ [JobService.PauseModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.PauseModelDeploymentMonitoringJob].
+ name (:class:`str`):
+ Required. The resource name of the
+ ModelDeploymentMonitoringJob to pause. Format:
+ ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError("If the `request` argument is set, then none of "
+ "the individual field arguments should be set.")
+
+ request = job_service.PauseModelDeploymentMonitoringJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.pause_model_deployment_monitoring_job,
+ default_timeout=5.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("name", request.name),
+ )),
+ )
+
+ # Send the request.
+ await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ async def resume_model_deployment_monitoring_job(self,
+ request: job_service.ResumeModelDeploymentMonitoringJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> None:
+ r"""Resumes a paused ModelDeploymentMonitoringJob. It
+ will start to run from the next scheduled time. A deleted
+ ModelDeploymentMonitoringJob can't be resumed.
+
+ Args:
+ request (:class:`google.cloud.aiplatform_v1beta1.types.ResumeModelDeploymentMonitoringJobRequest`):
+ The request object. Request message for
+ [JobService.ResumeModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.ResumeModelDeploymentMonitoringJob].
+ name (:class:`str`):
+ Required. The resource name of the
+ ModelDeploymentMonitoringJob to resume. Format:
+ ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError("If the `request` argument is set, then none of "
+ "the individual field arguments should be set.")
+
+ request = job_service.ResumeModelDeploymentMonitoringJobRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.resume_model_deployment_monitoring_job,
+ default_timeout=5.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
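+ # `to_grpc_metadata` folds the routing parameters into a single
+ # `x-goog-request-params` header entry, e.g. (illustrative value):
+ #
+ #     ("x-goog-request-params",
+ #      "name=projects/p/locations/l/modelDeploymentMonitoringJobs/j")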
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "JobServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/client.py new file mode 100644 index 0000000000..afa2eb0e0a --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/client.py @@ -0,0 +1,2892 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.job_service import pagers +from google.cloud.aiplatform_v1beta1.types import batch_prediction_job +from google.cloud.aiplatform_v1beta1.types import batch_prediction_job as gca_batch_prediction_job +from google.cloud.aiplatform_v1beta1.types import completion_stats +from google.cloud.aiplatform_v1beta1.types import custom_job +from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job +from google.cloud.aiplatform_v1beta1.types import data_labeling_job +from google.cloud.aiplatform_v1beta1.types import data_labeling_job as gca_data_labeling_job +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job +from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import job_service +from google.cloud.aiplatform_v1beta1.types import job_state +from 
google.cloud.aiplatform_v1beta1.types import machine_resources
+from google.cloud.aiplatform_v1beta1.types import manual_batch_tuning_parameters
+from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job
+from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job
+from google.cloud.aiplatform_v1beta1.types import model_monitoring
+from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
+from google.cloud.aiplatform_v1beta1.types import study
+from google.protobuf import duration_pb2  # type: ignore
+from google.protobuf import empty_pb2  # type: ignore
+from google.protobuf import field_mask_pb2  # type: ignore
+from google.protobuf import struct_pb2  # type: ignore
+from google.protobuf import timestamp_pb2  # type: ignore
+from google.rpc import status_pb2  # type: ignore
+from google.type import money_pb2  # type: ignore
+from .transports.base import JobServiceTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc import JobServiceGrpcTransport
+from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport
+
+
+class JobServiceClientMeta(type):
+    """Metaclass for the JobService client.
+
+    This provides class-level methods for building and retrieving
+    support objects (e.g. transport) without polluting the client instance
+    objects.
+    """
+    _transport_registry = OrderedDict()  # type: Dict[str, Type[JobServiceTransport]]
+    _transport_registry["grpc"] = JobServiceGrpcTransport
+    _transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport
+
+    def get_transport_class(cls,
+            label: str = None,
+            ) -> Type[JobServiceTransport]:
+        """Returns an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class JobServiceClient(metaclass=JobServiceClientMeta):
+    """A service for creating and managing Vertex AI's jobs."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            JobServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            JobServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(
+            filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> JobServiceTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            JobServiceTransport: The transport used by the client
+                instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def batch_prediction_job_path(project: str,location: str,batch_prediction_job: str,) -> str:
+        """Returns a fully-qualified batch_prediction_job string."""
+        return "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format(project=project, location=location, batch_prediction_job=batch_prediction_job, )
+
+    @staticmethod
+    def parse_batch_prediction_job_path(path: str) -> Dict[str,str]:
+        """Parses a batch_prediction_job path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/batchPredictionJobs/(?P<batch_prediction_job>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def custom_job_path(project: str,location: str,custom_job: str,) -> str:
+        """Returns a fully-qualified custom_job string."""
+        return "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, )
+
+    @staticmethod
+    def parse_custom_job_path(path: str) -> Dict[str,str]:
+        """Parses a custom_job path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/customJobs/(?P<custom_job>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def data_labeling_job_path(project: str,location: str,data_labeling_job: str,) -> str:
+        """Returns a fully-qualified data_labeling_job string."""
+        return "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}".format(project=project, location=location, data_labeling_job=data_labeling_job, )
+
+    @staticmethod
+    def parse_data_labeling_job_path(path: str) -> Dict[str,str]:
+        """Parses a data_labeling_job path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/dataLabelingJobs/(?P<data_labeling_job>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def dataset_path(project: str,location: str,dataset: str,) -> str:
+        """Returns a fully-qualified dataset string."""
+        return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, )
+
+    @staticmethod
+    def parse_dataset_path(path: str) -> Dict[str,str]:
+        """Parses a dataset path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def endpoint_path(project: str,location: str,endpoint: str,) -> str:
+        """Returns a fully-qualified endpoint string."""
+        return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, )
+
+    @staticmethod
+    def parse_endpoint_path(path: str) -> Dict[str,str]:
+        """Parses an endpoint path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/endpoints/(?P<endpoint>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def hyperparameter_tuning_job_path(project: str,location: str,hyperparameter_tuning_job: str,) -> str:
+        """Returns a fully-qualified hyperparameter_tuning_job string."""
+        return "projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}".format(project=project, location=location, hyperparameter_tuning_job=hyperparameter_tuning_job, )
+
+    @staticmethod
+    def parse_hyperparameter_tuning_job_path(path: str) -> Dict[str,str]:
+        """Parses a hyperparameter_tuning_job path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/hyperparameterTuningJobs/(?P<hyperparameter_tuning_job>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def model_path(project: str,location: str,model: str,) -> str:
+        """Returns a fully-qualified model string."""
+        return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, )
+
+    @staticmethod
+    def parse_model_path(path: str) -> Dict[str,str]:
+        """Parses a model path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def model_deployment_monitoring_job_path(project: str,location: str,model_deployment_monitoring_job: str,) -> str:
+        """Returns a fully-qualified model_deployment_monitoring_job string."""
+        return "projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format(project=project, location=location, model_deployment_monitoring_job=model_deployment_monitoring_job, )
+
+    @staticmethod
+    def parse_model_deployment_monitoring_job_path(path: str) -> Dict[str,str]:
+        """Parses a model_deployment_monitoring_job path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/modelDeploymentMonitoringJobs/(?P<model_deployment_monitoring_job>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def network_path(project: str,network: str,) -> str:
+        """Returns a fully-qualified network string."""
+        return "projects/{project}/global/networks/{network}".format(project=project, network=network, )
+
+    @staticmethod
+    def parse_network_path(path: str) -> Dict[str,str]:
+        """Parses a network path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/global/networks/(?P<network>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def tensorboard_path(project: str,location: str,tensorboard: str,) -> str:
+        """Returns a fully-qualified tensorboard string."""
+        return "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(project=project, location=location, tensorboard=tensorboard, )
+
+    @staticmethod
+    def parse_tensorboard_path(path: str) -> Dict[str,str]:
+        """Parses a tensorboard path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/tensorboards/(?P<tensorboard>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def trial_path(project: str,location: str,study: str,trial: str,) -> str:
+        """Returns a fully-qualified trial string."""
+        return "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, )
+
+    @staticmethod
+    def parse_trial_path(path: str) -> Dict[str,str]:
+        """Parses a trial path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/studies/(?P<study>.+?)/trials/(?P<trial>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse an organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, JobServiceTransport, None] = None,
+            client_options: Optional[client_options_lib.ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the job service client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, JobServiceTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+ (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, JobServiceTransport): + # transport is a JobServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_custom_job(self, + request: job_service.CreateCustomJobRequest = None, + *, + parent: str = None, + custom_job: gca_custom_job.CustomJob = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_custom_job.CustomJob: + r"""Creates a CustomJob. A created CustomJob right away + will be attempted to be run. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateCustomJobRequest): + The request object. Request message for + [JobService.CreateCustomJob][google.cloud.aiplatform.v1beta1.JobService.CreateCustomJob]. + parent (str): + Required. The resource name of the Location to create + the CustomJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + custom_job (google.cloud.aiplatform_v1beta1.types.CustomJob): + Required. The CustomJob to create. + This corresponds to the ``custom_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.CustomJob: + Represents a job that runs custom + workloads such as a Docker container or + a Python package. A CustomJob can have + multiple worker pools and each worker + pool can have its own machine and input + spec. A CustomJob will be cleaned up + once the job enters terminal state + (failed or succeeded). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, custom_job]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CreateCustomJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.CreateCustomJobRequest): + request = job_service.CreateCustomJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if custom_job is not None: + request.custom_job = custom_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_custom_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
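+        # Illustrative caller-side sketch (hypothetical names and values,
+        # not part of the generated surface; `worker_pool_specs` contents
+        # are elided):
+        #
+        #   client = JobServiceClient()
+        #   job = client.create_custom_job(
+        #       parent="projects/my-project/locations/us-central1",
+        #       custom_job=gca_custom_job.CustomJob(
+        #           display_name="my-custom-job",
+        #           job_spec=gca_custom_job.CustomJobSpec(
+        #               worker_pool_specs=[...],  # machine + container config
+        #           ),
+        #       ),
+        #   )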
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_custom_job(self, + request: job_service.GetCustomJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> custom_job.CustomJob: + r"""Gets a CustomJob. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetCustomJobRequest): + The request object. Request message for + [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob]. + name (str): + Required. The name of the CustomJob resource. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.CustomJob: + Represents a job that runs custom + workloads such as a Docker container or + a Python package. A CustomJob can have + multiple worker pools and each worker + pool can have its own machine and input + spec. A CustomJob will be cleaned up + once the job enters terminal state + (failed or succeeded). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.GetCustomJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.GetCustomJobRequest): + request = job_service.GetCustomJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_custom_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_custom_jobs(self, + request: job_service.ListCustomJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListCustomJobsPager: + r"""Lists CustomJobs in a Location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListCustomJobsRequest): + The request object. Request message for + [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs]. + parent (str): + Required. The resource name of the Location to list the + CustomJobs from. 
Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListCustomJobsPager: + Response message for + [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.ListCustomJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.ListCustomJobsRequest): + request = job_service.ListCustomJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_custom_jobs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListCustomJobsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_custom_job(self, + request: job_service.DeleteCustomJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a CustomJob. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteCustomJobRequest): + The request object. Request message for + [JobService.DeleteCustomJob][google.cloud.aiplatform.v1beta1.JobService.DeleteCustomJob]. + name (str): + Required. The name of the CustomJob resource to be + deleted. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.DeleteCustomJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.DeleteCustomJobRequest): + request = job_service.DeleteCustomJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_custom_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def cancel_custom_job(self, + request: job_service.CancelCustomJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a CustomJob. Starts asynchronous cancellation on the + CustomJob. The server makes a best effort to cancel the job, but + success is not guaranteed. Clients can use + [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On successful + cancellation, the CustomJob is not deleted; instead it becomes a + job with a + [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] + is set to ``CANCELLED``. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CancelCustomJobRequest): + The request object. Request message for + [JobService.CancelCustomJob][google.cloud.aiplatform.v1beta1.JobService.CancelCustomJob]. + name (str): + Required. The name of the CustomJob to cancel. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CancelCustomJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.CancelCustomJobRequest): + request = job_service.CancelCustomJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_custom_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def create_data_labeling_job(self, + request: job_service.CreateDataLabelingJobRequest = None, + *, + parent: str = None, + data_labeling_job: gca_data_labeling_job.DataLabelingJob = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_data_labeling_job.DataLabelingJob: + r"""Creates a DataLabelingJob. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateDataLabelingJobRequest): + The request object. Request message for + [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CreateDataLabelingJob]. + parent (str): + Required. The parent of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + data_labeling_job (google.cloud.aiplatform_v1beta1.types.DataLabelingJob): + Required. The DataLabelingJob to + create. + + This corresponds to the ``data_labeling_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.DataLabelingJob: + DataLabelingJob is used to trigger a + human labeling job on unlabeled data + from the following Dataset: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
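+        # Illustrative note: `request` and the flattened arguments are
+        # mutually exclusive, as the guard below enforces; for example
+        # (hypothetical values), this raises ValueError:
+        #
+        #   client.create_data_labeling_job(
+        #       request=job_service.CreateDataLabelingJobRequest(
+        #           parent="projects/my-project/locations/us-central1"),
+        #       parent="projects/my-project/locations/us-central1",
+        #   )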
+ has_flattened_params = any([parent, data_labeling_job]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CreateDataLabelingJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.CreateDataLabelingJobRequest): + request = job_service.CreateDataLabelingJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if data_labeling_job is not None: + request.data_labeling_job = data_labeling_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_data_labeling_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_data_labeling_job(self, + request: job_service.GetDataLabelingJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> data_labeling_job.DataLabelingJob: + r"""Gets a DataLabelingJob. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetDataLabelingJobRequest): + The request object. Request message for + [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.GetDataLabelingJob]. + name (str): + Required. The name of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.DataLabelingJob: + DataLabelingJob is used to trigger a + human labeling job on unlabeled data + from the following Dataset: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.GetDataLabelingJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.GetDataLabelingJobRequest): + request = job_service.GetDataLabelingJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_data_labeling_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_data_labeling_jobs(self, + request: job_service.ListDataLabelingJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListDataLabelingJobsPager: + r"""Lists DataLabelingJobs in a Location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsRequest): + The request object. Request message for + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. + parent (str): + Required. The parent of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListDataLabelingJobsPager: + Response message for + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.ListDataLabelingJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.ListDataLabelingJobsRequest): + request = job_service.ListDataLabelingJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_data_labeling_jobs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
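+        # Illustrative iteration over the pager (hypothetical parent value);
+        # additional pages are fetched lazily as iteration crosses a page
+        # boundary:
+        #
+        #   for job in client.list_data_labeling_jobs(
+        #           parent="projects/my-project/locations/us-central1"):
+        #       print(job.name)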
+ response = pagers.ListDataLabelingJobsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_data_labeling_job(self, + request: job_service.DeleteDataLabelingJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a DataLabelingJob. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteDataLabelingJobRequest): + The request object. Request message for + [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.DeleteDataLabelingJob]. + name (str): + Required. The name of the DataLabelingJob to be deleted. + Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.DeleteDataLabelingJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.DeleteDataLabelingJobRequest): + request = job_service.DeleteDataLabelingJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_data_labeling_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
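+        # Illustrative note: the returned future resolves to
+        # google.protobuf.empty_pb2.Empty; e.g. (hypothetical job name):
+        #
+        #   op = client.delete_data_labeling_job(name=job_name)
+        #   op.result()  # blocks until the server finishes the deletion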
+ return response + + def cancel_data_labeling_job(self, + request: job_service.CancelDataLabelingJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a DataLabelingJob. Success of cancellation is + not guaranteed. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CancelDataLabelingJobRequest): + The request object. Request message for + [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CancelDataLabelingJob]. + name (str): + Required. The name of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CancelDataLabelingJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.CancelDataLabelingJobRequest): + request = job_service.CancelDataLabelingJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_data_labeling_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def create_hyperparameter_tuning_job(self, + request: job_service.CreateHyperparameterTuningJobRequest = None, + *, + parent: str = None, + hyperparameter_tuning_job: gca_hyperparameter_tuning_job.HyperparameterTuningJob = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_hyperparameter_tuning_job.HyperparameterTuningJob: + r"""Creates a HyperparameterTuningJob + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateHyperparameterTuningJobRequest): + The request object. Request message for + [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CreateHyperparameterTuningJob]. + parent (str): + Required. The resource name of the Location to create + the HyperparameterTuningJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ hyperparameter_tuning_job (google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob): + Required. The HyperparameterTuningJob + to create. + + This corresponds to the ``hyperparameter_tuning_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob: + Represents a HyperparameterTuningJob. + A HyperparameterTuningJob has a Study + specification and multiple CustomJobs + with identical CustomJob specification. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, hyperparameter_tuning_job]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CreateHyperparameterTuningJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.CreateHyperparameterTuningJobRequest): + request = job_service.CreateHyperparameterTuningJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if hyperparameter_tuning_job is not None: + request.hyperparameter_tuning_job = hyperparameter_tuning_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_hyperparameter_tuning_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_hyperparameter_tuning_job(self, + request: job_service.GetHyperparameterTuningJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> hyperparameter_tuning_job.HyperparameterTuningJob: + r"""Gets a HyperparameterTuningJob + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetHyperparameterTuningJobRequest): + The request object. Request message for + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob]. + name (str): + Required. The name of the HyperparameterTuningJob + resource. Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob: + Represents a HyperparameterTuningJob. + A HyperparameterTuningJob has a Study + specification and multiple CustomJobs + with identical CustomJob specification. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.GetHyperparameterTuningJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.GetHyperparameterTuningJobRequest): + request = job_service.GetHyperparameterTuningJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_hyperparameter_tuning_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_hyperparameter_tuning_jobs(self, + request: job_service.ListHyperparameterTuningJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListHyperparameterTuningJobsPager: + r"""Lists HyperparameterTuningJobs in a Location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsRequest): + The request object. Request message for + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs]. + parent (str): + Required. The resource name of the Location to list the + HyperparameterTuningJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListHyperparameterTuningJobsPager: + Response message for + [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.ListHyperparameterTuningJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.ListHyperparameterTuningJobsRequest): + request = job_service.ListHyperparameterTuningJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_hyperparameter_tuning_jobs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListHyperparameterTuningJobsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_hyperparameter_tuning_job(self, + request: job_service.DeleteHyperparameterTuningJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a HyperparameterTuningJob. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteHyperparameterTuningJobRequest): + The request object. Request message for + [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.DeleteHyperparameterTuningJob]. + name (str): + Required. The name of the HyperparameterTuningJob + resource to be deleted. Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.DeleteHyperparameterTuningJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.DeleteHyperparameterTuningJobRequest): + request = job_service.DeleteHyperparameterTuningJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_hyperparameter_tuning_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def cancel_hyperparameter_tuning_job(self, + request: job_service.CancelHyperparameterTuningJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a HyperparameterTuningJob. Starts asynchronous + cancellation on the HyperparameterTuningJob. The server makes a + best effort to cancel the job, but success is not guaranteed. + Clients can use + [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On successful + cancellation, the HyperparameterTuningJob is not deleted; + instead it becomes a job with a + [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state] + is set to ``CANCELLED``. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CancelHyperparameterTuningJobRequest): + The request object. Request message for + [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CancelHyperparameterTuningJob]. + name (str): + Required. The name of the HyperparameterTuningJob to + cancel. Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a job_service.CancelHyperparameterTuningJobRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, job_service.CancelHyperparameterTuningJobRequest):
+ request = job_service.CancelHyperparameterTuningJobRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.cancel_hyperparameter_tuning_job]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("name", request.name),
+ )),
+ )
+
+ # Send the request.
+ rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ def create_batch_prediction_job(self,
+ request: job_service.CreateBatchPredictionJobRequest = None,
+ *,
+ parent: str = None,
+ batch_prediction_job: gca_batch_prediction_job.BatchPredictionJob = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gca_batch_prediction_job.BatchPredictionJob:
+ r"""Creates a BatchPredictionJob. Once created, a
+ BatchPredictionJob immediately attempts to start.
+
+ Args:
+ request (google.cloud.aiplatform_v1beta1.types.CreateBatchPredictionJobRequest):
+ The request object. Request message for
+ [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CreateBatchPredictionJob].
+ parent (str):
+ Required. The resource name of the Location to create
+ the BatchPredictionJob in. Format:
+ ``projects/{project}/locations/{location}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ batch_prediction_job (google.cloud.aiplatform_v1beta1.types.BatchPredictionJob):
+ Required. The BatchPredictionJob to
+ create.
+
+ This corresponds to the ``batch_prediction_job`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1beta1.types.BatchPredictionJob:
+ A job that uses a [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to produce predictions
+ on multiple [input
+ instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config].
+ If predictions for a significant portion of the
+ instances fail, the job may finish without attempting
+ predictions for all remaining instances.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
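+ # NOTE: an illustrative construction sketch; the field values are
+ # placeholders and the authoritative schema is
+ # aiplatform_v1beta1.types.BatchPredictionJob (proto-plus coerces the
+ # dicts to the corresponding message types):
+ #
+ #     job = aiplatform_v1beta1.BatchPredictionJob(
+ #         display_name="my-batch-job",
+ #         model="projects/my-project/locations/us-central1/models/456",
+ #         input_config={"instances_format": "jsonl",
+ #                       "gcs_source": {"uris": ["gs://my-bucket/in.jsonl"]}},
+ #         output_config={"predictions_format": "jsonl",
+ #                        "gcs_destination": {"output_uri_prefix": "gs://my-bucket/out/"}},
+ #     )
+ #     created = client.create_batch_prediction_job(
+ #         parent="projects/my-project/locations/us-central1",
+ #         batch_prediction_job=job)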
+ has_flattened_params = any([parent, batch_prediction_job])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a job_service.CreateBatchPredictionJobRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, job_service.CreateBatchPredictionJobRequest):
+ request = job_service.CreateBatchPredictionJobRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if batch_prediction_job is not None:
+ request.batch_prediction_job = batch_prediction_job
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.create_batch_prediction_job]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("parent", request.parent),
+ )),
+ )
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def get_batch_prediction_job(self,
+ request: job_service.GetBatchPredictionJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> batch_prediction_job.BatchPredictionJob:
+ r"""Gets a BatchPredictionJob.
+
+ Args:
+ request (google.cloud.aiplatform_v1beta1.types.GetBatchPredictionJobRequest):
+ The request object. Request message for
+ [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob].
+ name (str):
+ Required. The name of the BatchPredictionJob resource.
+ Format:
+ ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1beta1.types.BatchPredictionJob:
+ A job that uses a [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to produce predictions
+ on multiple [input
+ instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config].
+ If predictions for a significant portion of the
+ instances fail, the job may finish without attempting
+ predictions for all remaining instances.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a job_service.GetBatchPredictionJobRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
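+ # NOTE: because of the isinstance check below, passing a pre-built
+ # request object skips the copy, e.g. (placeholder name):
+ #
+ #     request = aiplatform_v1beta1.GetBatchPredictionJobRequest(
+ #         name="projects/my-project/locations/us-central1/batchPredictionJobs/789")
+ #     job = client.get_batch_prediction_job(request=request)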
+ if not isinstance(request, job_service.GetBatchPredictionJobRequest): + request = job_service.GetBatchPredictionJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_batch_prediction_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_batch_prediction_jobs(self, + request: job_service.ListBatchPredictionJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListBatchPredictionJobsPager: + r"""Lists BatchPredictionJobs in a Location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsRequest): + The request object. Request message for + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs]. + parent (str): + Required. The resource name of the Location to list the + BatchPredictionJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListBatchPredictionJobsPager: + Response message for + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.ListBatchPredictionJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.ListBatchPredictionJobsRequest): + request = job_service.ListBatchPredictionJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_batch_prediction_jobs] + + # Certain fields should be provided within the metadata header; + # add these here. 
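+ # NOTE: the routing header below is sent as the "x-goog-request-params"
+ # gRPC metadata entry (e.g. "parent=projects/my-project/locations/us-central1"),
+ # which lets the backend route the call to the right regional service.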
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListBatchPredictionJobsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_batch_prediction_job(self, + request: job_service.DeleteBatchPredictionJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a BatchPredictionJob. Can only be called on + jobs that already finished. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteBatchPredictionJobRequest): + The request object. Request message for + [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.DeleteBatchPredictionJob]. + name (str): + Required. The name of the BatchPredictionJob resource to + be deleted. Format: + ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.DeleteBatchPredictionJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.DeleteBatchPredictionJobRequest): + request = job_service.DeleteBatchPredictionJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_batch_prediction_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = gac_operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ empty_pb2.Empty,
+ metadata_type=gca_operation.DeleteOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def cancel_batch_prediction_job(self,
+ request: job_service.CancelBatchPredictionJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> None:
+ r"""Cancels a BatchPredictionJob.
+
+ Starts asynchronous cancellation on the BatchPredictionJob. The
+ server makes the best effort to cancel the job, but success is
+ not guaranteed. Clients can use
+ [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob]
+ or other methods to check whether the cancellation succeeded or
+ whether the job completed despite cancellation. On a successful
+ cancellation, the BatchPredictionJob is not deleted; instead, its
+ [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state]
+ is set to ``CANCELLED``. Any files already outputted by the job
+ are not deleted.
+
+ Args:
+ request (google.cloud.aiplatform_v1beta1.types.CancelBatchPredictionJobRequest):
+ The request object. Request message for
+ [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CancelBatchPredictionJob].
+ name (str):
+ Required. The name of the BatchPredictionJob to cancel.
+ Format:
+ ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a job_service.CancelBatchPredictionJobRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, job_service.CancelBatchPredictionJobRequest):
+ request = job_service.CancelBatchPredictionJobRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.cancel_batch_prediction_job]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("name", request.name),
+ )),
+ )
+
+ # Send the request.
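+ # NOTE: cancellation is asynchronous and best-effort, and this call
+ # returns no payload; poll the job to observe the outcome. Sketch
+ # (job_name is a placeholder):
+ #
+ #     client.cancel_batch_prediction_job(name=job_name)
+ #     job = client.get_batch_prediction_job(name=job_name)
+ #     # job.state becomes JOB_STATE_CANCELLED once cancellation lands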
+ rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def create_model_deployment_monitoring_job(self, + request: job_service.CreateModelDeploymentMonitoringJobRequest = None, + *, + parent: str = None, + model_deployment_monitoring_job: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob: + r"""Creates a ModelDeploymentMonitoringJob. It will run + periodically on a configured interval. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateModelDeploymentMonitoringJobRequest): + The request object. Request message for + [JobService.CreateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.CreateModelDeploymentMonitoringJob]. + parent (str): + Required. The parent of the + ModelDeploymentMonitoringJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model_deployment_monitoring_job (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob): + Required. The + ModelDeploymentMonitoringJob to create + + This corresponds to the ``model_deployment_monitoring_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob: + Represents a job that runs + periodically to monitor the deployed + models in an endpoint. It will analyze + the logged training & prediction data to + detect any abnormal behaviors. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model_deployment_monitoring_job]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.CreateModelDeploymentMonitoringJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.CreateModelDeploymentMonitoringJobRequest): + request = job_service.CreateModelDeploymentMonitoringJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if model_deployment_monitoring_job is not None: + request.model_deployment_monitoring_job = model_deployment_monitoring_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_model_deployment_monitoring_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
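+ # NOTE: an illustrative sketch only; display_name and endpoint are
+ # placeholders, and a production job also needs the monitoring
+ # objective, schedule, and logging-sampling configs defined on
+ # aiplatform_v1beta1.types.ModelDeploymentMonitoringJob:
+ #
+ #     mdm_job = aiplatform_v1beta1.ModelDeploymentMonitoringJob(
+ #         display_name="my-monitoring-job",
+ #         endpoint="projects/my-project/locations/us-central1/endpoints/123")
+ #     created = client.create_model_deployment_monitoring_job(
+ #         parent="projects/my-project/locations/us-central1",
+ #         model_deployment_monitoring_job=mdm_job)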
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def search_model_deployment_monitoring_stats_anomalies(self,
+ request: job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest = None,
+ *,
+ model_deployment_monitoring_job: str = None,
+ deployed_model_id: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager:
+ r"""Searches Model Monitoring Statistics generated within
+ a given time window.
+
+ Args:
+ request (google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest):
+ The request object. Request message for
+ [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies].
+ model_deployment_monitoring_job (str):
+ Required. ModelDeploymentMonitoringJob resource name.
+ Format:
+ ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}``
+
+ This corresponds to the ``model_deployment_monitoring_job`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ deployed_model_id (str):
+ Required. The DeployedModel ID of the
+ [google.cloud.aiplatform.master.ModelDeploymentMonitoringObjectiveConfig.deployed_model_id].
+
+ This corresponds to the ``deployed_model_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1beta1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager:
+ Response message for
+ [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies].
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([model_deployment_monitoring_job, deployed_model_id])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest):
+ request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if model_deployment_monitoring_job is not None:
+ request.model_deployment_monitoring_job = model_deployment_monitoring_job
+ if deployed_model_id is not None:
+ request.deployed_model_id = deployed_model_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
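+ # NOTE: a usage sketch with placeholder identifiers; fields beyond the
+ # two flattened ones (such as the monitored objectives and the time
+ # interval) are set on the request message itself:
+ #
+ #     pager = client.search_model_deployment_monitoring_stats_anomalies(
+ #         model_deployment_monitoring_job=("projects/my-project/locations/"
+ #             "us-central1/modelDeploymentMonitoringJobs/123"),
+ #         deployed_model_id="456")
+ #     for stats in pager:
+ #         print(stats)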
+ rpc = self._transport._wrapped_methods[self._transport.search_model_deployment_monitoring_stats_anomalies] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("model_deployment_monitoring_job", request.model_deployment_monitoring_job), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_model_deployment_monitoring_job(self, + request: job_service.GetModelDeploymentMonitoringJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_deployment_monitoring_job.ModelDeploymentMonitoringJob: + r"""Gets a ModelDeploymentMonitoringJob. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetModelDeploymentMonitoringJobRequest): + The request object. Request message for + [JobService.GetModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.GetModelDeploymentMonitoringJob]. + name (str): + Required. The resource name of the + ModelDeploymentMonitoringJob. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob: + Represents a job that runs + periodically to monitor the deployed + models in an endpoint. It will analyze + the logged training & prediction data to + detect any abnormal behaviors. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.GetModelDeploymentMonitoringJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.GetModelDeploymentMonitoringJobRequest): + request = job_service.GetModelDeploymentMonitoringJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_model_deployment_monitoring_job] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_model_deployment_monitoring_jobs(self, + request: job_service.ListModelDeploymentMonitoringJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelDeploymentMonitoringJobsPager: + r"""Lists ModelDeploymentMonitoringJobs in a Location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest): + The request object. Request message for + [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. + parent (str): + Required. The parent of the + ModelDeploymentMonitoringJob. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListModelDeploymentMonitoringJobsPager: + Response message for + [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.ListModelDeploymentMonitoringJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.ListModelDeploymentMonitoringJobsRequest): + request = job_service.ListModelDeploymentMonitoringJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_model_deployment_monitoring_jobs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListModelDeploymentMonitoringJobsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
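+ # NOTE: iterating the pager transparently fetches follow-up pages and
+ # yields ModelDeploymentMonitoringJob messages; page-at-a-time access
+ # is available through its `pages` property. Sketch (placeholder parent):
+ #
+ #     for job in client.list_model_deployment_monitoring_jobs(
+ #             parent="projects/my-project/locations/us-central1"):
+ #         print(job.name, job.state)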
+ return response + + def update_model_deployment_monitoring_job(self, + request: job_service.UpdateModelDeploymentMonitoringJobRequest = None, + *, + model_deployment_monitoring_job: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Updates a ModelDeploymentMonitoringJob. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UpdateModelDeploymentMonitoringJobRequest): + The request object. Request message for + [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob]. + model_deployment_monitoring_job (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob): + Required. The model monitoring + configuration which replaces the + resource on the server. + + This corresponds to the ``model_deployment_monitoring_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to + the resource. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob` Represents a job that runs periodically to monitor the deployed models in an + endpoint. It will analyze the logged training & + prediction data to detect any abnormal behaviors. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model_deployment_monitoring_job, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.UpdateModelDeploymentMonitoringJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.UpdateModelDeploymentMonitoringJobRequest): + request = job_service.UpdateModelDeploymentMonitoringJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model_deployment_monitoring_job is not None: + request.model_deployment_monitoring_job = model_deployment_monitoring_job + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_model_deployment_monitoring_job] + + # Certain fields should be provided within the metadata header; + # add these here. 
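+ # NOTE: a minimal update sketch; the FieldMask paths are illustrative
+ # and `mdm_job` stands in for a previously fetched and modified job:
+ #
+ #     from google.protobuf import field_mask_pb2
+ #     op = client.update_model_deployment_monitoring_job(
+ #         model_deployment_monitoring_job=mdm_job,
+ #         update_mask=field_mask_pb2.FieldMask(paths=["display_name"]))
+ #     updated = op.result()  # resolves to the updated job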
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("model_deployment_monitoring_job.name", request.model_deployment_monitoring_job.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + metadata_type=job_service.UpdateModelDeploymentMonitoringJobOperationMetadata, + ) + + # Done; return the response. + return response + + def delete_model_deployment_monitoring_job(self, + request: job_service.DeleteModelDeploymentMonitoringJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a ModelDeploymentMonitoringJob. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteModelDeploymentMonitoringJobRequest): + The request object. Request message for + [JobService.DeleteModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.DeleteModelDeploymentMonitoringJob]. + name (str): + Required. The resource name of the model monitoring job + to delete. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a job_service.DeleteModelDeploymentMonitoringJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, job_service.DeleteModelDeploymentMonitoringJobRequest): + request = job_service.DeleteModelDeploymentMonitoringJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.delete_model_deployment_monitoring_job]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("name", request.name),
+ )),
+ )
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = gac_operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ empty_pb2.Empty,
+ metadata_type=gca_operation.DeleteOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def pause_model_deployment_monitoring_job(self,
+ request: job_service.PauseModelDeploymentMonitoringJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> None:
+ r"""Pauses a ModelDeploymentMonitoringJob. If the job is running,
+ the server makes a best effort to cancel the job. It sets
+ [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state]
+ to 'PAUSED'.
+
+ Args:
+ request (google.cloud.aiplatform_v1beta1.types.PauseModelDeploymentMonitoringJobRequest):
+ The request object. Request message for
+ [JobService.PauseModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.PauseModelDeploymentMonitoringJob].
+ name (str):
+ Required. The resource name of the
+ ModelDeploymentMonitoringJob to pause. Format:
+ ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a job_service.PauseModelDeploymentMonitoringJobRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, job_service.PauseModelDeploymentMonitoringJobRequest):
+ request = job_service.PauseModelDeploymentMonitoringJobRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.pause_model_deployment_monitoring_job]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("name", request.name),
+ )),
+ )
+
+ # Send the request.
+ rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ def resume_model_deployment_monitoring_job(self,
+ request: job_service.ResumeModelDeploymentMonitoringJobRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> None:
+ r"""Resumes a paused ModelDeploymentMonitoringJob. It
+ will start to run from the next scheduled time. A deleted
+ ModelDeploymentMonitoringJob can't be resumed.
+
+ Args:
+ request (google.cloud.aiplatform_v1beta1.types.ResumeModelDeploymentMonitoringJobRequest):
+ The request object. Request message for
+ [JobService.ResumeModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.ResumeModelDeploymentMonitoringJob].
+ name (str):
+ Required. The resource name of the
+ ModelDeploymentMonitoringJob to resume. Format:
+ ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a job_service.ResumeModelDeploymentMonitoringJobRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, job_service.ResumeModelDeploymentMonitoringJobRequest):
+ request = job_service.ResumeModelDeploymentMonitoringJobRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.resume_model_deployment_monitoring_job]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("name", request.name),
+ )),
+ )
+
+ # Send the request.
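+ # NOTE: pause/resume return no payload; a paused job keeps its
+ # resource and can be re-activated later, while a deleted job cannot.
+ # Sketch (job_name is a placeholder):
+ #
+ #     client.pause_model_deployment_monitoring_job(name=job_name)
+ #     client.resume_model_deployment_monitoring_job(name=job_name)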
+ rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "JobServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py new file mode 100644 index 0000000000..2b97cbd84a --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py @@ -0,0 +1,756 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.cloud.aiplatform_v1beta1.types import batch_prediction_job +from google.cloud.aiplatform_v1beta1.types import custom_job +from google.cloud.aiplatform_v1beta1.types import data_labeling_job +from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job +from google.cloud.aiplatform_v1beta1.types import job_service +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job + + +class ListCustomJobsPager: + """A pager for iterating through ``list_custom_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListCustomJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``custom_jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListCustomJobs`` requests and continue to iterate + through the ``custom_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListCustomJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., job_service.ListCustomJobsResponse], + request: job_service.ListCustomJobsRequest, + response: job_service.ListCustomJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListCustomJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListCustomJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = job_service.ListCustomJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[job_service.ListCustomJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[custom_job.CustomJob]: + for page in self.pages: + yield from page.custom_jobs + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListCustomJobsAsyncPager: + """A pager for iterating through ``list_custom_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListCustomJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``custom_jobs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListCustomJobs`` requests and continue to iterate + through the ``custom_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListCustomJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[job_service.ListCustomJobsResponse]], + request: job_service.ListCustomJobsRequest, + response: job_service.ListCustomJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListCustomJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListCustomJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = job_service.ListCustomJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[job_service.ListCustomJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[custom_job.CustomJob]: + async def async_generator(): + async for page in self.pages: + for response in page.custom_jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListDataLabelingJobsPager: + """A pager for iterating through ``list_data_labeling_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``data_labeling_jobs`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListDataLabelingJobs`` requests and continue to iterate + through the ``data_labeling_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., job_service.ListDataLabelingJobsResponse], + request: job_service.ListDataLabelingJobsRequest, + response: job_service.ListDataLabelingJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = job_service.ListDataLabelingJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[job_service.ListDataLabelingJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[data_labeling_job.DataLabelingJob]: + for page in self.pages: + yield from page.data_labeling_jobs + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListDataLabelingJobsAsyncPager: + """A pager for iterating through ``list_data_labeling_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``data_labeling_jobs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListDataLabelingJobs`` requests and continue to iterate + through the ``data_labeling_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[job_service.ListDataLabelingJobsResponse]], + request: job_service.ListDataLabelingJobsRequest, + response: job_service.ListDataLabelingJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = job_service.ListDataLabelingJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[job_service.ListDataLabelingJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[data_labeling_job.DataLabelingJob]: + async def async_generator(): + async for page in self.pages: + for response in page.data_labeling_jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListHyperparameterTuningJobsPager: + """A pager for iterating through ``list_hyperparameter_tuning_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``hyperparameter_tuning_jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListHyperparameterTuningJobs`` requests and continue to iterate + through the ``hyperparameter_tuning_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., job_service.ListHyperparameterTuningJobsResponse], + request: job_service.ListHyperparameterTuningJobsRequest, + response: job_service.ListHyperparameterTuningJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = job_service.ListHyperparameterTuningJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[job_service.ListHyperparameterTuningJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[hyperparameter_tuning_job.HyperparameterTuningJob]: + for page in self.pages: + yield from page.hyperparameter_tuning_jobs + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListHyperparameterTuningJobsAsyncPager: + """A pager for iterating through ``list_hyperparameter_tuning_jobs`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``hyperparameter_tuning_jobs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListHyperparameterTuningJobs`` requests and continue to iterate + through the ``hyperparameter_tuning_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[job_service.ListHyperparameterTuningJobsResponse]], + request: job_service.ListHyperparameterTuningJobsRequest, + response: job_service.ListHyperparameterTuningJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = job_service.ListHyperparameterTuningJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[job_service.ListHyperparameterTuningJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[hyperparameter_tuning_job.HyperparameterTuningJob]: + async def async_generator(): + async for page in self.pages: + for response in page.hyperparameter_tuning_jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListBatchPredictionJobsPager: + """A pager for iterating through ``list_batch_prediction_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``batch_prediction_jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListBatchPredictionJobs`` requests and continue to iterate + through the ``batch_prediction_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., job_service.ListBatchPredictionJobsResponse], + request: job_service.ListBatchPredictionJobsRequest, + response: job_service.ListBatchPredictionJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = job_service.ListBatchPredictionJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[job_service.ListBatchPredictionJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[batch_prediction_job.BatchPredictionJob]: + for page in self.pages: + yield from page.batch_prediction_jobs + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListBatchPredictionJobsAsyncPager: + """A pager for iterating through ``list_batch_prediction_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``batch_prediction_jobs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListBatchPredictionJobs`` requests and continue to iterate + through the ``batch_prediction_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[job_service.ListBatchPredictionJobsResponse]], + request: job_service.ListBatchPredictionJobsRequest, + response: job_service.ListBatchPredictionJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = job_service.ListBatchPredictionJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[job_service.ListBatchPredictionJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[batch_prediction_job.BatchPredictionJob]: + async def async_generator(): + async for page in self.pages: + for response in page.batch_prediction_jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class SearchModelDeploymentMonitoringStatsAnomaliesPager: + """A pager for iterating through ``search_model_deployment_monitoring_stats_anomalies`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``monitoring_stats`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``SearchModelDeploymentMonitoringStatsAnomalies`` requests and continue to iterate + through the ``monitoring_stats`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse], + request: job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest, + response: job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies]: + for page in self.pages: + yield from page.monitoring_stats + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager: + """A pager for iterating through ``search_model_deployment_monitoring_stats_anomalies`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``monitoring_stats`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``SearchModelDeploymentMonitoringStatsAnomalies`` requests and continue to iterate + through the ``monitoring_stats`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]], + request: job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest, + response: job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies]: + async def async_generator(): + async for page in self.pages: + for response in page.monitoring_stats: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelDeploymentMonitoringJobsPager: + """A pager for iterating through ``list_model_deployment_monitoring_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``model_deployment_monitoring_jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListModelDeploymentMonitoringJobs`` requests and continue to iterate + through the ``model_deployment_monitoring_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., job_service.ListModelDeploymentMonitoringJobsResponse], + request: job_service.ListModelDeploymentMonitoringJobsRequest, + response: job_service.ListModelDeploymentMonitoringJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = job_service.ListModelDeploymentMonitoringJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[job_service.ListModelDeploymentMonitoringJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: + for page in self.pages: + yield from page.model_deployment_monitoring_jobs + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelDeploymentMonitoringJobsAsyncPager: + """A pager for iterating through ``list_model_deployment_monitoring_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``model_deployment_monitoring_jobs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListModelDeploymentMonitoringJobs`` requests and continue to iterate + through the ``model_deployment_monitoring_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[job_service.ListModelDeploymentMonitoringJobsResponse]], + request: job_service.ListModelDeploymentMonitoringJobsRequest, + response: job_service.ListModelDeploymentMonitoringJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = job_service.ListModelDeploymentMonitoringJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[job_service.ListModelDeploymentMonitoringJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: + async def async_generator(): + async for page in self.pages: + for response in page.model_deployment_monitoring_jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/__init__.py new file mode 100644 index 0000000000..13c5f7ade5 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import JobServiceTransport +from .grpc import JobServiceGrpcTransport +from .grpc_asyncio import JobServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]] +_transport_registry['grpc'] = JobServiceGrpcTransport +_transport_registry['grpc_asyncio'] = JobServiceGrpcAsyncIOTransport + +__all__ = ( + 'JobServiceTransport', + 'JobServiceGrpcTransport', + 'JobServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py new file mode 100644 index 0000000000..b611bb10d8 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py @@ -0,0 +1,564 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import batch_prediction_job +from google.cloud.aiplatform_v1beta1.types import batch_prediction_job as gca_batch_prediction_job +from google.cloud.aiplatform_v1beta1.types import custom_job +from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job +from google.cloud.aiplatform_v1beta1.types import data_labeling_job +from google.cloud.aiplatform_v1beta1.types import data_labeling_job as gca_data_labeling_job +from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job +from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job +from google.cloud.aiplatform_v1beta1.types import job_service +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class JobServiceTransport(abc.ABC): + """Abstract transport class for JobService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. 
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether a self-signed JWT should
+                be used for service account credentials.
+        """
+        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+        if ':' not in host:
+            host += ':443'
+        self._host = host
+
+        scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
+
+        # Save the scopes.
+        self._scopes = scopes
+
+        # If no credentials are provided, then determine the appropriate
+        # defaults.
+        if credentials and credentials_file:
+            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
+
+        if credentials_file is not None:
+            credentials, _ = google.auth.load_credentials_from_file(
+                                credentials_file,
+                                **scopes_kwargs,
+                                quota_project_id=quota_project_id
+                            )
+
+        elif credentials is None:
+            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
+
+        # If the credentials are service account credentials, then always try to use a self-signed JWT.
+        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
+            credentials = credentials.with_always_use_jwt_access(True)
+
+        # Save the credentials.
+        self._credentials = credentials
+
+    # TODO(busunkim): This method is in the base transport
+    # to avoid duplicating code across the transport classes. These functions
+    # should be deleted once the minimum required version of google-auth is increased.
+
+    # TODO: Remove this function once google-auth >= 1.25.0 is required
+    @classmethod
+    def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
+        """Returns scopes kwargs to pass to google-auth methods, depending on the google-auth version."""
+
+        scopes_kwargs = {}
+
+        if _GOOGLE_AUTH_VERSION and (
+            packaging.version.parse(_GOOGLE_AUTH_VERSION)
+            >= packaging.version.parse("1.25.0")
+        ):
+            scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
+        else:
+            scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
+
+        return scopes_kwargs
+
+    def _prep_wrapped_messages(self, client_info):
+        # Precompute the wrapped methods.
+ self._wrapped_methods = { + self.create_custom_job: gapic_v1.method.wrap_method( + self.create_custom_job, + default_timeout=5.0, + client_info=client_info, + ), + self.get_custom_job: gapic_v1.method.wrap_method( + self.get_custom_job, + default_timeout=5.0, + client_info=client_info, + ), + self.list_custom_jobs: gapic_v1.method.wrap_method( + self.list_custom_jobs, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_custom_job: gapic_v1.method.wrap_method( + self.delete_custom_job, + default_timeout=5.0, + client_info=client_info, + ), + self.cancel_custom_job: gapic_v1.method.wrap_method( + self.cancel_custom_job, + default_timeout=5.0, + client_info=client_info, + ), + self.create_data_labeling_job: gapic_v1.method.wrap_method( + self.create_data_labeling_job, + default_timeout=5.0, + client_info=client_info, + ), + self.get_data_labeling_job: gapic_v1.method.wrap_method( + self.get_data_labeling_job, + default_timeout=5.0, + client_info=client_info, + ), + self.list_data_labeling_jobs: gapic_v1.method.wrap_method( + self.list_data_labeling_jobs, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_data_labeling_job: gapic_v1.method.wrap_method( + self.delete_data_labeling_job, + default_timeout=5.0, + client_info=client_info, + ), + self.cancel_data_labeling_job: gapic_v1.method.wrap_method( + self.cancel_data_labeling_job, + default_timeout=5.0, + client_info=client_info, + ), + self.create_hyperparameter_tuning_job: gapic_v1.method.wrap_method( + self.create_hyperparameter_tuning_job, + default_timeout=5.0, + client_info=client_info, + ), + self.get_hyperparameter_tuning_job: gapic_v1.method.wrap_method( + self.get_hyperparameter_tuning_job, + default_timeout=5.0, + client_info=client_info, + ), + self.list_hyperparameter_tuning_jobs: gapic_v1.method.wrap_method( + self.list_hyperparameter_tuning_jobs, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_hyperparameter_tuning_job: gapic_v1.method.wrap_method( + self.delete_hyperparameter_tuning_job, + default_timeout=5.0, + client_info=client_info, + ), + self.cancel_hyperparameter_tuning_job: gapic_v1.method.wrap_method( + self.cancel_hyperparameter_tuning_job, + default_timeout=5.0, + client_info=client_info, + ), + self.create_batch_prediction_job: gapic_v1.method.wrap_method( + self.create_batch_prediction_job, + default_timeout=5.0, + client_info=client_info, + ), + self.get_batch_prediction_job: gapic_v1.method.wrap_method( + self.get_batch_prediction_job, + default_timeout=5.0, + client_info=client_info, + ), + self.list_batch_prediction_jobs: gapic_v1.method.wrap_method( + self.list_batch_prediction_jobs, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_batch_prediction_job: gapic_v1.method.wrap_method( + self.delete_batch_prediction_job, + default_timeout=5.0, + client_info=client_info, + ), + self.cancel_batch_prediction_job: gapic_v1.method.wrap_method( + self.cancel_batch_prediction_job, + default_timeout=5.0, + client_info=client_info, + ), + self.create_model_deployment_monitoring_job: gapic_v1.method.wrap_method( + self.create_model_deployment_monitoring_job, + default_timeout=60.0, + client_info=client_info, + ), + self.search_model_deployment_monitoring_stats_anomalies: gapic_v1.method.wrap_method( + self.search_model_deployment_monitoring_stats_anomalies, + default_timeout=5.0, + client_info=client_info, + ), + self.get_model_deployment_monitoring_job: gapic_v1.method.wrap_method( + self.get_model_deployment_monitoring_job, + 
default_timeout=5.0, + client_info=client_info, + ), + self.list_model_deployment_monitoring_jobs: gapic_v1.method.wrap_method( + self.list_model_deployment_monitoring_jobs, + default_timeout=5.0, + client_info=client_info, + ), + self.update_model_deployment_monitoring_job: gapic_v1.method.wrap_method( + self.update_model_deployment_monitoring_job, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_model_deployment_monitoring_job: gapic_v1.method.wrap_method( + self.delete_model_deployment_monitoring_job, + default_timeout=5.0, + client_info=client_info, + ), + self.pause_model_deployment_monitoring_job: gapic_v1.method.wrap_method( + self.pause_model_deployment_monitoring_job, + default_timeout=5.0, + client_info=client_info, + ), + self.resume_model_deployment_monitoring_job: gapic_v1.method.wrap_method( + self.resume_model_deployment_monitoring_job, + default_timeout=5.0, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_custom_job(self) -> Callable[ + [job_service.CreateCustomJobRequest], + Union[ + gca_custom_job.CustomJob, + Awaitable[gca_custom_job.CustomJob] + ]]: + raise NotImplementedError() + + @property + def get_custom_job(self) -> Callable[ + [job_service.GetCustomJobRequest], + Union[ + custom_job.CustomJob, + Awaitable[custom_job.CustomJob] + ]]: + raise NotImplementedError() + + @property + def list_custom_jobs(self) -> Callable[ + [job_service.ListCustomJobsRequest], + Union[ + job_service.ListCustomJobsResponse, + Awaitable[job_service.ListCustomJobsResponse] + ]]: + raise NotImplementedError() + + @property + def delete_custom_job(self) -> Callable[ + [job_service.DeleteCustomJobRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def cancel_custom_job(self) -> Callable[ + [job_service.CancelCustomJobRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def create_data_labeling_job(self) -> Callable[ + [job_service.CreateDataLabelingJobRequest], + Union[ + gca_data_labeling_job.DataLabelingJob, + Awaitable[gca_data_labeling_job.DataLabelingJob] + ]]: + raise NotImplementedError() + + @property + def get_data_labeling_job(self) -> Callable[ + [job_service.GetDataLabelingJobRequest], + Union[ + data_labeling_job.DataLabelingJob, + Awaitable[data_labeling_job.DataLabelingJob] + ]]: + raise NotImplementedError() + + @property + def list_data_labeling_jobs(self) -> Callable[ + [job_service.ListDataLabelingJobsRequest], + Union[ + job_service.ListDataLabelingJobsResponse, + Awaitable[job_service.ListDataLabelingJobsResponse] + ]]: + raise NotImplementedError() + + @property + def delete_data_labeling_job(self) -> Callable[ + [job_service.DeleteDataLabelingJobRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def cancel_data_labeling_job(self) -> Callable[ + [job_service.CancelDataLabelingJobRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def create_hyperparameter_tuning_job(self) -> Callable[ + [job_service.CreateHyperparameterTuningJobRequest], + Union[ + gca_hyperparameter_tuning_job.HyperparameterTuningJob, + 
Awaitable[gca_hyperparameter_tuning_job.HyperparameterTuningJob] + ]]: + raise NotImplementedError() + + @property + def get_hyperparameter_tuning_job(self) -> Callable[ + [job_service.GetHyperparameterTuningJobRequest], + Union[ + hyperparameter_tuning_job.HyperparameterTuningJob, + Awaitable[hyperparameter_tuning_job.HyperparameterTuningJob] + ]]: + raise NotImplementedError() + + @property + def list_hyperparameter_tuning_jobs(self) -> Callable[ + [job_service.ListHyperparameterTuningJobsRequest], + Union[ + job_service.ListHyperparameterTuningJobsResponse, + Awaitable[job_service.ListHyperparameterTuningJobsResponse] + ]]: + raise NotImplementedError() + + @property + def delete_hyperparameter_tuning_job(self) -> Callable[ + [job_service.DeleteHyperparameterTuningJobRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def cancel_hyperparameter_tuning_job(self) -> Callable[ + [job_service.CancelHyperparameterTuningJobRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def create_batch_prediction_job(self) -> Callable[ + [job_service.CreateBatchPredictionJobRequest], + Union[ + gca_batch_prediction_job.BatchPredictionJob, + Awaitable[gca_batch_prediction_job.BatchPredictionJob] + ]]: + raise NotImplementedError() + + @property + def get_batch_prediction_job(self) -> Callable[ + [job_service.GetBatchPredictionJobRequest], + Union[ + batch_prediction_job.BatchPredictionJob, + Awaitable[batch_prediction_job.BatchPredictionJob] + ]]: + raise NotImplementedError() + + @property + def list_batch_prediction_jobs(self) -> Callable[ + [job_service.ListBatchPredictionJobsRequest], + Union[ + job_service.ListBatchPredictionJobsResponse, + Awaitable[job_service.ListBatchPredictionJobsResponse] + ]]: + raise NotImplementedError() + + @property + def delete_batch_prediction_job(self) -> Callable[ + [job_service.DeleteBatchPredictionJobRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def cancel_batch_prediction_job(self) -> Callable[ + [job_service.CancelBatchPredictionJobRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def create_model_deployment_monitoring_job(self) -> Callable[ + [job_service.CreateModelDeploymentMonitoringJobRequest], + Union[ + gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + Awaitable[gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob] + ]]: + raise NotImplementedError() + + @property + def search_model_deployment_monitoring_stats_anomalies(self) -> Callable[ + [job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest], + Union[ + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse, + Awaitable[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse] + ]]: + raise NotImplementedError() + + @property + def get_model_deployment_monitoring_job(self) -> Callable[ + [job_service.GetModelDeploymentMonitoringJobRequest], + Union[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + Awaitable[model_deployment_monitoring_job.ModelDeploymentMonitoringJob] + ]]: + raise NotImplementedError() + + @property + def list_model_deployment_monitoring_jobs(self) -> Callable[ + [job_service.ListModelDeploymentMonitoringJobsRequest], + Union[ + job_service.ListModelDeploymentMonitoringJobsResponse, + 
Awaitable[job_service.ListModelDeploymentMonitoringJobsResponse] + ]]: + raise NotImplementedError() + + @property + def update_model_deployment_monitoring_job(self) -> Callable[ + [job_service.UpdateModelDeploymentMonitoringJobRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def delete_model_deployment_monitoring_job(self) -> Callable[ + [job_service.DeleteModelDeploymentMonitoringJobRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def pause_model_deployment_monitoring_job(self) -> Callable[ + [job_service.PauseModelDeploymentMonitoringJobRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def resume_model_deployment_monitoring_job(self) -> Callable[ + [job_service.ResumeModelDeploymentMonitoringJobRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'JobServiceTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py new file mode 100644 index 0000000000..55a5ebcbb2 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py @@ -0,0 +1,1043 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
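Before the concrete gRPC transport below, it may help to see how these pieces fit together: the registry in ``transports/__init__.py`` maps ``'grpc'`` and ``'grpc_asyncio'`` to concrete classes, each of which overrides the properties that ``JobServiceTransport`` leaves raising ``NotImplementedError``. A minimal sketch (an editorial illustration, not part of the generated patch), assuming application default credentials are available:

    from google.cloud.aiplatform_v1beta1.services.job_service import transports

    # Instantiate the sync gRPC transport directly; clients normally do this
    # for you by looking the class up in the transport registry.
    transport = transports.JobServiceGrpcTransport(
        host="aiplatform.googleapis.com",
    )

    # Each RPC is exposed as a property returning a callable. The abstract
    # base raises NotImplementedError; the gRPC subclass lazily creates and
    # caches one stub per method (see the `_stubs` dict below).
    list_custom_jobs = transport.list_custom_jobs

Caching stubs on first access keeps channel setup cheap when a caller only ever uses a few of the service's RPCs.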
+#
+import warnings
+from typing import Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import grpc_helpers   # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google.api_core import gapic_v1       # type: ignore
+import google.auth                         # type: ignore
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+
+import grpc  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import batch_prediction_job
+from google.cloud.aiplatform_v1beta1.types import batch_prediction_job as gca_batch_prediction_job
+from google.cloud.aiplatform_v1beta1.types import custom_job
+from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job
+from google.cloud.aiplatform_v1beta1.types import data_labeling_job
+from google.cloud.aiplatform_v1beta1.types import data_labeling_job as gca_data_labeling_job
+from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job
+from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job
+from google.cloud.aiplatform_v1beta1.types import job_service
+from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job
+from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job
+from google.longrunning import operations_pb2  # type: ignore
+from google.protobuf import empty_pb2  # type: ignore
+from .base import JobServiceTransport, DEFAULT_CLIENT_INFO
+
+
+class JobServiceGrpcTransport(JobServiceTransport):
+    """gRPC backend transport for JobService.
+
+    A service for creating and managing Vertex AI's jobs.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+    _stubs: Dict[str, Callable]
+
+    def __init__(self, *,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: str = None,
+            scopes: Sequence[str] = None,
+            channel: grpc.Channel = None,
+            api_mtls_endpoint: str = None,
+            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+            ssl_channel_credentials: grpc.ChannelCredentials = None,
+            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+            quota_project_id: Optional[str] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure a mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether a self-signed JWT should
+                be used for service account credentials.
+
+        Raises:
+          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+              creation failed for any reason.
+          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+              and ``credentials_file`` are passed.
+        """
+        self._grpc_channel = None
+        self._ssl_channel_credentials = ssl_channel_credentials
+        self._stubs: Dict[str, Callable] = {}
+        self._operations_client = None
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Ignore credentials if a channel was passed.
+            credentials = False
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+
+        else:
+            if api_mtls_endpoint:
+                host = api_mtls_endpoint
+
+                # Create SSL credentials with client_cert_source or application
+                # default SSL credentials.
+                if client_cert_source:
+                    cert, key = client_cert_source()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+                else:
+                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+            else:
+                if client_cert_source_for_mtls and not ssl_channel_credentials:
+                    cert, key = client_cert_source_for_mtls()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+
+        # The base transport sets the host, credentials and scopes.
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+        )
+
+        if not self._grpc_channel:
+            self._grpc_channel = type(self).create_channel(
+                self._host,
+                credentials=self._credentials,
+                credentials_file=credentials_file,
+                scopes=self._scopes,
+                ssl_credentials=self._ssl_channel_credentials,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        # Wrap messages. This must be done after self._grpc_channel exists.
+        self._prep_wrapped_messages(client_info)
+
+    @classmethod
+    def create_channel(cls,
+                       host: str = 'aiplatform.googleapis.com',
+                       credentials: ga_credentials.Credentials = None,
+                       credentials_file: str = None,
+                       scopes: Optional[Sequence[str]] = None,
+                       quota_project_id: Optional[str] = None,
+                       **kwargs) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service."""
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Sanity check: Only create a new client if we do not already have one.
+        if self._operations_client is None:
+            self._operations_client = operations_v1.OperationsClient(
+                self.grpc_channel
+            )
+
+        # Return the client from cache.
+        return self._operations_client
+
+    @property
+    def create_custom_job(self) -> Callable[
+            [job_service.CreateCustomJobRequest],
+            gca_custom_job.CustomJob]:
+        r"""Return a callable for the create custom job method over gRPC.
+
+        Creates a CustomJob. A created CustomJob will be
+        attempted to run right away.
+
+        Returns:
+            Callable[[~.CreateCustomJobRequest],
+                    ~.CustomJob]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'create_custom_job' not in self._stubs:
+            self._stubs['create_custom_job'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.JobService/CreateCustomJob',
+                request_serializer=job_service.CreateCustomJobRequest.serialize,
+                response_deserializer=gca_custom_job.CustomJob.deserialize,
+            )
+        return self._stubs['create_custom_job']
+
+    @property
+    def get_custom_job(self) -> Callable[
+            [job_service.GetCustomJobRequest],
+            custom_job.CustomJob]:
+        r"""Return a callable for the get custom job method over gRPC.
+ + Gets a CustomJob. + + Returns: + Callable[[~.GetCustomJobRequest], + ~.CustomJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_custom_job' not in self._stubs: + self._stubs['get_custom_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/GetCustomJob', + request_serializer=job_service.GetCustomJobRequest.serialize, + response_deserializer=custom_job.CustomJob.deserialize, + ) + return self._stubs['get_custom_job'] + + @property + def list_custom_jobs(self) -> Callable[ + [job_service.ListCustomJobsRequest], + job_service.ListCustomJobsResponse]: + r"""Return a callable for the list custom jobs method over gRPC. + + Lists CustomJobs in a Location. + + Returns: + Callable[[~.ListCustomJobsRequest], + ~.ListCustomJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_custom_jobs' not in self._stubs: + self._stubs['list_custom_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/ListCustomJobs', + request_serializer=job_service.ListCustomJobsRequest.serialize, + response_deserializer=job_service.ListCustomJobsResponse.deserialize, + ) + return self._stubs['list_custom_jobs'] + + @property + def delete_custom_job(self) -> Callable[ + [job_service.DeleteCustomJobRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete custom job method over gRPC. + + Deletes a CustomJob. + + Returns: + Callable[[~.DeleteCustomJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_custom_job' not in self._stubs: + self._stubs['delete_custom_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/DeleteCustomJob', + request_serializer=job_service.DeleteCustomJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_custom_job'] + + @property + def cancel_custom_job(self) -> Callable[ + [job_service.CancelCustomJobRequest], + empty_pb2.Empty]: + r"""Return a callable for the cancel custom job method over gRPC. + + Cancels a CustomJob. Starts asynchronous cancellation on the + CustomJob. The server makes a best effort to cancel the job, but + success is not guaranteed. Clients can use + [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On successful + cancellation, the CustomJob is not deleted; instead it becomes a + job with a + [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] + is set to ``CANCELLED``. 
+ + Returns: + Callable[[~.CancelCustomJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_custom_job' not in self._stubs: + self._stubs['cancel_custom_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CancelCustomJob', + request_serializer=job_service.CancelCustomJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_custom_job'] + + @property + def create_data_labeling_job(self) -> Callable[ + [job_service.CreateDataLabelingJobRequest], + gca_data_labeling_job.DataLabelingJob]: + r"""Return a callable for the create data labeling job method over gRPC. + + Creates a DataLabelingJob. + + Returns: + Callable[[~.CreateDataLabelingJobRequest], + ~.DataLabelingJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_data_labeling_job' not in self._stubs: + self._stubs['create_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CreateDataLabelingJob', + request_serializer=job_service.CreateDataLabelingJobRequest.serialize, + response_deserializer=gca_data_labeling_job.DataLabelingJob.deserialize, + ) + return self._stubs['create_data_labeling_job'] + + @property + def get_data_labeling_job(self) -> Callable[ + [job_service.GetDataLabelingJobRequest], + data_labeling_job.DataLabelingJob]: + r"""Return a callable for the get data labeling job method over gRPC. + + Gets a DataLabelingJob. + + Returns: + Callable[[~.GetDataLabelingJobRequest], + ~.DataLabelingJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_data_labeling_job' not in self._stubs: + self._stubs['get_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/GetDataLabelingJob', + request_serializer=job_service.GetDataLabelingJobRequest.serialize, + response_deserializer=data_labeling_job.DataLabelingJob.deserialize, + ) + return self._stubs['get_data_labeling_job'] + + @property + def list_data_labeling_jobs(self) -> Callable[ + [job_service.ListDataLabelingJobsRequest], + job_service.ListDataLabelingJobsResponse]: + r"""Return a callable for the list data labeling jobs method over gRPC. + + Lists DataLabelingJobs in a Location. + + Returns: + Callable[[~.ListDataLabelingJobsRequest], + ~.ListDataLabelingJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_data_labeling_jobs' not in self._stubs: + self._stubs['list_data_labeling_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/ListDataLabelingJobs', + request_serializer=job_service.ListDataLabelingJobsRequest.serialize, + response_deserializer=job_service.ListDataLabelingJobsResponse.deserialize, + ) + return self._stubs['list_data_labeling_jobs'] + + @property + def delete_data_labeling_job(self) -> Callable[ + [job_service.DeleteDataLabelingJobRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete data labeling job method over gRPC. + + Deletes a DataLabelingJob. + + Returns: + Callable[[~.DeleteDataLabelingJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_data_labeling_job' not in self._stubs: + self._stubs['delete_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/DeleteDataLabelingJob', + request_serializer=job_service.DeleteDataLabelingJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_data_labeling_job'] + + @property + def cancel_data_labeling_job(self) -> Callable[ + [job_service.CancelDataLabelingJobRequest], + empty_pb2.Empty]: + r"""Return a callable for the cancel data labeling job method over gRPC. + + Cancels a DataLabelingJob. Success of cancellation is + not guaranteed. + + Returns: + Callable[[~.CancelDataLabelingJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_data_labeling_job' not in self._stubs: + self._stubs['cancel_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CancelDataLabelingJob', + request_serializer=job_service.CancelDataLabelingJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_data_labeling_job'] + + @property + def create_hyperparameter_tuning_job(self) -> Callable[ + [job_service.CreateHyperparameterTuningJobRequest], + gca_hyperparameter_tuning_job.HyperparameterTuningJob]: + r"""Return a callable for the create hyperparameter tuning + job method over gRPC. + + Creates a HyperparameterTuningJob + + Returns: + Callable[[~.CreateHyperparameterTuningJobRequest], + ~.HyperparameterTuningJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_hyperparameter_tuning_job' not in self._stubs: + self._stubs['create_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CreateHyperparameterTuningJob', + request_serializer=job_service.CreateHyperparameterTuningJobRequest.serialize, + response_deserializer=gca_hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, + ) + return self._stubs['create_hyperparameter_tuning_job'] + + @property + def get_hyperparameter_tuning_job(self) -> Callable[ + [job_service.GetHyperparameterTuningJobRequest], + hyperparameter_tuning_job.HyperparameterTuningJob]: + r"""Return a callable for the get hyperparameter tuning job method over gRPC. + + Gets a HyperparameterTuningJob + + Returns: + Callable[[~.GetHyperparameterTuningJobRequest], + ~.HyperparameterTuningJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_hyperparameter_tuning_job' not in self._stubs: + self._stubs['get_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/GetHyperparameterTuningJob', + request_serializer=job_service.GetHyperparameterTuningJobRequest.serialize, + response_deserializer=hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, + ) + return self._stubs['get_hyperparameter_tuning_job'] + + @property + def list_hyperparameter_tuning_jobs(self) -> Callable[ + [job_service.ListHyperparameterTuningJobsRequest], + job_service.ListHyperparameterTuningJobsResponse]: + r"""Return a callable for the list hyperparameter tuning + jobs method over gRPC. + + Lists HyperparameterTuningJobs in a Location. + + Returns: + Callable[[~.ListHyperparameterTuningJobsRequest], + ~.ListHyperparameterTuningJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_hyperparameter_tuning_jobs' not in self._stubs: + self._stubs['list_hyperparameter_tuning_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/ListHyperparameterTuningJobs', + request_serializer=job_service.ListHyperparameterTuningJobsRequest.serialize, + response_deserializer=job_service.ListHyperparameterTuningJobsResponse.deserialize, + ) + return self._stubs['list_hyperparameter_tuning_jobs'] + + @property + def delete_hyperparameter_tuning_job(self) -> Callable[ + [job_service.DeleteHyperparameterTuningJobRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete hyperparameter tuning + job method over gRPC. + + Deletes a HyperparameterTuningJob. + + Returns: + Callable[[~.DeleteHyperparameterTuningJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if 'delete_hyperparameter_tuning_job' not in self._stubs:
+            self._stubs['delete_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.JobService/DeleteHyperparameterTuningJob',
+                request_serializer=job_service.DeleteHyperparameterTuningJobRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['delete_hyperparameter_tuning_job']
+
+    @property
+    def cancel_hyperparameter_tuning_job(self) -> Callable[
+            [job_service.CancelHyperparameterTuningJobRequest],
+            empty_pb2.Empty]:
+        r"""Return a callable for the cancel hyperparameter tuning
+        job method over gRPC.
+
+        Cancels a HyperparameterTuningJob. Starts asynchronous
+        cancellation on the HyperparameterTuningJob. The server makes a
+        best effort to cancel the job, but success is not guaranteed.
+        Clients can use
+        [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob]
+        or other methods to check whether the cancellation succeeded or
+        whether the job completed despite cancellation. On successful
+        cancellation, the HyperparameterTuningJob is not deleted;
+        instead it becomes a job with a
+        [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error]
+        value with a [google.rpc.Status.code][google.rpc.Status.code] of
+        1, corresponding to ``Code.CANCELLED``, and
+        [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state]
+        is set to ``CANCELLED``.
+
+        Returns:
+            Callable[[~.CancelHyperparameterTuningJobRequest],
+                    ~.Empty]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'cancel_hyperparameter_tuning_job' not in self._stubs:
+            self._stubs['cancel_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.JobService/CancelHyperparameterTuningJob',
+                request_serializer=job_service.CancelHyperparameterTuningJobRequest.serialize,
+                response_deserializer=empty_pb2.Empty.FromString,
+            )
+        return self._stubs['cancel_hyperparameter_tuning_job']
+
+    @property
+    def create_batch_prediction_job(self) -> Callable[
+            [job_service.CreateBatchPredictionJobRequest],
+            gca_batch_prediction_job.BatchPredictionJob]:
+        r"""Return a callable for the create batch prediction job method over gRPC.
+
+        Creates a BatchPredictionJob. The server will attempt
+        to start the job right away once it is created.
+
+        Returns:
+            Callable[[~.CreateBatchPredictionJobRequest],
+                    ~.BatchPredictionJob]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if 'create_batch_prediction_job' not in self._stubs: + self._stubs['create_batch_prediction_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CreateBatchPredictionJob', + request_serializer=job_service.CreateBatchPredictionJobRequest.serialize, + response_deserializer=gca_batch_prediction_job.BatchPredictionJob.deserialize, + ) + return self._stubs['create_batch_prediction_job'] + + @property + def get_batch_prediction_job(self) -> Callable[ + [job_service.GetBatchPredictionJobRequest], + batch_prediction_job.BatchPredictionJob]: + r"""Return a callable for the get batch prediction job method over gRPC. + + Gets a BatchPredictionJob + + Returns: + Callable[[~.GetBatchPredictionJobRequest], + ~.BatchPredictionJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_batch_prediction_job' not in self._stubs: + self._stubs['get_batch_prediction_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/GetBatchPredictionJob', + request_serializer=job_service.GetBatchPredictionJobRequest.serialize, + response_deserializer=batch_prediction_job.BatchPredictionJob.deserialize, + ) + return self._stubs['get_batch_prediction_job'] + + @property + def list_batch_prediction_jobs(self) -> Callable[ + [job_service.ListBatchPredictionJobsRequest], + job_service.ListBatchPredictionJobsResponse]: + r"""Return a callable for the list batch prediction jobs method over gRPC. + + Lists BatchPredictionJobs in a Location. + + Returns: + Callable[[~.ListBatchPredictionJobsRequest], + ~.ListBatchPredictionJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_batch_prediction_jobs' not in self._stubs: + self._stubs['list_batch_prediction_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/ListBatchPredictionJobs', + request_serializer=job_service.ListBatchPredictionJobsRequest.serialize, + response_deserializer=job_service.ListBatchPredictionJobsResponse.deserialize, + ) + return self._stubs['list_batch_prediction_jobs'] + + @property + def delete_batch_prediction_job(self) -> Callable[ + [job_service.DeleteBatchPredictionJobRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete batch prediction job method over gRPC. + + Deletes a BatchPredictionJob. Can only be called on + jobs that already finished. + + Returns: + Callable[[~.DeleteBatchPredictionJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if 'delete_batch_prediction_job' not in self._stubs:
+            self._stubs['delete_batch_prediction_job'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.JobService/DeleteBatchPredictionJob',
+                request_serializer=job_service.DeleteBatchPredictionJobRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['delete_batch_prediction_job']
+
+    @property
+    def cancel_batch_prediction_job(self) -> Callable[
+            [job_service.CancelBatchPredictionJobRequest],
+            empty_pb2.Empty]:
+        r"""Return a callable for the cancel batch prediction job method over gRPC.
+
+        Cancels a BatchPredictionJob.
+
+        Starts asynchronous cancellation on the BatchPredictionJob. The
+        server makes the best effort to cancel the job, but success is
+        not guaranteed. Clients can use
+        [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob]
+        or other methods to check whether the cancellation succeeded or
+        whether the job completed despite cancellation. On a successful
+        cancellation, the BatchPredictionJob is not deleted; instead its
+        [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state]
+        is set to ``CANCELLED``. Any files already output by the job
+        are not deleted.
+
+        Returns:
+            Callable[[~.CancelBatchPredictionJobRequest],
+                    ~.Empty]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'cancel_batch_prediction_job' not in self._stubs:
+            self._stubs['cancel_batch_prediction_job'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.JobService/CancelBatchPredictionJob',
+                request_serializer=job_service.CancelBatchPredictionJobRequest.serialize,
+                response_deserializer=empty_pb2.Empty.FromString,
+            )
+        return self._stubs['cancel_batch_prediction_job']
+
+    @property
+    def create_model_deployment_monitoring_job(self) -> Callable[
+            [job_service.CreateModelDeploymentMonitoringJobRequest],
+            gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob]:
+        r"""Return a callable for the create model deployment
+        monitoring job method over gRPC.
+
+        Creates a ModelDeploymentMonitoringJob. It will run
+        periodically on a configured interval.
+
+        Returns:
+            Callable[[~.CreateModelDeploymentMonitoringJobRequest],
+                    ~.ModelDeploymentMonitoringJob]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if 'create_model_deployment_monitoring_job' not in self._stubs: + self._stubs['create_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CreateModelDeploymentMonitoringJob', + request_serializer=job_service.CreateModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, + ) + return self._stubs['create_model_deployment_monitoring_job'] + + @property + def search_model_deployment_monitoring_stats_anomalies(self) -> Callable[ + [job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest], + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: + r"""Return a callable for the search model deployment + monitoring stats anomalies method over gRPC. + + Searches Model Monitoring Statistics generated within + a given time window. + + Returns: + Callable[[~.SearchModelDeploymentMonitoringStatsAnomaliesRequest], + ~.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'search_model_deployment_monitoring_stats_anomalies' not in self._stubs: + self._stubs['search_model_deployment_monitoring_stats_anomalies'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/SearchModelDeploymentMonitoringStatsAnomalies', + request_serializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest.serialize, + response_deserializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse.deserialize, + ) + return self._stubs['search_model_deployment_monitoring_stats_anomalies'] + + @property + def get_model_deployment_monitoring_job(self) -> Callable[ + [job_service.GetModelDeploymentMonitoringJobRequest], + model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: + r"""Return a callable for the get model deployment + monitoring job method over gRPC. + + Gets a ModelDeploymentMonitoringJob. + + Returns: + Callable[[~.GetModelDeploymentMonitoringJobRequest], + ~.ModelDeploymentMonitoringJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model_deployment_monitoring_job' not in self._stubs: + self._stubs['get_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/GetModelDeploymentMonitoringJob', + request_serializer=job_service.GetModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, + ) + return self._stubs['get_model_deployment_monitoring_job'] + + @property + def list_model_deployment_monitoring_jobs(self) -> Callable[ + [job_service.ListModelDeploymentMonitoringJobsRequest], + job_service.ListModelDeploymentMonitoringJobsResponse]: + r"""Return a callable for the list model deployment + monitoring jobs method over gRPC. + + Lists ModelDeploymentMonitoringJobs in a Location. 
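+
+        An illustrative sketch, not generated code: paging through results by
+        hand with a placeholder parent, assuming ``transport`` is a constructed
+        ``JobServiceGrpcTransport`` (the client layer normally wraps this
+        callable in a pager for you)::
+
+            from google.cloud.aiplatform_v1beta1.types import job_service
+
+            request = job_service.ListModelDeploymentMonitoringJobsRequest(
+                parent='projects/my-project/locations/us-central1',
+            )
+            while True:
+                response = transport.list_model_deployment_monitoring_jobs(request)
+                for job in response.model_deployment_monitoring_jobs:
+                    print(job.name)
+                if not response.next_page_token:
+                    break
+                request.page_token = response.next_page_token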
+ + Returns: + Callable[[~.ListModelDeploymentMonitoringJobsRequest], + ~.ListModelDeploymentMonitoringJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_model_deployment_monitoring_jobs' not in self._stubs: + self._stubs['list_model_deployment_monitoring_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/ListModelDeploymentMonitoringJobs', + request_serializer=job_service.ListModelDeploymentMonitoringJobsRequest.serialize, + response_deserializer=job_service.ListModelDeploymentMonitoringJobsResponse.deserialize, + ) + return self._stubs['list_model_deployment_monitoring_jobs'] + + @property + def update_model_deployment_monitoring_job(self) -> Callable[ + [job_service.UpdateModelDeploymentMonitoringJobRequest], + operations_pb2.Operation]: + r"""Return a callable for the update model deployment + monitoring job method over gRPC. + + Updates a ModelDeploymentMonitoringJob. + + Returns: + Callable[[~.UpdateModelDeploymentMonitoringJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_model_deployment_monitoring_job' not in self._stubs: + self._stubs['update_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/UpdateModelDeploymentMonitoringJob', + request_serializer=job_service.UpdateModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_model_deployment_monitoring_job'] + + @property + def delete_model_deployment_monitoring_job(self) -> Callable[ + [job_service.DeleteModelDeploymentMonitoringJobRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete model deployment + monitoring job method over gRPC. + + Deletes a ModelDeploymentMonitoringJob. + + Returns: + Callable[[~.DeleteModelDeploymentMonitoringJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_model_deployment_monitoring_job' not in self._stubs: + self._stubs['delete_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/DeleteModelDeploymentMonitoringJob', + request_serializer=job_service.DeleteModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_model_deployment_monitoring_job'] + + @property + def pause_model_deployment_monitoring_job(self) -> Callable[ + [job_service.PauseModelDeploymentMonitoringJobRequest], + empty_pb2.Empty]: + r"""Return a callable for the pause model deployment + monitoring job method over gRPC. + + Pauses a ModelDeploymentMonitoringJob. If the job is running, + the server makes a best effort to cancel the job. 
It sets
+        [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state]
+        to 'PAUSED'.
+
+        Returns:
+            Callable[[~.PauseModelDeploymentMonitoringJobRequest],
+                    ~.Empty]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'pause_model_deployment_monitoring_job' not in self._stubs:
+            self._stubs['pause_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.JobService/PauseModelDeploymentMonitoringJob',
+                request_serializer=job_service.PauseModelDeploymentMonitoringJobRequest.serialize,
+                response_deserializer=empty_pb2.Empty.FromString,
+            )
+        return self._stubs['pause_model_deployment_monitoring_job']
+
+    @property
+    def resume_model_deployment_monitoring_job(self) -> Callable[
+            [job_service.ResumeModelDeploymentMonitoringJobRequest],
+            empty_pb2.Empty]:
+        r"""Return a callable for the resume model deployment
+        monitoring job method over gRPC.
+
+        Resumes a paused ModelDeploymentMonitoringJob. It
+        will start to run from the next scheduled time. A deleted
+        ModelDeploymentMonitoringJob can't be resumed.
+
+        Returns:
+            Callable[[~.ResumeModelDeploymentMonitoringJobRequest],
+                    ~.Empty]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'resume_model_deployment_monitoring_job' not in self._stubs:
+            self._stubs['resume_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.JobService/ResumeModelDeploymentMonitoringJob',
+                request_serializer=job_service.ResumeModelDeploymentMonitoringJobRequest.serialize,
+                response_deserializer=empty_pb2.Empty.FromString,
+            )
+        return self._stubs['resume_model_deployment_monitoring_job']
+
+
+__all__ = (
+    'JobServiceGrpcTransport',
+)
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py
new file mode 100644
index 0000000000..cc7102577e
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py
@@ -0,0 +1,1047 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import grpc_helpers_async  # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+import packaging.version
+
+import grpc  # type: ignore
+from grpc.experimental import aio  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import batch_prediction_job
+from google.cloud.aiplatform_v1beta1.types import batch_prediction_job as gca_batch_prediction_job
+from google.cloud.aiplatform_v1beta1.types import custom_job
+from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job
+from google.cloud.aiplatform_v1beta1.types import data_labeling_job
+from google.cloud.aiplatform_v1beta1.types import data_labeling_job as gca_data_labeling_job
+from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job
+from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job
+from google.cloud.aiplatform_v1beta1.types import job_service
+from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job
+from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job
+from google.longrunning import operations_pb2  # type: ignore
+from google.protobuf import empty_pb2  # type: ignore
+from .base import JobServiceTransport, DEFAULT_CLIENT_INFO
+from .grpc import JobServiceGrpcTransport
+
+
+class JobServiceGrpcAsyncIOTransport(JobServiceTransport):
+    """gRPC AsyncIO backend transport for JobService.
+
+    A service for creating and managing Vertex AI's jobs.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(cls,
+                       host: str = 'aiplatform.googleapis.com',
+                       credentials: ga_credentials.Credentials = None,
+                       credentials_file: Optional[str] = None,
+                       scopes: Optional[Sequence[str]] = None,
+                       quota_project_id: Optional[str] = None,
+                       **kwargs) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
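+
+        A brief illustrative sketch, not generated code: creating a channel
+        with application default credentials and handing it to the transport
+        (the quota project shown is a placeholder)::
+
+            channel = JobServiceGrpcAsyncIOTransport.create_channel(
+                'aiplatform.googleapis.com',
+                quota_project_id='my-quota-project',
+            )
+            transport = JobServiceGrpcAsyncIOTransport(channel=channel)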
+
+        """
+
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    def __init__(self, *,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            channel: aio.Channel = None,
+            api_mtls_endpoint: str = None,
+            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+            ssl_channel_credentials: grpc.ChannelCredentials = None,
+            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+            quota_project_id=None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure a mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self-signed JWT should
+                be used for service account credentials.
+
+        Raises:
+            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
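+
+        An illustrative sketch, not generated code: configuring the transport
+        for mutual TLS with a callback that reads placeholder PEM files::
+
+            def client_cert_source():
+                with open('client.pem', 'rb') as f:
+                    cert = f.read()
+                with open('client.key', 'rb') as f:
+                    key = f.read()
+                return cert, key
+
+            transport = JobServiceGrpcAsyncIOTransport(
+                client_cert_source_for_mtls=client_cert_source,
+            )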
+
+        """
+        self._grpc_channel = None
+        self._ssl_channel_credentials = ssl_channel_credentials
+        self._stubs: Dict[str, Callable] = {}
+        self._operations_client = None
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Ignore credentials if a channel was passed.
+            credentials = False
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+        else:
+            if api_mtls_endpoint:
+                host = api_mtls_endpoint
+
+                # Create SSL credentials with client_cert_source or application
+                # default SSL credentials.
+                if client_cert_source:
+                    cert, key = client_cert_source()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+                else:
+                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+            else:
+                if client_cert_source_for_mtls and not ssl_channel_credentials:
+                    cert, key = client_cert_source_for_mtls()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+
+        # The base transport sets the host, credentials and scopes.
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+        )
+
+        if not self._grpc_channel:
+            self._grpc_channel = type(self).create_channel(
+                self._host,
+                credentials=self._credentials,
+                credentials_file=credentials_file,
+                scopes=self._scopes,
+                ssl_credentials=self._ssl_channel_credentials,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        # Wrap messages. This must be done after self._grpc_channel exists.
+        self._prep_wrapped_messages(client_info)
+
+    @property
+    def grpc_channel(self) -> aio.Channel:
+        """Create the channel designed to connect to this service.
+
+        This property caches on the instance; repeated calls return
+        the same channel.
+        """
+        # Return the channel from cache.
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsAsyncClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Sanity check: Only create a new client if we do not already have one.
+        if self._operations_client is None:
+            self._operations_client = operations_v1.OperationsAsyncClient(
+                self.grpc_channel
+            )
+
+        # Return the client from cache.
+        return self._operations_client
+
+    @property
+    def create_custom_job(self) -> Callable[
+            [job_service.CreateCustomJobRequest],
+            Awaitable[gca_custom_job.CustomJob]]:
+        r"""Return a callable for the create custom job method over gRPC.
+
+        Creates a CustomJob. The server will attempt to run a
+        newly created CustomJob right away.
+
+        Returns:
+            Callable[[~.CreateCustomJobRequest],
+                    Awaitable[~.CustomJob]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if 'create_custom_job' not in self._stubs: + self._stubs['create_custom_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CreateCustomJob', + request_serializer=job_service.CreateCustomJobRequest.serialize, + response_deserializer=gca_custom_job.CustomJob.deserialize, + ) + return self._stubs['create_custom_job'] + + @property + def get_custom_job(self) -> Callable[ + [job_service.GetCustomJobRequest], + Awaitable[custom_job.CustomJob]]: + r"""Return a callable for the get custom job method over gRPC. + + Gets a CustomJob. + + Returns: + Callable[[~.GetCustomJobRequest], + Awaitable[~.CustomJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_custom_job' not in self._stubs: + self._stubs['get_custom_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/GetCustomJob', + request_serializer=job_service.GetCustomJobRequest.serialize, + response_deserializer=custom_job.CustomJob.deserialize, + ) + return self._stubs['get_custom_job'] + + @property + def list_custom_jobs(self) -> Callable[ + [job_service.ListCustomJobsRequest], + Awaitable[job_service.ListCustomJobsResponse]]: + r"""Return a callable for the list custom jobs method over gRPC. + + Lists CustomJobs in a Location. + + Returns: + Callable[[~.ListCustomJobsRequest], + Awaitable[~.ListCustomJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_custom_jobs' not in self._stubs: + self._stubs['list_custom_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/ListCustomJobs', + request_serializer=job_service.ListCustomJobsRequest.serialize, + response_deserializer=job_service.ListCustomJobsResponse.deserialize, + ) + return self._stubs['list_custom_jobs'] + + @property + def delete_custom_job(self) -> Callable[ + [job_service.DeleteCustomJobRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete custom job method over gRPC. + + Deletes a CustomJob. + + Returns: + Callable[[~.DeleteCustomJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_custom_job' not in self._stubs: + self._stubs['delete_custom_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/DeleteCustomJob', + request_serializer=job_service.DeleteCustomJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_custom_job'] + + @property + def cancel_custom_job(self) -> Callable[ + [job_service.CancelCustomJobRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the cancel custom job method over gRPC. + + Cancels a CustomJob. Starts asynchronous cancellation on the + CustomJob. The server makes a best effort to cancel the job, but + success is not guaranteed. 
Clients can use + [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On successful + cancellation, the CustomJob is not deleted; instead it becomes a + job with a + [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] + is set to ``CANCELLED``. + + Returns: + Callable[[~.CancelCustomJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_custom_job' not in self._stubs: + self._stubs['cancel_custom_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CancelCustomJob', + request_serializer=job_service.CancelCustomJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_custom_job'] + + @property + def create_data_labeling_job(self) -> Callable[ + [job_service.CreateDataLabelingJobRequest], + Awaitable[gca_data_labeling_job.DataLabelingJob]]: + r"""Return a callable for the create data labeling job method over gRPC. + + Creates a DataLabelingJob. + + Returns: + Callable[[~.CreateDataLabelingJobRequest], + Awaitable[~.DataLabelingJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_data_labeling_job' not in self._stubs: + self._stubs['create_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CreateDataLabelingJob', + request_serializer=job_service.CreateDataLabelingJobRequest.serialize, + response_deserializer=gca_data_labeling_job.DataLabelingJob.deserialize, + ) + return self._stubs['create_data_labeling_job'] + + @property + def get_data_labeling_job(self) -> Callable[ + [job_service.GetDataLabelingJobRequest], + Awaitable[data_labeling_job.DataLabelingJob]]: + r"""Return a callable for the get data labeling job method over gRPC. + + Gets a DataLabelingJob. + + Returns: + Callable[[~.GetDataLabelingJobRequest], + Awaitable[~.DataLabelingJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_data_labeling_job' not in self._stubs: + self._stubs['get_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/GetDataLabelingJob', + request_serializer=job_service.GetDataLabelingJobRequest.serialize, + response_deserializer=data_labeling_job.DataLabelingJob.deserialize, + ) + return self._stubs['get_data_labeling_job'] + + @property + def list_data_labeling_jobs(self) -> Callable[ + [job_service.ListDataLabelingJobsRequest], + Awaitable[job_service.ListDataLabelingJobsResponse]]: + r"""Return a callable for the list data labeling jobs method over gRPC. 
+ + Lists DataLabelingJobs in a Location. + + Returns: + Callable[[~.ListDataLabelingJobsRequest], + Awaitable[~.ListDataLabelingJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_data_labeling_jobs' not in self._stubs: + self._stubs['list_data_labeling_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/ListDataLabelingJobs', + request_serializer=job_service.ListDataLabelingJobsRequest.serialize, + response_deserializer=job_service.ListDataLabelingJobsResponse.deserialize, + ) + return self._stubs['list_data_labeling_jobs'] + + @property + def delete_data_labeling_job(self) -> Callable[ + [job_service.DeleteDataLabelingJobRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete data labeling job method over gRPC. + + Deletes a DataLabelingJob. + + Returns: + Callable[[~.DeleteDataLabelingJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_data_labeling_job' not in self._stubs: + self._stubs['delete_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/DeleteDataLabelingJob', + request_serializer=job_service.DeleteDataLabelingJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_data_labeling_job'] + + @property + def cancel_data_labeling_job(self) -> Callable[ + [job_service.CancelDataLabelingJobRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the cancel data labeling job method over gRPC. + + Cancels a DataLabelingJob. Success of cancellation is + not guaranteed. + + Returns: + Callable[[~.CancelDataLabelingJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_data_labeling_job' not in self._stubs: + self._stubs['cancel_data_labeling_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CancelDataLabelingJob', + request_serializer=job_service.CancelDataLabelingJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_data_labeling_job'] + + @property + def create_hyperparameter_tuning_job(self) -> Callable[ + [job_service.CreateHyperparameterTuningJobRequest], + Awaitable[gca_hyperparameter_tuning_job.HyperparameterTuningJob]]: + r"""Return a callable for the create hyperparameter tuning + job method over gRPC. + + Creates a HyperparameterTuningJob + + Returns: + Callable[[~.CreateHyperparameterTuningJobRequest], + Awaitable[~.HyperparameterTuningJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_hyperparameter_tuning_job' not in self._stubs: + self._stubs['create_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CreateHyperparameterTuningJob', + request_serializer=job_service.CreateHyperparameterTuningJobRequest.serialize, + response_deserializer=gca_hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, + ) + return self._stubs['create_hyperparameter_tuning_job'] + + @property + def get_hyperparameter_tuning_job(self) -> Callable[ + [job_service.GetHyperparameterTuningJobRequest], + Awaitable[hyperparameter_tuning_job.HyperparameterTuningJob]]: + r"""Return a callable for the get hyperparameter tuning job method over gRPC. + + Gets a HyperparameterTuningJob + + Returns: + Callable[[~.GetHyperparameterTuningJobRequest], + Awaitable[~.HyperparameterTuningJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_hyperparameter_tuning_job' not in self._stubs: + self._stubs['get_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/GetHyperparameterTuningJob', + request_serializer=job_service.GetHyperparameterTuningJobRequest.serialize, + response_deserializer=hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, + ) + return self._stubs['get_hyperparameter_tuning_job'] + + @property + def list_hyperparameter_tuning_jobs(self) -> Callable[ + [job_service.ListHyperparameterTuningJobsRequest], + Awaitable[job_service.ListHyperparameterTuningJobsResponse]]: + r"""Return a callable for the list hyperparameter tuning + jobs method over gRPC. + + Lists HyperparameterTuningJobs in a Location. + + Returns: + Callable[[~.ListHyperparameterTuningJobsRequest], + Awaitable[~.ListHyperparameterTuningJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_hyperparameter_tuning_jobs' not in self._stubs: + self._stubs['list_hyperparameter_tuning_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/ListHyperparameterTuningJobs', + request_serializer=job_service.ListHyperparameterTuningJobsRequest.serialize, + response_deserializer=job_service.ListHyperparameterTuningJobsResponse.deserialize, + ) + return self._stubs['list_hyperparameter_tuning_jobs'] + + @property + def delete_hyperparameter_tuning_job(self) -> Callable[ + [job_service.DeleteHyperparameterTuningJobRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete hyperparameter tuning + job method over gRPC. + + Deletes a HyperparameterTuningJob. + + Returns: + Callable[[~.DeleteHyperparameterTuningJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if 'delete_hyperparameter_tuning_job' not in self._stubs:
+            self._stubs['delete_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.JobService/DeleteHyperparameterTuningJob',
+                request_serializer=job_service.DeleteHyperparameterTuningJobRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['delete_hyperparameter_tuning_job']
+
+    @property
+    def cancel_hyperparameter_tuning_job(self) -> Callable[
+            [job_service.CancelHyperparameterTuningJobRequest],
+            Awaitable[empty_pb2.Empty]]:
+        r"""Return a callable for the cancel hyperparameter tuning
+        job method over gRPC.
+
+        Cancels a HyperparameterTuningJob. Starts asynchronous
+        cancellation on the HyperparameterTuningJob. The server makes a
+        best effort to cancel the job, but success is not guaranteed.
+        Clients can use
+        [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob]
+        or other methods to check whether the cancellation succeeded or
+        whether the job completed despite cancellation. On successful
+        cancellation, the HyperparameterTuningJob is not deleted;
+        instead it becomes a job with a
+        [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error]
+        value with a [google.rpc.Status.code][google.rpc.Status.code] of
+        1, corresponding to ``Code.CANCELLED``, and
+        [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state]
+        is set to ``CANCELLED``.
+
+        Returns:
+            Callable[[~.CancelHyperparameterTuningJobRequest],
+                    Awaitable[~.Empty]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'cancel_hyperparameter_tuning_job' not in self._stubs:
+            self._stubs['cancel_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.JobService/CancelHyperparameterTuningJob',
+                request_serializer=job_service.CancelHyperparameterTuningJobRequest.serialize,
+                response_deserializer=empty_pb2.Empty.FromString,
+            )
+        return self._stubs['cancel_hyperparameter_tuning_job']
+
+    @property
+    def create_batch_prediction_job(self) -> Callable[
+            [job_service.CreateBatchPredictionJobRequest],
+            Awaitable[gca_batch_prediction_job.BatchPredictionJob]]:
+        r"""Return a callable for the create batch prediction job method over gRPC.
+
+        Creates a BatchPredictionJob. The server will attempt
+        to start the job right away once it is created.
+
+        Returns:
+            Callable[[~.CreateBatchPredictionJobRequest],
+                    Awaitable[~.BatchPredictionJob]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if 'create_batch_prediction_job' not in self._stubs: + self._stubs['create_batch_prediction_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CreateBatchPredictionJob', + request_serializer=job_service.CreateBatchPredictionJobRequest.serialize, + response_deserializer=gca_batch_prediction_job.BatchPredictionJob.deserialize, + ) + return self._stubs['create_batch_prediction_job'] + + @property + def get_batch_prediction_job(self) -> Callable[ + [job_service.GetBatchPredictionJobRequest], + Awaitable[batch_prediction_job.BatchPredictionJob]]: + r"""Return a callable for the get batch prediction job method over gRPC. + + Gets a BatchPredictionJob + + Returns: + Callable[[~.GetBatchPredictionJobRequest], + Awaitable[~.BatchPredictionJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_batch_prediction_job' not in self._stubs: + self._stubs['get_batch_prediction_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/GetBatchPredictionJob', + request_serializer=job_service.GetBatchPredictionJobRequest.serialize, + response_deserializer=batch_prediction_job.BatchPredictionJob.deserialize, + ) + return self._stubs['get_batch_prediction_job'] + + @property + def list_batch_prediction_jobs(self) -> Callable[ + [job_service.ListBatchPredictionJobsRequest], + Awaitable[job_service.ListBatchPredictionJobsResponse]]: + r"""Return a callable for the list batch prediction jobs method over gRPC. + + Lists BatchPredictionJobs in a Location. + + Returns: + Callable[[~.ListBatchPredictionJobsRequest], + Awaitable[~.ListBatchPredictionJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_batch_prediction_jobs' not in self._stubs: + self._stubs['list_batch_prediction_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/ListBatchPredictionJobs', + request_serializer=job_service.ListBatchPredictionJobsRequest.serialize, + response_deserializer=job_service.ListBatchPredictionJobsResponse.deserialize, + ) + return self._stubs['list_batch_prediction_jobs'] + + @property + def delete_batch_prediction_job(self) -> Callable[ + [job_service.DeleteBatchPredictionJobRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete batch prediction job method over gRPC. + + Deletes a BatchPredictionJob. Can only be called on + jobs that already finished. + + Returns: + Callable[[~.DeleteBatchPredictionJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if 'delete_batch_prediction_job' not in self._stubs:
+            self._stubs['delete_batch_prediction_job'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.JobService/DeleteBatchPredictionJob',
+                request_serializer=job_service.DeleteBatchPredictionJobRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['delete_batch_prediction_job']
+
+    @property
+    def cancel_batch_prediction_job(self) -> Callable[
+            [job_service.CancelBatchPredictionJobRequest],
+            Awaitable[empty_pb2.Empty]]:
+        r"""Return a callable for the cancel batch prediction job method over gRPC.
+
+        Cancels a BatchPredictionJob.
+
+        Starts asynchronous cancellation on the BatchPredictionJob. The
+        server makes the best effort to cancel the job, but success is
+        not guaranteed. Clients can use
+        [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob]
+        or other methods to check whether the cancellation succeeded or
+        whether the job completed despite cancellation. On a successful
+        cancellation, the BatchPredictionJob is not deleted; instead its
+        [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state]
+        is set to ``CANCELLED``. Any files already output by the job
+        are not deleted.
+
+        Returns:
+            Callable[[~.CancelBatchPredictionJobRequest],
+                    Awaitable[~.Empty]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'cancel_batch_prediction_job' not in self._stubs:
+            self._stubs['cancel_batch_prediction_job'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.JobService/CancelBatchPredictionJob',
+                request_serializer=job_service.CancelBatchPredictionJobRequest.serialize,
+                response_deserializer=empty_pb2.Empty.FromString,
+            )
+        return self._stubs['cancel_batch_prediction_job']
+
+    @property
+    def create_model_deployment_monitoring_job(self) -> Callable[
+            [job_service.CreateModelDeploymentMonitoringJobRequest],
+            Awaitable[gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob]]:
+        r"""Return a callable for the create model deployment
+        monitoring job method over gRPC.
+
+        Creates a ModelDeploymentMonitoringJob. It will run
+        periodically on a configured interval.
+
+        Returns:
+            Callable[[~.CreateModelDeploymentMonitoringJobRequest],
+                    Awaitable[~.ModelDeploymentMonitoringJob]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if 'create_model_deployment_monitoring_job' not in self._stubs: + self._stubs['create_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/CreateModelDeploymentMonitoringJob', + request_serializer=job_service.CreateModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, + ) + return self._stubs['create_model_deployment_monitoring_job'] + + @property + def search_model_deployment_monitoring_stats_anomalies(self) -> Callable[ + [job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest], + Awaitable[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]]: + r"""Return a callable for the search model deployment + monitoring stats anomalies method over gRPC. + + Searches Model Monitoring Statistics generated within + a given time window. + + Returns: + Callable[[~.SearchModelDeploymentMonitoringStatsAnomaliesRequest], + Awaitable[~.SearchModelDeploymentMonitoringStatsAnomaliesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'search_model_deployment_monitoring_stats_anomalies' not in self._stubs: + self._stubs['search_model_deployment_monitoring_stats_anomalies'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/SearchModelDeploymentMonitoringStatsAnomalies', + request_serializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest.serialize, + response_deserializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse.deserialize, + ) + return self._stubs['search_model_deployment_monitoring_stats_anomalies'] + + @property + def get_model_deployment_monitoring_job(self) -> Callable[ + [job_service.GetModelDeploymentMonitoringJobRequest], + Awaitable[model_deployment_monitoring_job.ModelDeploymentMonitoringJob]]: + r"""Return a callable for the get model deployment + monitoring job method over gRPC. + + Gets a ModelDeploymentMonitoringJob. + + Returns: + Callable[[~.GetModelDeploymentMonitoringJobRequest], + Awaitable[~.ModelDeploymentMonitoringJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model_deployment_monitoring_job' not in self._stubs: + self._stubs['get_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/GetModelDeploymentMonitoringJob', + request_serializer=job_service.GetModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, + ) + return self._stubs['get_model_deployment_monitoring_job'] + + @property + def list_model_deployment_monitoring_jobs(self) -> Callable[ + [job_service.ListModelDeploymentMonitoringJobsRequest], + Awaitable[job_service.ListModelDeploymentMonitoringJobsResponse]]: + r"""Return a callable for the list model deployment + monitoring jobs method over gRPC. + + Lists ModelDeploymentMonitoringJobs in a Location. 
+ + Returns: + Callable[[~.ListModelDeploymentMonitoringJobsRequest], + Awaitable[~.ListModelDeploymentMonitoringJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_model_deployment_monitoring_jobs' not in self._stubs: + self._stubs['list_model_deployment_monitoring_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/ListModelDeploymentMonitoringJobs', + request_serializer=job_service.ListModelDeploymentMonitoringJobsRequest.serialize, + response_deserializer=job_service.ListModelDeploymentMonitoringJobsResponse.deserialize, + ) + return self._stubs['list_model_deployment_monitoring_jobs'] + + @property + def update_model_deployment_monitoring_job(self) -> Callable[ + [job_service.UpdateModelDeploymentMonitoringJobRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the update model deployment + monitoring job method over gRPC. + + Updates a ModelDeploymentMonitoringJob. + + Returns: + Callable[[~.UpdateModelDeploymentMonitoringJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_model_deployment_monitoring_job' not in self._stubs: + self._stubs['update_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/UpdateModelDeploymentMonitoringJob', + request_serializer=job_service.UpdateModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_model_deployment_monitoring_job'] + + @property + def delete_model_deployment_monitoring_job(self) -> Callable[ + [job_service.DeleteModelDeploymentMonitoringJobRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete model deployment + monitoring job method over gRPC. + + Deletes a ModelDeploymentMonitoringJob. + + Returns: + Callable[[~.DeleteModelDeploymentMonitoringJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_model_deployment_monitoring_job' not in self._stubs: + self._stubs['delete_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.JobService/DeleteModelDeploymentMonitoringJob', + request_serializer=job_service.DeleteModelDeploymentMonitoringJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_model_deployment_monitoring_job'] + + @property + def pause_model_deployment_monitoring_job(self) -> Callable[ + [job_service.PauseModelDeploymentMonitoringJobRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the pause model deployment + monitoring job method over gRPC. + + Pauses a ModelDeploymentMonitoringJob. If the job is running, + the server makes a best effort to cancel the job. 
It will set
+        [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state]
+        to 'PAUSED'.
+
+        Returns:
+            Callable[[~.PauseModelDeploymentMonitoringJobRequest],
+                    Awaitable[~.Empty]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'pause_model_deployment_monitoring_job' not in self._stubs:
+            self._stubs['pause_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.JobService/PauseModelDeploymentMonitoringJob',
+                request_serializer=job_service.PauseModelDeploymentMonitoringJobRequest.serialize,
+                response_deserializer=empty_pb2.Empty.FromString,
+            )
+        return self._stubs['pause_model_deployment_monitoring_job']
+
+    @property
+    def resume_model_deployment_monitoring_job(self) -> Callable[
+            [job_service.ResumeModelDeploymentMonitoringJobRequest],
+            Awaitable[empty_pb2.Empty]]:
+        r"""Return a callable for the resume model deployment
+        monitoring job method over gRPC.
+
+        Resumes a paused ModelDeploymentMonitoringJob. It
+        will start to run from the next scheduled time. A deleted
+        ModelDeploymentMonitoringJob can't be resumed.
+
+        Returns:
+            Callable[[~.ResumeModelDeploymentMonitoringJobRequest],
+                    Awaitable[~.Empty]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'resume_model_deployment_monitoring_job' not in self._stubs:
+            self._stubs['resume_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.JobService/ResumeModelDeploymentMonitoringJob',
+                request_serializer=job_service.ResumeModelDeploymentMonitoringJobRequest.serialize,
+                response_deserializer=empty_pb2.Empty.FromString,
+            )
+        return self._stubs['resume_model_deployment_monitoring_job']
+
+
+__all__ = (
+    'JobServiceGrpcAsyncIOTransport',
+)
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/__init__.py
new file mode 100644
index 0000000000..b0a31fc612
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/__init__.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +from .client import MetadataServiceClient +from .async_client import MetadataServiceAsyncClient + +__all__ = ( + 'MetadataServiceClient', + 'MetadataServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py new file mode 100644 index 0000000000..67ae60978b --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py @@ -0,0 +1,2516 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.metadata_service import pagers +from google.cloud.aiplatform_v1beta1.types import artifact +from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1beta1.types import context +from google.cloud.aiplatform_v1beta1.types import context as gca_context +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import event +from google.cloud.aiplatform_v1beta1.types import execution +from google.cloud.aiplatform_v1beta1.types import execution as gca_execution +from google.cloud.aiplatform_v1beta1.types import lineage_subgraph +from google.cloud.aiplatform_v1beta1.types import metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_service +from google.cloud.aiplatform_v1beta1.types import metadata_store +from google.cloud.aiplatform_v1beta1.types import metadata_store as gca_metadata_store +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import MetadataServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import MetadataServiceGrpcAsyncIOTransport +from .client import MetadataServiceClient + + +class MetadataServiceAsyncClient: + """Service for reading and writing metadata entries.""" + + _client: MetadataServiceClient + + 
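+    # A minimal usage sketch for this async client (the project, location
+    # and metadata store ID below are hypothetical placeholders):
+    #
+    #   import asyncio
+    #
+    #   async def main():
+    #       client = MetadataServiceAsyncClient()
+    #       store = await client.get_metadata_store(
+    #           name="projects/my-project/locations/us-central1"
+    #                "/metadataStores/my-store",
+    #       )
+    #       print(store.name)
+    #
+    #   asyncio.run(main())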
DEFAULT_ENDPOINT = MetadataServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = MetadataServiceClient.DEFAULT_MTLS_ENDPOINT + + artifact_path = staticmethod(MetadataServiceClient.artifact_path) + parse_artifact_path = staticmethod(MetadataServiceClient.parse_artifact_path) + context_path = staticmethod(MetadataServiceClient.context_path) + parse_context_path = staticmethod(MetadataServiceClient.parse_context_path) + execution_path = staticmethod(MetadataServiceClient.execution_path) + parse_execution_path = staticmethod(MetadataServiceClient.parse_execution_path) + metadata_schema_path = staticmethod(MetadataServiceClient.metadata_schema_path) + parse_metadata_schema_path = staticmethod(MetadataServiceClient.parse_metadata_schema_path) + metadata_store_path = staticmethod(MetadataServiceClient.metadata_store_path) + parse_metadata_store_path = staticmethod(MetadataServiceClient.parse_metadata_store_path) + common_billing_account_path = staticmethod(MetadataServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(MetadataServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(MetadataServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(MetadataServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(MetadataServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(MetadataServiceClient.parse_common_organization_path) + common_project_path = staticmethod(MetadataServiceClient.common_project_path) + parse_common_project_path = staticmethod(MetadataServiceClient.parse_common_project_path) + common_location_path = staticmethod(MetadataServiceClient.common_location_path) + parse_common_location_path = staticmethod(MetadataServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MetadataServiceAsyncClient: The constructed client. + """ + return MetadataServiceClient.from_service_account_info.__func__(MetadataServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MetadataServiceAsyncClient: The constructed client. + """ + return MetadataServiceClient.from_service_account_file.__func__(MetadataServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> MetadataServiceTransport: + """Returns the transport used by the client instance. + + Returns: + MetadataServiceTransport: The transport used by the client instance. 
+ """ + return self._client.transport + + get_transport_class = functools.partial(type(MetadataServiceClient).get_transport_class, type(MetadataServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, MetadataServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the metadata service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.MetadataServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = MetadataServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_metadata_store(self, + request: metadata_service.CreateMetadataStoreRequest = None, + *, + parent: str = None, + metadata_store: gca_metadata_store.MetadataStore = None, + metadata_store_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Initializes a MetadataStore, including allocation of + resources. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateMetadataStoreRequest`): + The request object. Request message for + [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore]. + parent (:class:`str`): + Required. The resource name of the + Location where the MetadataStore should + be created. Format: + projects/{project}/locations/{location}/ + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_store (:class:`google.cloud.aiplatform_v1beta1.types.MetadataStore`): + Required. The MetadataStore to + create. + + This corresponds to the ``metadata_store`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ metadata_store_id (:class:`str`): + The {metadatastore} portion of the resource name with + the format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + If not provided, the MetadataStore's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are /[a-z][0-9]-/. Must be + unique across all MetadataStores in the parent Location. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting MetadataStore.) + + This corresponds to the ``metadata_store_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.MetadataStore` Instance of a metadata store. Contains a set of metadata that can be + queried. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, metadata_store, metadata_store_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.CreateMetadataStoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if metadata_store is not None: + request.metadata_store = metadata_store + if metadata_store_id is not None: + request.metadata_store_id = metadata_store_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_metadata_store, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_metadata_store.MetadataStore, + metadata_type=metadata_service.CreateMetadataStoreOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_metadata_store(self, + request: metadata_service.GetMetadataStoreRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_store.MetadataStore: + r"""Retrieves a specific MetadataStore. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetMetadataStoreRequest`): + The request object. Request message for + [MetadataService.GetMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataStore]. + name (:class:`str`): + Required. 
The resource name of the + MetadataStore to retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.MetadataStore: + Instance of a metadata store. + Contains a set of metadata that can be + queried. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.GetMetadataStoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_metadata_store, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_metadata_stores(self, + request: metadata_service.ListMetadataStoresRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListMetadataStoresAsyncPager: + r"""Lists MetadataStores for a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest`): + The request object. Request message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. + parent (:class:`str`): + Required. The Location whose + MetadataStores should be listed. Format: + projects/{project}/locations/{location} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataStoresAsyncPager: + Response message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
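+        # The flattened-argument convention used throughout this client:
+        # callers pass either a full request object *or* individual fields
+        # such as ``parent``, never both. A hedged sketch (the parent path
+        # is a hypothetical placeholder):
+        #
+        #   pager = await client.list_metadata_stores(
+        #       parent="projects/my-project/locations/us-central1",
+        #   )
+        #   async for store in pager:
+        #       print(store.name)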
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.ListMetadataStoresRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_metadata_stores, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListMetadataStoresAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_metadata_store(self, + request: metadata_service.DeleteMetadataStoreRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a single MetadataStore. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteMetadataStoreRequest`): + The request object. Request message for + [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore]. + name (:class:`str`): + Required. The resource name of the + MetadataStore to delete. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.DeleteMetadataStoreRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_metadata_store, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=metadata_service.DeleteMetadataStoreOperationMetadata, + ) + + # Done; return the response. + return response + + async def create_artifact(self, + request: metadata_service.CreateArtifactRequest = None, + *, + parent: str = None, + artifact: gca_artifact.Artifact = None, + artifact_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_artifact.Artifact: + r"""Creates an Artifact associated with a MetadataStore. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateArtifactRequest`): + The request object. Request message for + [MetadataService.CreateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.CreateArtifact]. + parent (:class:`str`): + Required. The resource name of the + MetadataStore where the Artifact should + be created. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + artifact (:class:`google.cloud.aiplatform_v1beta1.types.Artifact`): + Required. The Artifact to create. + This corresponds to the ``artifact`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + artifact_id (:class:`str`): + The {artifact} portion of the resource name with the + format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + If not provided, the Artifact's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are /[a-z][0-9]-/. Must be + unique across all Artifacts in the parent MetadataStore. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting Artifact.) + + This corresponds to the ``artifact_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
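+        # A hedged sketch of a flattened create_artifact call (the parent
+        # path, display name, and artifact ID are hypothetical placeholders):
+        #
+        #   from google.cloud.aiplatform_v1beta1.types import Artifact
+        #
+        #   artifact = await client.create_artifact(
+        #       parent="projects/my-project/locations/us-central1"
+        #              "/metadataStores/my-store",
+        #       artifact=Artifact(display_name="training-dataset"),
+        #       artifact_id="my-artifact",
+        #   )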
+ has_flattened_params = any([parent, artifact, artifact_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.CreateArtifactRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if artifact is not None: + request.artifact = artifact + if artifact_id is not None: + request.artifact_id = artifact_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_artifact, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_artifact(self, + request: metadata_service.GetArtifactRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> artifact.Artifact: + r"""Retrieves a specific Artifact. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetArtifactRequest`): + The request object. Request message for + [MetadataService.GetArtifact][google.cloud.aiplatform.v1beta1.MetadataService.GetArtifact]. + name (:class:`str`): + Required. The resource name of the + Artifact to retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.GetArtifactRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_artifact, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def list_artifacts(self, + request: metadata_service.ListArtifactsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListArtifactsAsyncPager: + r"""Lists Artifacts in the MetadataStore. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest`): + The request object. Request message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. + parent (:class:`str`): + Required. The MetadataStore whose + Artifacts should be listed. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListArtifactsAsyncPager: + Response message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.ListArtifactsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_artifacts, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListArtifactsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_artifact(self, + request: metadata_service.UpdateArtifactRequest = None, + *, + artifact: gca_artifact.Artifact = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_artifact.Artifact: + r"""Updates a stored Artifact. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateArtifactRequest`): + The request object. Request message for + [MetadataService.UpdateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.UpdateArtifact]. 
+ artifact (:class:`google.cloud.aiplatform_v1beta1.types.Artifact`): + Required. The Artifact containing updates. The + Artifact's + [Artifact.name][google.cloud.aiplatform.v1beta1.Artifact.name] + field is used to identify the Artifact to be updated. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + + This corresponds to the ``artifact`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. A FieldMask indicating + which fields should be updated. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([artifact, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.UpdateArtifactRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if artifact is not None: + request.artifact = artifact + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_artifact, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("artifact.name", request.artifact.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def create_context(self, + request: metadata_service.CreateContextRequest = None, + *, + parent: str = None, + context: gca_context.Context = None, + context_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_context.Context: + r"""Creates a Context associated with a MetadataStore. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateContextRequest`): + The request object. Request message for + [MetadataService.CreateContext][google.cloud.aiplatform.v1beta1.MetadataService.CreateContext]. + parent (:class:`str`): + Required. The resource name of the + MetadataStore where the Context should + be created. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + context (:class:`google.cloud.aiplatform_v1beta1.types.Context`): + Required. The Context to create. 
+ This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + context_id (:class:`str`): + The {context} portion of the resource name with the + format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}. + If not provided, the Context's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are /[a-z][0-9]-/. Must be + unique across all Contexts in the parent MetadataStore. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting Context.) + + This corresponds to the ``context_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, context, context_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.CreateContextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if context is not None: + request.context = context + if context_id is not None: + request.context_id = context_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_context, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_context(self, + request: metadata_service.GetContextRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> context.Context: + r"""Retrieves a specific Context. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetContextRequest`): + The request object. Request message for + [MetadataService.GetContext][google.cloud.aiplatform.v1beta1.MetadataService.GetContext]. + name (:class:`str`): + Required. The resource name of the + Context to retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.GetContextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_context, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_contexts(self, + request: metadata_service.ListContextsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListContextsAsyncPager: + r"""Lists Contexts on the MetadataStore. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListContextsRequest`): + The request object. Request message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts] + parent (:class:`str`): + Required. The MetadataStore whose + Contexts should be listed. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListContextsAsyncPager: + Response message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.ListContextsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
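+        # Callers can override the wrapped defaults on a per-call basis. A
+        # hedged sketch (the retry values and parent are illustrative only):
+        #
+        #   from google.api_core import retry as retries
+        #
+        #   pager = await client.list_contexts(
+        #       parent="projects/my-project/locations/us-central1"
+        #              "/metadataStores/my-store",
+        #       retry=retries.Retry(initial=0.1, maximum=5.0, deadline=30.0),
+        #       timeout=10.0,
+        #   )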
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_contexts, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListContextsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_context(self, + request: metadata_service.UpdateContextRequest = None, + *, + context: gca_context.Context = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_context.Context: + r"""Updates a stored Context. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateContextRequest`): + The request object. Request message for + [MetadataService.UpdateContext][google.cloud.aiplatform.v1beta1.MetadataService.UpdateContext]. + context (:class:`google.cloud.aiplatform_v1beta1.types.Context`): + Required. The Context containing updates. The Context's + [Context.name][google.cloud.aiplatform.v1beta1.Context.name] + field is used to identify the Context to be updated. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. A FieldMask indicating + which fields should be updated. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.UpdateContextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_context, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
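+        # The routing header lets the backend route the request by resource.
+        # Conceptually, the tuple appended below looks like this (the value
+        # is URL-encoded on the wire; the name is a hypothetical placeholder):
+        #
+        #   ("x-goog-request-params",
+        #    "context.name=projects/my-project/locations/us-central1"
+        #    "/metadataStores/my-store/contexts/my-context")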
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("context.name", request.context.name),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def delete_context(self,
+            request: metadata_service.DeleteContextRequest = None,
+            *,
+            name: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> operation_async.AsyncOperation:
+        r"""Deletes a stored Context.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteContextRequest`):
+                The request object. Request message for
+                [MetadataService.DeleteContext][google.cloud.aiplatform.v1beta1.MetadataService.DeleteContext].
+            name (:class:`str`):
+                Required. The resource name of the
+                Context to delete. Format:
+                projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation_async.AsyncOperation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
+                empty messages in your APIs. A typical example is to
+                use it as the request or the response type of an API
+                method. For instance:
+
+                    service Foo {
+                        rpc Bar(google.protobuf.Empty) returns
+                        (google.protobuf.Empty);
+
+                    }
+
+                The JSON representation for Empty is empty JSON
+                object {}.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = metadata_service.DeleteContextRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.delete_context,
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
+        response = operation_async.from_gapic(
+            response,
+            self._client._transport.operations_client,
+            empty_pb2.Empty,
+            metadata_type=gca_operation.DeleteOperationMetadata,
+        )
+
+        # Done; return the response.
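+        # A hedged sketch of consuming the returned long-running operation
+        # (the context name is a hypothetical placeholder):
+        #
+        #   lro = await client.delete_context(
+        #       name="projects/my-project/locations/us-central1"
+        #            "/metadataStores/my-store/contexts/my-context",
+        #   )
+        #   await lro.result()  # resolves to google.protobuf.Empty on success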
+ return response + + async def add_context_artifacts_and_executions(self, + request: metadata_service.AddContextArtifactsAndExecutionsRequest = None, + *, + context: str = None, + artifacts: Sequence[str] = None, + executions: Sequence[str] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_service.AddContextArtifactsAndExecutionsResponse: + r"""Adds a set of Artifacts and Executions to a Context. + If any of the Artifacts or Executions have already been + added to a Context, they are simply skipped. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsRequest`): + The request object. Request message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. + context (:class:`str`): + Required. The resource name of the + Context that the Artifacts and + Executions belong to. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + artifacts (:class:`Sequence[str]`): + The resource names of the Artifacts + to attribute to the Context. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + + This corresponds to the ``artifacts`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + executions (:class:`Sequence[str]`): + The resource names of the Executions + to associate with the Context. + + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + + This corresponds to the ``executions`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsResponse: + Response message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context, artifacts, executions]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.AddContextArtifactsAndExecutionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + if artifacts: + request.artifacts.extend(artifacts) + if executions: + request.executions.extend(executions) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.add_context_artifacts_and_executions, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
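+        # Illustrative call sketch (hypothetical resource names); Artifacts or
+        # Executions already attributed to the Context are simply skipped
+        # server-side, which makes the call effectively idempotent:
+        #
+        #     await client.add_context_artifacts_and_executions(
+        #         context=ctx_name,
+        #         artifacts=[artifact_name],
+        #         executions=[execution_name],
+        #     )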
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("context", request.context), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def add_context_children(self, + request: metadata_service.AddContextChildrenRequest = None, + *, + context: str = None, + child_contexts: Sequence[str] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_service.AddContextChildrenResponse: + r"""Adds a set of Contexts as children to a parent Context. If any + of the child Contexts have already been added to the parent + Context, they are simply skipped. If this call would create a + cycle or cause any Context to have more than 10 parents, the + request will fail with an INVALID_ARGUMENT error. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.AddContextChildrenRequest`): + The request object. Request message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. + context (:class:`str`): + Required. The resource name of the + parent Context. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + child_contexts (:class:`Sequence[str]`): + The resource names of the child + Contexts. + + This corresponds to the ``child_contexts`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.AddContextChildrenResponse: + Response message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context, child_contexts]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.AddContextChildrenRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + if child_contexts: + request.child_contexts.extend(child_contexts) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.add_context_children, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("context", request.context), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
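+        # The parent/child edges requested above are now recorded; the service
+        # rejects a request with INVALID_ARGUMENT if it would create a cycle or
+        # give any Context more than 10 parents. Illustrative sketch
+        # (hypothetical names):
+        #
+        #     await client.add_context_children(
+        #         context=parent_ctx_name,
+        #         child_contexts=[child_ctx_name],
+        #     )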
+ return response + + async def query_context_lineage_subgraph(self, + request: metadata_service.QueryContextLineageSubgraphRequest = None, + *, + context: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> lineage_subgraph.LineageSubgraph: + r"""Retrieves Artifacts and Executions within the + specified Context, connected by Event edges and returned + as a LineageSubgraph. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.QueryContextLineageSubgraphRequest`): + The request object. Request message for + [MetadataService.QueryContextLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryContextLineageSubgraph]. + context (:class:`str`): + Required. The resource name of the Context whose + Artifacts and Executions should be retrieved as a + LineageSubgraph. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + + The request may error with FAILED_PRECONDITION if the + number of Artifacts, the number of Executions, or the + number of Events that would be returned for the Context + exceeds 1000. + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.LineageSubgraph: + A subgraph of the overall lineage + graph. Event edges connect Artifact and + Execution nodes. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.QueryContextLineageSubgraphRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.query_context_lineage_subgraph, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("context", request.context), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def create_execution(self, + request: metadata_service.CreateExecutionRequest = None, + *, + parent: str = None, + execution: gca_execution.Execution = None, + execution_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_execution.Execution: + r"""Creates an Execution associated with a MetadataStore. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateExecutionRequest`): + The request object. 
Request message for + [MetadataService.CreateExecution][google.cloud.aiplatform.v1beta1.MetadataService.CreateExecution]. + parent (:class:`str`): + Required. The resource name of the + MetadataStore where the Execution should + be created. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + execution (:class:`google.cloud.aiplatform_v1beta1.types.Execution`): + Required. The Execution to create. + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + execution_id (:class:`str`): + The {execution} portion of the resource name with the + format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + If not provided, the Execution's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are /[a-z][0-9]-/. Must be + unique across all Executions in the parent + MetadataStore. (Otherwise the request will fail with + ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't + view the preexisting Execution.) + + This corresponds to the ``execution_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, execution, execution_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.CreateExecutionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if execution is not None: + request.execution = execution + if execution_id is not None: + request.execution_id = execution_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_execution, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_execution(self, + request: metadata_service.GetExecutionRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> execution.Execution: + r"""Retrieves a specific Execution. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetExecutionRequest`): + The request object. 
Request message for + [MetadataService.GetExecution][google.cloud.aiplatform.v1beta1.MetadataService.GetExecution]. + name (:class:`str`): + Required. The resource name of the + Execution to retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.GetExecutionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_execution, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_executions(self, + request: metadata_service.ListExecutionsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListExecutionsAsyncPager: + r"""Lists Executions in the MetadataStore. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest`): + The request object. Request message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. + parent (:class:`str`): + Required. The MetadataStore whose + Executions should be listed. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListExecutionsAsyncPager: + Response message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. 
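+        # Illustrative paging sketch (assumes ``client`` and a hypothetical
+        # ``store_name``); the returned pager resolves further pages lazily:
+        #
+        #     pager = await client.list_executions(parent=store_name)
+        #     async for execution in pager:
+        #         print(execution.name)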
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.ListExecutionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_executions, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListExecutionsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_execution(self, + request: metadata_service.UpdateExecutionRequest = None, + *, + execution: gca_execution.Execution = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_execution.Execution: + r"""Updates a stored Execution. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateExecutionRequest`): + The request object. Request message for + [MetadataService.UpdateExecution][google.cloud.aiplatform.v1beta1.MetadataService.UpdateExecution]. + execution (:class:`google.cloud.aiplatform_v1beta1.types.Execution`): + Required. The Execution containing updates. The + Execution's + [Execution.name][google.cloud.aiplatform.v1beta1.Execution.name] + field is used to identify the Execution to be updated. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. A FieldMask indicating + which fields should be updated. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
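+        # Callers supply either a complete UpdateExecutionRequest *or* the
+        # flattened ``execution``/``update_mask`` arguments; mixing the two
+        # raises the ValueError below. Illustrative sketch (hypothetical name):
+        #
+        #     await client.update_execution(
+        #         execution=gca_execution.Execution(
+        #             name=exec_name, description="finished"),
+        #         update_mask=field_mask_pb2.FieldMask(paths=["description"]),
+        #     )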
+ has_flattened_params = any([execution, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.UpdateExecutionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if execution is not None: + request.execution = execution + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_execution, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("execution.name", request.execution.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def add_execution_events(self, + request: metadata_service.AddExecutionEventsRequest = None, + *, + execution: str = None, + events: Sequence[event.Event] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_service.AddExecutionEventsResponse: + r"""Adds Events to the specified Execution. An Event + indicates whether an Artifact was used as an input or + output for an Execution. If an Event already exists + between the Execution and the Artifact, the Event is + skipped. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.AddExecutionEventsRequest`): + The request object. Request message for + [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. + execution (:class:`str`): + Required. The resource name of the + Execution that the Events connect + Artifacts with. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + events (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.Event]`): + The Events to create and add. + This corresponds to the ``events`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.AddExecutionEventsResponse: + Response message for + [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
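+        # Illustrative sketch (hypothetical names; ``type_`` reflects the
+        # proto-plus rendering of the Event ``type`` field, an assumption worth
+        # verifying against the generated types):
+        #
+        #     ev = event.Event(artifact=artifact_name, type_=event.Event.Type.INPUT)
+        #     await client.add_execution_events(execution=exec_name, events=[ev])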
+ has_flattened_params = any([execution, events]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.AddExecutionEventsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if execution is not None: + request.execution = execution + if events: + request.events.extend(events) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.add_execution_events, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("execution", request.execution), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def query_execution_inputs_and_outputs(self, + request: metadata_service.QueryExecutionInputsAndOutputsRequest = None, + *, + execution: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> lineage_subgraph.LineageSubgraph: + r"""Obtains the set of input and output Artifacts for + this Execution, in the form of LineageSubgraph that also + contains the Execution and connecting Events. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.QueryExecutionInputsAndOutputsRequest`): + The request object. Request message for + [MetadataService.QueryExecutionInputsAndOutputs][google.cloud.aiplatform.v1beta1.MetadataService.QueryExecutionInputsAndOutputs]. + execution (:class:`str`): + Required. The resource name of the + Execution whose input and output + Artifacts should be retrieved as a + LineageSubgraph. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.LineageSubgraph: + A subgraph of the overall lineage + graph. Event edges connect Artifact and + Execution nodes. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([execution]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.QueryExecutionInputsAndOutputsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if execution is not None: + request.execution = execution + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
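+        # Illustrative sketch of consuming the returned LineageSubgraph
+        # (hypothetical ``exec_name``; Event edges carry the input/output role):
+        #
+        #     subgraph = await client.query_execution_inputs_and_outputs(
+        #         execution=exec_name)
+        #     inputs = [e.artifact for e in subgraph.events
+        #               if e.type_ == event.Event.Type.INPUT]
+        #     outputs = [e.artifact for e in subgraph.events
+        #                if e.type_ == event.Event.Type.OUTPUT]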
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.query_execution_inputs_and_outputs,
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("execution", request.execution),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def create_metadata_schema(self,
+            request: metadata_service.CreateMetadataSchemaRequest = None,
+            *,
+            parent: str = None,
+            metadata_schema: gca_metadata_schema.MetadataSchema = None,
+            metadata_schema_id: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> gca_metadata_schema.MetadataSchema:
+        r"""Creates a MetadataSchema.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.CreateMetadataSchemaRequest`):
+                The request object. Request message for
+                [MetadataService.CreateMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataSchema].
+            parent (:class:`str`):
+                Required. The resource name of the
+                MetadataStore where the MetadataSchema
+                should be created. Format:
+                projects/{project}/locations/{location}/metadataStores/{metadatastore}
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            metadata_schema (:class:`google.cloud.aiplatform_v1beta1.types.MetadataSchema`):
+                Required. The MetadataSchema to
+                create.
+
+                This corresponds to the ``metadata_schema`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            metadata_schema_id (:class:`str`):
+                The {metadata_schema} portion of the resource name with
+                the format:
+                projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}
+                If not provided, the MetadataSchema's ID will be a UUID
+                generated by the service. Must be 4-128 characters in
+                length. Valid characters are /[a-z][0-9]-/. Must be
+                unique across all MetadataSchemas in the parent
+                Location. (Otherwise the request will fail with
+                ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't
+                view the preexisting MetadataSchema.)
+
+                This corresponds to the ``metadata_schema_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.types.MetadataSchema:
+                Instance of a general MetadataSchema.
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent, metadata_schema, metadata_schema_id])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = metadata_service.CreateMetadataSchemaRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
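+        # Before the flattened arguments are copied onto the request below, a
+        # typical call looks like this illustrative sketch (hypothetical names;
+        # the YAML body is a minimal stand-in for a real OpenAPI-style schema):
+        #
+        #     schema = gca_metadata_schema.MetadataSchema(
+        #         schema_version="0.0.1",
+        #         schema="title: my_type\ntype: object\n",
+        #     )
+        #     created = await client.create_metadata_schema(
+        #         parent=store_name,
+        #         metadata_schema=schema,
+        #         metadata_schema_id="my-type",
+        #     )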
+ if parent is not None: + request.parent = parent + if metadata_schema is not None: + request.metadata_schema = metadata_schema + if metadata_schema_id is not None: + request.metadata_schema_id = metadata_schema_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_metadata_schema, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_metadata_schema(self, + request: metadata_service.GetMetadataSchemaRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_schema.MetadataSchema: + r"""Retrieves a specific MetadataSchema. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetMetadataSchemaRequest`): + The request object. Request message for + [MetadataService.GetMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataSchema]. + name (:class:`str`): + Required. The resource name of the + MetadataSchema to retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.MetadataSchema: + Instance of a general MetadataSchema. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.GetMetadataSchemaRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_metadata_schema, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def list_metadata_schemas(self, + request: metadata_service.ListMetadataSchemasRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListMetadataSchemasAsyncPager: + r"""Lists MetadataSchemas. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest`): + The request object. Request message for + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. + parent (:class:`str`): + Required. The MetadataStore whose + MetadataSchemas should be listed. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataSchemasAsyncPager: + Response message for + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = metadata_service.ListMetadataSchemasRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_metadata_schemas, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListMetadataSchemasAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def query_artifact_lineage_subgraph(self, + request: metadata_service.QueryArtifactLineageSubgraphRequest = None, + *, + artifact: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> lineage_subgraph.LineageSubgraph: + r"""Retrieves lineage of an Artifact represented through + Artifacts and Executions connected by Event edges and + returned as a LineageSubgraph. 
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.QueryArtifactLineageSubgraphRequest`):
+                The request object. Request message for
+                [MetadataService.QueryArtifactLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryArtifactLineageSubgraph].
+            artifact (:class:`str`):
+                Required. The resource name of the Artifact whose
+                Lineage needs to be retrieved as a LineageSubgraph.
+                Format:
+                projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}
+
+                The request may error with FAILED_PRECONDITION if the
+                number of Artifacts, the number of Executions, or the
+                number of Events that would be returned for the
+                Artifact exceeds 1000.
+
+                This corresponds to the ``artifact`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.types.LineageSubgraph:
+                A subgraph of the overall lineage
+                graph. Event edges connect Artifact and
+                Execution nodes.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([artifact])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = metadata_service.QueryArtifactLineageSubgraphRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if artifact is not None:
+            request.artifact = artifact
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.query_artifact_lineage_subgraph,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("artifact", request.artifact),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+
+
+
+try:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+        gapic_version=pkg_resources.get_distribution(
+            "google-cloud-aiplatform",
+        ).version,
+    )
+except pkg_resources.DistributionNotFound:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+__all__ = (
+    "MetadataServiceAsyncClient",
+)
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py
new file mode 100644
index 0000000000..3ef295ab55
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py
@@ -0,0 +1,2739 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.metadata_service import pagers +from google.cloud.aiplatform_v1beta1.types import artifact +from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1beta1.types import context +from google.cloud.aiplatform_v1beta1.types import context as gca_context +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import event +from google.cloud.aiplatform_v1beta1.types import execution +from google.cloud.aiplatform_v1beta1.types import execution as gca_execution +from google.cloud.aiplatform_v1beta1.types import lineage_subgraph +from google.cloud.aiplatform_v1beta1.types import metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_service +from google.cloud.aiplatform_v1beta1.types import metadata_store +from google.cloud.aiplatform_v1beta1.types import metadata_store as gca_metadata_store +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import MetadataServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import MetadataServiceGrpcTransport +from .transports.grpc_asyncio import MetadataServiceGrpcAsyncIOTransport + + +class MetadataServiceClientMeta(type): + """Metaclass for the MetadataService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[MetadataServiceTransport]] + _transport_registry["grpc"] = MetadataServiceGrpcTransport + _transport_registry["grpc_asyncio"] = MetadataServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[MetadataServiceTransport]: + """Returns an appropriate transport class. 
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class MetadataServiceClient(metaclass=MetadataServiceClientMeta):
+    """Service for reading and writing metadata entries."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            MetadataServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            MetadataServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(
+            filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> MetadataServiceTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            MetadataServiceTransport: The transport used by the client
+            instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def artifact_path(project: str,location: str,metadata_store: str,artifact: str,) -> str:
+        """Returns a fully-qualified artifact string."""
+        return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(project=project, location=location, metadata_store=metadata_store, artifact=artifact, )
+
+    @staticmethod
+    def parse_artifact_path(path: str) -> Dict[str,str]:
+        """Parses an artifact path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)/artifacts/(?P<artifact>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def context_path(project: str,location: str,metadata_store: str,context: str,) -> str:
+        """Returns a fully-qualified context string."""
+        return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(project=project, location=location, metadata_store=metadata_store, context=context, )
+
+    @staticmethod
+    def parse_context_path(path: str) -> Dict[str,str]:
+        """Parses a context path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)/contexts/(?P<context>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def execution_path(project: str,location: str,metadata_store: str,execution: str,) -> str:
+        """Returns a fully-qualified execution string."""
+        return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(project=project, location=location, metadata_store=metadata_store, execution=execution, )
+
+    @staticmethod
+    def parse_execution_path(path: str) -> Dict[str,str]:
+        """Parses an execution path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)/executions/(?P<execution>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def metadata_schema_path(project: str,location: str,metadata_store: str,metadata_schema: str,) -> str:
+        """Returns a fully-qualified metadata_schema string."""
+        return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}".format(project=project, location=location, metadata_store=metadata_store, metadata_schema=metadata_schema, )
+
+    @staticmethod
+    def parse_metadata_schema_path(path: str) -> Dict[str,str]:
+        """Parses a metadata_schema path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)/metadataSchemas/(?P<metadata_schema>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def metadata_store_path(project: str,location: str,metadata_store: str,) -> str:
+        """Returns a fully-qualified metadata_store string."""
+        return "projects/{project}/locations/{location}/metadataStores/{metadata_store}".format(project=project, location=location, metadata_store=metadata_store, )
+
+    @staticmethod
+    def parse_metadata_store_path(path: str) -> Dict[str,str]:
+        """Parses a metadata_store path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse an organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, MetadataServiceTransport, None] = None,
+            client_options: Optional[client_options_lib.ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the metadata service client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, MetadataServiceTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present.
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, MetadataServiceTransport): + # transport is a MetadataServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_metadata_store(self, + request: metadata_service.CreateMetadataStoreRequest = None, + *, + parent: str = None, + metadata_store: gca_metadata_store.MetadataStore = None, + metadata_store_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Initializes a MetadataStore, including allocation of + resources. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateMetadataStoreRequest): + The request object. Request message for + [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore]. + parent (str): + Required. 
The resource name of the + Location where the MetadataStore should + be created. Format: + projects/{project}/locations/{location}/ + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_store (google.cloud.aiplatform_v1beta1.types.MetadataStore): + Required. The MetadataStore to + create. + + This corresponds to the ``metadata_store`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_store_id (str): + The {metadatastore} portion of the resource name with + the format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + If not provided, the MetadataStore's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are /[a-z][0-9]-/. Must be + unique across all MetadataStores in the parent Location. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting MetadataStore.) + + This corresponds to the ``metadata_store_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.MetadataStore` Instance of a metadata store. Contains a set of metadata that can be + queried. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, metadata_store, metadata_store_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.CreateMetadataStoreRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.CreateMetadataStoreRequest): + request = metadata_service.CreateMetadataStoreRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if metadata_store is not None: + request.metadata_store = metadata_store + if metadata_store_id is not None: + request.metadata_store_id = metadata_store_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_metadata_store] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
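+        # Editor's note: `from_gapic` wraps the raw operation in a
+        # future-like object. A minimal usage sketch, with illustrative
+        # names (`client`, the project and location values) that are not
+        # part of this module:
+        #
+        #   lro = client.create_metadata_store(
+        #       parent="projects/my-project/locations/us-central1",
+        #       metadata_store=gca_metadata_store.MetadataStore(),
+        #   )
+        #   store = lro.result()  # blocks until resource allocation finishes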
+ response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_metadata_store.MetadataStore, + metadata_type=metadata_service.CreateMetadataStoreOperationMetadata, + ) + + # Done; return the response. + return response + + def get_metadata_store(self, + request: metadata_service.GetMetadataStoreRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_store.MetadataStore: + r"""Retrieves a specific MetadataStore. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetMetadataStoreRequest): + The request object. Request message for + [MetadataService.GetMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataStore]. + name (str): + Required. The resource name of the + MetadataStore to retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.MetadataStore: + Instance of a metadata store. + Contains a set of metadata that can be + queried. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.GetMetadataStoreRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.GetMetadataStoreRequest): + request = metadata_service.GetMetadataStoreRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_metadata_store] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_metadata_stores(self, + request: metadata_service.ListMetadataStoresRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListMetadataStoresPager: + r"""Lists MetadataStores for a Location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest): + The request object. Request message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. + parent (str): + Required. 
The Location whose + MetadataStores should be listed. Format: + projects/{project}/locations/{location} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataStoresPager: + Response message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.ListMetadataStoresRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.ListMetadataStoresRequest): + request = metadata_service.ListMetadataStoresRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_metadata_stores] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListMetadataStoresPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_metadata_store(self, + request: metadata_service.DeleteMetadataStoreRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a single MetadataStore. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteMetadataStoreRequest): + The request object. Request message for + [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore]. + name (str): + Required. The resource name of the + MetadataStore to delete. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.DeleteMetadataStoreRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.DeleteMetadataStoreRequest): + request = metadata_service.DeleteMetadataStoreRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_metadata_store] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=metadata_service.DeleteMetadataStoreOperationMetadata, + ) + + # Done; return the response. + return response + + def create_artifact(self, + request: metadata_service.CreateArtifactRequest = None, + *, + parent: str = None, + artifact: gca_artifact.Artifact = None, + artifact_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_artifact.Artifact: + r"""Creates an Artifact associated with a MetadataStore. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateArtifactRequest): + The request object. Request message for + [MetadataService.CreateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.CreateArtifact]. + parent (str): + Required. The resource name of the + MetadataStore where the Artifact should + be created. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + artifact (google.cloud.aiplatform_v1beta1.types.Artifact): + Required. The Artifact to create. + This corresponds to the ``artifact`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ artifact_id (str): + The {artifact} portion of the resource name with the + format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + If not provided, the Artifact's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are /[a-z][0-9]-/. Must be + unique across all Artifacts in the parent MetadataStore. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting Artifact.) + + This corresponds to the ``artifact_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, artifact, artifact_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.CreateArtifactRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.CreateArtifactRequest): + request = metadata_service.CreateArtifactRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if artifact is not None: + request.artifact = artifact + if artifact_id is not None: + request.artifact_id = artifact_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_artifact] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_artifact(self, + request: metadata_service.GetArtifactRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> artifact.Artifact: + r"""Retrieves a specific Artifact. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetArtifactRequest): + The request object. Request message for + [MetadataService.GetArtifact][google.cloud.aiplatform.v1beta1.MetadataService.GetArtifact]. + name (str): + Required. The resource name of the + Artifact to retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.GetArtifactRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.GetArtifactRequest): + request = metadata_service.GetArtifactRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_artifact] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_artifacts(self, + request: metadata_service.ListArtifactsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListArtifactsPager: + r"""Lists Artifacts in the MetadataStore. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest): + The request object. Request message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. + parent (str): + Required. The MetadataStore whose + Artifacts should be listed. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListArtifactsPager: + Response message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.ListArtifactsRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.ListArtifactsRequest): + request = metadata_service.ListArtifactsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_artifacts] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListArtifactsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_artifact(self, + request: metadata_service.UpdateArtifactRequest = None, + *, + artifact: gca_artifact.Artifact = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_artifact.Artifact: + r"""Updates a stored Artifact. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UpdateArtifactRequest): + The request object. Request message for + [MetadataService.UpdateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.UpdateArtifact]. + artifact (google.cloud.aiplatform_v1beta1.types.Artifact): + Required. The Artifact containing updates. The + Artifact's + [Artifact.name][google.cloud.aiplatform.v1beta1.Artifact.name] + field is used to identify the Artifact to be updated. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + + This corresponds to the ``artifact`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. A FieldMask indicating + which fields should be updated. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Artifact: + Instance of a general artifact. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([artifact, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.UpdateArtifactRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
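+        # Editor's sketch: updates are scoped by the required FieldMask, so a
+        # caller typically lists only the fields being changed. Illustrative
+        # usage (the variable names are not part of this module):
+        #
+        #   artifact.display_name = "renamed"
+        #   client.update_artifact(
+        #       artifact=artifact,
+        #       update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
+        #   )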
+ if not isinstance(request, metadata_service.UpdateArtifactRequest): + request = metadata_service.UpdateArtifactRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if artifact is not None: + request.artifact = artifact + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_artifact] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("artifact.name", request.artifact.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def create_context(self, + request: metadata_service.CreateContextRequest = None, + *, + parent: str = None, + context: gca_context.Context = None, + context_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_context.Context: + r"""Creates a Context associated with a MetadataStore. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateContextRequest): + The request object. Request message for + [MetadataService.CreateContext][google.cloud.aiplatform.v1beta1.MetadataService.CreateContext]. + parent (str): + Required. The resource name of the + MetadataStore where the Context should + be created. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + context (google.cloud.aiplatform_v1beta1.types.Context): + Required. The Context to create. + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + context_id (str): + The {context} portion of the resource name with the + format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}. + If not provided, the Context's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are /[a-z][0-9]-/. Must be + unique across all Contexts in the parent MetadataStore. + (Otherwise the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the + preexisting Context.) + + This corresponds to the ``context_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
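+        # Editor's note: per the GAPIC convention, callers supply either a
+        # fully-formed `request` proto or the flattened keyword arguments,
+        # never both; the check below enforces that contract.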
+ has_flattened_params = any([parent, context, context_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.CreateContextRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.CreateContextRequest): + request = metadata_service.CreateContextRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if context is not None: + request.context = context + if context_id is not None: + request.context_id = context_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_context] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_context(self, + request: metadata_service.GetContextRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> context.Context: + r"""Retrieves a specific Context. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetContextRequest): + The request object. Request message for + [MetadataService.GetContext][google.cloud.aiplatform.v1beta1.MetadataService.GetContext]. + name (str): + Required. The resource name of the + Context to retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.GetContextRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.GetContextRequest): + request = metadata_service.GetContextRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
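+        # Editor's note: `_wrapped_methods` holds each transport method
+        # pre-wrapped with the default retry and timeout policy from the
+        # service config; explicit `retry`/`timeout` arguments override
+        # those defaults for this call only.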
+ rpc = self._transport._wrapped_methods[self._transport.get_context] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_contexts(self, + request: metadata_service.ListContextsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListContextsPager: + r"""Lists Contexts on the MetadataStore. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListContextsRequest): + The request object. Request message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts] + parent (str): + Required. The MetadataStore whose + Contexts should be listed. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListContextsPager: + Response message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.ListContextsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.ListContextsRequest): + request = metadata_service.ListContextsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_contexts] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListContextsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
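+        # Editor's sketch: the pager fetches further pages lazily as it is
+        # iterated; `client` and `store_name` below are illustrative names:
+        #
+        #   for ctx in client.list_contexts(parent=store_name):
+        #       print(ctx.name)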
+ return response + + def update_context(self, + request: metadata_service.UpdateContextRequest = None, + *, + context: gca_context.Context = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_context.Context: + r"""Updates a stored Context. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UpdateContextRequest): + The request object. Request message for + [MetadataService.UpdateContext][google.cloud.aiplatform.v1beta1.MetadataService.UpdateContext]. + context (google.cloud.aiplatform_v1beta1.types.Context): + Required. The Context containing updates. The Context's + [Context.name][google.cloud.aiplatform.v1beta1.Context.name] + field is used to identify the Context to be updated. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. A FieldMask indicating + which fields should be updated. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Context: + Instance of a general context. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.UpdateContextRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.UpdateContextRequest): + request = metadata_service.UpdateContextRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_context] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("context.name", request.context.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_context(self, + request: metadata_service.DeleteContextRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a stored Context. 
+
+        Args:
+            request (google.cloud.aiplatform_v1beta1.types.DeleteContextRequest):
+                The request object. Request message for
+                [MetadataService.DeleteContext][google.cloud.aiplatform.v1beta1.MetadataService.DeleteContext].
+            name (str):
+                Required. The resource name of the
+                Context to delete. Format:
+                projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
+                empty messages in your APIs. A typical example is to
+                use it as the request or the response type of an API
+                method. For instance:
+
+                service Foo {
+                    rpc Bar(google.protobuf.Empty) returns
+                    (google.protobuf.Empty);
+
+                }
+
+                The JSON representation for Empty is empty JSON
+                object {}.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a metadata_service.DeleteContextRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, metadata_service.DeleteContextRequest):
+            request = metadata_service.DeleteContextRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.delete_context]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
+        response = gac_operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            empty_pb2.Empty,
+            metadata_type=gca_operation.DeleteOperationMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def add_context_artifacts_and_executions(self,
+            request: metadata_service.AddContextArtifactsAndExecutionsRequest = None,
+            *,
+            context: str = None,
+            artifacts: Sequence[str] = None,
+            executions: Sequence[str] = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> metadata_service.AddContextArtifactsAndExecutionsResponse:
+        r"""Adds a set of Artifacts and Executions to a Context.
+        If any of the Artifacts or Executions have already been
+        added to a Context, they are simply skipped.
+ + Args: + request (google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsRequest): + The request object. Request message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. + context (str): + Required. The resource name of the + Context that the Artifacts and + Executions belong to. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + artifacts (Sequence[str]): + The resource names of the Artifacts + to attribute to the Context. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + + This corresponds to the ``artifacts`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + executions (Sequence[str]): + The resource names of the Executions + to associate with the Context. + + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + + This corresponds to the ``executions`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsResponse: + Response message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context, artifacts, executions]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.AddContextArtifactsAndExecutionsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.AddContextArtifactsAndExecutionsRequest): + request = metadata_service.AddContextArtifactsAndExecutionsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + if artifacts is not None: + request.artifacts = artifacts + if executions is not None: + request.executions = executions + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_context_artifacts_and_executions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("context", request.context), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
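+        # Editor's sketch: callers link existing resources by resource name;
+        # the variables below are illustrative, not part of this module:
+        #
+        #   client.add_context_artifacts_and_executions(
+        #       context=context_name,
+        #       artifacts=[artifact_name],
+        #       executions=[execution_name],
+        #   )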
+ return response + + def add_context_children(self, + request: metadata_service.AddContextChildrenRequest = None, + *, + context: str = None, + child_contexts: Sequence[str] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_service.AddContextChildrenResponse: + r"""Adds a set of Contexts as children to a parent Context. If any + of the child Contexts have already been added to the parent + Context, they are simply skipped. If this call would create a + cycle or cause any Context to have more than 10 parents, the + request will fail with an INVALID_ARGUMENT error. + + Args: + request (google.cloud.aiplatform_v1beta1.types.AddContextChildrenRequest): + The request object. Request message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. + context (str): + Required. The resource name of the + parent Context. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + child_contexts (Sequence[str]): + The resource names of the child + Contexts. + + This corresponds to the ``child_contexts`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.AddContextChildrenResponse: + Response message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context, child_contexts]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.AddContextChildrenRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.AddContextChildrenRequest): + request = metadata_service.AddContextChildrenRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + if child_contexts is not None: + request.child_contexts = child_contexts + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_context_children] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("context", request.context), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
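+        # Editor's note: per the docstring above, already-linked children are
+        # skipped server-side, so repeating this call with the same arguments
+        # is harmless; creating a cycle or giving a Context more than 10
+        # parents fails with INVALID_ARGUMENT.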
+ return response + + def query_context_lineage_subgraph(self, + request: metadata_service.QueryContextLineageSubgraphRequest = None, + *, + context: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> lineage_subgraph.LineageSubgraph: + r"""Retrieves Artifacts and Executions within the + specified Context, connected by Event edges and returned + as a LineageSubgraph. + + Args: + request (google.cloud.aiplatform_v1beta1.types.QueryContextLineageSubgraphRequest): + The request object. Request message for + [MetadataService.QueryContextLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryContextLineageSubgraph]. + context (str): + Required. The resource name of the Context whose + Artifacts and Executions should be retrieved as a + LineageSubgraph. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + + The request may error with FAILED_PRECONDITION if the + number of Artifacts, the number of Executions, or the + number of Events that would be returned for the Context + exceeds 1000. + + This corresponds to the ``context`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.LineageSubgraph: + A subgraph of the overall lineage + graph. Event edges connect Artifact and + Execution nodes. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([context]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.QueryContextLineageSubgraphRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.QueryContextLineageSubgraphRequest): + request = metadata_service.QueryContextLineageSubgraphRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if context is not None: + request.context = context + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.query_context_lineage_subgraph] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("context", request.context), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
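+        # Editor's note (field names unverified here): the returned
+        # LineageSubgraph is expected to carry the matched artifacts,
+        # executions, and the events connecting them, e.g.
+        # `response.artifacts`, `response.executions`, `response.events`.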
+ return response + + def create_execution(self, + request: metadata_service.CreateExecutionRequest = None, + *, + parent: str = None, + execution: gca_execution.Execution = None, + execution_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_execution.Execution: + r"""Creates an Execution associated with a MetadataStore. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateExecutionRequest): + The request object. Request message for + [MetadataService.CreateExecution][google.cloud.aiplatform.v1beta1.MetadataService.CreateExecution]. + parent (str): + Required. The resource name of the + MetadataStore where the Execution should + be created. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + execution (google.cloud.aiplatform_v1beta1.types.Execution): + Required. The Execution to create. + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + execution_id (str): + The {execution} portion of the resource name with the + format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + If not provided, the Execution's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are /[a-z][0-9]-/. Must be + unique across all Executions in the parent + MetadataStore. (Otherwise the request will fail with + ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't + view the preexisting Execution.) + + This corresponds to the ``execution_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, execution, execution_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.CreateExecutionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.CreateExecutionRequest): + request = metadata_service.CreateExecutionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if execution is not None: + request.execution = execution + if execution_id is not None: + request.execution_id = execution_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_execution] + + # Certain fields should be provided within the metadata header; + # add these here. 
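+        # Editor's note: `to_grpc_metadata` renders these pairs into the
+        # `x-goog-request-params` header so the backend can route the call
+        # by its `parent` resource.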
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_execution(self, + request: metadata_service.GetExecutionRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> execution.Execution: + r"""Retrieves a specific Execution. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetExecutionRequest): + The request object. Request message for + [MetadataService.GetExecution][google.cloud.aiplatform.v1beta1.MetadataService.GetExecution]. + name (str): + Required. The resource name of the + Execution to retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.GetExecutionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.GetExecutionRequest): + request = metadata_service.GetExecutionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_execution] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_executions(self, + request: metadata_service.ListExecutionsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListExecutionsPager: + r"""Lists Executions in the MetadataStore. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest): + The request object. Request message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. + parent (str): + Required. The MetadataStore whose + Executions should be listed. 
Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListExecutionsPager: + Response message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.ListExecutionsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.ListExecutionsRequest): + request = metadata_service.ListExecutionsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_executions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListExecutionsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_execution(self, + request: metadata_service.UpdateExecutionRequest = None, + *, + execution: gca_execution.Execution = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_execution.Execution: + r"""Updates a stored Execution. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UpdateExecutionRequest): + The request object. Request message for + [MetadataService.UpdateExecution][google.cloud.aiplatform.v1beta1.MetadataService.UpdateExecution]. + execution (google.cloud.aiplatform_v1beta1.types.Execution): + Required. The Execution containing updates. The + Execution's + [Execution.name][google.cloud.aiplatform.v1beta1.Execution.name] + field is used to identify the Execution to be updated. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. A FieldMask indicating + which fields should be updated. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Execution: + Instance of a general execution. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([execution, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.UpdateExecutionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.UpdateExecutionRequest): + request = metadata_service.UpdateExecutionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if execution is not None: + request.execution = execution + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_execution] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("execution.name", request.execution.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def add_execution_events(self, + request: metadata_service.AddExecutionEventsRequest = None, + *, + execution: str = None, + events: Sequence[event.Event] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_service.AddExecutionEventsResponse: + r"""Adds Events to the specified Execution. An Event + indicates whether an Artifact was used as an input or + output for an Execution. If an Event already exists + between the Execution and the Artifact, the Event is + skipped. + + Args: + request (google.cloud.aiplatform_v1beta1.types.AddExecutionEventsRequest): + The request object. Request message for + [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. + execution (str): + Required. The resource name of the + Execution that the Events connect + Artifacts with. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + events (Sequence[google.cloud.aiplatform_v1beta1.types.Event]): + The Events to create and add. + This corresponds to the ``events`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.AddExecutionEventsResponse: + Response message for + [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([execution, events]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.AddExecutionEventsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.AddExecutionEventsRequest): + request = metadata_service.AddExecutionEventsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if execution is not None: + request.execution = execution + if events is not None: + request.events = events + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_execution_events] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("execution", request.execution), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def query_execution_inputs_and_outputs(self, + request: metadata_service.QueryExecutionInputsAndOutputsRequest = None, + *, + execution: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> lineage_subgraph.LineageSubgraph: + r"""Obtains the set of input and output Artifacts for + this Execution, in the form of LineageSubgraph that also + contains the Execution and connecting Events. + + Args: + request (google.cloud.aiplatform_v1beta1.types.QueryExecutionInputsAndOutputsRequest): + The request object. Request message for + [MetadataService.QueryExecutionInputsAndOutputs][google.cloud.aiplatform.v1beta1.MetadataService.QueryExecutionInputsAndOutputs]. + execution (str): + Required. The resource name of the + Execution whose input and output + Artifacts should be retrieved as a + LineageSubgraph. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + + This corresponds to the ``execution`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.LineageSubgraph: + A subgraph of the overall lineage + graph. Event edges connect Artifact and + Execution nodes. 
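+
+        Example (an illustrative sketch, not generated reference code;
+        assumes ``client`` is an initialized ``MetadataServiceClient`` and
+        ``execution_name`` is a full Execution resource name)::
+
+            subgraph = client.query_execution_inputs_and_outputs(
+                execution=execution_name,
+            )
+            for artifact in subgraph.artifacts:
+                print(artifact.display_name)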
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([execution])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a metadata_service.QueryExecutionInputsAndOutputsRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, metadata_service.QueryExecutionInputsAndOutputsRequest):
+            request = metadata_service.QueryExecutionInputsAndOutputsRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if execution is not None:
+            request.execution = execution
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.query_execution_inputs_and_outputs]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("execution", request.execution),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def create_metadata_schema(self,
+            request: metadata_service.CreateMetadataSchemaRequest = None,
+            *,
+            parent: str = None,
+            metadata_schema: gca_metadata_schema.MetadataSchema = None,
+            metadata_schema_id: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> gca_metadata_schema.MetadataSchema:
+        r"""Creates a MetadataSchema.
+
+        Args:
+            request (google.cloud.aiplatform_v1beta1.types.CreateMetadataSchemaRequest):
+                The request object. Request message for
+                [MetadataService.CreateMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataSchema].
+            parent (str):
+                Required. The resource name of the
+                MetadataStore where the MetadataSchema
+                should be created. Format:
+                projects/{project}/locations/{location}/metadataStores/{metadatastore}
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            metadata_schema (google.cloud.aiplatform_v1beta1.types.MetadataSchema):
+                Required. The MetadataSchema to
+                create.
+
+                This corresponds to the ``metadata_schema`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            metadata_schema_id (str):
+                The {metadata_schema} portion of the resource name with
+                the format:
+                projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}
+                If not provided, the MetadataSchema's ID will be a UUID
+                generated by the service. Must be 4-128 characters in
+                length. Valid characters are /[a-z][0-9]-/. Must be
+                unique across all MetadataSchemas in the parent
+                Location. (Otherwise the request will fail with
+                ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't
+                view the preexisting MetadataSchema.)
+
+                This corresponds to the ``metadata_schema_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.MetadataSchema: + Instance of a general MetadataSchema. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, metadata_schema, metadata_schema_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.CreateMetadataSchemaRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.CreateMetadataSchemaRequest): + request = metadata_service.CreateMetadataSchemaRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if metadata_schema is not None: + request.metadata_schema = metadata_schema + if metadata_schema_id is not None: + request.metadata_schema_id = metadata_schema_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_metadata_schema] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_metadata_schema(self, + request: metadata_service.GetMetadataSchemaRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> metadata_schema.MetadataSchema: + r"""Retrieves a specific MetadataSchema. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetMetadataSchemaRequest): + The request object. Request message for + [MetadataService.GetMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataSchema]. + name (str): + Required. The resource name of the + MetadataSchema to retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema} + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.MetadataSchema: + Instance of a general MetadataSchema. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
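+        # (Mixing ``request`` with flattened keyword fields would make it
+        # ambiguous which value wins, so the combination is rejected below.)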
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.GetMetadataSchemaRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.GetMetadataSchemaRequest): + request = metadata_service.GetMetadataSchemaRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_metadata_schema] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_metadata_schemas(self, + request: metadata_service.ListMetadataSchemasRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListMetadataSchemasPager: + r"""Lists MetadataSchemas. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest): + The request object. Request message for + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. + parent (str): + Required. The MetadataStore whose + MetadataSchemas should be listed. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataSchemasPager: + Response message for + [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a metadata_service.ListMetadataSchemasRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, metadata_service.ListMetadataSchemasRequest): + request = metadata_service.ListMetadataSchemasRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
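+        # (The ``is not None`` check below means an explicitly passed falsy
+        # value, such as an empty string, is still copied onto the request.)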
+        if parent is not None:
+            request.parent = parent
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.list_metadata_schemas]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("parent", request.parent),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # This method is paged; wrap the response in a pager, which provides
+        # an `__iter__` convenience method.
+        response = pagers.ListMetadataSchemasPager(
+            method=rpc,
+            request=request,
+            response=response,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def query_artifact_lineage_subgraph(self,
+            request: metadata_service.QueryArtifactLineageSubgraphRequest = None,
+            *,
+            artifact: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> lineage_subgraph.LineageSubgraph:
+        r"""Retrieves the lineage of an Artifact, presented as a
+        LineageSubgraph of Artifacts and Executions connected by
+        Event edges.
+
+        Args:
+            request (google.cloud.aiplatform_v1beta1.types.QueryArtifactLineageSubgraphRequest):
+                The request object. Request message for
+                [MetadataService.QueryArtifactLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryArtifactLineageSubgraph].
+            artifact (str):
+                Required. The resource name of the Artifact whose
+                lineage needs to be retrieved as a LineageSubgraph.
+                Format:
+                projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}
+
+                The request may fail with FAILED_PRECONDITION if the
+                number of Artifacts, the number of Executions, or the
+                number of Events that would be returned for the
+                Artifact exceeds 1000.
+
+                This corresponds to the ``artifact`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.types.LineageSubgraph:
+                A subgraph of the overall lineage
+                graph. Event edges connect Artifact and
+                Execution nodes.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([artifact])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a metadata_service.QueryArtifactLineageSubgraphRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, metadata_service.QueryArtifactLineageSubgraphRequest):
+            request = metadata_service.QueryArtifactLineageSubgraphRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+ if artifact is not None: + request.artifact = artifact + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.query_artifact_lineage_subgraph] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("artifact", request.artifact), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "MetadataServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/pagers.py new file mode 100644 index 0000000000..1bff1d843a --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/pagers.py @@ -0,0 +1,633 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.cloud.aiplatform_v1beta1.types import artifact +from google.cloud.aiplatform_v1beta1.types import context +from google.cloud.aiplatform_v1beta1.types import execution +from google.cloud.aiplatform_v1beta1.types import metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_service +from google.cloud.aiplatform_v1beta1.types import metadata_store + + +class ListMetadataStoresPager: + """A pager for iterating through ``list_metadata_stores`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse` object, and + provides an ``__iter__`` method to iterate through its + ``metadata_stores`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListMetadataStores`` requests and continue to iterate + through the ``metadata_stores`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., metadata_service.ListMetadataStoresResponse], + request: metadata_service.ListMetadataStoresRequest, + response: metadata_service.ListMetadataStoresResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metadata_service.ListMetadataStoresRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[metadata_service.ListMetadataStoresResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[metadata_store.MetadataStore]: + for page in self.pages: + yield from page.metadata_stores + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListMetadataStoresAsyncPager: + """A pager for iterating through ``list_metadata_stores`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``metadata_stores`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListMetadataStores`` requests and continue to iterate + through the ``metadata_stores`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[metadata_service.ListMetadataStoresResponse]], + request: metadata_service.ListMetadataStoresRequest, + response: metadata_service.ListMetadataStoresResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
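+
+        Note: these ``metadata`` entries are re-sent with every subsequent
+        page request made by this pager.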
+ """ + self._method = method + self._request = metadata_service.ListMetadataStoresRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[metadata_service.ListMetadataStoresResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[metadata_store.MetadataStore]: + async def async_generator(): + async for page in self.pages: + for response in page.metadata_stores: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListArtifactsPager: + """A pager for iterating through ``list_artifacts`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``artifacts`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListArtifacts`` requests and continue to iterate + through the ``artifacts`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., metadata_service.ListArtifactsResponse], + request: metadata_service.ListArtifactsRequest, + response: metadata_service.ListArtifactsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metadata_service.ListArtifactsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[metadata_service.ListArtifactsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[artifact.Artifact]: + for page in self.pages: + yield from page.artifacts + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListArtifactsAsyncPager: + """A pager for iterating through ``list_artifacts`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``artifacts`` field. 
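+
+    A short usage sketch (illustrative only; assumes ``client`` is an
+    initialized async metadata service client for this API, e.g. a
+    ``MetadataServiceAsyncClient``)::
+
+        pager = await client.list_artifacts(parent=parent)
+        async for page in pager.pages:
+            print(len(page.artifacts))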
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListArtifacts`` requests and continue to iterate + through the ``artifacts`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[metadata_service.ListArtifactsResponse]], + request: metadata_service.ListArtifactsRequest, + response: metadata_service.ListArtifactsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metadata_service.ListArtifactsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[metadata_service.ListArtifactsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[artifact.Artifact]: + async def async_generator(): + async for page in self.pages: + for response in page.artifacts: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListContextsPager: + """A pager for iterating through ``list_contexts`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListContextsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``contexts`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListContexts`` requests and continue to iterate + through the ``contexts`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListContextsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., metadata_service.ListContextsResponse], + request: metadata_service.ListContextsRequest, + response: metadata_service.ListContextsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListContextsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListContextsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = metadata_service.ListContextsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[metadata_service.ListContextsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[context.Context]: + for page in self.pages: + yield from page.contexts + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListContextsAsyncPager: + """A pager for iterating through ``list_contexts`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListContextsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``contexts`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListContexts`` requests and continue to iterate + through the ``contexts`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListContextsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[metadata_service.ListContextsResponse]], + request: metadata_service.ListContextsRequest, + response: metadata_service.ListContextsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListContextsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListContextsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metadata_service.ListContextsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[metadata_service.ListContextsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[context.Context]: + async def async_generator(): + async for page in self.pages: + for response in page.contexts: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListExecutionsPager: + """A pager for iterating through ``list_executions`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``executions`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListExecutions`` requests and continue to iterate + through the ``executions`` field on the + corresponding responses. 
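+
+    A short usage sketch (illustrative only; assumes ``client`` is an
+    initialized ``MetadataServiceClient``)::
+
+        for execution in client.list_executions(parent=parent):
+            print(execution.name)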
+ + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., metadata_service.ListExecutionsResponse], + request: metadata_service.ListExecutionsRequest, + response: metadata_service.ListExecutionsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metadata_service.ListExecutionsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[metadata_service.ListExecutionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[execution.Execution]: + for page in self.pages: + yield from page.executions + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListExecutionsAsyncPager: + """A pager for iterating through ``list_executions`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``executions`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListExecutions`` requests and continue to iterate + through the ``executions`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[metadata_service.ListExecutionsResponse]], + request: metadata_service.ListExecutionsRequest, + response: metadata_service.ListExecutionsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = metadata_service.ListExecutionsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[metadata_service.ListExecutionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[execution.Execution]: + async def async_generator(): + async for page in self.pages: + for response in page.executions: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListMetadataSchemasPager: + """A pager for iterating through ``list_metadata_schemas`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse` object, and + provides an ``__iter__`` method to iterate through its + ``metadata_schemas`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListMetadataSchemas`` requests and continue to iterate + through the ``metadata_schemas`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., metadata_service.ListMetadataSchemasResponse], + request: metadata_service.ListMetadataSchemasRequest, + response: metadata_service.ListMetadataSchemasResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metadata_service.ListMetadataSchemasRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[metadata_service.ListMetadataSchemasResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[metadata_schema.MetadataSchema]: + for page in self.pages: + yield from page.metadata_schemas + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListMetadataSchemasAsyncPager: + """A pager for iterating through ``list_metadata_schemas`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``metadata_schemas`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListMetadataSchemas`` requests and continue to iterate + through the ``metadata_schemas`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[metadata_service.ListMetadataSchemasResponse]], + request: metadata_service.ListMetadataSchemasRequest, + response: metadata_service.ListMetadataSchemasResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = metadata_service.ListMetadataSchemasRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[metadata_service.ListMetadataSchemasResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[metadata_schema.MetadataSchema]: + async def async_generator(): + async for page in self.pages: + for response in page.metadata_schemas: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/__init__.py new file mode 100644 index 0000000000..688ce8218c --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import MetadataServiceTransport +from .grpc import MetadataServiceGrpcTransport +from .grpc_asyncio import MetadataServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. 
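+# The keys below ('grpc', 'grpc_asyncio') are the values a caller may pass
+# as the client's ``transport`` argument to select an implementation.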
+_transport_registry = OrderedDict() # type: Dict[str, Type[MetadataServiceTransport]] +_transport_registry['grpc'] = MetadataServiceGrpcTransport +_transport_registry['grpc_asyncio'] = MetadataServiceGrpcAsyncIOTransport + +__all__ = ( + 'MetadataServiceTransport', + 'MetadataServiceGrpcTransport', + 'MetadataServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py new file mode 100644 index 0000000000..388f641b8f --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py @@ -0,0 +1,535 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import artifact +from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1beta1.types import context +from google.cloud.aiplatform_v1beta1.types import context as gca_context +from google.cloud.aiplatform_v1beta1.types import execution +from google.cloud.aiplatform_v1beta1.types import execution as gca_execution +from google.cloud.aiplatform_v1beta1.types import lineage_subgraph +from google.cloud.aiplatform_v1beta1.types import metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_service +from google.cloud.aiplatform_v1beta1.types import metadata_store +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class MetadataServiceTransport(abc.ABC): + """Abstract transport class for MetadataService.""" + + AUTH_SCOPES = ( + 
'https://www.googleapis.com/auth/cloud-platform',
+    )
+
+    DEFAULT_HOST: str = 'aiplatform.googleapis.com'
+    def __init__(
+            self, *,
+            host: str = DEFAULT_HOST,
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            quota_project_id: Optional[str] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            **kwargs,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): A list of scopes.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether a self-signed JWT should
+                be used for service account credentials.
+        """
+        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+        if ':' not in host:
+            host += ':443'
+        self._host = host
+
+        scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
+
+        # Save the scopes.
+        self._scopes = scopes
+
+        # If no credentials are provided, then determine the appropriate
+        # defaults.
+        if credentials and credentials_file:
+            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
+
+        if credentials_file is not None:
+            credentials, _ = google.auth.load_credentials_from_file(
+                credentials_file,
+                **scopes_kwargs,
+                quota_project_id=quota_project_id
+            )
+
+        elif credentials is None:
+            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
+
+        # If the credentials are service account credentials, then always try
+        # to use a self-signed JWT.
+        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
+            credentials = credentials.with_always_use_jwt_access(True)
+
+        # Save the credentials.
+        self._credentials = credentials
+
+        # TODO(busunkim): This method is in the base transport
+        # to avoid duplicating code across the transport classes. It should
+        # be deleted once the minimum required version of google-auth is increased.
+ + # TODO: Remove this function once google-auth >= 1.25.0 is required + @classmethod + def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: + """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" + + scopes_kwargs = {} + + if _GOOGLE_AUTH_VERSION and ( + packaging.version.parse(_GOOGLE_AUTH_VERSION) + >= packaging.version.parse("1.25.0") + ): + scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} + else: + scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} + + return scopes_kwargs + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_metadata_store: gapic_v1.method.wrap_method( + self.create_metadata_store, + default_timeout=5.0, + client_info=client_info, + ), + self.get_metadata_store: gapic_v1.method.wrap_method( + self.get_metadata_store, + default_timeout=5.0, + client_info=client_info, + ), + self.list_metadata_stores: gapic_v1.method.wrap_method( + self.list_metadata_stores, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_metadata_store: gapic_v1.method.wrap_method( + self.delete_metadata_store, + default_timeout=5.0, + client_info=client_info, + ), + self.create_artifact: gapic_v1.method.wrap_method( + self.create_artifact, + default_timeout=5.0, + client_info=client_info, + ), + self.get_artifact: gapic_v1.method.wrap_method( + self.get_artifact, + default_timeout=5.0, + client_info=client_info, + ), + self.list_artifacts: gapic_v1.method.wrap_method( + self.list_artifacts, + default_timeout=5.0, + client_info=client_info, + ), + self.update_artifact: gapic_v1.method.wrap_method( + self.update_artifact, + default_timeout=5.0, + client_info=client_info, + ), + self.create_context: gapic_v1.method.wrap_method( + self.create_context, + default_timeout=5.0, + client_info=client_info, + ), + self.get_context: gapic_v1.method.wrap_method( + self.get_context, + default_timeout=5.0, + client_info=client_info, + ), + self.list_contexts: gapic_v1.method.wrap_method( + self.list_contexts, + default_timeout=5.0, + client_info=client_info, + ), + self.update_context: gapic_v1.method.wrap_method( + self.update_context, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_context: gapic_v1.method.wrap_method( + self.delete_context, + default_timeout=5.0, + client_info=client_info, + ), + self.add_context_artifacts_and_executions: gapic_v1.method.wrap_method( + self.add_context_artifacts_and_executions, + default_timeout=5.0, + client_info=client_info, + ), + self.add_context_children: gapic_v1.method.wrap_method( + self.add_context_children, + default_timeout=5.0, + client_info=client_info, + ), + self.query_context_lineage_subgraph: gapic_v1.method.wrap_method( + self.query_context_lineage_subgraph, + default_timeout=5.0, + client_info=client_info, + ), + self.create_execution: gapic_v1.method.wrap_method( + self.create_execution, + default_timeout=5.0, + client_info=client_info, + ), + self.get_execution: gapic_v1.method.wrap_method( + self.get_execution, + default_timeout=5.0, + client_info=client_info, + ), + self.list_executions: gapic_v1.method.wrap_method( + self.list_executions, + default_timeout=5.0, + client_info=client_info, + ), + self.update_execution: gapic_v1.method.wrap_method( + self.update_execution, + default_timeout=5.0, + client_info=client_info, + ), + self.add_execution_events: gapic_v1.method.wrap_method( + self.add_execution_events, + 
default_timeout=5.0, + client_info=client_info, + ), + self.query_execution_inputs_and_outputs: gapic_v1.method.wrap_method( + self.query_execution_inputs_and_outputs, + default_timeout=5.0, + client_info=client_info, + ), + self.create_metadata_schema: gapic_v1.method.wrap_method( + self.create_metadata_schema, + default_timeout=5.0, + client_info=client_info, + ), + self.get_metadata_schema: gapic_v1.method.wrap_method( + self.get_metadata_schema, + default_timeout=5.0, + client_info=client_info, + ), + self.list_metadata_schemas: gapic_v1.method.wrap_method( + self.list_metadata_schemas, + default_timeout=5.0, + client_info=client_info, + ), + self.query_artifact_lineage_subgraph: gapic_v1.method.wrap_method( + self.query_artifact_lineage_subgraph, + default_timeout=None, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_metadata_store(self) -> Callable[ + [metadata_service.CreateMetadataStoreRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_metadata_store(self) -> Callable[ + [metadata_service.GetMetadataStoreRequest], + Union[ + metadata_store.MetadataStore, + Awaitable[metadata_store.MetadataStore] + ]]: + raise NotImplementedError() + + @property + def list_metadata_stores(self) -> Callable[ + [metadata_service.ListMetadataStoresRequest], + Union[ + metadata_service.ListMetadataStoresResponse, + Awaitable[metadata_service.ListMetadataStoresResponse] + ]]: + raise NotImplementedError() + + @property + def delete_metadata_store(self) -> Callable[ + [metadata_service.DeleteMetadataStoreRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def create_artifact(self) -> Callable[ + [metadata_service.CreateArtifactRequest], + Union[ + gca_artifact.Artifact, + Awaitable[gca_artifact.Artifact] + ]]: + raise NotImplementedError() + + @property + def get_artifact(self) -> Callable[ + [metadata_service.GetArtifactRequest], + Union[ + artifact.Artifact, + Awaitable[artifact.Artifact] + ]]: + raise NotImplementedError() + + @property + def list_artifacts(self) -> Callable[ + [metadata_service.ListArtifactsRequest], + Union[ + metadata_service.ListArtifactsResponse, + Awaitable[metadata_service.ListArtifactsResponse] + ]]: + raise NotImplementedError() + + @property + def update_artifact(self) -> Callable[ + [metadata_service.UpdateArtifactRequest], + Union[ + gca_artifact.Artifact, + Awaitable[gca_artifact.Artifact] + ]]: + raise NotImplementedError() + + @property + def create_context(self) -> Callable[ + [metadata_service.CreateContextRequest], + Union[ + gca_context.Context, + Awaitable[gca_context.Context] + ]]: + raise NotImplementedError() + + @property + def get_context(self) -> Callable[ + [metadata_service.GetContextRequest], + Union[ + context.Context, + Awaitable[context.Context] + ]]: + raise NotImplementedError() + + @property + def list_contexts(self) -> Callable[ + [metadata_service.ListContextsRequest], + Union[ + metadata_service.ListContextsResponse, + Awaitable[metadata_service.ListContextsResponse] + ]]: + raise NotImplementedError() + + @property + def update_context(self) -> Callable[ + [metadata_service.UpdateContextRequest], + Union[ + gca_context.Context, + Awaitable[gca_context.Context] + ]]: + raise 
NotImplementedError() + + @property + def delete_context(self) -> Callable[ + [metadata_service.DeleteContextRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def add_context_artifacts_and_executions(self) -> Callable[ + [metadata_service.AddContextArtifactsAndExecutionsRequest], + Union[ + metadata_service.AddContextArtifactsAndExecutionsResponse, + Awaitable[metadata_service.AddContextArtifactsAndExecutionsResponse] + ]]: + raise NotImplementedError() + + @property + def add_context_children(self) -> Callable[ + [metadata_service.AddContextChildrenRequest], + Union[ + metadata_service.AddContextChildrenResponse, + Awaitable[metadata_service.AddContextChildrenResponse] + ]]: + raise NotImplementedError() + + @property + def query_context_lineage_subgraph(self) -> Callable[ + [metadata_service.QueryContextLineageSubgraphRequest], + Union[ + lineage_subgraph.LineageSubgraph, + Awaitable[lineage_subgraph.LineageSubgraph] + ]]: + raise NotImplementedError() + + @property + def create_execution(self) -> Callable[ + [metadata_service.CreateExecutionRequest], + Union[ + gca_execution.Execution, + Awaitable[gca_execution.Execution] + ]]: + raise NotImplementedError() + + @property + def get_execution(self) -> Callable[ + [metadata_service.GetExecutionRequest], + Union[ + execution.Execution, + Awaitable[execution.Execution] + ]]: + raise NotImplementedError() + + @property + def list_executions(self) -> Callable[ + [metadata_service.ListExecutionsRequest], + Union[ + metadata_service.ListExecutionsResponse, + Awaitable[metadata_service.ListExecutionsResponse] + ]]: + raise NotImplementedError() + + @property + def update_execution(self) -> Callable[ + [metadata_service.UpdateExecutionRequest], + Union[ + gca_execution.Execution, + Awaitable[gca_execution.Execution] + ]]: + raise NotImplementedError() + + @property + def add_execution_events(self) -> Callable[ + [metadata_service.AddExecutionEventsRequest], + Union[ + metadata_service.AddExecutionEventsResponse, + Awaitable[metadata_service.AddExecutionEventsResponse] + ]]: + raise NotImplementedError() + + @property + def query_execution_inputs_and_outputs(self) -> Callable[ + [metadata_service.QueryExecutionInputsAndOutputsRequest], + Union[ + lineage_subgraph.LineageSubgraph, + Awaitable[lineage_subgraph.LineageSubgraph] + ]]: + raise NotImplementedError() + + @property + def create_metadata_schema(self) -> Callable[ + [metadata_service.CreateMetadataSchemaRequest], + Union[ + gca_metadata_schema.MetadataSchema, + Awaitable[gca_metadata_schema.MetadataSchema] + ]]: + raise NotImplementedError() + + @property + def get_metadata_schema(self) -> Callable[ + [metadata_service.GetMetadataSchemaRequest], + Union[ + metadata_schema.MetadataSchema, + Awaitable[metadata_schema.MetadataSchema] + ]]: + raise NotImplementedError() + + @property + def list_metadata_schemas(self) -> Callable[ + [metadata_service.ListMetadataSchemasRequest], + Union[ + metadata_service.ListMetadataSchemasResponse, + Awaitable[metadata_service.ListMetadataSchemasResponse] + ]]: + raise NotImplementedError() + + @property + def query_artifact_lineage_subgraph(self) -> Callable[ + [metadata_service.QueryArtifactLineageSubgraphRequest], + Union[ + lineage_subgraph.LineageSubgraph, + Awaitable[lineage_subgraph.LineageSubgraph] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'MetadataServiceTransport', +) diff --git 
a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py new file mode 100644 index 0000000000..5dbecd6d3e --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py @@ -0,0 +1,951 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import artifact +from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1beta1.types import context +from google.cloud.aiplatform_v1beta1.types import context as gca_context +from google.cloud.aiplatform_v1beta1.types import execution +from google.cloud.aiplatform_v1beta1.types import execution as gca_execution +from google.cloud.aiplatform_v1beta1.types import lineage_subgraph +from google.cloud.aiplatform_v1beta1.types import metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_service +from google.cloud.aiplatform_v1beta1.types import metadata_store +from google.longrunning import operations_pb2 # type: ignore +from .base import MetadataServiceTransport, DEFAULT_CLIENT_INFO + + +class MetadataServiceGrpcTransport(MetadataServiceTransport): + """gRPC backend transport for MetadataService. + + Service for reading and writing metadata entries. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. 
+
+        Args:
+            host (Optional[str]):
+                The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self-signed JWT should
+                be used for service account credentials.
+
+        Raises:
+          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+              creation failed for any reason.
+          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+              and ``credentials_file`` are passed.
+        """
+        self._grpc_channel = None
+        self._ssl_channel_credentials = ssl_channel_credentials
+        self._stubs: Dict[str, Callable] = {}
+        self._operations_client = None
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Ignore credentials if a channel was passed.
+            credentials = False
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+
+        else:
+            if api_mtls_endpoint:
+                host = api_mtls_endpoint
+
+                # Create SSL credentials with client_cert_source or application
+                # default SSL credentials.
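+                # A caller-supplied (deprecated) ``client_cert_source`` takes
+                # precedence; otherwise the application-default client SSL
+                # credentials are used.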
+                if client_cert_source:
+                    cert, key = client_cert_source()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+                else:
+                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+            else:
+                if client_cert_source_for_mtls and not ssl_channel_credentials:
+                    cert, key = client_cert_source_for_mtls()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+
+        # The base transport sets the host, credentials and scopes
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+        )
+
+        if not self._grpc_channel:
+            self._grpc_channel = type(self).create_channel(
+                self._host,
+                credentials=self._credentials,
+                credentials_file=credentials_file,
+                scopes=self._scopes,
+                ssl_credentials=self._ssl_channel_credentials,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        # Wrap messages. This must be done after self._grpc_channel exists
+        self._prep_wrapped_messages(client_info)
+
+    @classmethod
+    def create_channel(cls,
+                       host: str = 'aiplatform.googleapis.com',
+                       credentials: ga_credentials.Credentials = None,
+                       credentials_file: str = None,
+                       scopes: Optional[Sequence[str]] = None,
+                       quota_project_id: Optional[str] = None,
+                       **kwargs) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service.
+        """
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Sanity check: Only create a new client if we do not already have one.
+        if self._operations_client is None:
+            self._operations_client = operations_v1.OperationsClient(
+                self.grpc_channel
+            )
+
+        # Return the client from cache.
+ return self._operations_client + + @property + def create_metadata_store(self) -> Callable[ + [metadata_service.CreateMetadataStoreRequest], + operations_pb2.Operation]: + r"""Return a callable for the create metadata store method over gRPC. + + Initializes a MetadataStore, including allocation of + resources. + + Returns: + Callable[[~.CreateMetadataStoreRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_metadata_store' not in self._stubs: + self._stubs['create_metadata_store'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/CreateMetadataStore', + request_serializer=metadata_service.CreateMetadataStoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_metadata_store'] + + @property + def get_metadata_store(self) -> Callable[ + [metadata_service.GetMetadataStoreRequest], + metadata_store.MetadataStore]: + r"""Return a callable for the get metadata store method over gRPC. + + Retrieves a specific MetadataStore. + + Returns: + Callable[[~.GetMetadataStoreRequest], + ~.MetadataStore]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_metadata_store' not in self._stubs: + self._stubs['get_metadata_store'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/GetMetadataStore', + request_serializer=metadata_service.GetMetadataStoreRequest.serialize, + response_deserializer=metadata_store.MetadataStore.deserialize, + ) + return self._stubs['get_metadata_store'] + + @property + def list_metadata_stores(self) -> Callable[ + [metadata_service.ListMetadataStoresRequest], + metadata_service.ListMetadataStoresResponse]: + r"""Return a callable for the list metadata stores method over gRPC. + + Lists MetadataStores for a Location. + + Returns: + Callable[[~.ListMetadataStoresRequest], + ~.ListMetadataStoresResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_metadata_stores' not in self._stubs: + self._stubs['list_metadata_stores'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/ListMetadataStores', + request_serializer=metadata_service.ListMetadataStoresRequest.serialize, + response_deserializer=metadata_service.ListMetadataStoresResponse.deserialize, + ) + return self._stubs['list_metadata_stores'] + + @property + def delete_metadata_store(self) -> Callable[ + [metadata_service.DeleteMetadataStoreRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete metadata store method over gRPC. + + Deletes a single MetadataStore. + + Returns: + Callable[[~.DeleteMetadataStoreRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_metadata_store' not in self._stubs: + self._stubs['delete_metadata_store'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/DeleteMetadataStore', + request_serializer=metadata_service.DeleteMetadataStoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_metadata_store'] + + @property + def create_artifact(self) -> Callable[ + [metadata_service.CreateArtifactRequest], + gca_artifact.Artifact]: + r"""Return a callable for the create artifact method over gRPC. + + Creates an Artifact associated with a MetadataStore. + + Returns: + Callable[[~.CreateArtifactRequest], + ~.Artifact]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_artifact' not in self._stubs: + self._stubs['create_artifact'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/CreateArtifact', + request_serializer=metadata_service.CreateArtifactRequest.serialize, + response_deserializer=gca_artifact.Artifact.deserialize, + ) + return self._stubs['create_artifact'] + + @property + def get_artifact(self) -> Callable[ + [metadata_service.GetArtifactRequest], + artifact.Artifact]: + r"""Return a callable for the get artifact method over gRPC. + + Retrieves a specific Artifact. + + Returns: + Callable[[~.GetArtifactRequest], + ~.Artifact]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_artifact' not in self._stubs: + self._stubs['get_artifact'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/GetArtifact', + request_serializer=metadata_service.GetArtifactRequest.serialize, + response_deserializer=artifact.Artifact.deserialize, + ) + return self._stubs['get_artifact'] + + @property + def list_artifacts(self) -> Callable[ + [metadata_service.ListArtifactsRequest], + metadata_service.ListArtifactsResponse]: + r"""Return a callable for the list artifacts method over gRPC. + + Lists Artifacts in the MetadataStore. + + Returns: + Callable[[~.ListArtifactsRequest], + ~.ListArtifactsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_artifacts' not in self._stubs: + self._stubs['list_artifacts'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/ListArtifacts', + request_serializer=metadata_service.ListArtifactsRequest.serialize, + response_deserializer=metadata_service.ListArtifactsResponse.deserialize, + ) + return self._stubs['list_artifacts'] + + @property + def update_artifact(self) -> Callable[ + [metadata_service.UpdateArtifactRequest], + gca_artifact.Artifact]: + r"""Return a callable for the update artifact method over gRPC. + + Updates a stored Artifact. 
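+
+        Illustrative use of the returned callable (a sketch; the request
+        fields shown are assumptions)::
+
+            rpc = transport.update_artifact
+            updated = rpc(metadata_service.UpdateArtifactRequest(
+                artifact=gca_artifact.Artifact(display_name='renamed'),
+            ))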
+ + Returns: + Callable[[~.UpdateArtifactRequest], + ~.Artifact]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_artifact' not in self._stubs: + self._stubs['update_artifact'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/UpdateArtifact', + request_serializer=metadata_service.UpdateArtifactRequest.serialize, + response_deserializer=gca_artifact.Artifact.deserialize, + ) + return self._stubs['update_artifact'] + + @property + def create_context(self) -> Callable[ + [metadata_service.CreateContextRequest], + gca_context.Context]: + r"""Return a callable for the create context method over gRPC. + + Creates a Context associated with a MetadataStore. + + Returns: + Callable[[~.CreateContextRequest], + ~.Context]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_context' not in self._stubs: + self._stubs['create_context'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/CreateContext', + request_serializer=metadata_service.CreateContextRequest.serialize, + response_deserializer=gca_context.Context.deserialize, + ) + return self._stubs['create_context'] + + @property + def get_context(self) -> Callable[ + [metadata_service.GetContextRequest], + context.Context]: + r"""Return a callable for the get context method over gRPC. + + Retrieves a specific Context. + + Returns: + Callable[[~.GetContextRequest], + ~.Context]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_context' not in self._stubs: + self._stubs['get_context'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/GetContext', + request_serializer=metadata_service.GetContextRequest.serialize, + response_deserializer=context.Context.deserialize, + ) + return self._stubs['get_context'] + + @property + def list_contexts(self) -> Callable[ + [metadata_service.ListContextsRequest], + metadata_service.ListContextsResponse]: + r"""Return a callable for the list contexts method over gRPC. + + Lists Contexts on the MetadataStore. + + Returns: + Callable[[~.ListContextsRequest], + ~.ListContextsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
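+        # The stub is created once and cached in self._stubs, so repeated
+        # accesses of this property reuse the same callable.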
+ if 'list_contexts' not in self._stubs: + self._stubs['list_contexts'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/ListContexts', + request_serializer=metadata_service.ListContextsRequest.serialize, + response_deserializer=metadata_service.ListContextsResponse.deserialize, + ) + return self._stubs['list_contexts'] + + @property + def update_context(self) -> Callable[ + [metadata_service.UpdateContextRequest], + gca_context.Context]: + r"""Return a callable for the update context method over gRPC. + + Updates a stored Context. + + Returns: + Callable[[~.UpdateContextRequest], + ~.Context]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_context' not in self._stubs: + self._stubs['update_context'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/UpdateContext', + request_serializer=metadata_service.UpdateContextRequest.serialize, + response_deserializer=gca_context.Context.deserialize, + ) + return self._stubs['update_context'] + + @property + def delete_context(self) -> Callable[ + [metadata_service.DeleteContextRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete context method over gRPC. + + Deletes a stored Context. + + Returns: + Callable[[~.DeleteContextRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_context' not in self._stubs: + self._stubs['delete_context'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/DeleteContext', + request_serializer=metadata_service.DeleteContextRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_context'] + + @property + def add_context_artifacts_and_executions(self) -> Callable[ + [metadata_service.AddContextArtifactsAndExecutionsRequest], + metadata_service.AddContextArtifactsAndExecutionsResponse]: + r"""Return a callable for the add context artifacts and + executions method over gRPC. + + Adds a set of Artifacts and Executions to a Context. + If any of the Artifacts or Executions have already been + added to a Context, they are simply skipped. + + Returns: + Callable[[~.AddContextArtifactsAndExecutionsRequest], + ~.AddContextArtifactsAndExecutionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
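+        # Per the docstring above, members already attached to the Context
+        # are skipped, so repeating this call does not duplicate them.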
+ if 'add_context_artifacts_and_executions' not in self._stubs: + self._stubs['add_context_artifacts_and_executions'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/AddContextArtifactsAndExecutions', + request_serializer=metadata_service.AddContextArtifactsAndExecutionsRequest.serialize, + response_deserializer=metadata_service.AddContextArtifactsAndExecutionsResponse.deserialize, + ) + return self._stubs['add_context_artifacts_and_executions'] + + @property + def add_context_children(self) -> Callable[ + [metadata_service.AddContextChildrenRequest], + metadata_service.AddContextChildrenResponse]: + r"""Return a callable for the add context children method over gRPC. + + Adds a set of Contexts as children to a parent Context. If any + of the child Contexts have already been added to the parent + Context, they are simply skipped. If this call would create a + cycle or cause any Context to have more than 10 parents, the + request will fail with an INVALID_ARGUMENT error. + + Returns: + Callable[[~.AddContextChildrenRequest], + ~.AddContextChildrenResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'add_context_children' not in self._stubs: + self._stubs['add_context_children'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/AddContextChildren', + request_serializer=metadata_service.AddContextChildrenRequest.serialize, + response_deserializer=metadata_service.AddContextChildrenResponse.deserialize, + ) + return self._stubs['add_context_children'] + + @property + def query_context_lineage_subgraph(self) -> Callable[ + [metadata_service.QueryContextLineageSubgraphRequest], + lineage_subgraph.LineageSubgraph]: + r"""Return a callable for the query context lineage subgraph method over gRPC. + + Retrieves Artifacts and Executions within the + specified Context, connected by Event edges and returned + as a LineageSubgraph. + + Returns: + Callable[[~.QueryContextLineageSubgraphRequest], + ~.LineageSubgraph]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'query_context_lineage_subgraph' not in self._stubs: + self._stubs['query_context_lineage_subgraph'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/QueryContextLineageSubgraph', + request_serializer=metadata_service.QueryContextLineageSubgraphRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) + return self._stubs['query_context_lineage_subgraph'] + + @property + def create_execution(self) -> Callable[ + [metadata_service.CreateExecutionRequest], + gca_execution.Execution]: + r"""Return a callable for the create execution method over gRPC. + + Creates an Execution associated with a MetadataStore. + + Returns: + Callable[[~.CreateExecutionRequest], + ~.Execution]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
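+        # Like every stub on this transport, this one is built over the
+        # single gRPC channel that __init__ established.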
+ if 'create_execution' not in self._stubs: + self._stubs['create_execution'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/CreateExecution', + request_serializer=metadata_service.CreateExecutionRequest.serialize, + response_deserializer=gca_execution.Execution.deserialize, + ) + return self._stubs['create_execution'] + + @property + def get_execution(self) -> Callable[ + [metadata_service.GetExecutionRequest], + execution.Execution]: + r"""Return a callable for the get execution method over gRPC. + + Retrieves a specific Execution. + + Returns: + Callable[[~.GetExecutionRequest], + ~.Execution]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_execution' not in self._stubs: + self._stubs['get_execution'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/GetExecution', + request_serializer=metadata_service.GetExecutionRequest.serialize, + response_deserializer=execution.Execution.deserialize, + ) + return self._stubs['get_execution'] + + @property + def list_executions(self) -> Callable[ + [metadata_service.ListExecutionsRequest], + metadata_service.ListExecutionsResponse]: + r"""Return a callable for the list executions method over gRPC. + + Lists Executions in the MetadataStore. + + Returns: + Callable[[~.ListExecutionsRequest], + ~.ListExecutionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_executions' not in self._stubs: + self._stubs['list_executions'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/ListExecutions', + request_serializer=metadata_service.ListExecutionsRequest.serialize, + response_deserializer=metadata_service.ListExecutionsResponse.deserialize, + ) + return self._stubs['list_executions'] + + @property + def update_execution(self) -> Callable[ + [metadata_service.UpdateExecutionRequest], + gca_execution.Execution]: + r"""Return a callable for the update execution method over gRPC. + + Updates a stored Execution. + + Returns: + Callable[[~.UpdateExecutionRequest], + ~.Execution]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_execution' not in self._stubs: + self._stubs['update_execution'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/UpdateExecution', + request_serializer=metadata_service.UpdateExecutionRequest.serialize, + response_deserializer=gca_execution.Execution.deserialize, + ) + return self._stubs['update_execution'] + + @property + def add_execution_events(self) -> Callable[ + [metadata_service.AddExecutionEventsRequest], + metadata_service.AddExecutionEventsResponse]: + r"""Return a callable for the add execution events method over gRPC. + + Adds Events to the specified Execution. An Event + indicates whether an Artifact was used as an input or + output for an Execution. 
If an Event already exists + between the Execution and the Artifact, the Event is + skipped. + + Returns: + Callable[[~.AddExecutionEventsRequest], + ~.AddExecutionEventsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'add_execution_events' not in self._stubs: + self._stubs['add_execution_events'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/AddExecutionEvents', + request_serializer=metadata_service.AddExecutionEventsRequest.serialize, + response_deserializer=metadata_service.AddExecutionEventsResponse.deserialize, + ) + return self._stubs['add_execution_events'] + + @property + def query_execution_inputs_and_outputs(self) -> Callable[ + [metadata_service.QueryExecutionInputsAndOutputsRequest], + lineage_subgraph.LineageSubgraph]: + r"""Return a callable for the query execution inputs and + outputs method over gRPC. + + Obtains the set of input and output Artifacts for + this Execution, in the form of LineageSubgraph that also + contains the Execution and connecting Events. + + Returns: + Callable[[~.QueryExecutionInputsAndOutputsRequest], + ~.LineageSubgraph]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'query_execution_inputs_and_outputs' not in self._stubs: + self._stubs['query_execution_inputs_and_outputs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/QueryExecutionInputsAndOutputs', + request_serializer=metadata_service.QueryExecutionInputsAndOutputsRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) + return self._stubs['query_execution_inputs_and_outputs'] + + @property + def create_metadata_schema(self) -> Callable[ + [metadata_service.CreateMetadataSchemaRequest], + gca_metadata_schema.MetadataSchema]: + r"""Return a callable for the create metadata schema method over gRPC. + + Creates a MetadataSchema. + + Returns: + Callable[[~.CreateMetadataSchemaRequest], + ~.MetadataSchema]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_metadata_schema' not in self._stubs: + self._stubs['create_metadata_schema'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/CreateMetadataSchema', + request_serializer=metadata_service.CreateMetadataSchemaRequest.serialize, + response_deserializer=gca_metadata_schema.MetadataSchema.deserialize, + ) + return self._stubs['create_metadata_schema'] + + @property + def get_metadata_schema(self) -> Callable[ + [metadata_service.GetMetadataSchemaRequest], + metadata_schema.MetadataSchema]: + r"""Return a callable for the get metadata schema method over gRPC. + + Retrieves a specific MetadataSchema. + + Returns: + Callable[[~.GetMetadataSchemaRequest], + ~.MetadataSchema]: + A function that, when called, will call the underlying RPC + on the server. 
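+
+        Illustrative use (a sketch; the resource name is an assumption)::
+
+            rpc = transport.get_metadata_schema
+            schema = rpc(metadata_service.GetMetadataSchemaRequest(
+                name='projects/p/locations/l/metadataStores/s/metadataSchemas/x',
+            ))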
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_metadata_schema' not in self._stubs: + self._stubs['get_metadata_schema'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/GetMetadataSchema', + request_serializer=metadata_service.GetMetadataSchemaRequest.serialize, + response_deserializer=metadata_schema.MetadataSchema.deserialize, + ) + return self._stubs['get_metadata_schema'] + + @property + def list_metadata_schemas(self) -> Callable[ + [metadata_service.ListMetadataSchemasRequest], + metadata_service.ListMetadataSchemasResponse]: + r"""Return a callable for the list metadata schemas method over gRPC. + + Lists MetadataSchemas. + + Returns: + Callable[[~.ListMetadataSchemasRequest], + ~.ListMetadataSchemasResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_metadata_schemas' not in self._stubs: + self._stubs['list_metadata_schemas'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/ListMetadataSchemas', + request_serializer=metadata_service.ListMetadataSchemasRequest.serialize, + response_deserializer=metadata_service.ListMetadataSchemasResponse.deserialize, + ) + return self._stubs['list_metadata_schemas'] + + @property + def query_artifact_lineage_subgraph(self) -> Callable[ + [metadata_service.QueryArtifactLineageSubgraphRequest], + lineage_subgraph.LineageSubgraph]: + r"""Return a callable for the query artifact lineage + subgraph method over gRPC. + + Retrieves lineage of an Artifact represented through + Artifacts and Executions connected by Event edges and + returned as a LineageSubgraph. + + Returns: + Callable[[~.QueryArtifactLineageSubgraphRequest], + ~.LineageSubgraph]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'query_artifact_lineage_subgraph' not in self._stubs: + self._stubs['query_artifact_lineage_subgraph'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/QueryArtifactLineageSubgraph', + request_serializer=metadata_service.QueryArtifactLineageSubgraphRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) + return self._stubs['query_artifact_lineage_subgraph'] + + +__all__ = ( + 'MetadataServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..419be7161d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py @@ -0,0 +1,955 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import grpc_helpers_async  # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+import packaging.version
+
+import grpc  # type: ignore
+from grpc.experimental import aio  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import artifact
+from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact
+from google.cloud.aiplatform_v1beta1.types import context
+from google.cloud.aiplatform_v1beta1.types import context as gca_context
+from google.cloud.aiplatform_v1beta1.types import execution
+from google.cloud.aiplatform_v1beta1.types import execution as gca_execution
+from google.cloud.aiplatform_v1beta1.types import lineage_subgraph
+from google.cloud.aiplatform_v1beta1.types import metadata_schema
+from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema
+from google.cloud.aiplatform_v1beta1.types import metadata_service
+from google.cloud.aiplatform_v1beta1.types import metadata_store
+from google.longrunning import operations_pb2  # type: ignore
+from .base import MetadataServiceTransport, DEFAULT_CLIENT_INFO
+from .grpc import MetadataServiceGrpcTransport
+
+
+class MetadataServiceGrpcAsyncIOTransport(MetadataServiceTransport):
+    """gRPC AsyncIO backend transport for MetadataService.
+
+    Service for reading and writing metadata entries.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(cls,
+                       host: str = 'aiplatform.googleapis.com',
+                       credentials: ga_credentials.Credentials = None,
+                       credentials_file: Optional[str] = None,
+                       scopes: Optional[Sequence[str]] = None,
+                       quota_project_id: Optional[str] = None,
+                       **kwargs) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    def __init__(self, *,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            channel: aio.Channel = None,
+            api_mtls_endpoint: str = None,
+            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+            ssl_channel_credentials: grpc.ChannelCredentials = None,
+            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+            quota_project_id=None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self-signed JWT should
+                be used for service account credentials.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_metadata_store(self) -> Callable[ + [metadata_service.CreateMetadataStoreRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create metadata store method over gRPC. + + Initializes a MetadataStore, including allocation of + resources. + + Returns: + Callable[[~.CreateMetadataStoreRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
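+        # On the asyncio channel the returned callable produces an awaitable
+        # call object, so callers ``await`` the result.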
+ if 'create_metadata_store' not in self._stubs: + self._stubs['create_metadata_store'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/CreateMetadataStore', + request_serializer=metadata_service.CreateMetadataStoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_metadata_store'] + + @property + def get_metadata_store(self) -> Callable[ + [metadata_service.GetMetadataStoreRequest], + Awaitable[metadata_store.MetadataStore]]: + r"""Return a callable for the get metadata store method over gRPC. + + Retrieves a specific MetadataStore. + + Returns: + Callable[[~.GetMetadataStoreRequest], + Awaitable[~.MetadataStore]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_metadata_store' not in self._stubs: + self._stubs['get_metadata_store'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/GetMetadataStore', + request_serializer=metadata_service.GetMetadataStoreRequest.serialize, + response_deserializer=metadata_store.MetadataStore.deserialize, + ) + return self._stubs['get_metadata_store'] + + @property + def list_metadata_stores(self) -> Callable[ + [metadata_service.ListMetadataStoresRequest], + Awaitable[metadata_service.ListMetadataStoresResponse]]: + r"""Return a callable for the list metadata stores method over gRPC. + + Lists MetadataStores for a Location. + + Returns: + Callable[[~.ListMetadataStoresRequest], + Awaitable[~.ListMetadataStoresResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_metadata_stores' not in self._stubs: + self._stubs['list_metadata_stores'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/ListMetadataStores', + request_serializer=metadata_service.ListMetadataStoresRequest.serialize, + response_deserializer=metadata_service.ListMetadataStoresResponse.deserialize, + ) + return self._stubs['list_metadata_stores'] + + @property + def delete_metadata_store(self) -> Callable[ + [metadata_service.DeleteMetadataStoreRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete metadata store method over gRPC. + + Deletes a single MetadataStore. + + Returns: + Callable[[~.DeleteMetadataStoreRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_metadata_store' not in self._stubs: + self._stubs['delete_metadata_store'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/DeleteMetadataStore', + request_serializer=metadata_service.DeleteMetadataStoreRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_metadata_store'] + + @property + def create_artifact(self) -> Callable[ + [metadata_service.CreateArtifactRequest], + Awaitable[gca_artifact.Artifact]]: + r"""Return a callable for the create artifact method over gRPC. + + Creates an Artifact associated with a MetadataStore. + + Returns: + Callable[[~.CreateArtifactRequest], + Awaitable[~.Artifact]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_artifact' not in self._stubs: + self._stubs['create_artifact'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/CreateArtifact', + request_serializer=metadata_service.CreateArtifactRequest.serialize, + response_deserializer=gca_artifact.Artifact.deserialize, + ) + return self._stubs['create_artifact'] + + @property + def get_artifact(self) -> Callable[ + [metadata_service.GetArtifactRequest], + Awaitable[artifact.Artifact]]: + r"""Return a callable for the get artifact method over gRPC. + + Retrieves a specific Artifact. + + Returns: + Callable[[~.GetArtifactRequest], + Awaitable[~.Artifact]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_artifact' not in self._stubs: + self._stubs['get_artifact'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/GetArtifact', + request_serializer=metadata_service.GetArtifactRequest.serialize, + response_deserializer=artifact.Artifact.deserialize, + ) + return self._stubs['get_artifact'] + + @property + def list_artifacts(self) -> Callable[ + [metadata_service.ListArtifactsRequest], + Awaitable[metadata_service.ListArtifactsResponse]]: + r"""Return a callable for the list artifacts method over gRPC. + + Lists Artifacts in the MetadataStore. + + Returns: + Callable[[~.ListArtifactsRequest], + Awaitable[~.ListArtifactsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_artifacts' not in self._stubs: + self._stubs['list_artifacts'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/ListArtifacts', + request_serializer=metadata_service.ListArtifactsRequest.serialize, + response_deserializer=metadata_service.ListArtifactsResponse.deserialize, + ) + return self._stubs['list_artifacts'] + + @property + def update_artifact(self) -> Callable[ + [metadata_service.UpdateArtifactRequest], + Awaitable[gca_artifact.Artifact]]: + r"""Return a callable for the update artifact method over gRPC. + + Updates a stored Artifact. 
+ + Returns: + Callable[[~.UpdateArtifactRequest], + Awaitable[~.Artifact]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_artifact' not in self._stubs: + self._stubs['update_artifact'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/UpdateArtifact', + request_serializer=metadata_service.UpdateArtifactRequest.serialize, + response_deserializer=gca_artifact.Artifact.deserialize, + ) + return self._stubs['update_artifact'] + + @property + def create_context(self) -> Callable[ + [metadata_service.CreateContextRequest], + Awaitable[gca_context.Context]]: + r"""Return a callable for the create context method over gRPC. + + Creates a Context associated with a MetadataStore. + + Returns: + Callable[[~.CreateContextRequest], + Awaitable[~.Context]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_context' not in self._stubs: + self._stubs['create_context'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/CreateContext', + request_serializer=metadata_service.CreateContextRequest.serialize, + response_deserializer=gca_context.Context.deserialize, + ) + return self._stubs['create_context'] + + @property + def get_context(self) -> Callable[ + [metadata_service.GetContextRequest], + Awaitable[context.Context]]: + r"""Return a callable for the get context method over gRPC. + + Retrieves a specific Context. + + Returns: + Callable[[~.GetContextRequest], + Awaitable[~.Context]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_context' not in self._stubs: + self._stubs['get_context'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/GetContext', + request_serializer=metadata_service.GetContextRequest.serialize, + response_deserializer=context.Context.deserialize, + ) + return self._stubs['get_context'] + + @property + def list_contexts(self) -> Callable[ + [metadata_service.ListContextsRequest], + Awaitable[metadata_service.ListContextsResponse]]: + r"""Return a callable for the list contexts method over gRPC. + + Lists Contexts on the MetadataStore. + + Returns: + Callable[[~.ListContextsRequest], + Awaitable[~.ListContextsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_contexts' not in self._stubs: + self._stubs['list_contexts'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/ListContexts', + request_serializer=metadata_service.ListContextsRequest.serialize, + response_deserializer=metadata_service.ListContextsResponse.deserialize, + ) + return self._stubs['list_contexts'] + + @property + def update_context(self) -> Callable[ + [metadata_service.UpdateContextRequest], + Awaitable[gca_context.Context]]: + r"""Return a callable for the update context method over gRPC. + + Updates a stored Context. + + Returns: + Callable[[~.UpdateContextRequest], + Awaitable[~.Context]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_context' not in self._stubs: + self._stubs['update_context'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/UpdateContext', + request_serializer=metadata_service.UpdateContextRequest.serialize, + response_deserializer=gca_context.Context.deserialize, + ) + return self._stubs['update_context'] + + @property + def delete_context(self) -> Callable[ + [metadata_service.DeleteContextRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete context method over gRPC. + + Deletes a stored Context. + + Returns: + Callable[[~.DeleteContextRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_context' not in self._stubs: + self._stubs['delete_context'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/DeleteContext', + request_serializer=metadata_service.DeleteContextRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_context'] + + @property + def add_context_artifacts_and_executions(self) -> Callable[ + [metadata_service.AddContextArtifactsAndExecutionsRequest], + Awaitable[metadata_service.AddContextArtifactsAndExecutionsResponse]]: + r"""Return a callable for the add context artifacts and + executions method over gRPC. + + Adds a set of Artifacts and Executions to a Context. + If any of the Artifacts or Executions have already been + added to a Context, they are simply skipped. + + Returns: + Callable[[~.AddContextArtifactsAndExecutionsRequest], + Awaitable[~.AddContextArtifactsAndExecutionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'add_context_artifacts_and_executions' not in self._stubs: + self._stubs['add_context_artifacts_and_executions'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/AddContextArtifactsAndExecutions', + request_serializer=metadata_service.AddContextArtifactsAndExecutionsRequest.serialize, + response_deserializer=metadata_service.AddContextArtifactsAndExecutionsResponse.deserialize, + ) + return self._stubs['add_context_artifacts_and_executions'] + + @property + def add_context_children(self) -> Callable[ + [metadata_service.AddContextChildrenRequest], + Awaitable[metadata_service.AddContextChildrenResponse]]: + r"""Return a callable for the add context children method over gRPC. + + Adds a set of Contexts as children to a parent Context. If any + of the child Contexts have already been added to the parent + Context, they are simply skipped. If this call would create a + cycle or cause any Context to have more than 10 parents, the + request will fail with an INVALID_ARGUMENT error. + + Returns: + Callable[[~.AddContextChildrenRequest], + Awaitable[~.AddContextChildrenResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'add_context_children' not in self._stubs: + self._stubs['add_context_children'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/AddContextChildren', + request_serializer=metadata_service.AddContextChildrenRequest.serialize, + response_deserializer=metadata_service.AddContextChildrenResponse.deserialize, + ) + return self._stubs['add_context_children'] + + @property + def query_context_lineage_subgraph(self) -> Callable[ + [metadata_service.QueryContextLineageSubgraphRequest], + Awaitable[lineage_subgraph.LineageSubgraph]]: + r"""Return a callable for the query context lineage subgraph method over gRPC. + + Retrieves Artifacts and Executions within the + specified Context, connected by Event edges and returned + as a LineageSubgraph. + + Returns: + Callable[[~.QueryContextLineageSubgraphRequest], + Awaitable[~.LineageSubgraph]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'query_context_lineage_subgraph' not in self._stubs: + self._stubs['query_context_lineage_subgraph'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/QueryContextLineageSubgraph', + request_serializer=metadata_service.QueryContextLineageSubgraphRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) + return self._stubs['query_context_lineage_subgraph'] + + @property + def create_execution(self) -> Callable[ + [metadata_service.CreateExecutionRequest], + Awaitable[gca_execution.Execution]]: + r"""Return a callable for the create execution method over gRPC. + + Creates an Execution associated with a MetadataStore. + + Returns: + Callable[[~.CreateExecutionRequest], + Awaitable[~.Execution]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_execution' not in self._stubs: + self._stubs['create_execution'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/CreateExecution', + request_serializer=metadata_service.CreateExecutionRequest.serialize, + response_deserializer=gca_execution.Execution.deserialize, + ) + return self._stubs['create_execution'] + + @property + def get_execution(self) -> Callable[ + [metadata_service.GetExecutionRequest], + Awaitable[execution.Execution]]: + r"""Return a callable for the get execution method over gRPC. + + Retrieves a specific Execution. + + Returns: + Callable[[~.GetExecutionRequest], + Awaitable[~.Execution]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_execution' not in self._stubs: + self._stubs['get_execution'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/GetExecution', + request_serializer=metadata_service.GetExecutionRequest.serialize, + response_deserializer=execution.Execution.deserialize, + ) + return self._stubs['get_execution'] + + @property + def list_executions(self) -> Callable[ + [metadata_service.ListExecutionsRequest], + Awaitable[metadata_service.ListExecutionsResponse]]: + r"""Return a callable for the list executions method over gRPC. + + Lists Executions in the MetadataStore. + + Returns: + Callable[[~.ListExecutionsRequest], + Awaitable[~.ListExecutionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_executions' not in self._stubs: + self._stubs['list_executions'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/ListExecutions', + request_serializer=metadata_service.ListExecutionsRequest.serialize, + response_deserializer=metadata_service.ListExecutionsResponse.deserialize, + ) + return self._stubs['list_executions'] + + @property + def update_execution(self) -> Callable[ + [metadata_service.UpdateExecutionRequest], + Awaitable[gca_execution.Execution]]: + r"""Return a callable for the update execution method over gRPC. + + Updates a stored Execution. + + Returns: + Callable[[~.UpdateExecutionRequest], + Awaitable[~.Execution]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'update_execution' not in self._stubs: + self._stubs['update_execution'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/UpdateExecution', + request_serializer=metadata_service.UpdateExecutionRequest.serialize, + response_deserializer=gca_execution.Execution.deserialize, + ) + return self._stubs['update_execution'] + + @property + def add_execution_events(self) -> Callable[ + [metadata_service.AddExecutionEventsRequest], + Awaitable[metadata_service.AddExecutionEventsResponse]]: + r"""Return a callable for the add execution events method over gRPC. + + Adds Events to the specified Execution. An Event + indicates whether an Artifact was used as an input or + output for an Execution. If an Event already exists + between the Execution and the Artifact, the Event is + skipped. + + Returns: + Callable[[~.AddExecutionEventsRequest], + Awaitable[~.AddExecutionEventsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'add_execution_events' not in self._stubs: + self._stubs['add_execution_events'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/AddExecutionEvents', + request_serializer=metadata_service.AddExecutionEventsRequest.serialize, + response_deserializer=metadata_service.AddExecutionEventsResponse.deserialize, + ) + return self._stubs['add_execution_events'] + + @property + def query_execution_inputs_and_outputs(self) -> Callable[ + [metadata_service.QueryExecutionInputsAndOutputsRequest], + Awaitable[lineage_subgraph.LineageSubgraph]]: + r"""Return a callable for the query execution inputs and + outputs method over gRPC. + + Obtains the set of input and output Artifacts for + this Execution, in the form of LineageSubgraph that also + contains the Execution and connecting Events. + + Returns: + Callable[[~.QueryExecutionInputsAndOutputsRequest], + Awaitable[~.LineageSubgraph]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'query_execution_inputs_and_outputs' not in self._stubs: + self._stubs['query_execution_inputs_and_outputs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/QueryExecutionInputsAndOutputs', + request_serializer=metadata_service.QueryExecutionInputsAndOutputsRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) + return self._stubs['query_execution_inputs_and_outputs'] + + @property + def create_metadata_schema(self) -> Callable[ + [metadata_service.CreateMetadataSchemaRequest], + Awaitable[gca_metadata_schema.MetadataSchema]]: + r"""Return a callable for the create metadata schema method over gRPC. + + Creates a MetadataSchema. + + Returns: + Callable[[~.CreateMetadataSchemaRequest], + Awaitable[~.MetadataSchema]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_metadata_schema' not in self._stubs: + self._stubs['create_metadata_schema'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/CreateMetadataSchema', + request_serializer=metadata_service.CreateMetadataSchemaRequest.serialize, + response_deserializer=gca_metadata_schema.MetadataSchema.deserialize, + ) + return self._stubs['create_metadata_schema'] + + @property + def get_metadata_schema(self) -> Callable[ + [metadata_service.GetMetadataSchemaRequest], + Awaitable[metadata_schema.MetadataSchema]]: + r"""Return a callable for the get metadata schema method over gRPC. + + Retrieves a specific MetadataSchema. + + Returns: + Callable[[~.GetMetadataSchemaRequest], + Awaitable[~.MetadataSchema]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_metadata_schema' not in self._stubs: + self._stubs['get_metadata_schema'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/GetMetadataSchema', + request_serializer=metadata_service.GetMetadataSchemaRequest.serialize, + response_deserializer=metadata_schema.MetadataSchema.deserialize, + ) + return self._stubs['get_metadata_schema'] + + @property + def list_metadata_schemas(self) -> Callable[ + [metadata_service.ListMetadataSchemasRequest], + Awaitable[metadata_service.ListMetadataSchemasResponse]]: + r"""Return a callable for the list metadata schemas method over gRPC. + + Lists MetadataSchemas. + + Returns: + Callable[[~.ListMetadataSchemasRequest], + Awaitable[~.ListMetadataSchemasResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_metadata_schemas' not in self._stubs: + self._stubs['list_metadata_schemas'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/ListMetadataSchemas', + request_serializer=metadata_service.ListMetadataSchemasRequest.serialize, + response_deserializer=metadata_service.ListMetadataSchemasResponse.deserialize, + ) + return self._stubs['list_metadata_schemas'] + + @property + def query_artifact_lineage_subgraph(self) -> Callable[ + [metadata_service.QueryArtifactLineageSubgraphRequest], + Awaitable[lineage_subgraph.LineageSubgraph]]: + r"""Return a callable for the query artifact lineage + subgraph method over gRPC. + + Retrieves lineage of an Artifact represented through + Artifacts and Executions connected by Event edges and + returned as a LineageSubgraph. + + Returns: + Callable[[~.QueryArtifactLineageSubgraphRequest], + Awaitable[~.LineageSubgraph]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'query_artifact_lineage_subgraph' not in self._stubs: + self._stubs['query_artifact_lineage_subgraph'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MetadataService/QueryArtifactLineageSubgraph', + request_serializer=metadata_service.QueryArtifactLineageSubgraphRequest.serialize, + response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, + ) + return self._stubs['query_artifact_lineage_subgraph'] + + +__all__ = ( + 'MetadataServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/__init__.py new file mode 100644 index 0000000000..b32b10b1d7 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import MigrationServiceClient +from .async_client import MigrationServiceAsyncClient + +__all__ = ( + 'MigrationServiceClient', + 'MigrationServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py new file mode 100644 index 0000000000..123a0fd369 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py @@ -0,0 +1,376 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+from collections import OrderedDict
+import functools
+import re
+from typing import Dict, Sequence, Tuple, Type, Union
+import pkg_resources
+
+import google.api_core.client_options as ClientOptions  # type: ignore
+from google.api_core import exceptions as core_exceptions  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import retry as retries  # type: ignore
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.oauth2 import service_account  # type: ignore
+
+from google.api_core import operation  # type: ignore
+from google.api_core import operation_async  # type: ignore
+from google.cloud.aiplatform_v1beta1.services.migration_service import pagers
+from google.cloud.aiplatform_v1beta1.types import migratable_resource
+from google.cloud.aiplatform_v1beta1.types import migration_service
+from .transports.base import MigrationServiceTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc_asyncio import MigrationServiceGrpcAsyncIOTransport
+from .client import MigrationServiceClient
+
+
+class MigrationServiceAsyncClient:
+    """A service that migrates resources from automl.googleapis.com,
+    datalabeling.googleapis.com and ml.googleapis.com to Vertex AI.
+    """
+
+    _client: MigrationServiceClient
+
+    DEFAULT_ENDPOINT = MigrationServiceClient.DEFAULT_ENDPOINT
+    DEFAULT_MTLS_ENDPOINT = MigrationServiceClient.DEFAULT_MTLS_ENDPOINT
+
+    annotated_dataset_path = staticmethod(MigrationServiceClient.annotated_dataset_path)
+    parse_annotated_dataset_path = staticmethod(MigrationServiceClient.parse_annotated_dataset_path)
+    dataset_path = staticmethod(MigrationServiceClient.dataset_path)
+    parse_dataset_path = staticmethod(MigrationServiceClient.parse_dataset_path)
+    model_path = staticmethod(MigrationServiceClient.model_path)
+    parse_model_path = staticmethod(MigrationServiceClient.parse_model_path)
+    version_path = staticmethod(MigrationServiceClient.version_path)
+    parse_version_path = staticmethod(MigrationServiceClient.parse_version_path)
+    common_billing_account_path = staticmethod(MigrationServiceClient.common_billing_account_path)
+    parse_common_billing_account_path = staticmethod(MigrationServiceClient.parse_common_billing_account_path)
+    common_folder_path = staticmethod(MigrationServiceClient.common_folder_path)
+    parse_common_folder_path = staticmethod(MigrationServiceClient.parse_common_folder_path)
+    common_organization_path = staticmethod(MigrationServiceClient.common_organization_path)
+    parse_common_organization_path = staticmethod(MigrationServiceClient.parse_common_organization_path)
+    common_project_path = staticmethod(MigrationServiceClient.common_project_path)
+    parse_common_project_path = staticmethod(MigrationServiceClient.parse_common_project_path)
+    common_location_path = staticmethod(MigrationServiceClient.common_location_path)
+    parse_common_location_path = staticmethod(MigrationServiceClient.parse_common_location_path)
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        info.
+ + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MigrationServiceAsyncClient: The constructed client. + """ + return MigrationServiceClient.from_service_account_info.__func__(MigrationServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MigrationServiceAsyncClient: The constructed client. + """ + return MigrationServiceClient.from_service_account_file.__func__(MigrationServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> MigrationServiceTransport: + """Returns the transport used by the client instance. + + Returns: + MigrationServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(MigrationServiceClient).get_transport_class, type(MigrationServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, MigrationServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the migration service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.MigrationServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. 
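+
+        Example (a minimal sketch; assumes Application Default Credentials
+        are configured in the environment, and the regional endpoint shown
+        is illustrative)::
+
+            from google.api_core.client_options import ClientOptions
+
+            client = MigrationServiceAsyncClient(
+                client_options=ClientOptions(
+                    api_endpoint="us-central1-aiplatform.googleapis.com",
+                ),
+            )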
+ """ + self._client = MigrationServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def search_migratable_resources(self, + request: migration_service.SearchMigratableResourcesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.SearchMigratableResourcesAsyncPager: + r"""Searches all of the resources in + automl.googleapis.com, datalabeling.googleapis.com and + ml.googleapis.com that can be migrated to Vertex AI's + given location. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest`): + The request object. Request message for + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. + parent (:class:`str`): + Required. The location that the migratable resources + should be searched from. It's the Vertex AI location + that the resources can be migrated to, not the + resources' original location. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.migration_service.pagers.SearchMigratableResourcesAsyncPager: + Response message for + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = migration_service.SearchMigratableResourcesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.search_migratable_resources, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.SearchMigratableResourcesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+        return response
+
+    async def batch_migrate_resources(self,
+            request: migration_service.BatchMigrateResourcesRequest = None,
+            *,
+            parent: str = None,
+            migrate_resource_requests: Sequence[migration_service.MigrateResourceRequest] = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> operation_async.AsyncOperation:
+        r"""Batch migrates resources from ml.googleapis.com,
+        automl.googleapis.com, and datalabeling.googleapis.com
+        to Vertex AI.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesRequest`):
+                The request object. Request message for
+                [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources].
+            parent (:class:`str`):
+                Required. The location that the migrated resources
+                will live in. Format:
+                ``projects/{project}/locations/{location}``
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            migrate_resource_requests (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest]`):
+                Required. The request messages
+                specifying the resources to migrate.
+                They must be in the same location as the
+                destination. Up to 50 resources can be
+                migrated in one batch.
+
+                This corresponds to the ``migrate_resource_requests`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation_async.AsyncOperation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesResponse`
+                Response message for
+                [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources].
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent, migrate_resource_requests])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = migration_service.BatchMigrateResourcesRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if parent is not None:
+            request.parent = parent
+        if migrate_resource_requests:
+            request.migrate_resource_requests.extend(migrate_resource_requests)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.batch_migrate_resources,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("parent", request.parent),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
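+        # The AsyncOperation constructed below can be awaited via
+        # ``await response.result()``, which blocks until the batch
+        # migration completes and yields the BatchMigrateResourcesResponse.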
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + migration_service.BatchMigrateResourcesResponse, + metadata_type=migration_service.BatchMigrateResourcesOperationMetadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "MigrationServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/client.py new file mode 100644 index 0000000000..bdd4821e25 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/client.py @@ -0,0 +1,617 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.migration_service import pagers +from google.cloud.aiplatform_v1beta1.types import migratable_resource +from google.cloud.aiplatform_v1beta1.types import migration_service +from .transports.base import MigrationServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import MigrationServiceGrpcTransport +from .transports.grpc_asyncio import MigrationServiceGrpcAsyncIOTransport + + +class MigrationServiceClientMeta(type): + """Metaclass for the MigrationService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[MigrationServiceTransport]] + _transport_registry["grpc"] = MigrationServiceGrpcTransport + _transport_registry["grpc_asyncio"] = MigrationServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[MigrationServiceTransport]: + """Returns an appropriate transport class. 
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class MigrationServiceClient(metaclass=MigrationServiceClientMeta):
+    """A service that migrates resources from automl.googleapis.com,
+    datalabeling.googleapis.com and ml.googleapis.com to Vertex AI.
+    """
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            MigrationServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            MigrationServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(
+            filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> MigrationServiceTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            MigrationServiceTransport: The transport used by the client
+                instance.
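+
+        Example (a minimal sketch; assumes default credentials are
+        available in the environment)::
+
+            client = MigrationServiceClient()
+            transport = client.transport
+            print(type(transport).__name__)  # e.g. MigrationServiceGrpcTransport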
+ """ + return self._transport + + @staticmethod + def annotated_dataset_path(project: str,dataset: str,annotated_dataset: str,) -> str: + """Returns a fully-qualified annotated_dataset string.""" + return "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}".format(project=project, dataset=dataset, annotated_dataset=annotated_dataset, ) + + @staticmethod + def parse_annotated_dataset_path(path: str) -> Dict[str,str]: + """Parses a annotated_dataset path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)/annotatedDatasets/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def dataset_path(project: str,dataset: str,) -> str: + """Returns a fully-qualified dataset string.""" + return "projects/{project}/datasets/{dataset}".format(project=project, dataset=dataset, ) + + @staticmethod + def parse_dataset_path(path: str) -> Dict[str,str]: + """Parses a dataset path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def dataset_path(project: str,location: str,dataset: str,) -> str: + """Returns a fully-qualified dataset string.""" + return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) + + @staticmethod + def parse_dataset_path(path: str) -> Dict[str,str]: + """Parses a dataset path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def dataset_path(project: str,location: str,dataset: str,) -> str: + """Returns a fully-qualified dataset string.""" + return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) + + @staticmethod + def parse_dataset_path(path: str) -> Dict[str,str]: + """Parses a dataset path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def model_path(project: str,location: str,model: str,) -> str: + """Returns a fully-qualified model string.""" + return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + + @staticmethod + def parse_model_path(path: str) -> Dict[str,str]: + """Parses a model path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def model_path(project: str,location: str,model: str,) -> str: + """Returns a fully-qualified model string.""" + return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + + @staticmethod + def parse_model_path(path: str) -> Dict[str,str]: + """Parses a model path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def version_path(project: str,model: str,version: str,) -> str: + """Returns a fully-qualified version string.""" + return "projects/{project}/models/{model}/versions/{version}".format(project=project, model=model, version=version, ) + + @staticmethod + def parse_version_path(path: str) -> Dict[str,str]: + """Parses a version path into its component segments.""" + m = 
re.match(r"^projects/(?P.+?)/models/(?P.+?)/versions/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, MigrationServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the migration service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, MigrationServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, MigrationServiceTransport): + # transport is a MigrationServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def search_migratable_resources(self, + request: migration_service.SearchMigratableResourcesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.SearchMigratableResourcesPager: + r"""Searches all of the resources in + automl.googleapis.com, datalabeling.googleapis.com and + ml.googleapis.com that can be migrated to Vertex AI's + given location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest): + The request object. Request message for + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. + parent (str): + Required. The location that the migratable resources + should be searched from. It's the Vertex AI location + that the resources can be migrated to, not the + resources' original location. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.migration_service.pagers.SearchMigratableResourcesPager: + Response message for + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a migration_service.SearchMigratableResourcesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, migration_service.SearchMigratableResourcesRequest): + request = migration_service.SearchMigratableResourcesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.search_migratable_resources] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
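+        # The wrapped callable applies the configured retry and timeout and
+        # attaches the metadata built above; for a hypothetical
+        # parent="projects/my-project/locations/us-central1", the routing
+        # header contributes a metadata entry roughly like
+        #     ("x-goog-request-params",
+        #      "parent=projects/my-project/locations/us-central1")
+        # so the service can route the call to the owning location.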
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # This method is paged; wrap the response in a pager, which provides
+        # an `__iter__` convenience method.
+        response = pagers.SearchMigratableResourcesPager(
+            method=rpc,
+            request=request,
+            response=response,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def batch_migrate_resources(self,
+            request: migration_service.BatchMigrateResourcesRequest = None,
+            *,
+            parent: str = None,
+            migrate_resource_requests: Sequence[migration_service.MigrateResourceRequest] = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> operation.Operation:
+        r"""Batch migrates resources from ml.googleapis.com,
+        automl.googleapis.com, and datalabeling.googleapis.com
+        to Vertex AI.
+
+        Args:
+            request (google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesRequest):
+                The request object. Request message for
+                [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources].
+            parent (str):
+                Required. The location where the migrated resources
+                will live. Format:
+                ``projects/{project}/locations/{location}``
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            migrate_resource_requests (Sequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest]):
+                Required. The request messages
+                specifying the resources to migrate.
+                They must be in the same location as the
+                destination. Up to 50 resources can be
+                migrated in one batch.
+
+                This corresponds to the ``migrate_resource_requests`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesResponse`
+                Response message for
+                [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources].
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent, migrate_resource_requests])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a migration_service.BatchMigrateResourcesRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, migration_service.BatchMigrateResourcesRequest):
+            request = migration_service.BatchMigrateResourcesRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
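+        # Because of the coercion above, callers may also pass a plain dict;
+        # proto-plus converts it into a BatchMigrateResourcesRequest, e.g.
+        # (hypothetical values):
+        #     client.batch_migrate_resources(request={
+        #         "parent": "projects/my-project/locations/us-central1",
+        #         "migrate_resource_requests": [...],
+        #     })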
+ if parent is not None: + request.parent = parent + if migrate_resource_requests is not None: + request.migrate_resource_requests = migrate_resource_requests + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_migrate_resources] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + migration_service.BatchMigrateResourcesResponse, + metadata_type=migration_service.BatchMigrateResourcesOperationMetadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "MigrationServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/pagers.py new file mode 100644 index 0000000000..c15610a912 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/pagers.py @@ -0,0 +1,141 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.cloud.aiplatform_v1beta1.types import migratable_resource +from google.cloud.aiplatform_v1beta1.types import migration_service + + +class SearchMigratableResourcesPager: + """A pager for iterating through ``search_migratable_resources`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``migratable_resources`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``SearchMigratableResources`` requests and continue to iterate + through the ``migratable_resources`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
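+
+    A minimal usage sketch (hypothetical project and location)::
+
+        client = MigrationServiceClient()
+        pager = client.search_migratable_resources(
+            parent="projects/my-project/locations/us-central1")
+        for resource in pager:
+            ...  # additional pages are fetched transparently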
+ """ + def __init__(self, + method: Callable[..., migration_service.SearchMigratableResourcesResponse], + request: migration_service.SearchMigratableResourcesRequest, + response: migration_service.SearchMigratableResourcesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = migration_service.SearchMigratableResourcesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[migration_service.SearchMigratableResourcesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[migratable_resource.MigratableResource]: + for page in self.pages: + yield from page.migratable_resources + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class SearchMigratableResourcesAsyncPager: + """A pager for iterating through ``search_migratable_resources`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``migratable_resources`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``SearchMigratableResources`` requests and continue to iterate + through the ``migratable_resources`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[migration_service.SearchMigratableResourcesResponse]], + request: migration_service.SearchMigratableResourcesRequest, + response: migration_service.SearchMigratableResourcesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = migration_service.SearchMigratableResourcesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[migration_service.SearchMigratableResourcesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[migratable_resource.MigratableResource]: + async def async_generator(): + async for page in self.pages: + for response in page.migratable_resources: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/__init__.py new file mode 100644 index 0000000000..8f036c410e --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import MigrationServiceTransport +from .grpc import MigrationServiceGrpcTransport +from .grpc_asyncio import MigrationServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[MigrationServiceTransport]] +_transport_registry['grpc'] = MigrationServiceGrpcTransport +_transport_registry['grpc_asyncio'] = MigrationServiceGrpcAsyncIOTransport + +__all__ = ( + 'MigrationServiceTransport', + 'MigrationServiceGrpcTransport', + 'MigrationServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py new file mode 100644 index 0000000000..2d86b81d6b --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py @@ -0,0 +1,189 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import migration_service +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class MigrationServiceTransport(abc.ABC): + """Abstract transport class for MigrationService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. 
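+        # Resolution order, summarized: supplying both ``credentials`` and
+        # ``credentials_file`` is an error; a credentials file, when given,
+        # is loaded with google.auth.load_credentials_from_file(); otherwise,
+        # if no explicit credentials were passed, Application Default
+        # Credentials are obtained from google.auth.default().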
+        if credentials and credentials_file:
+            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
+
+        if credentials_file is not None:
+            credentials, _ = google.auth.load_credentials_from_file(
+                                credentials_file,
+                                **scopes_kwargs,
+                                quota_project_id=quota_project_id
+                            )
+
+        elif credentials is None:
+            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
+
+        # If the credentials are service account credentials, then always try to use self signed JWT.
+        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
+            credentials = credentials.with_always_use_jwt_access(True)
+
+        # Save the credentials.
+        self._credentials = credentials
+
+    # TODO(busunkim): This method is in the base transport
+    # to avoid duplicating code across the transport classes. These functions
+    # should be deleted once the minimum required version of google-auth is increased.
+
+    # TODO: Remove this function once google-auth >= 1.25.0 is required
+    @classmethod
+    def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
+        """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
+
+        scopes_kwargs = {}
+
+        if _GOOGLE_AUTH_VERSION and (
+            packaging.version.parse(_GOOGLE_AUTH_VERSION)
+            >= packaging.version.parse("1.25.0")
+        ):
+            scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
+        else:
+            scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
+
+        return scopes_kwargs
+
+    def _prep_wrapped_messages(self, client_info):
+        # Precompute the wrapped methods.
+        self._wrapped_methods = {
+            self.search_migratable_resources: gapic_v1.method.wrap_method(
+                self.search_migratable_resources,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+            self.batch_migrate_resources: gapic_v1.method.wrap_method(
+                self.batch_migrate_resources,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+        }
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsClient:
+        """Return the client designed to process long-running operations."""
+        raise NotImplementedError()
+
+    @property
+    def search_migratable_resources(self) -> Callable[
+            [migration_service.SearchMigratableResourcesRequest],
+            Union[
+                migration_service.SearchMigratableResourcesResponse,
+                Awaitable[migration_service.SearchMigratableResourcesResponse]
+            ]]:
+        raise NotImplementedError()
+
+    @property
+    def batch_migrate_resources(self) -> Callable[
+            [migration_service.BatchMigrateResourcesRequest],
+            Union[
+                operations_pb2.Operation,
+                Awaitable[operations_pb2.Operation]
+            ]]:
+        raise NotImplementedError()
+
+
+__all__ = (
+    'MigrationServiceTransport',
+)
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py
new file mode 100644
index 0000000000..4729745415
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py
@@ -0,0 +1,303 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import warnings
+from typing import Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import grpc_helpers  # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+import google.auth  # type: ignore
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+
+import grpc  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import migration_service
+from google.longrunning import operations_pb2  # type: ignore
+from .base import MigrationServiceTransport, DEFAULT_CLIENT_INFO
+
+
+class MigrationServiceGrpcTransport(MigrationServiceTransport):
+    """gRPC backend transport for MigrationService.
+
+    A service that migrates resources from automl.googleapis.com,
+    datalabeling.googleapis.com and ml.googleapis.com to Vertex AI.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+    _stubs: Dict[str, Callable]
+
+    def __init__(self, *,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: str = None,
+            scopes: Sequence[str] = None,
+            channel: grpc.Channel = None,
+            api_mtls_endpoint: str = None,
+            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+            ssl_channel_credentials: grpc.ChannelCredentials = None,
+            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+            quota_project_id: Optional[str] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format.
It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. 
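+
+        For example, a channel using the default host, default scopes and
+        Application Default Credentials might be created with (sketch)::
+
+            channel = MigrationServiceGrpcTransport.create_channel()
+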
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service.
+        """
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Sanity check: Only create a new client if we do not already have one.
+        if self._operations_client is None:
+            self._operations_client = operations_v1.OperationsClient(
+                self.grpc_channel
+            )
+
+        # Return the client from cache.
+        return self._operations_client
+
+    @property
+    def search_migratable_resources(self) -> Callable[
+            [migration_service.SearchMigratableResourcesRequest],
+            migration_service.SearchMigratableResourcesResponse]:
+        r"""Return a callable for the search migratable resources method over gRPC.
+
+        Searches all of the resources in
+        automl.googleapis.com, datalabeling.googleapis.com and
+        ml.googleapis.com that can be migrated to Vertex AI's
+        given location.
+
+        Returns:
+            Callable[[~.SearchMigratableResourcesRequest],
+                    ~.SearchMigratableResourcesResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'search_migratable_resources' not in self._stubs:
+            self._stubs['search_migratable_resources'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.MigrationService/SearchMigratableResources',
+                request_serializer=migration_service.SearchMigratableResourcesRequest.serialize,
+                response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize,
+            )
+        return self._stubs['search_migratable_resources']
+
+    @property
+    def batch_migrate_resources(self) -> Callable[
+            [migration_service.BatchMigrateResourcesRequest],
+            operations_pb2.Operation]:
+        r"""Return a callable for the batch migrate resources method over gRPC.
+ + Batch migrates resources from ml.googleapis.com, + automl.googleapis.com, and datalabeling.googleapis.com + to Vertex AI. + + Returns: + Callable[[~.BatchMigrateResourcesRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'batch_migrate_resources' not in self._stubs: + self._stubs['batch_migrate_resources'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MigrationService/BatchMigrateResources', + request_serializer=migration_service.BatchMigrateResourcesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['batch_migrate_resources'] + + +__all__ = ( + 'MigrationServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..5643065d24 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py @@ -0,0 +1,307 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +import packaging.version + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import migration_service +from google.longrunning import operations_pb2 # type: ignore +from .base import MigrationServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import MigrationServiceGrpcTransport + + +class MigrationServiceGrpcAsyncIOTransport(MigrationServiceTransport): + """gRPC AsyncIO backend transport for MigrationService. + + A service that migrates resources from automl.googleapis.com, + datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
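+
+    The generated async client selects this transport by default (its
+    ``transport`` argument defaults to ``"grpc_asyncio"``), so direct
+    construction is rarely needed; a manual sketch, with credentials
+    resolved from the environment::
+
+        transport = MigrationServiceGrpcAsyncIOTransport()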
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. 
A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. 
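+        # Note that, unlike create_channel(), this property never builds a
+        # new channel; the channel is created once in __init__ and reused
+        # for the lifetime of the transport.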
+ return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def search_migratable_resources(self) -> Callable[ + [migration_service.SearchMigratableResourcesRequest], + Awaitable[migration_service.SearchMigratableResourcesResponse]]: + r"""Return a callable for the search migratable resources method over gRPC. + + Searches all of the resources in + automl.googleapis.com, datalabeling.googleapis.com and + ml.googleapis.com that can be migrated to Vertex AI's + given location. + + Returns: + Callable[[~.SearchMigratableResourcesRequest], + Awaitable[~.SearchMigratableResourcesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'search_migratable_resources' not in self._stubs: + self._stubs['search_migratable_resources'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MigrationService/SearchMigratableResources', + request_serializer=migration_service.SearchMigratableResourcesRequest.serialize, + response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize, + ) + return self._stubs['search_migratable_resources'] + + @property + def batch_migrate_resources(self) -> Callable[ + [migration_service.BatchMigrateResourcesRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the batch migrate resources method over gRPC. + + Batch migrates resources from ml.googleapis.com, + automl.googleapis.com, and datalabeling.googleapis.com + to Vertex AI. + + Returns: + Callable[[~.BatchMigrateResourcesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'batch_migrate_resources' not in self._stubs: + self._stubs['batch_migrate_resources'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.MigrationService/BatchMigrateResources', + request_serializer=migration_service.BatchMigrateResourcesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['batch_migrate_resources'] + + +__all__ = ( + 'MigrationServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/__init__.py new file mode 100644 index 0000000000..5c4d570d15 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import ModelServiceClient +from .async_client import ModelServiceAsyncClient + +__all__ = ( + 'ModelServiceClient', + 'ModelServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py new file mode 100644 index 0000000000..3d71a64ff8 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py @@ -0,0 +1,1060 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.model_service import pagers +from google.cloud.aiplatform_v1beta1.types import deployed_model_ref +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import model +from google.cloud.aiplatform_v1beta1.types import model as gca_model +from google.cloud.aiplatform_v1beta1.types import model_evaluation +from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice +from google.cloud.aiplatform_v1beta1.types import model_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import ModelServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport +from .client import ModelServiceClient + + +class ModelServiceAsyncClient: + """A service for managing Vertex AI's machine learning Models.""" + + _client: ModelServiceClient + + DEFAULT_ENDPOINT = ModelServiceClient.DEFAULT_ENDPOINT + 
DEFAULT_MTLS_ENDPOINT = ModelServiceClient.DEFAULT_MTLS_ENDPOINT + + endpoint_path = staticmethod(ModelServiceClient.endpoint_path) + parse_endpoint_path = staticmethod(ModelServiceClient.parse_endpoint_path) + model_path = staticmethod(ModelServiceClient.model_path) + parse_model_path = staticmethod(ModelServiceClient.parse_model_path) + model_evaluation_path = staticmethod(ModelServiceClient.model_evaluation_path) + parse_model_evaluation_path = staticmethod(ModelServiceClient.parse_model_evaluation_path) + model_evaluation_slice_path = staticmethod(ModelServiceClient.model_evaluation_slice_path) + parse_model_evaluation_slice_path = staticmethod(ModelServiceClient.parse_model_evaluation_slice_path) + training_pipeline_path = staticmethod(ModelServiceClient.training_pipeline_path) + parse_training_pipeline_path = staticmethod(ModelServiceClient.parse_training_pipeline_path) + common_billing_account_path = staticmethod(ModelServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(ModelServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(ModelServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(ModelServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(ModelServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(ModelServiceClient.parse_common_organization_path) + common_project_path = staticmethod(ModelServiceClient.common_project_path) + parse_common_project_path = staticmethod(ModelServiceClient.parse_common_project_path) + common_location_path = staticmethod(ModelServiceClient.common_location_path) + parse_common_location_path = staticmethod(ModelServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ModelServiceAsyncClient: The constructed client. + """ + return ModelServiceClient.from_service_account_info.__func__(ModelServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ModelServiceAsyncClient: The constructed client. + """ + return ModelServiceClient.from_service_account_file.__func__(ModelServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> ModelServiceTransport: + """Returns the transport used by the client instance. + + Returns: + ModelServiceTransport: The transport used by the client instance. 
+ """ + return self._client.transport + + get_transport_class = functools.partial(type(ModelServiceClient).get_transport_class, type(ModelServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, ModelServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the model service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.ModelServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = ModelServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def upload_model(self, + request: model_service.UploadModelRequest = None, + *, + parent: str = None, + model: gca_model.Model = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Uploads a Model artifact into Vertex AI. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UploadModelRequest`): + The request object. Request message for + [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel]. + parent (:class:`str`): + Required. The resource name of the Location into which + to upload the Model. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model (:class:`google.cloud.aiplatform_v1beta1.types.Model`): + Required. The Model to create. + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.UploadModelResponse` + Response message of + [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] + operation. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.UploadModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if model is not None: + request.model = model + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.upload_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + model_service.UploadModelResponse, + metadata_type=model_service.UploadModelOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_model(self, + request: model_service.GetModelRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model.Model: + r"""Gets a Model. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetModelRequest`): + The request object. Request message for + [ModelService.GetModel][google.cloud.aiplatform.v1beta1.ModelService.GetModel]. + name (:class:`str`): + Required. The name of the Model resource. Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Model: + A trained machine learning Model. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.GetModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_models(self, + request: model_service.ListModelsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelsAsyncPager: + r"""Lists Models in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListModelsRequest`): + The request object. Request message for + [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels]. + parent (:class:`str`): + Required. The resource name of the Location to list the + Models from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelsAsyncPager: + Response message for + [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.ListModelsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_models, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def update_model(self, + request: model_service.UpdateModelRequest = None, + *, + model: gca_model.Model = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_model.Model: + r"""Updates a Model. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateModelRequest`): + The request object. Request message for + [ModelService.UpdateModel][google.cloud.aiplatform.v1beta1.ModelService.UpdateModel]. + model (:class:`google.cloud.aiplatform_v1beta1.types.Model`): + Required. The Model which replaces + the resource on the server. + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The update mask applies to the resource. For + the ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Model: + A trained machine learning Model. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.UpdateModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("model.name", request.model.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_model(self, + request: model_service.DeleteModelRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a Model. + Note: Model can only be deleted if there are no + DeployedModels created from it. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteModelRequest`): + The request object. Request message for + [ModelService.DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel]. + name (:class:`str`): + Required. The name of the Model resource to be deleted. 
+ Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.DeleteModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def export_model(self, + request: model_service.ExportModelRequest = None, + *, + name: str = None, + output_config: model_service.ExportModelRequest.OutputConfig = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Exports a trained, exportable, Model to a location specified by + the user. A Model is considered to be exportable if it has at + least one [supported export + format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats]. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ExportModelRequest`): + The request object. Request message for + [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel]. + name (:class:`str`): + Required. The resource name of the Model to export. + Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ output_config (:class:`google.cloud.aiplatform_v1beta1.types.ExportModelRequest.OutputConfig`): + Required. The desired output location + and configuration. + + This corresponds to the ``output_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.ExportModelResponse` + Response message of + [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel] + operation. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, output_config]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.ExportModelRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if output_config is not None: + request.output_config = output_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.export_model, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + model_service.ExportModelResponse, + metadata_type=model_service.ExportModelOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_model_evaluation(self, + request: model_service.GetModelEvaluationRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_evaluation.ModelEvaluation: + r"""Gets a ModelEvaluation. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetModelEvaluationRequest`): + The request object. Request message for + [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation]. + name (:class:`str`): + Required. The name of the ModelEvaluation resource. + Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
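+
+        Example (a minimal sketch; the resource name below is a
+        placeholder)::
+
+            from google.cloud import aiplatform_v1beta1
+
+            client = aiplatform_v1beta1.ModelServiceAsyncClient()
+            evaluation = await client.get_model_evaluation(
+                name="projects/my-project/locations/us-central1/models/123/evaluations/456",
+            )
+            print(evaluation.metrics_schema_uri)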
+ + Returns: + google.cloud.aiplatform_v1beta1.types.ModelEvaluation: + A collection of metrics calculated by + comparing Model's predictions on all of + the test data against annotations from + the test data. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.GetModelEvaluationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_model_evaluation, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_model_evaluations(self, + request: model_service.ListModelEvaluationsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelEvaluationsAsyncPager: + r"""Lists ModelEvaluations in a Model. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest`): + The request object. Request message for + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. + parent (:class:`str`): + Required. The resource name of the Model to list the + ModelEvaluations from. Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationsAsyncPager: + Response message for + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.ListModelEvaluationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_model_evaluations, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelEvaluationsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_model_evaluation_slice(self, + request: model_service.GetModelEvaluationSliceRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_evaluation_slice.ModelEvaluationSlice: + r"""Gets a ModelEvaluationSlice. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetModelEvaluationSliceRequest`): + The request object. Request message for + [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluationSlice]. + name (:class:`str`): + Required. The name of the ModelEvaluationSlice resource. + Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice: + A collection of metrics calculated by + comparing Model's predictions on a slice + of the test data against ground truth + annotations. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.GetModelEvaluationSliceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_model_evaluation_slice, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def list_model_evaluation_slices(self, + request: model_service.ListModelEvaluationSlicesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelEvaluationSlicesAsyncPager: + r"""Lists ModelEvaluationSlices in a ModelEvaluation. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest`): + The request object. Request message for + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. + parent (:class:`str`): + Required. The resource name of the ModelEvaluation to + list the ModelEvaluationSlices from. Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationSlicesAsyncPager: + Response message for + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = model_service.ListModelEvaluationSlicesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_model_evaluation_slices, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListModelEvaluationSlicesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "ModelServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/client.py new file mode 100644 index 0000000000..565b9f0598 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/client.py @@ -0,0 +1,1283 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.model_service import pagers +from google.cloud.aiplatform_v1beta1.types import deployed_model_ref +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import model +from google.cloud.aiplatform_v1beta1.types import model as gca_model +from google.cloud.aiplatform_v1beta1.types import model_evaluation +from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice +from google.cloud.aiplatform_v1beta1.types import model_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import ModelServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import ModelServiceGrpcTransport +from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport + + +class ModelServiceClientMeta(type): + """Metaclass for the ModelService client. + + This provides class-level methods for building and retrieving + support objects (e.g. 
transport) without polluting the client instance
+    objects.
+    """
+    _transport_registry = OrderedDict()  # type: Dict[str, Type[ModelServiceTransport]]
+    _transport_registry["grpc"] = ModelServiceGrpcTransport
+    _transport_registry["grpc_asyncio"] = ModelServiceGrpcAsyncIOTransport
+
+    def get_transport_class(cls,
+            label: str = None,
+        ) -> Type[ModelServiceTransport]:
+        """Returns an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class ModelServiceClient(metaclass=ModelServiceClientMeta):
+    """A service for managing Vertex AI's machine learning Models."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            ModelServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            ModelServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(
+            filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> ModelServiceTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            ModelServiceTransport: The transport used by the client
+                instance.
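+
+        Example (a minimal sketch; assumes default credentials are
+        available in the environment)::
+
+            client = ModelServiceClient(transport="grpc")
+            transport = client.transport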
+ """ + return self._transport + + @staticmethod + def endpoint_path(project: str,location: str,endpoint: str,) -> str: + """Returns a fully-qualified endpoint string.""" + return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + + @staticmethod + def parse_endpoint_path(path: str) -> Dict[str,str]: + """Parses a endpoint path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def model_path(project: str,location: str,model: str,) -> str: + """Returns a fully-qualified model string.""" + return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + + @staticmethod + def parse_model_path(path: str) -> Dict[str,str]: + """Parses a model path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def model_evaluation_path(project: str,location: str,model: str,evaluation: str,) -> str: + """Returns a fully-qualified model_evaluation string.""" + return "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}".format(project=project, location=location, model=model, evaluation=evaluation, ) + + @staticmethod + def parse_model_evaluation_path(path: str) -> Dict[str,str]: + """Parses a model_evaluation path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)/evaluations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def model_evaluation_slice_path(project: str,location: str,model: str,evaluation: str,slice: str,) -> str: + """Returns a fully-qualified model_evaluation_slice string.""" + return "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}".format(project=project, location=location, model=model, evaluation=evaluation, slice=slice, ) + + @staticmethod + def parse_model_evaluation_slice_path(path: str) -> Dict[str,str]: + """Parses a model_evaluation_slice path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)/evaluations/(?P.+?)/slices/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def training_pipeline_path(project: str,location: str,training_pipeline: str,) -> str: + """Returns a fully-qualified training_pipeline string.""" + return "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, ) + + @staticmethod + def parse_training_pipeline_path(path: str) -> Dict[str,str]: + """Parses a training_pipeline path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/trainingPipelines/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified 
folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse an organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, ModelServiceTransport, None] = None,
+            client_options: Optional[client_options_lib.ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the model service client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, ModelServiceTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+                set, no client certificate will be used.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests.
If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, ModelServiceTransport): + # transport is a ModelServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def upload_model(self, + request: model_service.UploadModelRequest = None, + *, + parent: str = None, + model: gca_model.Model = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Uploads a Model artifact into Vertex AI. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UploadModelRequest): + The request object. Request message for + [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel]. + parent (str): + Required. The resource name of the Location into which + to upload the Model. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + model (google.cloud.aiplatform_v1beta1.types.Model): + Required. The Model to create. 
+ This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.UploadModelResponse` + Response message of + [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] + operation. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, model]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.UploadModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.UploadModelRequest): + request = model_service.UploadModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if model is not None: + request.model = model + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.upload_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + model_service.UploadModelResponse, + metadata_type=model_service.UploadModelOperationMetadata, + ) + + # Done; return the response. + return response + + def get_model(self, + request: model_service.GetModelRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model.Model: + r"""Gets a Model. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetModelRequest): + The request object. Request message for + [ModelService.GetModel][google.cloud.aiplatform.v1beta1.ModelService.GetModel]. + name (str): + Required. The name of the Model resource. Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Model: + A trained machine learning Model. + """ + # Create or coerce a protobuf request object. 
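+        # For example (placeholder resource name), either of these is valid:
+        #     client.get_model(name="projects/p/locations/l/models/m")
+        #     client.get_model(request=model_service.GetModelRequest(name=...))
+        # but mixing ``request`` with flattened fields is rejected below.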
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.GetModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.GetModelRequest): + request = model_service.GetModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_models(self, + request: model_service.ListModelsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelsPager: + r"""Lists Models in a Location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListModelsRequest): + The request object. Request message for + [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels]. + parent (str): + Required. The resource name of the Location to list the + Models from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelsPager: + Response message for + [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.ListModelsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.ListModelsRequest): + request = model_service.ListModelsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_models] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListModelsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_model(self, + request: model_service.UpdateModelRequest = None, + *, + model: gca_model.Model = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_model.Model: + r"""Updates a Model. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UpdateModelRequest): + The request object. Request message for + [ModelService.UpdateModel][google.cloud.aiplatform.v1beta1.ModelService.UpdateModel]. + model (google.cloud.aiplatform_v1beta1.types.Model): + Required. The Model which replaces + the resource on the server. + + This corresponds to the ``model`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the resource. For + the ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Model: + A trained machine learning Model. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([model, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.UpdateModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.UpdateModelRequest): + request = model_service.UpdateModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if model is not None: + request.model = model + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_model] + + # Certain fields should be provided within the metadata header; + # add these here. 
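+        # ``to_grpc_metadata`` serializes the pair below into the
+        # ``x-goog-request-params`` header (for example,
+        # ``model.name=projects/p/locations/l/models/m``), which the backend
+        # uses to route the request.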
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("model.name", request.model.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_model(self, + request: model_service.DeleteModelRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a Model. + Note: Model can only be deleted if there are no + DeployedModels created from it. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteModelRequest): + The request object. Request message for + [ModelService.DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel]. + name (str): + Required. The name of the Model resource to be deleted. + Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.DeleteModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.DeleteModelRequest): + request = model_service.DeleteModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def export_model(self, + request: model_service.ExportModelRequest = None, + *, + name: str = None, + output_config: model_service.ExportModelRequest.OutputConfig = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Exports a trained, exportable, Model to a location specified by + the user. A Model is considered to be exportable if it has at + least one [supported export + format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats]. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ExportModelRequest): + The request object. Request message for + [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel]. + name (str): + Required. The resource name of the Model to export. + Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + output_config (google.cloud.aiplatform_v1beta1.types.ExportModelRequest.OutputConfig): + Required. The desired output location + and configuration. + + This corresponds to the ``output_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.ExportModelResponse` + Response message of + [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel] + operation. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, output_config]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.ExportModelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.ExportModelRequest): + request = model_service.ExportModelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if output_config is not None: + request.output_config = output_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.export_model] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
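+        # (The raw RPC returns a ``google.longrunning.Operation`` proto;
+        # ``from_gapic`` wraps it in a future whose ``result()`` deserializes
+        # the ``ExportModelResponse`` payload.)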
+ response = gac_operation.from_gapic( + response, + self._transport.operations_client, + model_service.ExportModelResponse, + metadata_type=model_service.ExportModelOperationMetadata, + ) + + # Done; return the response. + return response + + def get_model_evaluation(self, + request: model_service.GetModelEvaluationRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_evaluation.ModelEvaluation: + r"""Gets a ModelEvaluation. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetModelEvaluationRequest): + The request object. Request message for + [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation]. + name (str): + Required. The name of the ModelEvaluation resource. + Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ModelEvaluation: + A collection of metrics calculated by + comparing Model's predictions on all of + the test data against annotations from + the test data. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.GetModelEvaluationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.GetModelEvaluationRequest): + request = model_service.GetModelEvaluationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_model_evaluation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_model_evaluations(self, + request: model_service.ListModelEvaluationsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelEvaluationsPager: + r"""Lists ModelEvaluations in a Model. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest): + The request object. Request message for + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. + parent (str): + Required. 
The resource name of the Model to list the + ModelEvaluations from. Format: + ``projects/{project}/locations/{location}/models/{model}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationsPager: + Response message for + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.ListModelEvaluationsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.ListModelEvaluationsRequest): + request = model_service.ListModelEvaluationsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_model_evaluations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListModelEvaluationsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_model_evaluation_slice(self, + request: model_service.GetModelEvaluationSliceRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> model_evaluation_slice.ModelEvaluationSlice: + r"""Gets a ModelEvaluationSlice. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetModelEvaluationSliceRequest): + The request object. Request message for + [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluationSlice]. + name (str): + Required. The name of the ModelEvaluationSlice resource. + Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice: + A collection of metrics calculated by + comparing Model's predictions on a slice + of the test data against ground truth + annotations. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.GetModelEvaluationSliceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.GetModelEvaluationSliceRequest): + request = model_service.GetModelEvaluationSliceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_model_evaluation_slice] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_model_evaluation_slices(self, + request: model_service.ListModelEvaluationSlicesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListModelEvaluationSlicesPager: + r"""Lists ModelEvaluationSlices in a ModelEvaluation. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest): + The request object. Request message for + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. + parent (str): + Required. The resource name of the ModelEvaluation to + list the ModelEvaluationSlices from. Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationSlicesPager: + Response message for + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
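+        # (GAPIC convention: callers pass either a fully formed request proto
+        # or the individual "flattened" fields, never both.)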
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a model_service.ListModelEvaluationSlicesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, model_service.ListModelEvaluationSlicesRequest): + request = model_service.ListModelEvaluationSlicesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_model_evaluation_slices] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListModelEvaluationSlicesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "ModelServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/pagers.py new file mode 100644 index 0000000000..1825a3c784 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/pagers.py @@ -0,0 +1,387 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.cloud.aiplatform_v1beta1.types import model +from google.cloud.aiplatform_v1beta1.types import model_evaluation +from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice +from google.cloud.aiplatform_v1beta1.types import model_service + + +class ListModelsPager: + """A pager for iterating through ``list_models`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListModelsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``models`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListModels`` requests and continue to iterate + through the ``models`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., model_service.ListModelsResponse], + request: model_service.ListModelsRequest, + response: model_service.ListModelsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListModelsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListModelsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = model_service.ListModelsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[model_service.ListModelsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[model.Model]: + for page in self.pages: + yield from page.models + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelsAsyncPager: + """A pager for iterating through ``list_models`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListModelsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``models`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListModels`` requests and continue to iterate + through the ``models`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[model_service.ListModelsResponse]], + request: model_service.ListModelsRequest, + response: model_service.ListModelsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListModelsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListModelsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
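+
+        Example (illustrative sketch only; assumes an already-constructed
+        ``ModelServiceAsyncClient`` named ``client``)::
+
+            pager = await client.list_models(parent="projects/my-project/locations/us-central1")
+            async for model in pager:
+                print(model.display_name)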
+ """ + self._method = method + self._request = model_service.ListModelsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[model_service.ListModelsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[model.Model]: + async def async_generator(): + async for page in self.pages: + for response in page.models: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelEvaluationsPager: + """A pager for iterating through ``list_model_evaluations`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``model_evaluations`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListModelEvaluations`` requests and continue to iterate + through the ``model_evaluations`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., model_service.ListModelEvaluationsResponse], + request: model_service.ListModelEvaluationsRequest, + response: model_service.ListModelEvaluationsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = model_service.ListModelEvaluationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[model_service.ListModelEvaluationsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[model_evaluation.ModelEvaluation]: + for page in self.pages: + yield from page.model_evaluations + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelEvaluationsAsyncPager: + """A pager for iterating through ``list_model_evaluations`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``model_evaluations`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListModelEvaluations`` requests and continue to iterate + through the ``model_evaluations`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[model_service.ListModelEvaluationsResponse]], + request: model_service.ListModelEvaluationsRequest, + response: model_service.ListModelEvaluationsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = model_service.ListModelEvaluationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[model_service.ListModelEvaluationsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[model_evaluation.ModelEvaluation]: + async def async_generator(): + async for page in self.pages: + for response in page.model_evaluations: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelEvaluationSlicesPager: + """A pager for iterating through ``list_model_evaluation_slices`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``model_evaluation_slices`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListModelEvaluationSlices`` requests and continue to iterate + through the ``model_evaluation_slices`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., model_service.ListModelEvaluationSlicesResponse], + request: model_service.ListModelEvaluationSlicesRequest, + response: model_service.ListModelEvaluationSlicesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse): + The initial response object. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = model_service.ListModelEvaluationSlicesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[model_service.ListModelEvaluationSlicesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[model_evaluation_slice.ModelEvaluationSlice]: + for page in self.pages: + yield from page.model_evaluation_slices + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListModelEvaluationSlicesAsyncPager: + """A pager for iterating through ``list_model_evaluation_slices`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``model_evaluation_slices`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListModelEvaluationSlices`` requests and continue to iterate + through the ``model_evaluation_slices`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[model_service.ListModelEvaluationSlicesResponse]], + request: model_service.ListModelEvaluationSlicesRequest, + response: model_service.ListModelEvaluationSlicesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = model_service.ListModelEvaluationSlicesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[model_service.ListModelEvaluationSlicesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[model_evaluation_slice.ModelEvaluationSlice]: + async def async_generator(): + async for page in self.pages: + for response in page.model_evaluation_slices: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/__init__.py new file mode 100644 index 0000000000..0f09224d3c --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import ModelServiceTransport +from .grpc import ModelServiceGrpcTransport +from .grpc_asyncio import ModelServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[ModelServiceTransport]] +_transport_registry['grpc'] = ModelServiceGrpcTransport +_transport_registry['grpc_asyncio'] = ModelServiceGrpcAsyncIOTransport + +__all__ = ( + 'ModelServiceTransport', + 'ModelServiceGrpcTransport', + 'ModelServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py new file mode 100644 index 0000000000..f8b2d01811 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py @@ -0,0 +1,305 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import model +from google.cloud.aiplatform_v1beta1.types import model as gca_model +from google.cloud.aiplatform_v1beta1.types import model_evaluation +from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice +from google.cloud.aiplatform_v1beta1.types import model_service +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class ModelServiceTransport(abc.ABC): + """Abstract transport class for ModelService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + + # Save the scopes. 
+        self._scopes = scopes
+
+        # If no credentials are provided, then determine the appropriate
+        # defaults.
+        if credentials and credentials_file:
+            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
+
+        if credentials_file is not None:
+            credentials, _ = google.auth.load_credentials_from_file(
+                credentials_file,
+                **scopes_kwargs,
+                quota_project_id=quota_project_id
+            )
+
+        elif credentials is None:
+            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
+
+        # If the credentials are service account credentials, then always try to use self signed JWT.
+        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
+            credentials = credentials.with_always_use_jwt_access(True)
+
+        # Save the credentials.
+        self._credentials = credentials
+
+    # TODO(busunkim): This method is in the base transport
+    # to avoid duplicating code across the transport classes. These functions
+    # should be deleted once the minimum required version of google-auth is increased.
+
+    # TODO: Remove this function once google-auth >= 1.25.0 is required
+    @classmethod
+    def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
+        """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
+
+        scopes_kwargs = {}
+
+        if _GOOGLE_AUTH_VERSION and (
+            packaging.version.parse(_GOOGLE_AUTH_VERSION)
+            >= packaging.version.parse("1.25.0")
+        ):
+            scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
+        else:
+            scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
+
+        return scopes_kwargs
+
+    def _prep_wrapped_messages(self, client_info):
+        # Precompute the wrapped methods.
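+        # (``gapic_v1.method.wrap_method`` layers the default timeout, retry
+        # policy, and ``client_info`` user-agent metadata onto each transport
+        # callable; the client looks these up via ``_wrapped_methods``.)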
+ self._wrapped_methods = { + self.upload_model: gapic_v1.method.wrap_method( + self.upload_model, + default_timeout=5.0, + client_info=client_info, + ), + self.get_model: gapic_v1.method.wrap_method( + self.get_model, + default_timeout=5.0, + client_info=client_info, + ), + self.list_models: gapic_v1.method.wrap_method( + self.list_models, + default_timeout=5.0, + client_info=client_info, + ), + self.update_model: gapic_v1.method.wrap_method( + self.update_model, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_model: gapic_v1.method.wrap_method( + self.delete_model, + default_timeout=5.0, + client_info=client_info, + ), + self.export_model: gapic_v1.method.wrap_method( + self.export_model, + default_timeout=5.0, + client_info=client_info, + ), + self.get_model_evaluation: gapic_v1.method.wrap_method( + self.get_model_evaluation, + default_timeout=5.0, + client_info=client_info, + ), + self.list_model_evaluations: gapic_v1.method.wrap_method( + self.list_model_evaluations, + default_timeout=5.0, + client_info=client_info, + ), + self.get_model_evaluation_slice: gapic_v1.method.wrap_method( + self.get_model_evaluation_slice, + default_timeout=5.0, + client_info=client_info, + ), + self.list_model_evaluation_slices: gapic_v1.method.wrap_method( + self.list_model_evaluation_slices, + default_timeout=5.0, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def upload_model(self) -> Callable[ + [model_service.UploadModelRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_model(self) -> Callable[ + [model_service.GetModelRequest], + Union[ + model.Model, + Awaitable[model.Model] + ]]: + raise NotImplementedError() + + @property + def list_models(self) -> Callable[ + [model_service.ListModelsRequest], + Union[ + model_service.ListModelsResponse, + Awaitable[model_service.ListModelsResponse] + ]]: + raise NotImplementedError() + + @property + def update_model(self) -> Callable[ + [model_service.UpdateModelRequest], + Union[ + gca_model.Model, + Awaitable[gca_model.Model] + ]]: + raise NotImplementedError() + + @property + def delete_model(self) -> Callable[ + [model_service.DeleteModelRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def export_model(self) -> Callable[ + [model_service.ExportModelRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_model_evaluation(self) -> Callable[ + [model_service.GetModelEvaluationRequest], + Union[ + model_evaluation.ModelEvaluation, + Awaitable[model_evaluation.ModelEvaluation] + ]]: + raise NotImplementedError() + + @property + def list_model_evaluations(self) -> Callable[ + [model_service.ListModelEvaluationsRequest], + Union[ + model_service.ListModelEvaluationsResponse, + Awaitable[model_service.ListModelEvaluationsResponse] + ]]: + raise NotImplementedError() + + @property + def get_model_evaluation_slice(self) -> Callable[ + [model_service.GetModelEvaluationSliceRequest], + Union[ + model_evaluation_slice.ModelEvaluationSlice, + Awaitable[model_evaluation_slice.ModelEvaluationSlice] + ]]: + raise NotImplementedError() + + @property + def list_model_evaluation_slices(self) -> Callable[ + 
[model_service.ListModelEvaluationSlicesRequest], + Union[ + model_service.ListModelEvaluationSlicesResponse, + Awaitable[model_service.ListModelEvaluationSlicesResponse] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'ModelServiceTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py new file mode 100644 index 0000000000..e699f90533 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py @@ -0,0 +1,514 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import model +from google.cloud.aiplatform_v1beta1.types import model as gca_model +from google.cloud.aiplatform_v1beta1.types import model_evaluation +from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice +from google.cloud.aiplatform_v1beta1.types import model_service +from google.longrunning import operations_pb2 # type: ignore +from .base import ModelServiceTransport, DEFAULT_CLIENT_INFO + + +class ModelServiceGrpcTransport(ModelServiceTransport): + """gRPC backend transport for ModelService. + + A service for managing Vertex AI's machine learning Models. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure a mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        self._grpc_channel = None
+        self._ssl_channel_credentials = ssl_channel_credentials
+        self._stubs: Dict[str, Callable] = {}
+        self._operations_client = None
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Ignore credentials if a channel was passed.
+            credentials = False
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+
+        else:
+            if api_mtls_endpoint:
+                host = api_mtls_endpoint
+
+                # Create SSL credentials with client_cert_source or application
+                # default SSL credentials.
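+                # (Note: ``api_mtls_endpoint``/``client_cert_source`` form the
+                # deprecated mTLS path; newer code should prefer
+                # ``client_cert_source_for_mtls``, handled in the branch below.)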
+                if client_cert_source:
+                    cert, key = client_cert_source()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+                else:
+                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+            else:
+                if client_cert_source_for_mtls and not ssl_channel_credentials:
+                    cert, key = client_cert_source_for_mtls()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+
+        # The base transport sets the host, credentials and scopes
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+        )
+
+        if not self._grpc_channel:
+            self._grpc_channel = type(self).create_channel(
+                self._host,
+                credentials=self._credentials,
+                credentials_file=credentials_file,
+                scopes=self._scopes,
+                ssl_credentials=self._ssl_channel_credentials,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        # Wrap messages. This must be done after self._grpc_channel exists
+        self._prep_wrapped_messages(client_info)
+
+    @classmethod
+    def create_channel(cls,
+                       host: str = 'aiplatform.googleapis.com',
+                       credentials: ga_credentials.Credentials = None,
+                       credentials_file: str = None,
+                       scopes: Optional[Sequence[str]] = None,
+                       quota_project_id: Optional[str] = None,
+                       **kwargs) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service.
+        """
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Sanity check: Only create a new client if we do not already have one.
+        if self._operations_client is None:
+            self._operations_client = operations_v1.OperationsClient(
+                self.grpc_channel
+            )
+
+        # Return the client from cache.
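+        # (The operations client shares this transport's gRPC channel, so
+        # long-running-operation polling reuses the same connection and
+        # credentials.)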
+ return self._operations_client + + @property + def upload_model(self) -> Callable[ + [model_service.UploadModelRequest], + operations_pb2.Operation]: + r"""Return a callable for the upload model method over gRPC. + + Uploads a Model artifact into Vertex AI. + + Returns: + Callable[[~.UploadModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'upload_model' not in self._stubs: + self._stubs['upload_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/UploadModel', + request_serializer=model_service.UploadModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['upload_model'] + + @property + def get_model(self) -> Callable[ + [model_service.GetModelRequest], + model.Model]: + r"""Return a callable for the get model method over gRPC. + + Gets a Model. + + Returns: + Callable[[~.GetModelRequest], + ~.Model]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model' not in self._stubs: + self._stubs['get_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/GetModel', + request_serializer=model_service.GetModelRequest.serialize, + response_deserializer=model.Model.deserialize, + ) + return self._stubs['get_model'] + + @property + def list_models(self) -> Callable[ + [model_service.ListModelsRequest], + model_service.ListModelsResponse]: + r"""Return a callable for the list models method over gRPC. + + Lists Models in a Location. + + Returns: + Callable[[~.ListModelsRequest], + ~.ListModelsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_models' not in self._stubs: + self._stubs['list_models'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/ListModels', + request_serializer=model_service.ListModelsRequest.serialize, + response_deserializer=model_service.ListModelsResponse.deserialize, + ) + return self._stubs['list_models'] + + @property + def update_model(self) -> Callable[ + [model_service.UpdateModelRequest], + gca_model.Model]: + r"""Return a callable for the update model method over gRPC. + + Updates a Model. + + Returns: + Callable[[~.UpdateModelRequest], + ~.Model]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
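+        # (Stubs are created lazily on first property access and cached in
+        # ``self._stubs``, so later calls reuse the same stub.)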
+ if 'update_model' not in self._stubs: + self._stubs['update_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/UpdateModel', + request_serializer=model_service.UpdateModelRequest.serialize, + response_deserializer=gca_model.Model.deserialize, + ) + return self._stubs['update_model'] + + @property + def delete_model(self) -> Callable[ + [model_service.DeleteModelRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete model method over gRPC. + + Deletes a Model. + Note: Model can only be deleted if there are no + DeployedModels created from it. + + Returns: + Callable[[~.DeleteModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_model' not in self._stubs: + self._stubs['delete_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/DeleteModel', + request_serializer=model_service.DeleteModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_model'] + + @property + def export_model(self) -> Callable[ + [model_service.ExportModelRequest], + operations_pb2.Operation]: + r"""Return a callable for the export model method over gRPC. + + Exports a trained, exportable, Model to a location specified by + the user. A Model is considered to be exportable if it has at + least one [supported export + format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats]. + + Returns: + Callable[[~.ExportModelRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'export_model' not in self._stubs: + self._stubs['export_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/ExportModel', + request_serializer=model_service.ExportModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['export_model'] + + @property + def get_model_evaluation(self) -> Callable[ + [model_service.GetModelEvaluationRequest], + model_evaluation.ModelEvaluation]: + r"""Return a callable for the get model evaluation method over gRPC. + + Gets a ModelEvaluation. + + Returns: + Callable[[~.GetModelEvaluationRequest], + ~.ModelEvaluation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_model_evaluation' not in self._stubs: + self._stubs['get_model_evaluation'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/GetModelEvaluation', + request_serializer=model_service.GetModelEvaluationRequest.serialize, + response_deserializer=model_evaluation.ModelEvaluation.deserialize, + ) + return self._stubs['get_model_evaluation'] + + @property + def list_model_evaluations(self) -> Callable[ + [model_service.ListModelEvaluationsRequest], + model_service.ListModelEvaluationsResponse]: + r"""Return a callable for the list model evaluations method over gRPC. + + Lists ModelEvaluations in a Model. + + Returns: + Callable[[~.ListModelEvaluationsRequest], + ~.ListModelEvaluationsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_model_evaluations' not in self._stubs: + self._stubs['list_model_evaluations'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/ListModelEvaluations', + request_serializer=model_service.ListModelEvaluationsRequest.serialize, + response_deserializer=model_service.ListModelEvaluationsResponse.deserialize, + ) + return self._stubs['list_model_evaluations'] + + @property + def get_model_evaluation_slice(self) -> Callable[ + [model_service.GetModelEvaluationSliceRequest], + model_evaluation_slice.ModelEvaluationSlice]: + r"""Return a callable for the get model evaluation slice method over gRPC. + + Gets a ModelEvaluationSlice. + + Returns: + Callable[[~.GetModelEvaluationSliceRequest], + ~.ModelEvaluationSlice]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model_evaluation_slice' not in self._stubs: + self._stubs['get_model_evaluation_slice'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/GetModelEvaluationSlice', + request_serializer=model_service.GetModelEvaluationSliceRequest.serialize, + response_deserializer=model_evaluation_slice.ModelEvaluationSlice.deserialize, + ) + return self._stubs['get_model_evaluation_slice'] + + @property + def list_model_evaluation_slices(self) -> Callable[ + [model_service.ListModelEvaluationSlicesRequest], + model_service.ListModelEvaluationSlicesResponse]: + r"""Return a callable for the list model evaluation slices method over gRPC. + + Lists ModelEvaluationSlices in a ModelEvaluation. + + Returns: + Callable[[~.ListModelEvaluationSlicesRequest], + ~.ListModelEvaluationSlicesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_model_evaluation_slices' not in self._stubs: + self._stubs['list_model_evaluation_slices'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/ListModelEvaluationSlices', + request_serializer=model_service.ListModelEvaluationSlicesRequest.serialize, + response_deserializer=model_service.ListModelEvaluationSlicesResponse.deserialize, + ) + return self._stubs['list_model_evaluation_slices'] + + +__all__ = ( + 'ModelServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..0fdae7300c --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py @@ -0,0 +1,518 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +import packaging.version + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import model +from google.cloud.aiplatform_v1beta1.types import model as gca_model +from google.cloud.aiplatform_v1beta1.types import model_evaluation +from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice +from google.cloud.aiplatform_v1beta1.types import model_service +from google.longrunning import operations_pb2 # type: ignore +from .base import ModelServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import ModelServiceGrpcTransport + + +class ModelServiceGrpcAsyncIOTransport(ModelServiceTransport): + """gRPC AsyncIO backend transport for ModelService. + + A service for managing Vertex AI's machine learning Models. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. 
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    def __init__(self, *,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            channel: aio.Channel = None,
+            api_mtls_endpoint: str = None,
+            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+            ssl_channel_credentials: grpc.ChannelCredentials = None,
+            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+            quota_project_id=None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether a self-signed JWT should
+                be used for service account credentials.
+
+        Raises:
+            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        self._grpc_channel = None
+        self._ssl_channel_credentials = ssl_channel_credentials
+        self._stubs: Dict[str, Callable] = {}
+        self._operations_client = None
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Ignore credentials if a channel was passed.
+            credentials = False
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+        else:
+            if api_mtls_endpoint:
+                host = api_mtls_endpoint
+
+                # Create SSL credentials with client_cert_source or application
+                # default SSL credentials.
+                if client_cert_source:
+                    cert, key = client_cert_source()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+                else:
+                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+            else:
+                if client_cert_source_for_mtls and not ssl_channel_credentials:
+                    cert, key = client_cert_source_for_mtls()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+
+        # The base transport sets the host, credentials and scopes
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+        )
+
+        if not self._grpc_channel:
+            self._grpc_channel = type(self).create_channel(
+                self._host,
+                credentials=self._credentials,
+                credentials_file=credentials_file,
+                scopes=self._scopes,
+                ssl_credentials=self._ssl_channel_credentials,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        # Wrap messages. This must be done after self._grpc_channel exists.
+        self._prep_wrapped_messages(client_info)
+
+    @property
+    def grpc_channel(self) -> aio.Channel:
+        """Create the channel designed to connect to this service.
+
+        This property caches on the instance; repeated calls return
+        the same channel.
+        """
+        # Return the channel from cache.
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsAsyncClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Sanity check: Only create a new client if we do not already have one.
+        if self._operations_client is None:
+            self._operations_client = operations_v1.OperationsAsyncClient(
+                self.grpc_channel
+            )
+
+        # Return the client from cache.
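+        # This is the client the service client uses to poll the
+        # long-running operations returned by RPCs such as upload_model,
+        # delete_model, and export_model.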
+ return self._operations_client + + @property + def upload_model(self) -> Callable[ + [model_service.UploadModelRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the upload model method over gRPC. + + Uploads a Model artifact into Vertex AI. + + Returns: + Callable[[~.UploadModelRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'upload_model' not in self._stubs: + self._stubs['upload_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/UploadModel', + request_serializer=model_service.UploadModelRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['upload_model'] + + @property + def get_model(self) -> Callable[ + [model_service.GetModelRequest], + Awaitable[model.Model]]: + r"""Return a callable for the get model method over gRPC. + + Gets a Model. + + Returns: + Callable[[~.GetModelRequest], + Awaitable[~.Model]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model' not in self._stubs: + self._stubs['get_model'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/GetModel', + request_serializer=model_service.GetModelRequest.serialize, + response_deserializer=model.Model.deserialize, + ) + return self._stubs['get_model'] + + @property + def list_models(self) -> Callable[ + [model_service.ListModelsRequest], + Awaitable[model_service.ListModelsResponse]]: + r"""Return a callable for the list models method over gRPC. + + Lists Models in a Location. + + Returns: + Callable[[~.ListModelsRequest], + Awaitable[~.ListModelsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_models' not in self._stubs: + self._stubs['list_models'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/ListModels', + request_serializer=model_service.ListModelsRequest.serialize, + response_deserializer=model_service.ListModelsResponse.deserialize, + ) + return self._stubs['list_models'] + + @property + def update_model(self) -> Callable[ + [model_service.UpdateModelRequest], + Awaitable[gca_model.Model]]: + r"""Return a callable for the update model method over gRPC. + + Updates a Model. + + Returns: + Callable[[~.UpdateModelRequest], + Awaitable[~.Model]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
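+        # On this AsyncIO transport the stub returns an awaitable; a caller
+        # would write, e.g. (illustrative only):
+        #   updated = await transport.update_model(request)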
+        if 'update_model' not in self._stubs:
+            self._stubs['update_model'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.ModelService/UpdateModel',
+                request_serializer=model_service.UpdateModelRequest.serialize,
+                response_deserializer=gca_model.Model.deserialize,
+            )
+        return self._stubs['update_model']
+
+    @property
+    def delete_model(self) -> Callable[
+            [model_service.DeleteModelRequest],
+            Awaitable[operations_pb2.Operation]]:
+        r"""Return a callable for the delete model method over gRPC.
+
+        Deletes a Model.
+        Note: a Model can only be deleted if there are no
+        DeployedModels created from it.
+
+        Returns:
+            Callable[[~.DeleteModelRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'delete_model' not in self._stubs:
+            self._stubs['delete_model'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.ModelService/DeleteModel',
+                request_serializer=model_service.DeleteModelRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['delete_model']
+
+    @property
+    def export_model(self) -> Callable[
+            [model_service.ExportModelRequest],
+            Awaitable[operations_pb2.Operation]]:
+        r"""Return a callable for the export model method over gRPC.
+
+        Exports a trained, exportable Model to a location specified by
+        the user. A Model is considered to be exportable if it has at
+        least one [supported export
+        format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats].
+
+        Returns:
+            Callable[[~.ExportModelRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'export_model' not in self._stubs:
+            self._stubs['export_model'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.ModelService/ExportModel',
+                request_serializer=model_service.ExportModelRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['export_model']
+
+    @property
+    def get_model_evaluation(self) -> Callable[
+            [model_service.GetModelEvaluationRequest],
+            Awaitable[model_evaluation.ModelEvaluation]]:
+        r"""Return a callable for the get model evaluation method over gRPC.
+
+        Gets a ModelEvaluation.
+
+        Returns:
+            Callable[[~.GetModelEvaluationRequest],
+                    Awaitable[~.ModelEvaluation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if 'get_model_evaluation' not in self._stubs: + self._stubs['get_model_evaluation'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/GetModelEvaluation', + request_serializer=model_service.GetModelEvaluationRequest.serialize, + response_deserializer=model_evaluation.ModelEvaluation.deserialize, + ) + return self._stubs['get_model_evaluation'] + + @property + def list_model_evaluations(self) -> Callable[ + [model_service.ListModelEvaluationsRequest], + Awaitable[model_service.ListModelEvaluationsResponse]]: + r"""Return a callable for the list model evaluations method over gRPC. + + Lists ModelEvaluations in a Model. + + Returns: + Callable[[~.ListModelEvaluationsRequest], + Awaitable[~.ListModelEvaluationsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_model_evaluations' not in self._stubs: + self._stubs['list_model_evaluations'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/ListModelEvaluations', + request_serializer=model_service.ListModelEvaluationsRequest.serialize, + response_deserializer=model_service.ListModelEvaluationsResponse.deserialize, + ) + return self._stubs['list_model_evaluations'] + + @property + def get_model_evaluation_slice(self) -> Callable[ + [model_service.GetModelEvaluationSliceRequest], + Awaitable[model_evaluation_slice.ModelEvaluationSlice]]: + r"""Return a callable for the get model evaluation slice method over gRPC. + + Gets a ModelEvaluationSlice. + + Returns: + Callable[[~.GetModelEvaluationSliceRequest], + Awaitable[~.ModelEvaluationSlice]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_model_evaluation_slice' not in self._stubs: + self._stubs['get_model_evaluation_slice'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/GetModelEvaluationSlice', + request_serializer=model_service.GetModelEvaluationSliceRequest.serialize, + response_deserializer=model_evaluation_slice.ModelEvaluationSlice.deserialize, + ) + return self._stubs['get_model_evaluation_slice'] + + @property + def list_model_evaluation_slices(self) -> Callable[ + [model_service.ListModelEvaluationSlicesRequest], + Awaitable[model_service.ListModelEvaluationSlicesResponse]]: + r"""Return a callable for the list model evaluation slices method over gRPC. + + Lists ModelEvaluationSlices in a ModelEvaluation. + + Returns: + Callable[[~.ListModelEvaluationSlicesRequest], + Awaitable[~.ListModelEvaluationSlicesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_model_evaluation_slices' not in self._stubs: + self._stubs['list_model_evaluation_slices'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.ModelService/ListModelEvaluationSlices', + request_serializer=model_service.ListModelEvaluationSlicesRequest.serialize, + response_deserializer=model_service.ListModelEvaluationSlicesResponse.deserialize, + ) + return self._stubs['list_model_evaluation_slices'] + + +__all__ = ( + 'ModelServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/__init__.py new file mode 100644 index 0000000000..539616023d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import PipelineServiceClient +from .async_client import PipelineServiceAsyncClient + +__all__ = ( + 'PipelineServiceClient', + 'PipelineServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py new file mode 100644 index 0000000000..98a7435d45 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py @@ -0,0 +1,1069 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.pipeline_service import pagers +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import model +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import pipeline_job +from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job +from google.cloud.aiplatform_v1beta1.types import pipeline_service +from google.cloud.aiplatform_v1beta1.types import pipeline_state +from google.cloud.aiplatform_v1beta1.types import training_pipeline +from google.cloud.aiplatform_v1beta1.types import training_pipeline as gca_training_pipeline +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from .transports.base import PipelineServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import PipelineServiceGrpcAsyncIOTransport +from .client import PipelineServiceClient + + +class PipelineServiceAsyncClient: + """A service for creating and managing Vertex AI's pipelines. This + includes both ``TrainingPipeline`` resources (used for AutoML and + custom training) and ``PipelineJob`` resources (used for Vertex + Pipelines). 
+ """ + + _client: PipelineServiceClient + + DEFAULT_ENDPOINT = PipelineServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = PipelineServiceClient.DEFAULT_MTLS_ENDPOINT + + artifact_path = staticmethod(PipelineServiceClient.artifact_path) + parse_artifact_path = staticmethod(PipelineServiceClient.parse_artifact_path) + context_path = staticmethod(PipelineServiceClient.context_path) + parse_context_path = staticmethod(PipelineServiceClient.parse_context_path) + custom_job_path = staticmethod(PipelineServiceClient.custom_job_path) + parse_custom_job_path = staticmethod(PipelineServiceClient.parse_custom_job_path) + endpoint_path = staticmethod(PipelineServiceClient.endpoint_path) + parse_endpoint_path = staticmethod(PipelineServiceClient.parse_endpoint_path) + execution_path = staticmethod(PipelineServiceClient.execution_path) + parse_execution_path = staticmethod(PipelineServiceClient.parse_execution_path) + model_path = staticmethod(PipelineServiceClient.model_path) + parse_model_path = staticmethod(PipelineServiceClient.parse_model_path) + network_path = staticmethod(PipelineServiceClient.network_path) + parse_network_path = staticmethod(PipelineServiceClient.parse_network_path) + pipeline_job_path = staticmethod(PipelineServiceClient.pipeline_job_path) + parse_pipeline_job_path = staticmethod(PipelineServiceClient.parse_pipeline_job_path) + training_pipeline_path = staticmethod(PipelineServiceClient.training_pipeline_path) + parse_training_pipeline_path = staticmethod(PipelineServiceClient.parse_training_pipeline_path) + common_billing_account_path = staticmethod(PipelineServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(PipelineServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(PipelineServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(PipelineServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(PipelineServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(PipelineServiceClient.parse_common_organization_path) + common_project_path = staticmethod(PipelineServiceClient.common_project_path) + parse_common_project_path = staticmethod(PipelineServiceClient.parse_common_project_path) + common_location_path = staticmethod(PipelineServiceClient.common_location_path) + parse_common_location_path = staticmethod(PipelineServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PipelineServiceAsyncClient: The constructed client. + """ + return PipelineServiceClient.from_service_account_info.__func__(PipelineServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PipelineServiceAsyncClient: The constructed client. 
+ """ + return PipelineServiceClient.from_service_account_file.__func__(PipelineServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> PipelineServiceTransport: + """Returns the transport used by the client instance. + + Returns: + PipelineServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(PipelineServiceClient).get_transport_class, type(PipelineServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, PipelineServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the pipeline service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.PipelineServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = PipelineServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_training_pipeline(self, + request: pipeline_service.CreateTrainingPipelineRequest = None, + *, + parent: str = None, + training_pipeline: gca_training_pipeline.TrainingPipeline = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_training_pipeline.TrainingPipeline: + r"""Creates a TrainingPipeline. A created + TrainingPipeline right away will be attempted to be run. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateTrainingPipelineRequest`): + The request object. Request message for + [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline]. + parent (:class:`str`): + Required. The resource name of the Location to create + the TrainingPipeline in. 
Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + training_pipeline (:class:`google.cloud.aiplatform_v1beta1.types.TrainingPipeline`): + Required. The TrainingPipeline to + create. + + This corresponds to the ``training_pipeline`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TrainingPipeline: + The TrainingPipeline orchestrates tasks associated with training a Model. It + always executes the training task, and optionally may + also export data from Vertex AI's Dataset which + becomes the training input, + [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] + the Model to Vertex AI, and evaluate the Model. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, training_pipeline]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.CreateTrainingPipelineRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if training_pipeline is not None: + request.training_pipeline = training_pipeline + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_training_pipeline, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_training_pipeline(self, + request: pipeline_service.GetTrainingPipelineRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> training_pipeline.TrainingPipeline: + r"""Gets a TrainingPipeline. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetTrainingPipelineRequest`): + The request object. Request message for + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline]. + name (:class:`str`): + Required. The name of the TrainingPipeline resource. + Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TrainingPipeline: + The TrainingPipeline orchestrates tasks associated with training a Model. It + always executes the training task, and optionally may + also export data from Vertex AI's Dataset which + becomes the training input, + [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] + the Model to Vertex AI, and evaluate the Model. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.GetTrainingPipelineRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_training_pipeline, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_training_pipelines(self, + request: pipeline_service.ListTrainingPipelinesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTrainingPipelinesAsyncPager: + r"""Lists TrainingPipelines in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesRequest`): + The request object. Request message for + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines]. + parent (:class:`str`): + Required. The resource name of the Location to list the + TrainingPipelines from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListTrainingPipelinesAsyncPager: + Response message for + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
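+        # Illustrative usage with a hypothetical resource name; the returned
+        # pager resolves additional pages transparently:
+        #   pager = await client.list_training_pipelines(
+        #       parent="projects/my-project/locations/us-central1")
+        #   async for training_pipeline in pager:
+        #       ...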
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.ListTrainingPipelinesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_training_pipelines, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTrainingPipelinesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_training_pipeline(self, + request: pipeline_service.DeleteTrainingPipelineRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a TrainingPipeline. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteTrainingPipelineRequest`): + The request object. Request message for + [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline]. + name (:class:`str`): + Required. The name of the TrainingPipeline resource to + be deleted. Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
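+        # Illustrative: the AsyncOperation returned below can be awaited to
+        # completion, e.g.
+        #   op = await client.delete_training_pipeline(name=pipeline_name)
+        #   await op.result()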
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.DeleteTrainingPipelineRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_training_pipeline, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def cancel_training_pipeline(self, + request: pipeline_service.CancelTrainingPipelineRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a TrainingPipeline. Starts asynchronous cancellation on + the TrainingPipeline. The server makes a best effort to cancel + the pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the TrainingPipeline is not deleted; + instead it becomes a pipeline with a + [TrainingPipeline.error][google.cloud.aiplatform.v1beta1.TrainingPipeline.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state] + is set to ``CANCELLED``. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CancelTrainingPipelineRequest`): + The request object. Request message for + [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline]. + name (:class:`str`): + Required. The name of the TrainingPipeline to cancel. + Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
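+        # Note: this coroutine returns None; cancellation is best-effort, and
+        # the pipeline's final state must be checked separately, as described
+        # in the docstring above.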
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.CancelTrainingPipelineRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_training_pipeline, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def create_pipeline_job(self, + request: pipeline_service.CreatePipelineJobRequest = None, + *, + parent: str = None, + pipeline_job: gca_pipeline_job.PipelineJob = None, + pipeline_job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_pipeline_job.PipelineJob: + r"""Creates a PipelineJob. A PipelineJob will run + immediately when created. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreatePipelineJobRequest`): + The request object. Request message for + [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CreatePipelineJob]. + parent (:class:`str`): + Required. The resource name of the Location to create + the PipelineJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + pipeline_job (:class:`google.cloud.aiplatform_v1beta1.types.PipelineJob`): + Required. The PipelineJob to create. + This corresponds to the ``pipeline_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + pipeline_job_id (:class:`str`): + The ID to use for the PipelineJob, which will become the + final component of the PipelineJob name. If not + provided, an ID will be automatically generated. + + This value should be less than 128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``pipeline_job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.PipelineJob: + An instance of a machine learning + PipelineJob. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent, pipeline_job, pipeline_job_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.CreatePipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if pipeline_job is not None: + request.pipeline_job = pipeline_job + if pipeline_job_id is not None: + request.pipeline_job_id = pipeline_job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_pipeline_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_pipeline_job(self, + request: pipeline_service.GetPipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pipeline_job.PipelineJob: + r"""Gets a PipelineJob. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetPipelineJobRequest`): + The request object. Request message for + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob]. + name (:class:`str`): + Required. The name of the PipelineJob resource. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.PipelineJob: + An instance of a machine learning + PipelineJob. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.GetPipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_pipeline_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_pipeline_jobs(self, + request: pipeline_service.ListPipelineJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPipelineJobsAsyncPager: + r"""Lists PipelineJobs in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest`): + The request object. Request message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs]. + parent (:class:`str`): + Required. The resource name of the Location to list the + PipelineJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListPipelineJobsAsyncPager: + Response message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.ListPipelineJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_pipeline_jobs, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListPipelineJobsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_pipeline_job(self, + request: pipeline_service.DeletePipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a PipelineJob. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeletePipelineJobRequest`): + The request object. 
Request message for + [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.DeletePipelineJob]. + name (:class:`str`): + Required. The name of the PipelineJob resource to be + deleted. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.DeletePipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_pipeline_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def cancel_pipeline_job(self, + request: pipeline_service.CancelPipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a PipelineJob. Starts asynchronous cancellation on the + PipelineJob. The server makes a best effort to cancel the + pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. 
On + successful cancellation, the PipelineJob is not deleted; instead + it becomes a pipeline with a + [PipelineJob.error][google.cloud.aiplatform.v1beta1.PipelineJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [PipelineJob.state][google.cloud.aiplatform.v1beta1.PipelineJob.state] + is set to ``CANCELLED``. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CancelPipelineJobRequest`): + The request object. Request message for + [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CancelPipelineJob]. + name (:class:`str`): + Required. The name of the PipelineJob to cancel. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = pipeline_service.CancelPipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_pipeline_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "PipelineServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py new file mode 100644 index 0000000000..df8276c2e8 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py @@ -0,0 +1,1328 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.pipeline_service import pagers +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import model +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import pipeline_job +from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job +from google.cloud.aiplatform_v1beta1.types import pipeline_service +from google.cloud.aiplatform_v1beta1.types import pipeline_state +from google.cloud.aiplatform_v1beta1.types import training_pipeline +from google.cloud.aiplatform_v1beta1.types import training_pipeline as gca_training_pipeline +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from .transports.base import PipelineServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import PipelineServiceGrpcTransport +from .transports.grpc_asyncio import PipelineServiceGrpcAsyncIOTransport + + +class PipelineServiceClientMeta(type): + """Metaclass for the PipelineService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[PipelineServiceTransport]] + _transport_registry["grpc"] = PipelineServiceGrpcTransport + _transport_registry["grpc_asyncio"] = PipelineServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[PipelineServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class PipelineServiceClient(metaclass=PipelineServiceClientMeta): + """A service for creating and managing Vertex AI's pipelines. This + includes both ``TrainingPipeline`` resources (used for AutoML and + custom training) and ``PipelineJob`` resources (used for Vertex + Pipelines). 
+ """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PipelineServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PipelineServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> PipelineServiceTransport: + """Returns the transport used by the client instance. + + Returns: + PipelineServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def artifact_path(project: str,location: str,metadata_store: str,artifact: str,) -> str: + """Returns a fully-qualified artifact string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(project=project, location=location, metadata_store=metadata_store, artifact=artifact, ) + + @staticmethod + def parse_artifact_path(path: str) -> Dict[str,str]: + """Parses a artifact path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/artifacts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def context_path(project: str,location: str,metadata_store: str,context: str,) -> str: + """Returns a fully-qualified context string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(project=project, location=location, metadata_store=metadata_store, context=context, ) + + @staticmethod + def parse_context_path(path: str) -> Dict[str,str]: + """Parses a context path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/contexts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def custom_job_path(project: str,location: str,custom_job: str,) -> str: + """Returns a fully-qualified custom_job string.""" + return "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) + + @staticmethod + def parse_custom_job_path(path: str) -> Dict[str,str]: + """Parses a custom_job path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/customJobs/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def endpoint_path(project: str,location: str,endpoint: str,) -> str: + """Returns a fully-qualified endpoint string.""" + return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + + @staticmethod + def parse_endpoint_path(path: str) -> Dict[str,str]: + """Parses a endpoint path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def execution_path(project: str,location: str,metadata_store: str,execution: str,) -> str: + """Returns a fully-qualified execution string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(project=project, location=location, metadata_store=metadata_store, execution=execution, ) + + @staticmethod + def parse_execution_path(path: str) -> Dict[str,str]: + """Parses a execution path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/executions/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def model_path(project: str,location: str,model: str,) -> str: + """Returns a fully-qualified model string.""" + return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + + @staticmethod + def parse_model_path(path: str) -> Dict[str,str]: + """Parses a model path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def network_path(project: str,network: str,) -> 
+
+    @staticmethod
+    def network_path(project: str,network: str,) -> str:
+        """Returns a fully-qualified network string."""
+        return "projects/{project}/global/networks/{network}".format(project=project, network=network, )
+
+    @staticmethod
+    def parse_network_path(path: str) -> Dict[str,str]:
+        """Parses a network path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/global/networks/(?P<network>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def pipeline_job_path(project: str,location: str,pipeline_job: str,) -> str:
+        """Returns a fully-qualified pipeline_job string."""
+        return "projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}".format(project=project, location=location, pipeline_job=pipeline_job, )
+
+    @staticmethod
+    def parse_pipeline_job_path(path: str) -> Dict[str,str]:
+        """Parses a pipeline_job path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/pipelineJobs/(?P<pipeline_job>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def training_pipeline_path(project: str,location: str,training_pipeline: str,) -> str:
+        """Returns a fully-qualified training_pipeline string."""
+        return "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, )
+
+    @staticmethod
+    def parse_training_pipeline_path(path: str) -> Dict[str,str]:
+        """Parses a training_pipeline path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/trainingPipelines/(?P<training_pipeline>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse an organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
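+
+    # Illustrative only (placeholder names): a resource name built with
+    # ``pipeline_job_path`` round-trips through ``parse_pipeline_job_path``:
+    #
+    #     PipelineServiceClient.pipeline_job_path(
+    #         "example-project", "us-central1", "example-job")
+    #     # -> "projects/example-project/locations/us-central1/pipelineJobs/example-job"
+    #     PipelineServiceClient.parse_pipeline_job_path(
+    #         "projects/example-project/locations/us-central1/pipelineJobs/example-job")
+    #     # -> {"project": "example-project", "location": "us-central1",
+    #     #     "pipeline_job": "example-job"}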
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, PipelineServiceTransport, None] = None,
+            client_options: Optional[client_options_lib.ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the pipeline service client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, PipelineServiceTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+                set, no client certificate will be used.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+        """
+        if isinstance(client_options, dict):
+            client_options = client_options_lib.from_dict(client_options)
+        if client_options is None:
+            client_options = client_options_lib.ClientOptions()
+
+        # Create SSL credentials for mutual TLS if needed.
+        use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
+
+        client_cert_source_func = None
+        is_mtls = False
+        if use_client_cert:
+            if client_options.client_cert_source:
+                is_mtls = True
+                client_cert_source_func = client_options.client_cert_source
+            else:
+                is_mtls = mtls.has_default_client_cert_source()
+                if is_mtls:
+                    client_cert_source_func = mtls.default_client_cert_source()
+                else:
+                    client_cert_source_func = None
+
+        # Figure out which api endpoint to use.
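+        # Resolution order, summarizing the branches below: an explicit
+        # ``client_options.api_endpoint`` always wins; otherwise
+        # GOOGLE_API_USE_MTLS_ENDPOINT picks "never" (regular endpoint),
+        # "always" (mTLS endpoint), or "auto" (mTLS only when a client
+        # certificate was found above).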
+        if client_options.api_endpoint is not None:
+            api_endpoint = client_options.api_endpoint
+        else:
+            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+            if use_mtls_env == "never":
+                api_endpoint = self.DEFAULT_ENDPOINT
+            elif use_mtls_env == "always":
+                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
+            elif use_mtls_env == "auto":
+                if is_mtls:
+                    api_endpoint = self.DEFAULT_MTLS_ENDPOINT
+                else:
+                    api_endpoint = self.DEFAULT_ENDPOINT
+            else:
+                raise MutualTLSChannelError(
+                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
+                    "values: never, auto, always"
+                )
+
+        # Save or instantiate the transport.
+        # Ordinarily, we provide the transport, but allowing a custom transport
+        # instance provides an extensibility point for unusual situations.
+        if isinstance(transport, PipelineServiceTransport):
+            # transport is a PipelineServiceTransport instance.
+            if credentials or client_options.credentials_file:
+                raise ValueError("When providing a transport instance, "
+                                 "provide its credentials directly.")
+            if client_options.scopes:
+                raise ValueError(
+                    "When providing a transport instance, provide its scopes "
+                    "directly."
+                )
+            self._transport = transport
+        else:
+            Transport = type(self).get_transport_class(transport)
+            self._transport = Transport(
+                credentials=credentials,
+                credentials_file=client_options.credentials_file,
+                host=api_endpoint,
+                scopes=client_options.scopes,
+                client_cert_source_for_mtls=client_cert_source_func,
+                quota_project_id=client_options.quota_project_id,
+                client_info=client_info,
+            )
+
+    def create_training_pipeline(self,
+            request: pipeline_service.CreateTrainingPipelineRequest = None,
+            *,
+            parent: str = None,
+            training_pipeline: gca_training_pipeline.TrainingPipeline = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> gca_training_pipeline.TrainingPipeline:
+        r"""Creates a TrainingPipeline. The server attempts to run the
+        created TrainingPipeline right away.
+
+        Args:
+            request (google.cloud.aiplatform_v1beta1.types.CreateTrainingPipelineRequest):
+                The request object. Request message for
+                [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline].
+            parent (str):
+                Required. The resource name of the Location to create
+                the TrainingPipeline in. Format:
+                ``projects/{project}/locations/{location}``
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            training_pipeline (google.cloud.aiplatform_v1beta1.types.TrainingPipeline):
+                Required. The TrainingPipeline to
+                create.
+
+                This corresponds to the ``training_pipeline`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.types.TrainingPipeline:
+                The TrainingPipeline orchestrates tasks associated with training a Model. It
+                always executes the training task, and optionally may
+                also export data from Vertex AI's Dataset which
+                becomes the training input,
+                [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel]
+                the Model to Vertex AI, and evaluate the Model.
+
+        """
+        # Create or coerce a protobuf request object.
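+        # The flattened ``parent`` and ``training_pipeline`` arguments are a
+        # convenience alternative to a fully-formed ``request``; the check
+        # below enforces that callers pick one style, not both.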
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, training_pipeline]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.CreateTrainingPipelineRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.CreateTrainingPipelineRequest): + request = pipeline_service.CreateTrainingPipelineRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if training_pipeline is not None: + request.training_pipeline = training_pipeline + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_training_pipeline] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_training_pipeline(self, + request: pipeline_service.GetTrainingPipelineRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> training_pipeline.TrainingPipeline: + r"""Gets a TrainingPipeline. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetTrainingPipelineRequest): + The request object. Request message for + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline]. + name (str): + Required. The name of the TrainingPipeline resource. + Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TrainingPipeline: + The TrainingPipeline orchestrates tasks associated with training a Model. It + always executes the training task, and optionally may + also export data from Vertex AI's Dataset which + becomes the training input, + [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] + the Model to Vertex AI, and evaluate the Model. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.GetTrainingPipelineRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.GetTrainingPipelineRequest): + request = pipeline_service.GetTrainingPipelineRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_training_pipeline] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_training_pipelines(self, + request: pipeline_service.ListTrainingPipelinesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTrainingPipelinesPager: + r"""Lists TrainingPipelines in a Location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesRequest): + The request object. Request message for + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines]. + parent (str): + Required. The resource name of the Location to list the + TrainingPipelines from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListTrainingPipelinesPager: + Response message for + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.ListTrainingPipelinesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.ListTrainingPipelinesRequest): + request = pipeline_service.ListTrainingPipelinesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
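+        # Unlike the async client, which wraps the method on every call, the
+        # sync client reuses wrappers precomputed by the transport.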
+ rpc = self._transport._wrapped_methods[self._transport.list_training_pipelines] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListTrainingPipelinesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_training_pipeline(self, + request: pipeline_service.DeleteTrainingPipelineRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a TrainingPipeline. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteTrainingPipelineRequest): + The request object. Request message for + [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline]. + name (str): + Required. The name of the TrainingPipeline resource to + be deleted. Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.DeleteTrainingPipelineRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.DeleteTrainingPipelineRequest): + request = pipeline_service.DeleteTrainingPipelineRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_training_pipeline] + + # Certain fields should be provided within the metadata header; + # add these here. 
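+        # ``to_grpc_metadata`` serializes these pairs into the
+        # ``x-goog-request-params`` header, which the backend uses to route
+        # the request by resource location.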
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def cancel_training_pipeline(self, + request: pipeline_service.CancelTrainingPipelineRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a TrainingPipeline. Starts asynchronous cancellation on + the TrainingPipeline. The server makes a best effort to cancel + the pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the TrainingPipeline is not deleted; + instead it becomes a pipeline with a + [TrainingPipeline.error][google.cloud.aiplatform.v1beta1.TrainingPipeline.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state] + is set to ``CANCELLED``. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CancelTrainingPipelineRequest): + The request object. Request message for + [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline]. + name (str): + Required. The name of the TrainingPipeline to cancel. + Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.CancelTrainingPipelineRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.CancelTrainingPipelineRequest): + request = pipeline_service.CancelTrainingPipelineRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_training_pipeline] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def create_pipeline_job(self, + request: pipeline_service.CreatePipelineJobRequest = None, + *, + parent: str = None, + pipeline_job: gca_pipeline_job.PipelineJob = None, + pipeline_job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_pipeline_job.PipelineJob: + r"""Creates a PipelineJob. A PipelineJob will run + immediately when created. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreatePipelineJobRequest): + The request object. Request message for + [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CreatePipelineJob]. + parent (str): + Required. The resource name of the Location to create + the PipelineJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + pipeline_job (google.cloud.aiplatform_v1beta1.types.PipelineJob): + Required. The PipelineJob to create. + This corresponds to the ``pipeline_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + pipeline_job_id (str): + The ID to use for the PipelineJob, which will become the + final component of the PipelineJob name. If not + provided, an ID will be automatically generated. + + This value should be less than 128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``pipeline_job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.PipelineJob: + An instance of a machine learning + PipelineJob. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, pipeline_job, pipeline_job_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.CreatePipelineJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.CreatePipelineJobRequest): + request = pipeline_service.CreatePipelineJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if pipeline_job is not None: + request.pipeline_job = pipeline_job + if pipeline_job_id is not None: + request.pipeline_job_id = pipeline_job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_pipeline_job] + + # Certain fields should be provided within the metadata header; + # add these here. 
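+        # Only ``parent`` participates in routing; ``pipeline_job`` and
+        # ``pipeline_job_id`` travel in the request body itself.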
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_pipeline_job(self, + request: pipeline_service.GetPipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pipeline_job.PipelineJob: + r"""Gets a PipelineJob. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetPipelineJobRequest): + The request object. Request message for + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob]. + name (str): + Required. The name of the PipelineJob resource. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.PipelineJob: + An instance of a machine learning + PipelineJob. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.GetPipelineJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.GetPipelineJobRequest): + request = pipeline_service.GetPipelineJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_pipeline_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_pipeline_jobs(self, + request: pipeline_service.ListPipelineJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPipelineJobsPager: + r"""Lists PipelineJobs in a Location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest): + The request object. Request message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs]. + parent (str): + Required. The resource name of the Location to list the + PipelineJobs from. 
Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListPipelineJobsPager: + Response message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.ListPipelineJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.ListPipelineJobsRequest): + request = pipeline_service.ListPipelineJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_pipeline_jobs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPipelineJobsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_pipeline_job(self, + request: pipeline_service.DeletePipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a PipelineJob. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeletePipelineJobRequest): + The request object. Request message for + [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.DeletePipelineJob]. + name (str): + Required. The name of the PipelineJob resource to be + deleted. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.DeletePipelineJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.DeletePipelineJobRequest): + request = pipeline_service.DeletePipelineJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_pipeline_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def cancel_pipeline_job(self, + request: pipeline_service.CancelPipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a PipelineJob. Starts asynchronous cancellation on the + PipelineJob. The server makes a best effort to cancel the + pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the PipelineJob is not deleted; instead + it becomes a pipeline with a + [PipelineJob.error][google.cloud.aiplatform.v1beta1.PipelineJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [PipelineJob.state][google.cloud.aiplatform.v1beta1.PipelineJob.state] + is set to ``CANCELLED``. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CancelPipelineJobRequest): + The request object. Request message for + [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CancelPipelineJob]. + name (str): + Required. The name of the PipelineJob to cancel. 
Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.CancelPipelineJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.CancelPipelineJobRequest): + request = pipeline_service.CancelPipelineJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_pipeline_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "PipelineServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/pagers.py new file mode 100644 index 0000000000..b997210da0 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/pagers.py @@ -0,0 +1,264 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.cloud.aiplatform_v1beta1.types import pipeline_job +from google.cloud.aiplatform_v1beta1.types import pipeline_service +from google.cloud.aiplatform_v1beta1.types import training_pipeline + + +class ListTrainingPipelinesPager: + """A pager for iterating through ``list_training_pipelines`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``training_pipelines`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTrainingPipelines`` requests and continue to iterate + through the ``training_pipelines`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., pipeline_service.ListTrainingPipelinesResponse], + request: pipeline_service.ListTrainingPipelinesRequest, + response: pipeline_service.ListTrainingPipelinesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = pipeline_service.ListTrainingPipelinesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[pipeline_service.ListTrainingPipelinesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[training_pipeline.TrainingPipeline]: + for page in self.pages: + yield from page.training_pipelines + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTrainingPipelinesAsyncPager: + """A pager for iterating through ``list_training_pipelines`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``training_pipelines`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListTrainingPipelines`` requests and continue to iterate + through the ``training_pipelines`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[pipeline_service.ListTrainingPipelinesResponse]], + request: pipeline_service.ListTrainingPipelinesRequest, + response: pipeline_service.ListTrainingPipelinesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesRequest): + The initial request object. 
+ response (google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = pipeline_service.ListTrainingPipelinesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[pipeline_service.ListTrainingPipelinesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[training_pipeline.TrainingPipeline]: + async def async_generator(): + async for page in self.pages: + for response in page.training_pipelines: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPipelineJobsPager: + """A pager for iterating through ``list_pipeline_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListPipelineJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``pipeline_jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListPipelineJobs`` requests and continue to iterate + through the ``pipeline_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListPipelineJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., pipeline_service.ListPipelineJobsResponse], + request: pipeline_service.ListPipelineJobsRequest, + response: pipeline_service.ListPipelineJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListPipelineJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = pipeline_service.ListPipelineJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[pipeline_service.ListPipelineJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[pipeline_job.PipelineJob]: + for page in self.pages: + yield from page.pipeline_jobs + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPipelineJobsAsyncPager: + """A pager for iterating through ``list_pipeline_jobs`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListPipelineJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``pipeline_jobs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListPipelineJobs`` requests and continue to iterate + through the ``pipeline_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListPipelineJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[pipeline_service.ListPipelineJobsResponse]], + request: pipeline_service.ListPipelineJobsRequest, + response: pipeline_service.ListPipelineJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListPipelineJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = pipeline_service.ListPipelineJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[pipeline_service.ListPipelineJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[pipeline_job.PipelineJob]: + async def async_generator(): + async for page in self.pages: + for response in page.pipeline_jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/__init__.py new file mode 100644 index 0000000000..77051d8254 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import PipelineServiceTransport +from .grpc import PipelineServiceGrpcTransport +from .grpc_asyncio import PipelineServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. 
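+# The registry below maps each transport name to its class. A hypothetical
+# lookup (illustrative only, not part of the generated module):
+#
+#     transport_cls = _transport_registry['grpc']  # PipelineServiceGrpcTransport
+#     transport = transport_cls(host='aiplatform.googleapis.com')
+#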
+_transport_registry = OrderedDict() # type: Dict[str, Type[PipelineServiceTransport]] +_transport_registry['grpc'] = PipelineServiceGrpcTransport +_transport_registry['grpc_asyncio'] = PipelineServiceGrpcAsyncIOTransport + +__all__ = ( + 'PipelineServiceTransport', + 'PipelineServiceGrpcTransport', + 'PipelineServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py new file mode 100644 index 0000000000..a67aad8787 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py @@ -0,0 +1,306 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import pipeline_job +from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job +from google.cloud.aiplatform_v1beta1.types import pipeline_service +from google.cloud.aiplatform_v1beta1.types import training_pipeline +from google.cloud.aiplatform_v1beta1.types import training_pipeline as gca_training_pipeline +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class PipelineServiceTransport(abc.ABC): + """Abstract transport class for PipelineService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: 
gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            **kwargs,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): A list of scopes.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+        """
+        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+        if ':' not in host:
+            host += ':443'
+        self._host = host
+
+        scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
+
+        # Save the scopes.
+        self._scopes = scopes
+
+        # If no credentials are provided, then determine the appropriate
+        # defaults.
+        if credentials and credentials_file:
+            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
+
+        if credentials_file is not None:
+            credentials, _ = google.auth.load_credentials_from_file(
+                                credentials_file,
+                                **scopes_kwargs,
+                                quota_project_id=quota_project_id
+                            )
+
+        elif credentials is None:
+            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
+
+        # If the credentials is service account credentials, then always try to use self signed JWT.
+        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
+            credentials = credentials.with_always_use_jwt_access(True)
+
+        # Save the credentials.
+        self._credentials = credentials
+
+    # TODO(busunkim): This method is in the base transport
+    # to avoid duplicating code across the transport classes. It
+    # should be deleted once the minimum required version of google-auth is increased.
+
+    # TODO: Remove this function once google-auth >= 1.25.0 is required
+    @classmethod
+    def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
+        """Returns scopes kwargs to pass to google-auth methods, depending on the google-auth version."""
+
+        scopes_kwargs = {}
+
+        if _GOOGLE_AUTH_VERSION and (
+            packaging.version.parse(_GOOGLE_AUTH_VERSION)
+            >= packaging.version.parse("1.25.0")
+        ):
+            scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
+        else:
+            scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
+
+        return scopes_kwargs
+
+    def _prep_wrapped_messages(self, client_info):
+        # Precompute the wrapped methods.
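+        # Wrapping attaches each method's default timeout/retry policy and
+        # the client-info user-agent metadata. Roughly, each entry below is
+        # equivalent to the following sketch (illustrative only):
+        #
+        #     wrapped = gapic_v1.method.wrap_method(
+        #         self.get_training_pipeline,
+        #         default_timeout=5.0,
+        #         client_info=client_info,
+        #     )
+        #     wrapped(request)                # uses the 5.0s default timeout
+        #     wrapped(request, timeout=30.0)  # per-call override
+        #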
+ self._wrapped_methods = { + self.create_training_pipeline: gapic_v1.method.wrap_method( + self.create_training_pipeline, + default_timeout=5.0, + client_info=client_info, + ), + self.get_training_pipeline: gapic_v1.method.wrap_method( + self.get_training_pipeline, + default_timeout=5.0, + client_info=client_info, + ), + self.list_training_pipelines: gapic_v1.method.wrap_method( + self.list_training_pipelines, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_training_pipeline: gapic_v1.method.wrap_method( + self.delete_training_pipeline, + default_timeout=5.0, + client_info=client_info, + ), + self.cancel_training_pipeline: gapic_v1.method.wrap_method( + self.cancel_training_pipeline, + default_timeout=5.0, + client_info=client_info, + ), + self.create_pipeline_job: gapic_v1.method.wrap_method( + self.create_pipeline_job, + default_timeout=None, + client_info=client_info, + ), + self.get_pipeline_job: gapic_v1.method.wrap_method( + self.get_pipeline_job, + default_timeout=None, + client_info=client_info, + ), + self.list_pipeline_jobs: gapic_v1.method.wrap_method( + self.list_pipeline_jobs, + default_timeout=None, + client_info=client_info, + ), + self.delete_pipeline_job: gapic_v1.method.wrap_method( + self.delete_pipeline_job, + default_timeout=None, + client_info=client_info, + ), + self.cancel_pipeline_job: gapic_v1.method.wrap_method( + self.cancel_pipeline_job, + default_timeout=None, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_training_pipeline(self) -> Callable[ + [pipeline_service.CreateTrainingPipelineRequest], + Union[ + gca_training_pipeline.TrainingPipeline, + Awaitable[gca_training_pipeline.TrainingPipeline] + ]]: + raise NotImplementedError() + + @property + def get_training_pipeline(self) -> Callable[ + [pipeline_service.GetTrainingPipelineRequest], + Union[ + training_pipeline.TrainingPipeline, + Awaitable[training_pipeline.TrainingPipeline] + ]]: + raise NotImplementedError() + + @property + def list_training_pipelines(self) -> Callable[ + [pipeline_service.ListTrainingPipelinesRequest], + Union[ + pipeline_service.ListTrainingPipelinesResponse, + Awaitable[pipeline_service.ListTrainingPipelinesResponse] + ]]: + raise NotImplementedError() + + @property + def delete_training_pipeline(self) -> Callable[ + [pipeline_service.DeleteTrainingPipelineRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def cancel_training_pipeline(self) -> Callable[ + [pipeline_service.CancelTrainingPipelineRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def create_pipeline_job(self) -> Callable[ + [pipeline_service.CreatePipelineJobRequest], + Union[ + gca_pipeline_job.PipelineJob, + Awaitable[gca_pipeline_job.PipelineJob] + ]]: + raise NotImplementedError() + + @property + def get_pipeline_job(self) -> Callable[ + [pipeline_service.GetPipelineJobRequest], + Union[ + pipeline_job.PipelineJob, + Awaitable[pipeline_job.PipelineJob] + ]]: + raise NotImplementedError() + + @property + def list_pipeline_jobs(self) -> Callable[ + [pipeline_service.ListPipelineJobsRequest], + Union[ + pipeline_service.ListPipelineJobsResponse, + Awaitable[pipeline_service.ListPipelineJobsResponse] + ]]: + raise NotImplementedError() + + @property + 
def delete_pipeline_job(self) -> Callable[ + [pipeline_service.DeletePipelineJobRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def cancel_pipeline_job(self) -> Callable[ + [pipeline_service.CancelPipelineJobRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'PipelineServiceTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py new file mode 100644 index 0000000000..22b2faf143 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py @@ -0,0 +1,539 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import pipeline_job +from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job +from google.cloud.aiplatform_v1beta1.types import pipeline_service +from google.cloud.aiplatform_v1beta1.types import training_pipeline +from google.cloud.aiplatform_v1beta1.types import training_pipeline as gca_training_pipeline +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import PipelineServiceTransport, DEFAULT_CLIENT_INFO + + +class PipelineServiceGrpcTransport(PipelineServiceTransport): + """gRPC backend transport for PipelineService. + + A service for creating and managing Vertex AI's pipelines. This + includes both ``TrainingPipeline`` resources (used for AutoML and + custom training) and ``PipelineJob`` resources (used for Vertex + Pipelines). + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+
+    """
+    _stubs: Dict[str, Callable]
+
+    def __init__(self, *,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: str = None,
+            scopes: Sequence[str] = None,
+            channel: grpc.Channel = None,
+            api_mtls_endpoint: str = None,
+            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+            ssl_channel_credentials: grpc.ChannelCredentials = None,
+            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+            quota_project_id: Optional[str] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure a mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+
+        Raises:
+          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+              creation failed for any reason.
+          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+              and ``credentials_file`` are passed.
+        """
+        self._grpc_channel = None
+        self._ssl_channel_credentials = ssl_channel_credentials
+        self._stubs: Dict[str, Callable] = {}
+        self._operations_client = None
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Ignore credentials if a channel was passed.
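+            # A caller-supplied channel is assumed to carry its own
+            # credentials. An illustrative construction of such a channel
+            # (not part of the generated code):
+            #
+            #     channel = grpc.secure_channel(
+            #         'aiplatform.googleapis.com:443',
+            #         grpc.ssl_channel_credentials())
+            #     transport = PipelineServiceGrpcTransport(channel=channel)
+            #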
+            credentials = False
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+
+        else:
+            if api_mtls_endpoint:
+                host = api_mtls_endpoint
+
+                # Create SSL credentials with client_cert_source or application
+                # default SSL credentials.
+                if client_cert_source:
+                    cert, key = client_cert_source()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+                else:
+                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+            else:
+                if client_cert_source_for_mtls and not ssl_channel_credentials:
+                    cert, key = client_cert_source_for_mtls()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+
+        # The base transport sets the host, credentials and scopes
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+        )
+
+        if not self._grpc_channel:
+            self._grpc_channel = type(self).create_channel(
+                self._host,
+                credentials=self._credentials,
+                credentials_file=credentials_file,
+                scopes=self._scopes,
+                ssl_credentials=self._ssl_channel_credentials,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        # Wrap messages. This must be done after self._grpc_channel exists
+        self._prep_wrapped_messages(client_info)
+
+    @classmethod
+    def create_channel(cls,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: str = None,
+            scopes: Optional[Sequence[str]] = None,
+            quota_project_id: Optional[str] = None,
+            **kwargs) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service.
+        """
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Sanity check: Only create a new client if we do not already have one.
+        if self._operations_client is None:
+            self._operations_client = operations_v1.OperationsClient(
+                self.grpc_channel
+            )
+
+        # Return the client from cache.
+        return self._operations_client
+
+    @property
+    def create_training_pipeline(self) -> Callable[
+            [pipeline_service.CreateTrainingPipelineRequest],
+            gca_training_pipeline.TrainingPipeline]:
+        r"""Return a callable for the create training pipeline method over gRPC.
+
+        Creates a TrainingPipeline. The server will attempt
+        to run a newly created TrainingPipeline right away.
+
+        Returns:
+            Callable[[~.CreateTrainingPipelineRequest],
+                    ~.TrainingPipeline]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'create_training_pipeline' not in self._stubs:
+            self._stubs['create_training_pipeline'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.PipelineService/CreateTrainingPipeline',
+                request_serializer=pipeline_service.CreateTrainingPipelineRequest.serialize,
+                response_deserializer=gca_training_pipeline.TrainingPipeline.deserialize,
+            )
+        return self._stubs['create_training_pipeline']
+
+    @property
+    def get_training_pipeline(self) -> Callable[
+            [pipeline_service.GetTrainingPipelineRequest],
+            training_pipeline.TrainingPipeline]:
+        r"""Return a callable for the get training pipeline method over gRPC.
+
+        Gets a TrainingPipeline.
+
+        Returns:
+            Callable[[~.GetTrainingPipelineRequest],
+                    ~.TrainingPipeline]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'get_training_pipeline' not in self._stubs:
+            self._stubs['get_training_pipeline'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.PipelineService/GetTrainingPipeline',
+                request_serializer=pipeline_service.GetTrainingPipelineRequest.serialize,
+                response_deserializer=training_pipeline.TrainingPipeline.deserialize,
+            )
+        return self._stubs['get_training_pipeline']
+
+    @property
+    def list_training_pipelines(self) -> Callable[
+            [pipeline_service.ListTrainingPipelinesRequest],
+            pipeline_service.ListTrainingPipelinesResponse]:
+        r"""Return a callable for the list training pipelines method over gRPC.
+
+        Lists TrainingPipelines in a Location.
+
+        Returns:
+            Callable[[~.ListTrainingPipelinesRequest],
+                    ~.ListTrainingPipelinesResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
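+        # Illustrative use of the returned callable (assumes a constructed
+        # transport; 'my-project' is a placeholder):
+        #
+        #     rpc = transport.list_training_pipelines
+        #     response = rpc(pipeline_service.ListTrainingPipelinesRequest(
+        #         parent='projects/my-project/locations/us-central1'))
+        #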
+ if 'list_training_pipelines' not in self._stubs: + self._stubs['list_training_pipelines'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/ListTrainingPipelines', + request_serializer=pipeline_service.ListTrainingPipelinesRequest.serialize, + response_deserializer=pipeline_service.ListTrainingPipelinesResponse.deserialize, + ) + return self._stubs['list_training_pipelines'] + + @property + def delete_training_pipeline(self) -> Callable[ + [pipeline_service.DeleteTrainingPipelineRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete training pipeline method over gRPC. + + Deletes a TrainingPipeline. + + Returns: + Callable[[~.DeleteTrainingPipelineRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_training_pipeline' not in self._stubs: + self._stubs['delete_training_pipeline'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/DeleteTrainingPipeline', + request_serializer=pipeline_service.DeleteTrainingPipelineRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_training_pipeline'] + + @property + def cancel_training_pipeline(self) -> Callable[ + [pipeline_service.CancelTrainingPipelineRequest], + empty_pb2.Empty]: + r"""Return a callable for the cancel training pipeline method over gRPC. + + Cancels a TrainingPipeline. Starts asynchronous cancellation on + the TrainingPipeline. The server makes a best effort to cancel + the pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the TrainingPipeline is not deleted; + instead it becomes a pipeline with a + [TrainingPipeline.error][google.cloud.aiplatform.v1beta1.TrainingPipeline.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state] + is set to ``CANCELLED``. + + Returns: + Callable[[~.CancelTrainingPipelineRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_training_pipeline' not in self._stubs: + self._stubs['cancel_training_pipeline'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/CancelTrainingPipeline', + request_serializer=pipeline_service.CancelTrainingPipelineRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_training_pipeline'] + + @property + def create_pipeline_job(self) -> Callable[ + [pipeline_service.CreatePipelineJobRequest], + gca_pipeline_job.PipelineJob]: + r"""Return a callable for the create pipeline job method over gRPC. + + Creates a PipelineJob. A PipelineJob will run + immediately when created. 
+ + Returns: + Callable[[~.CreatePipelineJobRequest], + ~.PipelineJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_pipeline_job' not in self._stubs: + self._stubs['create_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/CreatePipelineJob', + request_serializer=pipeline_service.CreatePipelineJobRequest.serialize, + response_deserializer=gca_pipeline_job.PipelineJob.deserialize, + ) + return self._stubs['create_pipeline_job'] + + @property + def get_pipeline_job(self) -> Callable[ + [pipeline_service.GetPipelineJobRequest], + pipeline_job.PipelineJob]: + r"""Return a callable for the get pipeline job method over gRPC. + + Gets a PipelineJob. + + Returns: + Callable[[~.GetPipelineJobRequest], + ~.PipelineJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_pipeline_job' not in self._stubs: + self._stubs['get_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/GetPipelineJob', + request_serializer=pipeline_service.GetPipelineJobRequest.serialize, + response_deserializer=pipeline_job.PipelineJob.deserialize, + ) + return self._stubs['get_pipeline_job'] + + @property + def list_pipeline_jobs(self) -> Callable[ + [pipeline_service.ListPipelineJobsRequest], + pipeline_service.ListPipelineJobsResponse]: + r"""Return a callable for the list pipeline jobs method over gRPC. + + Lists PipelineJobs in a Location. + + Returns: + Callable[[~.ListPipelineJobsRequest], + ~.ListPipelineJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_pipeline_jobs' not in self._stubs: + self._stubs['list_pipeline_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/ListPipelineJobs', + request_serializer=pipeline_service.ListPipelineJobsRequest.serialize, + response_deserializer=pipeline_service.ListPipelineJobsResponse.deserialize, + ) + return self._stubs['list_pipeline_jobs'] + + @property + def delete_pipeline_job(self) -> Callable[ + [pipeline_service.DeletePipelineJobRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete pipeline job method over gRPC. + + Deletes a PipelineJob. + + Returns: + Callable[[~.DeletePipelineJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
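+        # The raw stub returns a google.longrunning Operation message; the
+        # client layer wraps it in an operation future. Hypothetical direct
+        # use (illustrative only; job_name is a placeholder):
+        #
+        #     op = transport.delete_pipeline_job(
+        #         pipeline_service.DeletePipelineJobRequest(name=job_name))
+        #     # op.name can then be polled via transport.operations_client.
+        #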
+ if 'delete_pipeline_job' not in self._stubs: + self._stubs['delete_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/DeletePipelineJob', + request_serializer=pipeline_service.DeletePipelineJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_pipeline_job'] + + @property + def cancel_pipeline_job(self) -> Callable[ + [pipeline_service.CancelPipelineJobRequest], + empty_pb2.Empty]: + r"""Return a callable for the cancel pipeline job method over gRPC. + + Cancels a PipelineJob. Starts asynchronous cancellation on the + PipelineJob. The server makes a best effort to cancel the + pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the PipelineJob is not deleted; instead + it becomes a pipeline with a + [PipelineJob.error][google.cloud.aiplatform.v1beta1.PipelineJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [PipelineJob.state][google.cloud.aiplatform.v1beta1.PipelineJob.state] + is set to ``CANCELLED``. + + Returns: + Callable[[~.CancelPipelineJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_pipeline_job' not in self._stubs: + self._stubs['cancel_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/CancelPipelineJob', + request_serializer=pipeline_service.CancelPipelineJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_pipeline_job'] + + +__all__ = ( + 'PipelineServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..a2929cb5dd --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py @@ -0,0 +1,543 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import grpc_helpers_async  # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+import packaging.version
+
+import grpc  # type: ignore
+from grpc.experimental import aio  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import pipeline_job
+from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job
+from google.cloud.aiplatform_v1beta1.types import pipeline_service
+from google.cloud.aiplatform_v1beta1.types import training_pipeline
+from google.cloud.aiplatform_v1beta1.types import training_pipeline as gca_training_pipeline
+from google.longrunning import operations_pb2  # type: ignore
+from google.protobuf import empty_pb2  # type: ignore
+from .base import PipelineServiceTransport, DEFAULT_CLIENT_INFO
+from .grpc import PipelineServiceGrpcTransport
+
+
+class PipelineServiceGrpcAsyncIOTransport(PipelineServiceTransport):
+    """gRPC AsyncIO backend transport for PipelineService.
+
+    A service for creating and managing Vertex AI's pipelines. This
+    includes both ``TrainingPipeline`` resources (used for AutoML and
+    custom training) and ``PipelineJob`` resources (used for Vertex
+    Pipelines).
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(cls,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            quota_project_id: Optional[str] = None,
+            **kwargs) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    def __init__(self, *,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            channel: aio.Channel = None,
+            api_mtls_endpoint: str = None,
+            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+            ssl_channel_credentials: grpc.ChannelCredentials = None,
+            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+            quota_project_id=None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure a mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        self._grpc_channel = None
+        self._ssl_channel_credentials = ssl_channel_credentials
+        self._stubs: Dict[str, Callable] = {}
+        self._operations_client = None
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Ignore credentials if a channel was passed.
+            credentials = False
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+        else:
+            if api_mtls_endpoint:
+                host = api_mtls_endpoint
+
+                # Create SSL credentials with client_cert_source or application
+                # default SSL credentials.
+                if client_cert_source:
+                    cert, key = client_cert_source()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+                else:
+                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+            else:
+                if client_cert_source_for_mtls and not ssl_channel_credentials:
+                    cert, key = client_cert_source_for_mtls()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+
+        # The base transport sets the host, credentials and scopes
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+        )
+
+        if not self._grpc_channel:
+            self._grpc_channel = type(self).create_channel(
+                self._host,
+                credentials=self._credentials,
+                credentials_file=credentials_file,
+                scopes=self._scopes,
+                ssl_credentials=self._ssl_channel_credentials,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        # Wrap messages. This must be done after self._grpc_channel exists
+        self._prep_wrapped_messages(client_info)
+
+    @property
+    def grpc_channel(self) -> aio.Channel:
+        """Create the channel designed to connect to this service.
+
+        This property caches on the instance; repeated calls return
+        the same channel.
+        """
+        # Return the channel from cache.
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsAsyncClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Sanity check: Only create a new client if we do not already have one.
+        if self._operations_client is None:
+            self._operations_client = operations_v1.OperationsAsyncClient(
+                self.grpc_channel
+            )
+
+        # Return the client from cache.
+        return self._operations_client
+
+    @property
+    def create_training_pipeline(self) -> Callable[
+            [pipeline_service.CreateTrainingPipelineRequest],
+            Awaitable[gca_training_pipeline.TrainingPipeline]]:
+        r"""Return a callable for the create training pipeline method over gRPC.
+
+        Creates a TrainingPipeline. The server will attempt
+        to run a newly created TrainingPipeline right away.
+
+        Returns:
+            Callable[[~.CreateTrainingPipelineRequest],
+                    Awaitable[~.TrainingPipeline]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
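+        # Illustrative async use (assumes a constructed asyncio transport;
+        # names are placeholders):
+        #
+        #     rpc = transport.create_training_pipeline
+        #     pipeline = await rpc(pipeline_service.CreateTrainingPipelineRequest(
+        #         parent='projects/my-project/locations/us-central1',
+        #         training_pipeline=my_pipeline))
+        #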
+ if 'create_training_pipeline' not in self._stubs: + self._stubs['create_training_pipeline'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/CreateTrainingPipeline', + request_serializer=pipeline_service.CreateTrainingPipelineRequest.serialize, + response_deserializer=gca_training_pipeline.TrainingPipeline.deserialize, + ) + return self._stubs['create_training_pipeline'] + + @property + def get_training_pipeline(self) -> Callable[ + [pipeline_service.GetTrainingPipelineRequest], + Awaitable[training_pipeline.TrainingPipeline]]: + r"""Return a callable for the get training pipeline method over gRPC. + + Gets a TrainingPipeline. + + Returns: + Callable[[~.GetTrainingPipelineRequest], + Awaitable[~.TrainingPipeline]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_training_pipeline' not in self._stubs: + self._stubs['get_training_pipeline'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/GetTrainingPipeline', + request_serializer=pipeline_service.GetTrainingPipelineRequest.serialize, + response_deserializer=training_pipeline.TrainingPipeline.deserialize, + ) + return self._stubs['get_training_pipeline'] + + @property + def list_training_pipelines(self) -> Callable[ + [pipeline_service.ListTrainingPipelinesRequest], + Awaitable[pipeline_service.ListTrainingPipelinesResponse]]: + r"""Return a callable for the list training pipelines method over gRPC. + + Lists TrainingPipelines in a Location. + + Returns: + Callable[[~.ListTrainingPipelinesRequest], + Awaitable[~.ListTrainingPipelinesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_training_pipelines' not in self._stubs: + self._stubs['list_training_pipelines'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/ListTrainingPipelines', + request_serializer=pipeline_service.ListTrainingPipelinesRequest.serialize, + response_deserializer=pipeline_service.ListTrainingPipelinesResponse.deserialize, + ) + return self._stubs['list_training_pipelines'] + + @property + def delete_training_pipeline(self) -> Callable[ + [pipeline_service.DeleteTrainingPipelineRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete training pipeline method over gRPC. + + Deletes a TrainingPipeline. + + Returns: + Callable[[~.DeleteTrainingPipelineRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
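+        # As with the sync transport, awaiting the stub yields a raw
+        # google.longrunning Operation (illustrative only; name is a
+        # placeholder):
+        #
+        #     op = await transport.delete_training_pipeline(
+        #         pipeline_service.DeleteTrainingPipelineRequest(name=name))
+        #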
+ if 'delete_training_pipeline' not in self._stubs: + self._stubs['delete_training_pipeline'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/DeleteTrainingPipeline', + request_serializer=pipeline_service.DeleteTrainingPipelineRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_training_pipeline'] + + @property + def cancel_training_pipeline(self) -> Callable[ + [pipeline_service.CancelTrainingPipelineRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the cancel training pipeline method over gRPC. + + Cancels a TrainingPipeline. Starts asynchronous cancellation on + the TrainingPipeline. The server makes a best effort to cancel + the pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the TrainingPipeline is not deleted; + instead it becomes a pipeline with a + [TrainingPipeline.error][google.cloud.aiplatform.v1beta1.TrainingPipeline.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state] + is set to ``CANCELLED``. + + Returns: + Callable[[~.CancelTrainingPipelineRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_training_pipeline' not in self._stubs: + self._stubs['cancel_training_pipeline'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/CancelTrainingPipeline', + request_serializer=pipeline_service.CancelTrainingPipelineRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_training_pipeline'] + + @property + def create_pipeline_job(self) -> Callable[ + [pipeline_service.CreatePipelineJobRequest], + Awaitable[gca_pipeline_job.PipelineJob]]: + r"""Return a callable for the create pipeline job method over gRPC. + + Creates a PipelineJob. A PipelineJob will run + immediately when created. + + Returns: + Callable[[~.CreatePipelineJobRequest], + Awaitable[~.PipelineJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_pipeline_job' not in self._stubs: + self._stubs['create_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/CreatePipelineJob', + request_serializer=pipeline_service.CreatePipelineJobRequest.serialize, + response_deserializer=gca_pipeline_job.PipelineJob.deserialize, + ) + return self._stubs['create_pipeline_job'] + + @property + def get_pipeline_job(self) -> Callable[ + [pipeline_service.GetPipelineJobRequest], + Awaitable[pipeline_job.PipelineJob]]: + r"""Return a callable for the get pipeline job method over gRPC. + + Gets a PipelineJob. 
+ + Returns: + Callable[[~.GetPipelineJobRequest], + Awaitable[~.PipelineJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_pipeline_job' not in self._stubs: + self._stubs['get_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/GetPipelineJob', + request_serializer=pipeline_service.GetPipelineJobRequest.serialize, + response_deserializer=pipeline_job.PipelineJob.deserialize, + ) + return self._stubs['get_pipeline_job'] + + @property + def list_pipeline_jobs(self) -> Callable[ + [pipeline_service.ListPipelineJobsRequest], + Awaitable[pipeline_service.ListPipelineJobsResponse]]: + r"""Return a callable for the list pipeline jobs method over gRPC. + + Lists PipelineJobs in a Location. + + Returns: + Callable[[~.ListPipelineJobsRequest], + Awaitable[~.ListPipelineJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_pipeline_jobs' not in self._stubs: + self._stubs['list_pipeline_jobs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/ListPipelineJobs', + request_serializer=pipeline_service.ListPipelineJobsRequest.serialize, + response_deserializer=pipeline_service.ListPipelineJobsResponse.deserialize, + ) + return self._stubs['list_pipeline_jobs'] + + @property + def delete_pipeline_job(self) -> Callable[ + [pipeline_service.DeletePipelineJobRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete pipeline job method over gRPC. + + Deletes a PipelineJob. + + Returns: + Callable[[~.DeletePipelineJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_pipeline_job' not in self._stubs: + self._stubs['delete_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/DeletePipelineJob', + request_serializer=pipeline_service.DeletePipelineJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_pipeline_job'] + + @property + def cancel_pipeline_job(self) -> Callable[ + [pipeline_service.CancelPipelineJobRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the cancel pipeline job method over gRPC. + + Cancels a PipelineJob. Starts asynchronous cancellation on the + PipelineJob. The server makes a best effort to cancel the + pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. 
On + successful cancellation, the PipelineJob is not deleted; instead + it becomes a pipeline with a + [PipelineJob.error][google.cloud.aiplatform.v1beta1.PipelineJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [PipelineJob.state][google.cloud.aiplatform.v1beta1.PipelineJob.state] + is set to ``CANCELLED``. + + Returns: + Callable[[~.CancelPipelineJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_pipeline_job' not in self._stubs: + self._stubs['cancel_pipeline_job'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PipelineService/CancelPipelineJob', + request_serializer=pipeline_service.CancelPipelineJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_pipeline_job'] + + +__all__ = ( + 'PipelineServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/__init__.py new file mode 100644 index 0000000000..13c5d11c66 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import PredictionServiceClient +from .async_client import PredictionServiceAsyncClient + +__all__ = ( + 'PredictionServiceClient', + 'PredictionServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py new file mode 100644 index 0000000000..37e645c1d0 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py @@ -0,0 +1,404 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import prediction_service +from google.protobuf import struct_pb2 # type: ignore +from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport +from .client import PredictionServiceClient + + +class PredictionServiceAsyncClient: + """A service for online predictions and explanations.""" + + _client: PredictionServiceClient + + DEFAULT_ENDPOINT = PredictionServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = PredictionServiceClient.DEFAULT_MTLS_ENDPOINT + + endpoint_path = staticmethod(PredictionServiceClient.endpoint_path) + parse_endpoint_path = staticmethod(PredictionServiceClient.parse_endpoint_path) + common_billing_account_path = staticmethod(PredictionServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(PredictionServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(PredictionServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(PredictionServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(PredictionServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(PredictionServiceClient.parse_common_organization_path) + common_project_path = staticmethod(PredictionServiceClient.common_project_path) + parse_common_project_path = staticmethod(PredictionServiceClient.parse_common_project_path) + common_location_path = staticmethod(PredictionServiceClient.common_location_path) + parse_common_location_path = staticmethod(PredictionServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PredictionServiceAsyncClient: The constructed client. + """ + return PredictionServiceClient.from_service_account_info.__func__(PredictionServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PredictionServiceAsyncClient: The constructed client. 
+ """ + return PredictionServiceClient.from_service_account_file.__func__(PredictionServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> PredictionServiceTransport: + """Returns the transport used by the client instance. + + Returns: + PredictionServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(PredictionServiceClient).get_transport_class, type(PredictionServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, PredictionServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the prediction service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.PredictionServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = PredictionServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def predict(self, + request: prediction_service.PredictRequest = None, + *, + endpoint: str = None, + instances: Sequence[struct_pb2.Value] = None, + parameters: struct_pb2.Value = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> prediction_service.PredictResponse: + r"""Perform an online prediction. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.PredictRequest`): + The request object. Request message for + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. + endpoint (:class:`str`): + Required. The name of the Endpoint requested to serve + the prediction. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ instances (:class:`Sequence[google.protobuf.struct_pb2.Value]`): + Required. The instances that are the input to the + prediction call. A DeployedModel may have an upper limit + on the number of instances it supports per request, and + when it is exceeded the prediction call errors in case + of AutoML Models, or, in case of customer created + Models, the behaviour is as documented by that Model. + The schema of any single instance may be specified via + Endpoint's DeployedModels' + [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. + + This corresponds to the ``instances`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parameters (:class:`google.protobuf.struct_pb2.Value`): + The parameters that govern the prediction. The schema of + the parameters may be specified via Endpoint's + DeployedModels' [Model's + ][google.cloud.aiplatform.v1beta1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. + + This corresponds to the ``parameters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.PredictResponse: + Response message for + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([endpoint, instances, parameters]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = prediction_service.PredictRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if parameters is not None: + request.parameters = parameters + if instances: + request.instances.extend(instances) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.predict, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
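For context, a hedged usage sketch of the flattened form of `predict` documented above; the endpoint name and instance payload are placeholders, and Application Default Credentials are assumed to be configured:

import asyncio

from google.cloud import aiplatform_v1beta1
from google.protobuf import struct_pb2


async def main():
    client = aiplatform_v1beta1.PredictionServiceAsyncClient()
    # Build one instance payload; the real schema depends on the deployed model.
    instance = struct_pb2.Value()
    instance.struct_value.update({"feature": 1.0})
    response = await client.predict(
        endpoint="projects/my-project/locations/us-central1/endpoints/123",
        instances=[instance],
    )
    print(response.predictions)


asyncio.run(main())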
+ return response + + async def explain(self, + request: prediction_service.ExplainRequest = None, + *, + endpoint: str = None, + instances: Sequence[struct_pb2.Value] = None, + parameters: struct_pb2.Value = None, + deployed_model_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> prediction_service.ExplainResponse: + r"""Perform an online explanation. + + If + [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] + is specified, the corresponding DeployModel must have + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + populated. If + [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] + is not specified, all DeployedModels must have + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + populated. Only deployed AutoML tabular Models have + explanation_spec. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ExplainRequest`): + The request object. Request message for + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. + endpoint (:class:`str`): + Required. The name of the Endpoint requested to serve + the explanation. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instances (:class:`Sequence[google.protobuf.struct_pb2.Value]`): + Required. The instances that are the input to the + explanation call. A DeployedModel may have an upper + limit on the number of instances it supports per + request, and when it is exceeded the explanation call + errors in case of AutoML Models, or, in case of customer + created Models, the behaviour is as documented by that + Model. The schema of any single instance may be + specified via Endpoint's DeployedModels' + [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. + + This corresponds to the ``instances`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parameters (:class:`google.protobuf.struct_pb2.Value`): + The parameters that govern the prediction. The schema of + the parameters may be specified via Endpoint's + DeployedModels' [Model's + ][google.cloud.aiplatform.v1beta1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. + + This corresponds to the ``parameters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_model_id (:class:`str`): + If specified, this ExplainRequest will be served by the + chosen DeployedModel, overriding + [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split]. + + This corresponds to the ``deployed_model_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.types.ExplainResponse: + Response message for + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([endpoint, instances, parameters, deployed_model_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = prediction_service.ExplainRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if parameters is not None: + request.parameters = parameters + if deployed_model_id is not None: + request.deployed_model_id = deployed_model_id + if instances: + request.instances.extend(instances) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.explain, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "PredictionServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py new file mode 100644 index 0000000000..d71300114e --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py @@ -0,0 +1,591 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+from collections import OrderedDict
+from distutils import util
+import os
+import re
+from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
+import pkg_resources
+
+from google.api_core import client_options as client_options_lib # type: ignore
+from google.api_core import exceptions as core_exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
+from google.auth.transport import mtls # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+from google.auth.exceptions import MutualTLSChannelError # type: ignore
+from google.oauth2 import service_account # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import explanation
+from google.cloud.aiplatform_v1beta1.types import prediction_service
+from google.protobuf import struct_pb2 # type: ignore
+from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc import PredictionServiceGrpcTransport
+from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport
+
+
+class PredictionServiceClientMeta(type):
+ """Metaclass for the PredictionService client.
+
+ This provides class-level methods for building and retrieving
+ support objects (e.g. transport) without polluting the client instance
+ objects.
+ """
+ _transport_registry = OrderedDict() # type: Dict[str, Type[PredictionServiceTransport]]
+ _transport_registry["grpc"] = PredictionServiceGrpcTransport
+ _transport_registry["grpc_asyncio"] = PredictionServiceGrpcAsyncIOTransport
+
+ def get_transport_class(cls,
+ label: str = None,
+ ) -> Type[PredictionServiceTransport]:
+ """Returns an appropriate transport class.
+
+ Args:
+ label: The name of the desired transport. If none is
+ provided, then the first transport in the registry is used.
+
+ Returns:
+ The transport class to use.
+ """
+ # If a specific transport is requested, return that one.
+ if label:
+ return cls._transport_registry[label]
+
+ # No transport is requested; return the default (that is, the first one
+ # in the dictionary).
+ return next(iter(cls._transport_registry.values()))
+
+
+class PredictionServiceClient(metaclass=PredictionServiceClientMeta):
+ """A service for online predictions and explanations."""
+
+ @staticmethod
+ def _get_default_mtls_endpoint(api_endpoint):
+ """Converts api endpoint to mTLS endpoint.
+
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+ Args:
+ api_endpoint (Optional[str]): the api endpoint to convert.
+ Returns:
+ str: converted mTLS api endpoint.
+ """
+ if not api_endpoint:
+ return api_endpoint
+
+ mtls_endpoint_re = re.compile(
+ r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ )
+
+ m = mtls_endpoint_re.match(api_endpoint)
+ name, mtls, sandbox, googledomain = m.groups()
+ if mtls or not googledomain:
+ return api_endpoint
+
+ if sandbox:
+ return api_endpoint.replace(
+ "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+ )
+
+ return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+ DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+ DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
+ DEFAULT_ENDPOINT
+ )
+
+ @classmethod
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ info.
+
+ Args:
+ info (dict): The service account private key info.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ PredictionServiceClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_info(info)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ @classmethod
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ file.
+
+ Args:
+ filename (str): The path to the service account private key json
+ file.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ PredictionServiceClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_file(
+ filename)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ from_service_account_json = from_service_account_file
+
+ @property
+ def transport(self) -> PredictionServiceTransport:
+ """Returns the transport used by the client instance.
+
+ Returns:
+ PredictionServiceTransport: The transport used by the client
+ instance.
+ """
+ return self._transport
+
+ @staticmethod
+ def endpoint_path(project: str,location: str,endpoint: str,) -> str:
+ """Returns a fully-qualified endpoint string."""
+ return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, )
+
+ @staticmethod
+ def parse_endpoint_path(path: str) -> Dict[str,str]:
+ """Parses an endpoint path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/endpoints/(?P<endpoint>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_billing_account_path(billing_account: str, ) -> str:
+ """Returns a fully-qualified billing_account string."""
+ return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+ @staticmethod
+ def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+ """Parse a billing_account path into its component segments."""
+ m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_folder_path(folder: str, ) -> str:
+ """Returns a fully-qualified folder string."""
+ return "folders/{folder}".format(folder=folder, )
+
+ @staticmethod
+ def parse_common_folder_path(path: str) -> Dict[str,str]:
+ """Parse a folder path into its component segments."""
+ m = re.match(r"^folders/(?P<folder>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_organization_path(organization: str, ) -> str:
+ """Returns a fully-qualified organization string."""
+ return "organizations/{organization}".format(organization=organization, )
+
+ @staticmethod
+ def parse_common_organization_path(path: str) -> Dict[str,str]:
+ """Parse an organization path into its component segments."""
+ m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_project_path(project: str, ) -> str:
+ """Returns a fully-qualified project string."""
+ return "projects/{project}".format(project=project, )
+
+ @staticmethod
+ def parse_common_project_path(path: str) -> Dict[str,str]:
+ """Parse a project path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_location_path(project: str,
location: str, ) -> str:
+ """Returns a fully-qualified location string."""
+ return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+ @staticmethod
+ def parse_common_location_path(path: str) -> Dict[str,str]:
+ """Parse a location path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ def __init__(self, *,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ transport: Union[str, PredictionServiceTransport, None] = None,
+ client_options: Optional[client_options_lib.ClientOptions] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiates the prediction service client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Union[str, PredictionServiceTransport]): The
+ transport to use. If set to None, a transport is chosen
+ automatically.
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. It won't take effect if a ``transport`` instance is provided.
+ (1) The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+ environment variable can also be used to override the endpoint:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto switch to the
+ default mTLS endpoint if client certificate is present, this is
+ the default value). However, the ``api_endpoint`` property takes
+ precedence if provided.
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide client certificate for mutual TLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+ if isinstance(client_options, dict):
+ client_options = client_options_lib.from_dict(client_options)
+ if client_options is None:
+ client_options = client_options_lib.ClientOptions()
+
+ # Create SSL credentials for mutual TLS if needed.
+ use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
+
+ client_cert_source_func = None
+ is_mtls = False
+ if use_client_cert:
+ if client_options.client_cert_source:
+ is_mtls = True
+ client_cert_source_func = client_options.client_cert_source
+ else:
+ is_mtls = mtls.has_default_client_cert_source()
+ if is_mtls:
+ client_cert_source_func = mtls.default_client_cert_source()
+ else:
+ client_cert_source_func = None
+
+ # Figure out which api endpoint to use.
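The branch that follows implements the GOOGLE_API_USE_MTLS_ENDPOINT decision table described in the docstring above. A condensed, self-contained restatement of the same logic (the helper name is ours, not part of the library):

import os


def select_endpoint(regular: str, mtls: str, have_client_cert: bool) -> str:
    # Mirrors the "never" / "always" / "auto" semantics used by the client.
    mode = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
    if mode == "never":
        return regular
    if mode == "always":
        return mtls
    if mode == "auto":
        return mtls if have_client_cert else regular
    raise ValueError(
        "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. "
        "Accepted values: never, auto, always"
    )


# e.g. select_endpoint("aiplatform.googleapis.com",
#                      "aiplatform.mtls.googleapis.com", False)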
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, PredictionServiceTransport): + # transport is a PredictionServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def predict(self, + request: prediction_service.PredictRequest = None, + *, + endpoint: str = None, + instances: Sequence[struct_pb2.Value] = None, + parameters: struct_pb2.Value = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> prediction_service.PredictResponse: + r"""Perform an online prediction. + + Args: + request (google.cloud.aiplatform_v1beta1.types.PredictRequest): + The request object. Request message for + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. + endpoint (str): + Required. The name of the Endpoint requested to serve + the prediction. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instances (Sequence[google.protobuf.struct_pb2.Value]): + Required. The instances that are the input to the + prediction call. A DeployedModel may have an upper limit + on the number of instances it supports per request, and + when it is exceeded the prediction call errors in case + of AutoML Models, or, in case of customer created + Models, the behaviour is as documented by that Model. + The schema of any single instance may be specified via + Endpoint's DeployedModels' + [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. + + This corresponds to the ``instances`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parameters (google.protobuf.struct_pb2.Value): + The parameters that govern the prediction. 
The schema of + the parameters may be specified via Endpoint's + DeployedModels' [Model's + ][google.cloud.aiplatform.v1beta1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. + + This corresponds to the ``parameters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.PredictResponse: + Response message for + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([endpoint, instances, parameters]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a prediction_service.PredictRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, prediction_service.PredictRequest): + request = prediction_service.PredictRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if instances is not None: + request.instances = instances + if parameters is not None: + request.parameters = parameters + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.predict] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def explain(self, + request: prediction_service.ExplainRequest = None, + *, + endpoint: str = None, + instances: Sequence[struct_pb2.Value] = None, + parameters: struct_pb2.Value = None, + deployed_model_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> prediction_service.ExplainResponse: + r"""Perform an online explanation. + + If + [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] + is specified, the corresponding DeployModel must have + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + populated. If + [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] + is not specified, all DeployedModels must have + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + populated. Only deployed AutoML tabular Models have + explanation_spec. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ExplainRequest): + The request object. 
Request message for + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. + endpoint (str): + Required. The name of the Endpoint requested to serve + the explanation. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instances (Sequence[google.protobuf.struct_pb2.Value]): + Required. The instances that are the input to the + explanation call. A DeployedModel may have an upper + limit on the number of instances it supports per + request, and when it is exceeded the explanation call + errors in case of AutoML Models, or, in case of customer + created Models, the behaviour is as documented by that + Model. The schema of any single instance may be + specified via Endpoint's DeployedModels' + [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. + + This corresponds to the ``instances`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parameters (google.protobuf.struct_pb2.Value): + The parameters that govern the prediction. The schema of + the parameters may be specified via Endpoint's + DeployedModels' [Model's + ][google.cloud.aiplatform.v1beta1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. + + This corresponds to the ``parameters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_model_id (str): + If specified, this ExplainRequest will be served by the + chosen DeployedModel, overriding + [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split]. + + This corresponds to the ``deployed_model_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ExplainResponse: + Response message for + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([endpoint, instances, parameters, deployed_model_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a prediction_service.ExplainRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, prediction_service.ExplainRequest): + request = prediction_service.ExplainRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
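The coercion step here (as in `predict` above) means callers can pass a typed request object, a plain dict, or flattened fields interchangeably. A small illustration of the equivalent request forms (field values are placeholders):

from google.cloud.aiplatform_v1beta1.types import prediction_service

# Form 1: a typed request object.
req = prediction_service.ExplainRequest(
    endpoint="projects/p/locations/l/endpoints/e",
    deployed_model_id="456",
)

# Form 2: a plain dict, coerced by the proto-plus constructor.
req_from_dict = prediction_service.ExplainRequest(
    {"endpoint": "projects/p/locations/l/endpoints/e", "deployed_model_id": "456"}
)

assert req == req_from_dict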
+ if endpoint is not None: + request.endpoint = endpoint + if instances is not None: + request.instances = instances + if parameters is not None: + request.parameters = parameters + if deployed_model_id is not None: + request.deployed_model_id = deployed_model_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.explain] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("endpoint", request.endpoint), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "PredictionServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/__init__.py new file mode 100644 index 0000000000..d747de2ce9 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import PredictionServiceTransport +from .grpc import PredictionServiceGrpcTransport +from .grpc_asyncio import PredictionServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[PredictionServiceTransport]] +_transport_registry['grpc'] = PredictionServiceGrpcTransport +_transport_registry['grpc_asyncio'] = PredictionServiceGrpcAsyncIOTransport + +__all__ = ( + 'PredictionServiceTransport', + 'PredictionServiceGrpcTransport', + 'PredictionServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py new file mode 100644 index 0000000000..285f122090 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py @@ -0,0 +1,182 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import prediction_service + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class PredictionServiceTransport(abc.ABC): + """Abstract transport class for PredictionService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. 
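The hostname handling that follows is small but easy to miss: a port is appended only when the caller did not supply one. A standalone restatement (the helper name is ours):

def normalize_host(host: str) -> str:
    # Append the default HTTPS port only when none was given.
    return host if ":" in host else host + ":443"


assert normalize_host("aiplatform.googleapis.com") == "aiplatform.googleapis.com:443"
assert normalize_host("localhost:8080") == "localhost:8080"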
+ if ':' not in host:
+ host += ':443'
+ self._host = host
+
+ scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
+
+ # Save the scopes.
+ self._scopes = scopes
+
+ # If no credentials are provided, then determine the appropriate
+ # defaults.
+ if credentials and credentials_file:
+ raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
+
+ if credentials_file is not None:
+ credentials, _ = google.auth.load_credentials_from_file(
+ credentials_file,
+ **scopes_kwargs,
+ quota_project_id=quota_project_id
+ )
+
+ elif credentials is None:
+ credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
+
+ # If the credentials are service account credentials, then always try to use self signed JWT.
+ if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
+ credentials = credentials.with_always_use_jwt_access(True)
+
+ # Save the credentials.
+ self._credentials = credentials
+
+ # TODO(busunkim): This method is in the base transport
+ # to avoid duplicating code across the transport classes. These functions
+ # should be deleted once the minimum required version of google-auth is increased.
+
+ # TODO: Remove this function once google-auth >= 1.25.0 is required
+ @classmethod
+ def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
+ """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
+
+ scopes_kwargs = {}
+
+ if _GOOGLE_AUTH_VERSION and (
+ packaging.version.parse(_GOOGLE_AUTH_VERSION)
+ >= packaging.version.parse("1.25.0")
+ ):
+ scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
+ else:
+ scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
+
+ return scopes_kwargs
+
+ def _prep_wrapped_messages(self, client_info):
+ # Precompute the wrapped methods.
+ self._wrapped_methods = {
+ self.predict: gapic_v1.method.wrap_method(
+ self.predict,
+ default_timeout=5.0,
+ client_info=client_info,
+ ),
+ self.explain: gapic_v1.method.wrap_method(
+ self.explain,
+ default_timeout=5.0,
+ client_info=client_info,
+ ),
+ }
+
+ @property
+ def predict(self) -> Callable[
+ [prediction_service.PredictRequest],
+ Union[
+ prediction_service.PredictResponse,
+ Awaitable[prediction_service.PredictResponse]
+ ]]:
+ raise NotImplementedError()
+
+ @property
+ def explain(self) -> Callable[
+ [prediction_service.ExplainRequest],
+ Union[
+ prediction_service.ExplainResponse,
+ Awaitable[prediction_service.ExplainResponse]
+ ]]:
+ raise NotImplementedError()
+
+
+__all__ = (
+ 'PredictionServiceTransport',
+)
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py
new file mode 100644
index 0000000000..1e95e16b24
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py
@@ -0,0 +1,289 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import warnings
+from typing import Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import grpc_helpers  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+import google.auth  # type: ignore
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+
+import grpc  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import prediction_service
+from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO
+
+
+class PredictionServiceGrpcTransport(PredictionServiceTransport):
+    """gRPC backend transport for PredictionService.
+
+    A service for online predictions and explanations.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+    _stubs: Dict[str, Callable]
+
+    def __init__(self, *,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: str = None,
+            scopes: Sequence[str] = None,
+            channel: grpc.Channel = None,
+            api_mtls_endpoint: str = None,
+            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+            ssl_channel_credentials: grpc.ChannelCredentials = None,
+            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+            quota_project_id: Optional[str] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the grpc channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service.
+        """
+        return self._grpc_channel
+
+    @property
+    def predict(self) -> Callable[
+            [prediction_service.PredictRequest],
+            prediction_service.PredictResponse]:
+        r"""Return a callable for the predict method over gRPC.
+
+        Perform an online prediction.
+
+        Returns:
+            Callable[[~.PredictRequest],
+                    ~.PredictResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'predict' not in self._stubs:
+            self._stubs['predict'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.PredictionService/Predict',
+                request_serializer=prediction_service.PredictRequest.serialize,
+                response_deserializer=prediction_service.PredictResponse.deserialize,
+            )
+        return self._stubs['predict']
+
+    @property
+    def explain(self) -> Callable[
+            [prediction_service.ExplainRequest],
+            prediction_service.ExplainResponse]:
+        r"""Return a callable for the explain method over gRPC.
+
+        Perform an online explanation.
+
+        If
+        [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id]
+        is specified, the corresponding DeployedModel must have
+        [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec]
+        populated. If
+        [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id]
+        is not specified, all DeployedModels must have
+        [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec]
+        populated. Only deployed AutoML tabular Models have
+        explanation_spec.
+
+        Returns:
+            Callable[[~.ExplainRequest],
+                    ~.ExplainResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
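# Caller-side sketch of how these stubs are ultimately exercised through the
# public client (assumes application default credentials; the project,
# location, and endpoint IDs are placeholders):
#
#     from google.cloud import aiplatform_v1beta1
#
#     client = aiplatform_v1beta1.PredictionServiceClient()  # gRPC transport by default
#     response = client.predict(
#         endpoint='projects/my-project/locations/us-central1/endpoints/123',
#         instances=[instance],  # instance: a google.protobuf.struct_pb2.Value
#     )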
+ if 'explain' not in self._stubs: + self._stubs['explain'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.PredictionService/Explain', + request_serializer=prediction_service.ExplainRequest.serialize, + response_deserializer=prediction_service.ExplainResponse.deserialize, + ) + return self._stubs['explain'] + + +__all__ = ( + 'PredictionServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..2c43614249 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py @@ -0,0 +1,293 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +import packaging.version + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import prediction_service +from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import PredictionServiceGrpcTransport + + +class PredictionServiceGrpcAsyncIOTransport(PredictionServiceTransport): + """gRPC AsyncIO backend transport for PredictionService. + + A service for online predictions and explanations. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. 
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    def __init__(self, *,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            channel: aio.Channel = None,
+            api_mtls_endpoint: str = None,
+            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+            ssl_channel_credentials: grpc.ChannelCredentials = None,
+            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+            quota_project_id=None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        self._grpc_channel = None
+        self._ssl_channel_credentials = ssl_channel_credentials
+        self._stubs: Dict[str, Callable] = {}
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Ignore credentials if a channel was passed.
+            credentials = False
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+        else:
+            if api_mtls_endpoint:
+                host = api_mtls_endpoint
+
+                # Create SSL credentials with client_cert_source or application
+                # default SSL credentials.
+                if client_cert_source:
+                    cert, key = client_cert_source()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+                else:
+                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+            else:
+                if client_cert_source_for_mtls and not ssl_channel_credentials:
+                    cert, key = client_cert_source_for_mtls()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+
+        # The base transport sets the host, credentials and scopes
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+        )
+
+        if not self._grpc_channel:
+            self._grpc_channel = type(self).create_channel(
+                self._host,
+                credentials=self._credentials,
+                credentials_file=credentials_file,
+                scopes=self._scopes,
+                ssl_credentials=self._ssl_channel_credentials,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        # Wrap messages. This must be done after self._grpc_channel exists
+        self._prep_wrapped_messages(client_info)
+
+    @property
+    def grpc_channel(self) -> aio.Channel:
+        """Return the channel designed to connect to this service.
+
+        This property caches on the instance; repeated calls return
+        the same channel.
+        """
+        # Return the channel from cache.
+        return self._grpc_channel
+
+    @property
+    def predict(self) -> Callable[
+            [prediction_service.PredictRequest],
+            Awaitable[prediction_service.PredictResponse]]:
+        r"""Return a callable for the predict method over gRPC.
+
+        Perform an online prediction.
+
+        Returns:
+            Callable[[~.PredictRequest],
+                    Awaitable[~.PredictResponse]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
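# The AsyncIO stubs below are awaited rather than called synchronously; a
# minimal caller-side sketch (placeholder resource names, ambient
# credentials assumed):
#
#     import asyncio
#     from google.cloud import aiplatform_v1beta1
#
#     async def main():
#         client = aiplatform_v1beta1.PredictionServiceAsyncClient()
#         response = await client.predict(
#             endpoint='projects/my-project/locations/us-central1/endpoints/123',
#             instances=[instance],  # instance: a google.protobuf.struct_pb2.Value
#         )
#
#     asyncio.run(main())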
+        if 'predict' not in self._stubs:
+            self._stubs['predict'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.PredictionService/Predict',
+                request_serializer=prediction_service.PredictRequest.serialize,
+                response_deserializer=prediction_service.PredictResponse.deserialize,
+            )
+        return self._stubs['predict']
+
+    @property
+    def explain(self) -> Callable[
+            [prediction_service.ExplainRequest],
+            Awaitable[prediction_service.ExplainResponse]]:
+        r"""Return a callable for the explain method over gRPC.
+
+        Perform an online explanation.
+
+        If
+        [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id]
+        is specified, the corresponding DeployedModel must have
+        [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec]
+        populated. If
+        [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id]
+        is not specified, all DeployedModels must have
+        [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec]
+        populated. Only deployed AutoML tabular Models have
+        explanation_spec.
+
+        Returns:
+            Callable[[~.ExplainRequest],
+                    Awaitable[~.ExplainResponse]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'explain' not in self._stubs:
+            self._stubs['explain'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.PredictionService/Explain',
+                request_serializer=prediction_service.ExplainRequest.serialize,
+                response_deserializer=prediction_service.ExplainResponse.deserialize,
+            )
+        return self._stubs['explain']
+
+
+__all__ = (
+    'PredictionServiceGrpcAsyncIOTransport',
+)
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/__init__.py
new file mode 100644
index 0000000000..04af59e5fa
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/__init__.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
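# The two re-exports below are this package's public surface; either client
# can be constructed directly (a sketch, assuming application default
# credentials are configured):
#
#     from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import (
#         SpecialistPoolServiceClient,
#         SpecialistPoolServiceAsyncClient,
#     )
#
#     sync_client = SpecialistPoolServiceClient()
#     async_client = SpecialistPoolServiceAsyncClient()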
+# +from .client import SpecialistPoolServiceClient +from .async_client import SpecialistPoolServiceAsyncClient + +__all__ = ( + 'SpecialistPoolServiceClient', + 'SpecialistPoolServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py new file mode 100644 index 0000000000..2a8321c773 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py @@ -0,0 +1,650 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import pagers +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import specialist_pool +from google.cloud.aiplatform_v1beta1.types import specialist_pool as gca_specialist_pool +from google.cloud.aiplatform_v1beta1.types import specialist_pool_service +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from .transports.base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport +from .client import SpecialistPoolServiceClient + + +class SpecialistPoolServiceAsyncClient: + """A service for creating and managing Customer SpecialistPools. + When customers start Data Labeling jobs, they can reuse/create + Specialist Pools to bring their own Specialists to label the + data. Customers can add/remove Managers for the Specialist Pool + on Cloud console, then Managers will get email notifications to + manage Specialists and tasks on CrowdCompute console. 
+ """ + + _client: SpecialistPoolServiceClient + + DEFAULT_ENDPOINT = SpecialistPoolServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = SpecialistPoolServiceClient.DEFAULT_MTLS_ENDPOINT + + specialist_pool_path = staticmethod(SpecialistPoolServiceClient.specialist_pool_path) + parse_specialist_pool_path = staticmethod(SpecialistPoolServiceClient.parse_specialist_pool_path) + common_billing_account_path = staticmethod(SpecialistPoolServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(SpecialistPoolServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(SpecialistPoolServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(SpecialistPoolServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(SpecialistPoolServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(SpecialistPoolServiceClient.parse_common_organization_path) + common_project_path = staticmethod(SpecialistPoolServiceClient.common_project_path) + parse_common_project_path = staticmethod(SpecialistPoolServiceClient.parse_common_project_path) + common_location_path = staticmethod(SpecialistPoolServiceClient.common_location_path) + parse_common_location_path = staticmethod(SpecialistPoolServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + SpecialistPoolServiceAsyncClient: The constructed client. + """ + return SpecialistPoolServiceClient.from_service_account_info.__func__(SpecialistPoolServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + SpecialistPoolServiceAsyncClient: The constructed client. + """ + return SpecialistPoolServiceClient.from_service_account_file.__func__(SpecialistPoolServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> SpecialistPoolServiceTransport: + """Returns the transport used by the client instance. + + Returns: + SpecialistPoolServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(SpecialistPoolServiceClient).get_transport_class, type(SpecialistPoolServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, SpecialistPoolServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the specialist pool service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, ~.SpecialistPoolServiceTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (ClientOptions): Custom options for the client. It
+                won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+                set, no client certificate will be used.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+        """
+        self._client = SpecialistPoolServiceClient(
+            credentials=credentials,
+            transport=transport,
+            client_options=client_options,
+            client_info=client_info,
+        )
+
+    async def create_specialist_pool(self,
+            request: specialist_pool_service.CreateSpecialistPoolRequest = None,
+            *,
+            parent: str = None,
+            specialist_pool: gca_specialist_pool.SpecialistPool = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> operation_async.AsyncOperation:
+        r"""Creates a SpecialistPool.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.CreateSpecialistPoolRequest`):
+                The request object. Request message for
+                [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool].
+            parent (:class:`str`):
+                Required. The parent Project name for the new
+                SpecialistPool. The form is
+                ``projects/{project}/locations/{location}``.
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            specialist_pool (:class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool`):
+                Required. The SpecialistPool to
+                create.
+
+                This corresponds to the ``specialist_pool`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation_async.AsyncOperation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data
+                   labeling jobs. It includes a group of specialist
+                   managers who are responsible for managing the
+                   labelers in this pool as well as customers' data
+                   labeling jobs associated with this pool.
Customers + create specialist pool as well as start data labeling + jobs on Cloud, managers and labelers work with the + jobs using CrowdCompute console. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, specialist_pool]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = specialist_pool_service.CreateSpecialistPoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if specialist_pool is not None: + request.specialist_pool = specialist_pool + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_specialist_pool, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_specialist_pool.SpecialistPool, + metadata_type=specialist_pool_service.CreateSpecialistPoolOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_specialist_pool(self, + request: specialist_pool_service.GetSpecialistPoolRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> specialist_pool.SpecialistPool: + r"""Gets a SpecialistPool. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetSpecialistPoolRequest`): + The request object. Request message for + [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool]. + name (:class:`str`): + Required. The name of the SpecialistPool resource. The + form is + ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.SpecialistPool: + SpecialistPool represents customers' + own workforce to work on their data + labeling jobs. It includes a group of + specialist managers who are responsible + for managing the labelers in this pool + as well as customers' data labeling jobs + associated with this pool. + Customers create specialist pool as well + as start data labeling jobs on Cloud, + managers and labelers work with the jobs + using CrowdCompute console. + + """ + # Create or coerce a protobuf request object. 
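# Caller-side sketch of the create/get pattern implemented by these methods
# (resource names are placeholders; application default credentials assumed):
#
#     client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient()
#     operation = await client.create_specialist_pool(
#         parent='projects/my-project/locations/us-central1',
#         specialist_pool=aiplatform_v1beta1.SpecialistPool(
#             display_name='labeling-pool',
#             specialist_manager_emails=['manager@example.com'],
#         ),
#     )
#     pool = await operation.result()          # resolves to a SpecialistPool
#     pool = await client.get_specialist_pool(name=pool.name)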
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = specialist_pool_service.GetSpecialistPoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_specialist_pool, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_specialist_pools(self, + request: specialist_pool_service.ListSpecialistPoolsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListSpecialistPoolsAsyncPager: + r"""Lists SpecialistPools in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest`): + The request object. Request message for + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. + parent (:class:`str`): + Required. The name of the SpecialistPool's parent + resource. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.specialist_pool_service.pagers.ListSpecialistPoolsAsyncPager: + Response message for + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = specialist_pool_service.ListSpecialistPoolsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
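# How the pager produced by this method is consumed (a sketch; subsequent
# pages are fetched lazily as iteration crosses page boundaries):
#
#     pager = await client.list_specialist_pools(
#         parent='projects/my-project/locations/us-central1',
#     )
#     async for specialist_pool in pager:
#         print(specialist_pool.display_name)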
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_specialist_pools, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListSpecialistPoolsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_specialist_pool(self, + request: specialist_pool_service.DeleteSpecialistPoolRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a SpecialistPool as well as all Specialists + in the pool. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteSpecialistPoolRequest`): + The request object. Request message for + [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool]. + name (:class:`str`): + Required. The resource name of the SpecialistPool to + delete. Format: + ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = specialist_pool_service.DeleteSpecialistPoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_specialist_pool, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
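# For a name like 'projects/p/locations/l/specialistPools/s', the routing
# header assembled below is sent as request metadata, roughly:
#
#     ('x-goog-request-params', 'name=projects/p/locations/l/specialistPools/s')
#
# (a sketch; to_grpc_metadata also URL-encodes the value).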
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def update_specialist_pool(self, + request: specialist_pool_service.UpdateSpecialistPoolRequest = None, + *, + specialist_pool: gca_specialist_pool.SpecialistPool = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates a SpecialistPool. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateSpecialistPoolRequest`): + The request object. Request message for + [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool]. + specialist_pool (:class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool`): + Required. The SpecialistPool which + replaces the resource on the server. + + This corresponds to the ``specialist_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The update mask applies to + the resource. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data + labeling jobs. It includes a group of specialist + managers who are responsible for managing the + labelers in this pool as well as customers' data + labeling jobs associated with this pool. Customers + create specialist pool as well as start data labeling + jobs on Cloud, managers and labelers work with the + jobs using CrowdCompute console. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([specialist_pool, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = specialist_pool_service.UpdateSpecialistPoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if specialist_pool is not None: + request.specialist_pool = specialist_pool + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
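# Caller-side sketch of a partial update with the field mask documented
# above (only the listed paths are overwritten on the server):
#
#     from google.protobuf import field_mask_pb2
#
#     pool.display_name = 'renamed-pool'
#     operation = await client.update_specialist_pool(
#         specialist_pool=pool,
#         update_mask=field_mask_pb2.FieldMask(paths=['display_name']),
#     )
#     updated = await operation.result()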
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_specialist_pool, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("specialist_pool.name", request.specialist_pool.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_specialist_pool.SpecialistPool, + metadata_type=specialist_pool_service.UpdateSpecialistPoolOperationMetadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "SpecialistPoolServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py new file mode 100644 index 0000000000..43ff07d422 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py @@ -0,0 +1,837 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import pagers +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import specialist_pool +from google.cloud.aiplatform_v1beta1.types import specialist_pool as gca_specialist_pool +from google.cloud.aiplatform_v1beta1.types import specialist_pool_service +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from .transports.base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import SpecialistPoolServiceGrpcTransport +from .transports.grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport + + +class SpecialistPoolServiceClientMeta(type): + """Metaclass for the SpecialistPoolService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[SpecialistPoolServiceTransport]] + _transport_registry["grpc"] = SpecialistPoolServiceGrpcTransport + _transport_registry["grpc_asyncio"] = SpecialistPoolServiceGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[SpecialistPoolServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class SpecialistPoolServiceClient(metaclass=SpecialistPoolServiceClientMeta): + """A service for creating and managing Customer SpecialistPools. + When customers start Data Labeling jobs, they can reuse/create + Specialist Pools to bring their own Specialists to label the + data. Customers can add/remove Managers for the Specialist Pool + on Cloud console, then Managers will get email notifications to + manage Specialists and tasks on CrowdCompute console. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. 
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            SpecialistPoolServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            SpecialistPoolServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(
+            filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> SpecialistPoolServiceTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            SpecialistPoolServiceTransport: The transport used by the client
+                instance.
+ """ + return self._transport + + @staticmethod + def specialist_pool_path(project: str,location: str,specialist_pool: str,) -> str: + """Returns a fully-qualified specialist_pool string.""" + return "projects/{project}/locations/{location}/specialistPools/{specialist_pool}".format(project=project, location=location, specialist_pool=specialist_pool, ) + + @staticmethod + def parse_specialist_pool_path(path: str) -> Dict[str,str]: + """Parses a specialist_pool path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/specialistPools/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, SpecialistPoolServiceTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the specialist pool service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, SpecialistPoolServiceTransport]): The + transport to use. 
If set to None, a transport is chosen
+ automatically.
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. It won't take effect if a ``transport`` instance is provided.
+ (1) The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client. The GOOGLE_API_USE_MTLS_ENDPOINT
+ environment variable can also be used to override the endpoint:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto switch to the
+ default mTLS endpoint if a client certificate is present; this is
+ the default value). However, the ``api_endpoint`` property takes
+ precedence if provided.
+ (2) If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide a client certificate for mutual TLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+ if isinstance(client_options, dict):
+ client_options = client_options_lib.from_dict(client_options)
+ if client_options is None:
+ client_options = client_options_lib.ClientOptions()
+
+ # Create SSL credentials for mutual TLS if needed.
+ use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
+
+ client_cert_source_func = None
+ is_mtls = False
+ if use_client_cert:
+ if client_options.client_cert_source:
+ is_mtls = True
+ client_cert_source_func = client_options.client_cert_source
+ else:
+ is_mtls = mtls.has_default_client_cert_source()
+ if is_mtls:
+ client_cert_source_func = mtls.default_client_cert_source()
+ else:
+ client_cert_source_func = None
+
+ # Figure out which API endpoint to use.
+ if client_options.api_endpoint is not None:
+ api_endpoint = client_options.api_endpoint
+ else:
+ use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+ if use_mtls_env == "never":
+ api_endpoint = self.DEFAULT_ENDPOINT
+ elif use_mtls_env == "always":
+ api_endpoint = self.DEFAULT_MTLS_ENDPOINT
+ elif use_mtls_env == "auto":
+ if is_mtls:
+ api_endpoint = self.DEFAULT_MTLS_ENDPOINT
+ else:
+ api_endpoint = self.DEFAULT_ENDPOINT
+ else:
+ raise MutualTLSChannelError(
+ "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
+ "values: never, auto, always"
+ )
+
+ # Save or instantiate the transport.
+ # Ordinarily, we provide the transport, but allowing a custom transport
+ # instance provides an extensibility point for unusual situations.
+ if isinstance(transport, SpecialistPoolServiceTransport):
+ # transport is a SpecialistPoolServiceTransport instance.
+ if credentials or client_options.credentials_file:
+ raise ValueError("When providing a transport instance, "
+ "provide its credentials directly.")
+ if client_options.scopes:
+ raise ValueError(
+ "When providing a transport instance, provide its scopes "
+ "directly."
+ )
+ self._transport = transport
+ else:
+ Transport = type(self).get_transport_class(transport)
+ self._transport = Transport(
+ credentials=credentials,
+ credentials_file=client_options.credentials_file,
+ host=api_endpoint,
+ scopes=client_options.scopes,
+ client_cert_source_for_mtls=client_cert_source_func,
+ quota_project_id=client_options.quota_project_id,
+ client_info=client_info,
+ )
+
+ def create_specialist_pool(self,
+ request: specialist_pool_service.CreateSpecialistPoolRequest = None,
+ *,
+ parent: str = None,
+ specialist_pool: gca_specialist_pool.SpecialistPool = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gac_operation.Operation:
+ r"""Creates a SpecialistPool.
+
+ Args:
+ request (google.cloud.aiplatform_v1beta1.types.CreateSpecialistPoolRequest):
+ The request object. Request message for
+ [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool].
+ parent (str):
+ Required. The parent Project name for the new
+ SpecialistPool. The form is
+ ``projects/{project}/locations/{location}``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ specialist_pool (google.cloud.aiplatform_v1beta1.types.SpecialistPool):
+ Required. The SpecialistPool to
+ create.
+
+ This corresponds to the ``specialist_pool`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data
+ labeling jobs. It includes a group of specialist
+ managers who are responsible for managing the
+ labelers in this pool as well as customers' data
+ labeling jobs associated with this pool. Customers
+ create a specialist pool as well as start data labeling
+ jobs on Cloud; managers and labelers work with the
+ jobs using the CrowdCompute console.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, specialist_pool])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a specialist_pool_service.CreateSpecialistPoolRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, specialist_pool_service.CreateSpecialistPoolRequest):
+ request = specialist_pool_service.CreateSpecialistPoolRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
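+ # (Illustrative flattened call, with placeholder values only:
+ # client.create_specialist_pool(
+ # parent="projects/my-project/locations/us-central1",
+ # specialist_pool=gca_specialist_pool.SpecialistPool(...),
+ # ).result())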
+ if parent is not None:
+ request.parent = parent
+ if specialist_pool is not None:
+ request.specialist_pool = specialist_pool
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.create_specialist_pool]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("parent", request.parent),
+ )),
+ )
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = gac_operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ gca_specialist_pool.SpecialistPool,
+ metadata_type=specialist_pool_service.CreateSpecialistPoolOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def get_specialist_pool(self,
+ request: specialist_pool_service.GetSpecialistPoolRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> specialist_pool.SpecialistPool:
+ r"""Gets a SpecialistPool.
+
+ Args:
+ request (google.cloud.aiplatform_v1beta1.types.GetSpecialistPoolRequest):
+ The request object. Request message for
+ [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool].
+ name (str):
+ Required. The name of the SpecialistPool resource. The
+ form is
+ ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1beta1.types.SpecialistPool:
+ SpecialistPool represents customers'
+ own workforce to work on their data
+ labeling jobs. It includes a group of
+ specialist managers who are responsible
+ for managing the labelers in this pool
+ as well as customers' data labeling jobs
+ associated with this pool.
+ Customers create a specialist pool as
+ well as start data labeling jobs on
+ Cloud; managers and labelers work with
+ the jobs using the CrowdCompute console.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a specialist_pool_service.GetSpecialistPoolRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, specialist_pool_service.GetSpecialistPoolRequest):
+ request = specialist_pool_service.GetSpecialistPoolRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
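+ # (The wrapped callable carries the default retry/timeout that
+ # _prep_wrapped_methods configured on the transport.)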
+ rpc = self._transport._wrapped_methods[self._transport.get_specialist_pool] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_specialist_pools(self, + request: specialist_pool_service.ListSpecialistPoolsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListSpecialistPoolsPager: + r"""Lists SpecialistPools in a Location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest): + The request object. Request message for + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. + parent (str): + Required. The name of the SpecialistPool's parent + resource. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.specialist_pool_service.pagers.ListSpecialistPoolsPager: + Response message for + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a specialist_pool_service.ListSpecialistPoolsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, specialist_pool_service.ListSpecialistPoolsRequest): + request = specialist_pool_service.ListSpecialistPoolsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_specialist_pools] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
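+ # (Illustrative use of that pager, with a placeholder parent:
+ # for pool in client.list_specialist_pools(parent="projects/p/locations/us-central1"):
+ # print(pool.name))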
+ response = pagers.ListSpecialistPoolsPager(
+ method=rpc,
+ request=request,
+ response=response,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def delete_specialist_pool(self,
+ request: specialist_pool_service.DeleteSpecialistPoolRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gac_operation.Operation:
+ r"""Deletes a SpecialistPool as well as all Specialists
+ in the pool.
+
+ Args:
+ request (google.cloud.aiplatform_v1beta1.types.DeleteSpecialistPoolRequest):
+ The request object. Request message for
+ [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool].
+ name (str):
+ Required. The resource name of the SpecialistPool to
+ delete. Format:
+ ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
+ empty messages in your APIs. A typical example is to
+ use it as the request or the response type of an API
+ method. For instance:
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+ }
+
+ The JSON representation for Empty is an empty JSON
+ object {}.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a specialist_pool_service.DeleteSpecialistPoolRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, specialist_pool_service.DeleteSpecialistPoolRequest):
+ request = specialist_pool_service.DeleteSpecialistPoolRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.delete_specialist_pool]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("name", request.name),
+ )),
+ )
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = gac_operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ empty_pb2.Empty,
+ metadata_type=gca_operation.DeleteOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def update_specialist_pool(self,
+ request: specialist_pool_service.UpdateSpecialistPoolRequest = None,
+ *,
+ specialist_pool: gca_specialist_pool.SpecialistPool = None,
+ update_mask: field_mask_pb2.FieldMask = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gac_operation.Operation:
+ r"""Updates a SpecialistPool.
+
+ Args:
+ request (google.cloud.aiplatform_v1beta1.types.UpdateSpecialistPoolRequest):
+ The request object. Request message for
+ [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool].
+ specialist_pool (google.cloud.aiplatform_v1beta1.types.SpecialistPool):
+ Required. The SpecialistPool which
+ replaces the resource on the server.
+
+ This corresponds to the ``specialist_pool`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Required. The update mask applies to
+ the resource.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data
+ labeling jobs. It includes a group of specialist
+ managers who are responsible for managing the
+ labelers in this pool as well as customers' data
+ labeling jobs associated with this pool. Customers
+ create a specialist pool as well as start data labeling
+ jobs on Cloud; managers and labelers work with the
+ jobs using the CrowdCompute console.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([specialist_pool, update_mask])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a specialist_pool_service.UpdateSpecialistPoolRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, specialist_pool_service.UpdateSpecialistPoolRequest):
+ request = specialist_pool_service.UpdateSpecialistPoolRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if specialist_pool is not None:
+ request.specialist_pool = specialist_pool
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.update_specialist_pool]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
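+ # (For updates the routing header is keyed on the resource name,
+ # i.e. ``specialist_pool.name`` taken from the request.)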
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("specialist_pool.name", request.specialist_pool.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_specialist_pool.SpecialistPool, + metadata_type=specialist_pool_service.UpdateSpecialistPoolOperationMetadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "SpecialistPoolServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/pagers.py new file mode 100644 index 0000000000..cac2a57671 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/pagers.py @@ -0,0 +1,141 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.cloud.aiplatform_v1beta1.types import specialist_pool +from google.cloud.aiplatform_v1beta1.types import specialist_pool_service + + +class ListSpecialistPoolsPager: + """A pager for iterating through ``list_specialist_pools`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``specialist_pools`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListSpecialistPools`` requests and continue to iterate + through the ``specialist_pools`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., specialist_pool_service.ListSpecialistPoolsResponse], + request: specialist_pool_service.ListSpecialistPoolsRequest, + response: specialist_pool_service.ListSpecialistPoolsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsResponse): + The initial response object. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = specialist_pool_service.ListSpecialistPoolsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[specialist_pool_service.ListSpecialistPoolsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[specialist_pool.SpecialistPool]: + for page in self.pages: + yield from page.specialist_pools + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListSpecialistPoolsAsyncPager: + """A pager for iterating through ``list_specialist_pools`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``specialist_pools`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListSpecialistPools`` requests and continue to iterate + through the ``specialist_pools`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[specialist_pool_service.ListSpecialistPoolsResponse]], + request: specialist_pool_service.ListSpecialistPoolsRequest, + response: specialist_pool_service.ListSpecialistPoolsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
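+
+ Example (illustrative; ``client`` stands in for an assumed
+ ``SpecialistPoolServiceAsyncClient``)::
+
+ pager = await client.list_specialist_pools(parent=parent)
+ async for pool in pager:
+ ...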
+ """ + self._method = method + self._request = specialist_pool_service.ListSpecialistPoolsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[specialist_pool_service.ListSpecialistPoolsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[specialist_pool.SpecialistPool]: + async def async_generator(): + async for page in self.pages: + for response in page.specialist_pools: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/__init__.py new file mode 100644 index 0000000000..ba8c9d7eb5 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import SpecialistPoolServiceTransport +from .grpc import SpecialistPoolServiceGrpcTransport +from .grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[SpecialistPoolServiceTransport]] +_transport_registry['grpc'] = SpecialistPoolServiceGrpcTransport +_transport_registry['grpc_asyncio'] = SpecialistPoolServiceGrpcAsyncIOTransport + +__all__ = ( + 'SpecialistPoolServiceTransport', + 'SpecialistPoolServiceGrpcTransport', + 'SpecialistPoolServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py new file mode 100644 index 0000000000..fe83634f38 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py @@ -0,0 +1,232 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import specialist_pool +from google.cloud.aiplatform_v1beta1.types import specialist_pool_service +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class SpecialistPoolServiceTransport(abc.ABC): + """Abstract transport class for SpecialistPoolService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. 
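+ # (Resolution order below: a credentials_file is loaded if given,
+ # explicit credentials are used as-is, and otherwise google.auth.default()
+ # pulls credentials from the environment; passing both explicit options
+ # raises DuplicateCredentialArgs.)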
+ if credentials and credentials_file:
+ raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
+
+ if credentials_file is not None:
+ credentials, _ = google.auth.load_credentials_from_file(
+ credentials_file,
+ **scopes_kwargs,
+ quota_project_id=quota_project_id
+ )
+
+ elif credentials is None:
+ credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
+
+ # If the credentials are service account credentials, then always try to use self signed JWT.
+ if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
+ credentials = credentials.with_always_use_jwt_access(True)
+
+ # Save the credentials.
+ self._credentials = credentials
+
+ # TODO(busunkim): This method is in the base transport
+ # to avoid duplicating code across the transport classes. These functions
+ # should be deleted once the minimum required version of google-auth is increased.
+
+ # TODO: Remove this function once google-auth >= 1.25.0 is required
+ @classmethod
+ def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
+ """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
+
+ scopes_kwargs = {}
+
+ if _GOOGLE_AUTH_VERSION and (
+ packaging.version.parse(_GOOGLE_AUTH_VERSION)
+ >= packaging.version.parse("1.25.0")
+ ):
+ scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
+ else:
+ scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
+
+ return scopes_kwargs
+
+ def _prep_wrapped_messages(self, client_info):
+ # Precompute the wrapped methods.
+ self._wrapped_methods = {
+ self.create_specialist_pool: gapic_v1.method.wrap_method(
+ self.create_specialist_pool,
+ default_timeout=5.0,
+ client_info=client_info,
+ ),
+ self.get_specialist_pool: gapic_v1.method.wrap_method(
+ self.get_specialist_pool,
+ default_timeout=5.0,
+ client_info=client_info,
+ ),
+ self.list_specialist_pools: gapic_v1.method.wrap_method(
+ self.list_specialist_pools,
+ default_timeout=5.0,
+ client_info=client_info,
+ ),
+ self.delete_specialist_pool: gapic_v1.method.wrap_method(
+ self.delete_specialist_pool,
+ default_timeout=5.0,
+ client_info=client_info,
+ ),
+ self.update_specialist_pool: gapic_v1.method.wrap_method(
+ self.update_specialist_pool,
+ default_timeout=5.0,
+ client_info=client_info,
+ ),
+ }
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsClient:
+ """Return the client designed to process long-running operations."""
+ raise NotImplementedError()
+
+ @property
+ def create_specialist_pool(self) -> Callable[
+ [specialist_pool_service.CreateSpecialistPoolRequest],
+ Union[
+ operations_pb2.Operation,
+ Awaitable[operations_pb2.Operation]
+ ]]:
+ raise NotImplementedError()
+
+ @property
+ def get_specialist_pool(self) -> Callable[
+ [specialist_pool_service.GetSpecialistPoolRequest],
+ Union[
+ specialist_pool.SpecialistPool,
+ Awaitable[specialist_pool.SpecialistPool]
+ ]]:
+ raise NotImplementedError()
+
+ @property
+ def list_specialist_pools(self) -> Callable[
+ [specialist_pool_service.ListSpecialistPoolsRequest],
+ Union[
+ specialist_pool_service.ListSpecialistPoolsResponse,
+ Awaitable[specialist_pool_service.ListSpecialistPoolsResponse]
+ ]]:
+ raise NotImplementedError()
+
+ @property
+ def delete_specialist_pool(self) -> Callable[
+ [specialist_pool_service.DeleteSpecialistPoolRequest],
+ Union[
+
operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def update_specialist_pool(self) -> Callable[ + [specialist_pool_service.UpdateSpecialistPoolRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'SpecialistPoolServiceTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py new file mode 100644 index 0000000000..341de9c299 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py @@ -0,0 +1,382 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import specialist_pool +from google.cloud.aiplatform_v1beta1.types import specialist_pool_service +from google.longrunning import operations_pb2 # type: ignore +from .base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO + + +class SpecialistPoolServiceGrpcTransport(SpecialistPoolServiceTransport): + """gRPC backend transport for SpecialistPoolService. + + A service for creating and managing Customer SpecialistPools. + When customers start Data Labeling jobs, they can reuse/create + Specialist Pools to bring their own Specialists to label the + data. Customers can add/remove Managers for the Specialist Pool + on Cloud console, then Managers will get email notifications to + manage Specialists and tasks on CrowdCompute console. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
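+
+ Most callers select this transport through the client rather than
+ constructing it directly; passing ``transport="grpc"`` to the client
+ resolves to this class (illustrative)::
+
+ client = SpecialistPoolServiceClient(transport="grpc")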
+ """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. 
+ credentials = False
+ # If a channel was explicitly provided, set it.
+ self._grpc_channel = channel
+ self._ssl_channel_credentials = None
+
+ else:
+ if api_mtls_endpoint:
+ host = api_mtls_endpoint
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
+ if client_cert_source:
+ cert, key = client_cert_source()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+ else:
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
+ cert, key = client_cert_source_for_mtls()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+
+ # The base transport sets the host, credentials and scopes
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ always_use_jwt_access=always_use_jwt_access,
+ )
+
+ if not self._grpc_channel:
+ self._grpc_channel = type(self).create_channel(
+ self._host,
+ credentials=self._credentials,
+ credentials_file=credentials_file,
+ scopes=self._scopes,
+ ssl_credentials=self._ssl_channel_credentials,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Wrap messages. This must be done after self._grpc_channel exists
+ self._prep_wrapped_messages(client_info)
+
+ @classmethod
+ def create_channel(cls,
+ host: str = 'aiplatform.googleapis.com',
+ credentials: ga_credentials.Credentials = None,
+ credentials_file: str = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ **kwargs) -> grpc.Channel:
+ """Create and return a gRPC channel object.
+ Args:
+ host (Optional[str]): The host for the channel to use.
+ credentials (Optional[~.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify this application to the service. If
+ none are specified, the client will attempt to ascertain
+ the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+ scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
+ channel creation.
+ Returns:
+ grpc.Channel: A gRPC channel object.
+
+ Raises:
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+
+ return grpc_helpers.create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ quota_project_id=quota_project_id,
+ default_scopes=cls.AUTH_SCOPES,
+ scopes=scopes,
+ default_host=cls.DEFAULT_HOST,
+ **kwargs
+ )
+
+ @property
+ def grpc_channel(self) -> grpc.Channel:
+ """Return the channel designed to connect to this service.
+ """
+ return self._grpc_channel
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsClient:
+ """Create the client designed to process long-running operations.
+ + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_specialist_pool(self) -> Callable[ + [specialist_pool_service.CreateSpecialistPoolRequest], + operations_pb2.Operation]: + r"""Return a callable for the create specialist pool method over gRPC. + + Creates a SpecialistPool. + + Returns: + Callable[[~.CreateSpecialistPoolRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_specialist_pool' not in self._stubs: + self._stubs['create_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/CreateSpecialistPool', + request_serializer=specialist_pool_service.CreateSpecialistPoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_specialist_pool'] + + @property + def get_specialist_pool(self) -> Callable[ + [specialist_pool_service.GetSpecialistPoolRequest], + specialist_pool.SpecialistPool]: + r"""Return a callable for the get specialist pool method over gRPC. + + Gets a SpecialistPool. + + Returns: + Callable[[~.GetSpecialistPoolRequest], + ~.SpecialistPool]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_specialist_pool' not in self._stubs: + self._stubs['get_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/GetSpecialistPool', + request_serializer=specialist_pool_service.GetSpecialistPoolRequest.serialize, + response_deserializer=specialist_pool.SpecialistPool.deserialize, + ) + return self._stubs['get_specialist_pool'] + + @property + def list_specialist_pools(self) -> Callable[ + [specialist_pool_service.ListSpecialistPoolsRequest], + specialist_pool_service.ListSpecialistPoolsResponse]: + r"""Return a callable for the list specialist pools method over gRPC. + + Lists SpecialistPools in a Location. + + Returns: + Callable[[~.ListSpecialistPoolsRequest], + ~.ListSpecialistPoolsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
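+ # (Each stub is created once per transport instance and cached in
+ # self._stubs, so repeated property access reuses the same callable.)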
+ if 'list_specialist_pools' not in self._stubs: + self._stubs['list_specialist_pools'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/ListSpecialistPools', + request_serializer=specialist_pool_service.ListSpecialistPoolsRequest.serialize, + response_deserializer=specialist_pool_service.ListSpecialistPoolsResponse.deserialize, + ) + return self._stubs['list_specialist_pools'] + + @property + def delete_specialist_pool(self) -> Callable[ + [specialist_pool_service.DeleteSpecialistPoolRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete specialist pool method over gRPC. + + Deletes a SpecialistPool as well as all Specialists + in the pool. + + Returns: + Callable[[~.DeleteSpecialistPoolRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_specialist_pool' not in self._stubs: + self._stubs['delete_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/DeleteSpecialistPool', + request_serializer=specialist_pool_service.DeleteSpecialistPoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_specialist_pool'] + + @property + def update_specialist_pool(self) -> Callable[ + [specialist_pool_service.UpdateSpecialistPoolRequest], + operations_pb2.Operation]: + r"""Return a callable for the update specialist pool method over gRPC. + + Updates a SpecialistPool. + + Returns: + Callable[[~.UpdateSpecialistPoolRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_specialist_pool' not in self._stubs: + self._stubs['update_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/UpdateSpecialistPool', + request_serializer=specialist_pool_service.UpdateSpecialistPoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_specialist_pool'] + + +__all__ = ( + 'SpecialistPoolServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..df1f92c5fa --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py @@ -0,0 +1,386 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import grpc_helpers_async # type: ignore
+from google.api_core import operations_v1 # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+import packaging.version
+
+import grpc # type: ignore
+from grpc.experimental import aio # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import specialist_pool
+from google.cloud.aiplatform_v1beta1.types import specialist_pool_service
+from google.longrunning import operations_pb2 # type: ignore
+from .base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO
+from .grpc import SpecialistPoolServiceGrpcTransport
+
+
+class SpecialistPoolServiceGrpcAsyncIOTransport(SpecialistPoolServiceTransport):
+ """gRPC AsyncIO backend transport for SpecialistPoolService.
+
+ A service for creating and managing Customer SpecialistPools.
+ When customers start Data Labeling jobs, they can reuse/create
+ Specialist Pools to bring their own Specialists to label the
+ data. Customers can add/remove Managers for the Specialist Pool
+ on Cloud console, then Managers will get email notifications to
+ manage Specialists and tasks on CrowdCompute console.
+
+ This class defines the same methods as the primary client, so the
+ primary client can load the underlying transport implementation
+ and call it.
+
+ It sends protocol buffers over the wire using gRPC (which is built on
+ top of HTTP/2); the ``grpcio`` package must be installed.
+ """
+
+ _grpc_channel: aio.Channel
+ _stubs: Dict[str, Callable] = {}
+
+ @classmethod
+ def create_channel(cls,
+ host: str = 'aiplatform.googleapis.com',
+ credentials: ga_credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ **kwargs) -> aio.Channel:
+ """Create and return a gRPC AsyncIO channel object.
+ Args:
+ host (Optional[str]): The host for the channel to use.
+ credentials (Optional[~.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify this application to the service. If
+ none are specified, the client will attempt to ascertain
+ the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+ scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
+ channel creation.
+ Returns:
+ aio.Channel: A gRPC AsyncIO channel object.
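+
+ Example (illustrative; relies on application default credentials)::
+
+ channel = SpecialistPoolServiceGrpcAsyncIOTransport.create_channel()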
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_specialist_pool(self) -> Callable[ + [specialist_pool_service.CreateSpecialistPoolRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create specialist pool method over gRPC. + + Creates a SpecialistPool. + + Returns: + Callable[[~.CreateSpecialistPoolRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_specialist_pool' not in self._stubs: + self._stubs['create_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/CreateSpecialistPool', + request_serializer=specialist_pool_service.CreateSpecialistPoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_specialist_pool'] + + @property + def get_specialist_pool(self) -> Callable[ + [specialist_pool_service.GetSpecialistPoolRequest], + Awaitable[specialist_pool.SpecialistPool]]: + r"""Return a callable for the get specialist pool method over gRPC. + + Gets a SpecialistPool. + + Returns: + Callable[[~.GetSpecialistPoolRequest], + Awaitable[~.SpecialistPool]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_specialist_pool' not in self._stubs: + self._stubs['get_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/GetSpecialistPool', + request_serializer=specialist_pool_service.GetSpecialistPoolRequest.serialize, + response_deserializer=specialist_pool.SpecialistPool.deserialize, + ) + return self._stubs['get_specialist_pool'] + + @property + def list_specialist_pools(self) -> Callable[ + [specialist_pool_service.ListSpecialistPoolsRequest], + Awaitable[specialist_pool_service.ListSpecialistPoolsResponse]]: + r"""Return a callable for the list specialist pools method over gRPC. + + Lists SpecialistPools in a Location. + + Returns: + Callable[[~.ListSpecialistPoolsRequest], + Awaitable[~.ListSpecialistPoolsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_specialist_pools' not in self._stubs: + self._stubs['list_specialist_pools'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/ListSpecialistPools', + request_serializer=specialist_pool_service.ListSpecialistPoolsRequest.serialize, + response_deserializer=specialist_pool_service.ListSpecialistPoolsResponse.deserialize, + ) + return self._stubs['list_specialist_pools'] + + @property + def delete_specialist_pool(self) -> Callable[ + [specialist_pool_service.DeleteSpecialistPoolRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete specialist pool method over gRPC. + + Deletes a SpecialistPool as well as all Specialists + in the pool. + + Returns: + Callable[[~.DeleteSpecialistPoolRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
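+        # (Note: the response deserializer is ``operations_pb2.Operation.FromString``
+        # because deletion is long-running; the ``operations_client`` property
+        # above is what polls the returned operation to completion.)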
+ if 'delete_specialist_pool' not in self._stubs: + self._stubs['delete_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/DeleteSpecialistPool', + request_serializer=specialist_pool_service.DeleteSpecialistPoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_specialist_pool'] + + @property + def update_specialist_pool(self) -> Callable[ + [specialist_pool_service.UpdateSpecialistPoolRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the update specialist pool method over gRPC. + + Updates a SpecialistPool. + + Returns: + Callable[[~.UpdateSpecialistPoolRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_specialist_pool' not in self._stubs: + self._stubs['update_specialist_pool'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/UpdateSpecialistPool', + request_serializer=specialist_pool_service.UpdateSpecialistPoolRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_specialist_pool'] + + +__all__ = ( + 'SpecialistPoolServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/__init__.py new file mode 100644 index 0000000000..fa8edec482 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import TensorboardServiceClient +from .async_client import TensorboardServiceAsyncClient + +__all__ = ( + 'TensorboardServiceClient', + 'TensorboardServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py new file mode 100644 index 0000000000..af1d3fe6ec --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py @@ -0,0 +1,2348 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import functools +import re +from typing import Dict, AsyncIterable, Awaitable, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.tensorboard_service import pagers +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard as gca_tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard_data +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment as gca_tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_service +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series as gca_tensorboard_time_series +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import TensorboardServiceGrpcAsyncIOTransport +from .client import TensorboardServiceClient + + +class TensorboardServiceAsyncClient: + """TensorboardService""" + + _client: TensorboardServiceClient + + DEFAULT_ENDPOINT = TensorboardServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = TensorboardServiceClient.DEFAULT_MTLS_ENDPOINT + + tensorboard_path = staticmethod(TensorboardServiceClient.tensorboard_path) + parse_tensorboard_path = staticmethod(TensorboardServiceClient.parse_tensorboard_path) + tensorboard_experiment_path = staticmethod(TensorboardServiceClient.tensorboard_experiment_path) + parse_tensorboard_experiment_path = staticmethod(TensorboardServiceClient.parse_tensorboard_experiment_path) + tensorboard_run_path = staticmethod(TensorboardServiceClient.tensorboard_run_path) + parse_tensorboard_run_path = staticmethod(TensorboardServiceClient.parse_tensorboard_run_path) + tensorboard_time_series_path = staticmethod(TensorboardServiceClient.tensorboard_time_series_path) + parse_tensorboard_time_series_path = staticmethod(TensorboardServiceClient.parse_tensorboard_time_series_path) + 
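+    # (Illustrative, with an assumed argument order matching the generated
+    # helpers: the path methods render fully-qualified resource names, e.g.
+    #   TensorboardServiceAsyncClient.tensorboard_path('my-project', 'us-central1', '123')
+    #   -> 'projects/my-project/locations/us-central1/tensorboards/123'
+    # and the parse_* counterparts invert that mapping into a dict of fields.)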
common_billing_account_path = staticmethod(TensorboardServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(TensorboardServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(TensorboardServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(TensorboardServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(TensorboardServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(TensorboardServiceClient.parse_common_organization_path) + common_project_path = staticmethod(TensorboardServiceClient.common_project_path) + parse_common_project_path = staticmethod(TensorboardServiceClient.parse_common_project_path) + common_location_path = staticmethod(TensorboardServiceClient.common_location_path) + parse_common_location_path = staticmethod(TensorboardServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TensorboardServiceAsyncClient: The constructed client. + """ + return TensorboardServiceClient.from_service_account_info.__func__(TensorboardServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TensorboardServiceAsyncClient: The constructed client. + """ + return TensorboardServiceClient.from_service_account_file.__func__(TensorboardServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> TensorboardServiceTransport: + """Returns the transport used by the client instance. + + Returns: + TensorboardServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(TensorboardServiceClient).get_transport_class, type(TensorboardServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, TensorboardServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the tensorboard service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.TensorboardServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = TensorboardServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_tensorboard(self, + request: tensorboard_service.CreateTensorboardRequest = None, + *, + parent: str = None, + tensorboard: gca_tensorboard.Tensorboard = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a Tensorboard. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateTensorboardRequest`): + The request object. Request message for + [TensorboardService.CreateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboard]. + parent (:class:`str`): + Required. The resource name of the Location to create + the Tensorboard in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard (:class:`google.cloud.aiplatform_v1beta1.types.Tensorboard`): + Required. The Tensorboard to create. + This corresponds to the ``tensorboard`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. + A default Tensorboard is provided in each region of a + GCP project. If needed users can also create extra + Tensorboards in their projects. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, tensorboard]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.CreateTensorboardRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
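+        # (Illustrative flattened call, with hypothetical resource names:
+        #   await client.create_tensorboard(
+        #       parent='projects/my-project/locations/us-central1',
+        #       tensorboard=gca_tensorboard.Tensorboard(display_name='my-tb'),
+        #   )
+        # is equivalent to building a CreateTensorboardRequest by hand.)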
+ if parent is not None: + request.parent = parent + if tensorboard is not None: + request.tensorboard = tensorboard + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_tensorboard, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_tensorboard.Tensorboard, + metadata_type=tensorboard_service.CreateTensorboardOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_tensorboard(self, + request: tensorboard_service.GetTensorboardRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard.Tensorboard: + r"""Gets a Tensorboard. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetTensorboardRequest`): + The request object. Request message for + [TensorboardService.GetTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboard]. + name (:class:`str`): + Required. The name of the Tensorboard resource. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Tensorboard: + Tensorboard is a physical database + that stores users' training metrics. A + default Tensorboard is provided in each + region of a GCP project. If needed users + can also create extra Tensorboards in + their projects. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.GetTensorboardRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_tensorboard, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
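+        # (Illustrative usage, with a hypothetical resource name:
+        #   tb = await client.get_tensorboard(
+        #       name='projects/my-project/locations/us-central1/tensorboards/123')
+        # returns the Tensorboard resource directly; there is no operation to poll.)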
+ return response + + async def update_tensorboard(self, + request: tensorboard_service.UpdateTensorboardRequest = None, + *, + tensorboard: gca_tensorboard.Tensorboard = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates a Tensorboard. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRequest`): + The request object. Request message for + [TensorboardService.UpdateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboard]. + tensorboard (:class:`google.cloud.aiplatform_v1beta1.types.Tensorboard`): + Required. The Tensorboard's ``name`` field is used to + identify the Tensorboard to be updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``tensorboard`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. Field mask is used to specify the fields to be + overwritten in the Tensorboard resource by the update. + The fields specified in the update_mask are relative to + the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then all fields will be overwritten if + new values are specified. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. + A default Tensorboard is provided in each region of a + GCP project. If needed users can also create extra + Tensorboards in their projects. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.UpdateTensorboardRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard is not None: + request.tensorboard = tensorboard + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_tensorboard, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard.name", request.tensorboard.name), + )), + ) + + # Send the request. 
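+        # (The Operation proto returned here is wrapped below into an
+        # AsyncOperation whose result() resolves to the updated Tensorboard.)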
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_tensorboard.Tensorboard, + metadata_type=tensorboard_service.UpdateTensorboardOperationMetadata, + ) + + # Done; return the response. + return response + + async def list_tensorboards(self, + request: tensorboard_service.ListTensorboardsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTensorboardsAsyncPager: + r"""Lists Tensorboards in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest`): + The request object. Request message for + [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. + parent (:class:`str`): + Required. The resource name of the + Location to list Tensorboards. Format: + 'projects/{project}/locations/{location}' + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardsAsyncPager: + Response message for + [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.ListTensorboardsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_tensorboards, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTensorboardsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
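+        # (Illustrative iteration over the pager:
+        #   pager = await client.list_tensorboards(
+        #       parent='projects/my-project/locations/us-central1')
+        #   async for tb in pager:
+        #       print(tb.name)
+        # Additional pages are fetched transparently during iteration.)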
+ return response + + async def delete_tensorboard(self, + request: tensorboard_service.DeleteTensorboardRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a Tensorboard. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRequest`): + The request object. Request message for + [TensorboardService.DeleteTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboard]. + name (:class:`str`): + Required. The name of the Tensorboard to be deleted. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.DeleteTensorboardRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_tensorboard, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
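+        # (Illustrative usage: ``op = await client.delete_tensorboard(name=...)``
+        # then ``await op.result()`` blocks until deletion completes and
+        # resolves to Empty.)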
+ return response + + async def create_tensorboard_experiment(self, + request: tensorboard_service.CreateTensorboardExperimentRequest = None, + *, + parent: str = None, + tensorboard_experiment: gca_tensorboard_experiment.TensorboardExperiment = None, + tensorboard_experiment_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_experiment.TensorboardExperiment: + r"""Creates a TensorboardExperiment. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateTensorboardExperimentRequest`): + The request object. Request message for + [TensorboardService.CreateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardExperiment]. + parent (:class:`str`): + Required. The resource name of the Tensorboard to create + the TensorboardExperiment in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_experiment (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardExperiment`): + The TensorboardExperiment to create. + This corresponds to the ``tensorboard_experiment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_experiment_id (:class:`str`): + Required. The ID to use for the Tensorboard experiment, + which will become the final component of the Tensorboard + experiment's resource name. + + This value should be 1-128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``tensorboard_experiment_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: + A TensorboardExperiment is a group of + TensorboardRuns, that are typically the + results of a training job run, in a + Tensorboard. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, tensorboard_experiment, tensorboard_experiment_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.CreateTensorboardExperimentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if tensorboard_experiment is not None: + request.tensorboard_experiment = tensorboard_experiment + if tensorboard_experiment_id is not None: + request.tensorboard_experiment_id = tensorboard_experiment_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_tensorboard_experiment, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
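+        # (Note, based on the standard GAPIC routing helper: this appends an
+        # ``x-goog-request-params`` metadata entry such as
+        # ``parent=projects/p/locations/l/tensorboards/t`` so the backend can
+        # route the request to the right resource.)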
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_tensorboard_experiment(self, + request: tensorboard_service.GetTensorboardExperimentRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_experiment.TensorboardExperiment: + r"""Gets a TensorboardExperiment. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetTensorboardExperimentRequest`): + The request object. Request message for + [TensorboardService.GetTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardExperiment]. + name (:class:`str`): + Required. The name of the TensorboardExperiment + resource. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: + A TensorboardExperiment is a group of + TensorboardRuns, that are typically the + results of a training job run, in a + Tensorboard. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.GetTensorboardExperimentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_tensorboard_experiment, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_tensorboard_experiment(self, + request: tensorboard_service.UpdateTensorboardExperimentRequest = None, + *, + tensorboard_experiment: gca_tensorboard_experiment.TensorboardExperiment = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_experiment.TensorboardExperiment: + r"""Updates a TensorboardExperiment. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateTensorboardExperimentRequest`): + The request object. 
Request message for + [TensorboardService.UpdateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardExperiment]. + tensorboard_experiment (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardExperiment`): + Required. The TensorboardExperiment's ``name`` field is + used to identify the TensorboardExperiment to be + updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``tensorboard_experiment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. Field mask is used to specify the fields to be + overwritten in the TensorboardExperiment resource by the + update. The fields specified in the update_mask are + relative to the resource, not the full request. A field + will be overwritten if it is in the mask. If the user + does not provide a mask then all fields will be + overwritten if new values are specified. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: + A TensorboardExperiment is a group of + TensorboardRuns, that are typically the + results of a training job run, in a + Tensorboard. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_experiment, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.UpdateTensorboardExperimentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_experiment is not None: + request.tensorboard_experiment = tensorboard_experiment + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_tensorboard_experiment, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_experiment.name", request.tensorboard_experiment.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_tensorboard_experiments(self, + request: tensorboard_service.ListTensorboardExperimentsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTensorboardExperimentsAsyncPager: + r"""Lists TensorboardExperiments in a Location. 
+ + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest`): + The request object. Request message for + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]. + parent (:class:`str`): + Required. The resource name of the + Tensorboard to list + TensorboardExperiments. Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}' + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardExperimentsAsyncPager: + Response message for + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.ListTensorboardExperimentsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_tensorboard_experiments, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTensorboardExperimentsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_tensorboard_experiment(self, + request: tensorboard_service.DeleteTensorboardExperimentRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a TensorboardExperiment. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteTensorboardExperimentRequest`): + The request object. Request message for + [TensorboardService.DeleteTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardExperiment]. + name (:class:`str`): + Required. The name of the TensorboardExperiment to be + deleted. 
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.DeleteTensorboardExperimentRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_tensorboard_experiment, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def create_tensorboard_run(self, + request: tensorboard_service.CreateTensorboardRunRequest = None, + *, + parent: str = None, + tensorboard_run: gca_tensorboard_run.TensorboardRun = None, + tensorboard_run_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_run.TensorboardRun: + r"""Creates a TensorboardRun. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest`): + The request object. Request message for + [TensorboardService.CreateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun]. + parent (:class:`str`): + Required. The resource name of the Tensorboard to create + the TensorboardRun in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ tensorboard_run (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardRun`): + Required. The TensorboardRun to + create. + + This corresponds to the ``tensorboard_run`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_run_id (:class:`str`): + Required. The ID to use for the Tensorboard run, which + will become the final component of the Tensorboard run's + resource name. + + This value should be 1-128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``tensorboard_run_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardRun: + TensorboardRun maps to a specific + execution of a training job with a given + set of hyperparameter values, model + definition, dataset, etc + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, tensorboard_run, tensorboard_run_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.CreateTensorboardRunRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if tensorboard_run is not None: + request.tensorboard_run = tensorboard_run + if tensorboard_run_id is not None: + request.tensorboard_run_id = tensorboard_run_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_tensorboard_run, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_tensorboard_run(self, + request: tensorboard_service.GetTensorboardRunRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_run.TensorboardRun: + r"""Gets a TensorboardRun. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.GetTensorboardRunRequest`): + The request object. Request message for + [TensorboardService.GetTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardRun]. + name (:class:`str`): + Required. The name of the TensorboardRun resource. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.types.TensorboardRun:
+                TensorboardRun maps to a specific
+                execution of a training job with a given
+                set of hyperparameter values, model
+                definition, dataset, etc.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = tensorboard_service.GetTensorboardRunRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.get_tensorboard_run,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def update_tensorboard_run(self,
+            request: tensorboard_service.UpdateTensorboardRunRequest = None,
+            *,
+            tensorboard_run: gca_tensorboard_run.TensorboardRun = None,
+            update_mask: field_mask_pb2.FieldMask = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> gca_tensorboard_run.TensorboardRun:
+        r"""Updates a TensorboardRun.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRunRequest`):
+                The request object. Request message for
+                [TensorboardService.UpdateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardRun].
+            tensorboard_run (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardRun`):
+                Required. The TensorboardRun's ``name`` field is used to
+                identify the TensorboardRun to be updated. Format:
+                ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}``
+
+                This corresponds to the ``tensorboard_run`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
+                Required. Field mask is used to specify the fields to be
+                overwritten in the TensorboardRun resource by the
+                update. The fields specified in the update_mask are
+                relative to the resource, not the full request. A field
+                will be overwritten if it is in the mask. If the user
+                does not provide a mask then all fields will be
+                overwritten if new values are specified.
+
+                This corresponds to the ``update_mask`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.types.TensorboardRun:
+                TensorboardRun maps to a specific
+                execution of a training job with a given
+                set of hyperparameter values, model
+                definition, dataset, etc.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([tensorboard_run, update_mask])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = tensorboard_service.UpdateTensorboardRunRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if tensorboard_run is not None:
+            request.tensorboard_run = tensorboard_run
+        if update_mask is not None:
+            request.update_mask = update_mask
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.update_tensorboard_run,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("tensorboard_run.name", request.tensorboard_run.name),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def list_tensorboard_runs(self,
+            request: tensorboard_service.ListTensorboardRunsRequest = None,
+            *,
+            parent: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> pagers.ListTensorboardRunsAsyncPager:
+        r"""Lists TensorboardRuns in a Location.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest`):
+                The request object. Request message for
+                [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns].
+            parent (:class:`str`):
+                Required. The resource name of the
+                Tensorboard to list TensorboardRuns.
+                Format:
+                'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}'
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardRunsAsyncPager:
+                Response message for
+                [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns].
+
+                Iterating over this object will yield results and
+                resolve additional pages automatically.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
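+        # A minimal caller-side sketch, illustrative only (the parent string
+        # below is a placeholder resource name):
+        #
+        #     pager = await client.list_tensorboard_runs(
+        #         parent="projects/p/locations/us-central1/tensorboards/1/experiments/e")
+        #     async for tensorboard_run in pager:
+        #         ...  # additional pages are fetched on demand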
+        has_flattened_params = any([parent])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = tensorboard_service.ListTensorboardRunsRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if parent is not None:
+            request.parent = parent
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.list_tensorboard_runs,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("parent", request.parent),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # This method is paged; wrap the response in a pager, which provides
+        # an `__aiter__` convenience method.
+        response = pagers.ListTensorboardRunsAsyncPager(
+            method=rpc,
+            request=request,
+            response=response,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def delete_tensorboard_run(self,
+            request: tensorboard_service.DeleteTensorboardRunRequest = None,
+            *,
+            name: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> operation_async.AsyncOperation:
+        r"""Deletes a TensorboardRun.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRunRequest`):
+                The request object. Request message for
+                [TensorboardService.DeleteTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardRun].
+            name (:class:`str`):
+                Required. The name of the TensorboardRun to be deleted.
+                Format:
+                ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation_async.AsyncOperation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
+                empty messages in your APIs. A typical example is to
+                use it as the request or the response type of an API
+                method. For instance:
+
+                service Foo {
+                  rpc Bar(google.protobuf.Empty) returns
+                  (google.protobuf.Empty);
+
+                }
+
+                The JSON representation for Empty is an empty JSON
+                object {}.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
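+        # A minimal caller-side sketch, illustrative only (the resource name
+        # is a placeholder):
+        #
+        #     operation = await client.delete_tensorboard_run(
+        #         name="projects/p/locations/us-central1/tensorboards/1"
+        #              "/experiments/e/runs/r")
+        #     await operation.result()  # resolves to Empty once the delete finishes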
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = tensorboard_service.DeleteTensorboardRunRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.delete_tensorboard_run,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
+        response = operation_async.from_gapic(
+            response,
+            self._client._transport.operations_client,
+            empty_pb2.Empty,
+            metadata_type=gca_operation.DeleteOperationMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def create_tensorboard_time_series(self,
+            request: tensorboard_service.CreateTensorboardTimeSeriesRequest = None,
+            *,
+            parent: str = None,
+            tensorboard_time_series: gca_tensorboard_time_series.TensorboardTimeSeries = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> gca_tensorboard_time_series.TensorboardTimeSeries:
+        r"""Creates a TensorboardTimeSeries.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest`):
+                The request object. Request message for
+                [TensorboardService.CreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardTimeSeries].
+            parent (:class:`str`):
+                Required. The resource name of the TensorboardRun to
+                create the TensorboardTimeSeries in. Format:
+                ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}``
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            tensorboard_time_series (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries`):
+                Required. The TensorboardTimeSeries
+                to create.
+
+                This corresponds to the ``tensorboard_time_series`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries:
+                TensorboardTimeSeries maps to time
+                series produced in training runs.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
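+        # A minimal caller-side sketch, illustrative only (placeholder names;
+        # ValueType.SCALAR is one of the generated
+        # TensorboardTimeSeries.ValueType values):
+        #
+        #     ts = await client.create_tensorboard_time_series(
+        #         parent="projects/p/locations/us-central1/tensorboards/1"
+        #                "/experiments/e/runs/r",
+        #         tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(
+        #             display_name="loss",
+        #             value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
+        #         ),
+        #     )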
+        has_flattened_params = any([parent, tensorboard_time_series])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = tensorboard_service.CreateTensorboardTimeSeriesRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if parent is not None:
+            request.parent = parent
+        if tensorboard_time_series is not None:
+            request.tensorboard_time_series = tensorboard_time_series
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.create_tensorboard_time_series,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("parent", request.parent),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def get_tensorboard_time_series(self,
+            request: tensorboard_service.GetTensorboardTimeSeriesRequest = None,
+            *,
+            name: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> tensorboard_time_series.TensorboardTimeSeries:
+        r"""Gets a TensorboardTimeSeries.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.GetTensorboardTimeSeriesRequest`):
+                The request object. Request message for
+                [TensorboardService.GetTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardTimeSeries].
+            name (:class:`str`):
+                Required. The name of the TensorboardTimeSeries
+                resource. Format:
+                ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries:
+                TensorboardTimeSeries maps to time
+                series produced in training runs.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = tensorboard_service.GetTensorboardTimeSeriesRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.get_tensorboard_time_series,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
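+        # (The routing header below is sent as "x-goog-request-params", e.g.
+        # name=projects/.../timeSeries/5, so the frontend can route the call
+        # to the regional backend that owns the resource.)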
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def update_tensorboard_time_series(self,
+            request: tensorboard_service.UpdateTensorboardTimeSeriesRequest = None,
+            *,
+            tensorboard_time_series: gca_tensorboard_time_series.TensorboardTimeSeries = None,
+            update_mask: field_mask_pb2.FieldMask = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> gca_tensorboard_time_series.TensorboardTimeSeries:
+        r"""Updates a TensorboardTimeSeries.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateTensorboardTimeSeriesRequest`):
+                The request object. Request message for
+                [TensorboardService.UpdateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardTimeSeries].
+            tensorboard_time_series (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries`):
+                Required. The TensorboardTimeSeries' ``name`` field is
+                used to identify the TensorboardTimeSeries to be
+                updated. Format:
+                ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}``
+
+                This corresponds to the ``tensorboard_time_series`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
+                Required. Field mask is used to specify the fields to be
+                overwritten in the TensorboardTimeSeries resource by the
+                update. The fields specified in the update_mask are
+                relative to the resource, not the full request. A field
+                will be overwritten if it is in the mask. If the user
+                does not provide a mask then all fields will be
+                overwritten if new values are specified.
+
+                This corresponds to the ``update_mask`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries:
+                TensorboardTimeSeries maps to time
+                series produced in training runs.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([tensorboard_time_series, update_mask])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = tensorboard_service.UpdateTensorboardTimeSeriesRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if tensorboard_time_series is not None:
+            request.tensorboard_time_series = tensorboard_time_series
+        if update_mask is not None:
+            request.update_mask = update_mask
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
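+        # (wrap_method bakes the configured default retry/timeout into the
+        # returned callable; explicit per-call ``retry=``/``timeout=``
+        # arguments still take precedence.)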
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_tensorboard_time_series, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_time_series.name", request.tensorboard_time_series.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_tensorboard_time_series(self, + request: tensorboard_service.ListTensorboardTimeSeriesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTensorboardTimeSeriesAsyncPager: + r"""Lists TensorboardTimeSeries in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest`): + The request object. Request message for + [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries]. + parent (:class:`str`): + Required. The resource name of the + TensorboardRun to list + TensorboardTimeSeries. Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}' + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardTimeSeriesAsyncPager: + Response message for + [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.ListTensorboardTimeSeriesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_tensorboard_time_series, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. 
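+        # A minimal caller-side sketch of consuming the pager (``run_name``
+        # is a hypothetical full TensorboardRun resource name):
+        #
+        #     pager = await client.list_tensorboard_time_series(parent=run_name)
+        #     async for time_series in pager:
+        #         ...  # one TensorboardTimeSeries per item, across all pages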
+        response = pagers.ListTensorboardTimeSeriesAsyncPager(
+            method=rpc,
+            request=request,
+            response=response,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def delete_tensorboard_time_series(self,
+            request: tensorboard_service.DeleteTensorboardTimeSeriesRequest = None,
+            *,
+            name: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> operation_async.AsyncOperation:
+        r"""Deletes a TensorboardTimeSeries.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteTensorboardTimeSeriesRequest`):
+                The request object. Request message for
+                [TensorboardService.DeleteTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardTimeSeries].
+            name (:class:`str`):
+                Required. The name of the TensorboardTimeSeries to be
+                deleted. Format:
+                ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation_async.AsyncOperation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
+                empty messages in your APIs. A typical example is to
+                use it as the request or the response type of an API
+                method. For instance:
+
+                service Foo {
+                  rpc Bar(google.protobuf.Empty) returns
+                  (google.protobuf.Empty);
+
+                }
+
+                The JSON representation for Empty is an empty JSON
+                object {}.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = tensorboard_service.DeleteTensorboardTimeSeriesRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.delete_tensorboard_time_series,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
+        response = operation_async.from_gapic(
+            response,
+            self._client._transport.operations_client,
+            empty_pb2.Empty,
+            metadata_type=gca_operation.DeleteOperationMetadata,
+        )
+
+        # Done; return the response.
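+        # (Callers can ``await response.result()`` to block until the
+        # deletion completes; the operation resolves to Empty.)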
+ return response + + async def read_tensorboard_time_series_data(self, + request: tensorboard_service.ReadTensorboardTimeSeriesDataRequest = None, + *, + tensorboard_time_series: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_service.ReadTensorboardTimeSeriesDataResponse: + r"""Reads a TensorboardTimeSeries' data. Data is returned in + paginated responses. By default, if the number of data points + stored is less than 1000, all data will be returned. Otherwise, + 1000 data points will be randomly selected from this time series + and returned. This value can be changed by changing + max_data_points. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataRequest`): + The request object. Request message for + [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. + tensorboard_time_series (:class:`str`): + Required. The resource name of the TensorboardTimeSeries + to read data from. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``tensorboard_time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataResponse: + Response message for + [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_time_series]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_time_series is not None: + request.tensorboard_time_series = tensorboard_time_series + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.read_tensorboard_time_series_data, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_time_series", request.tensorboard_time_series), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
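+        # A minimal caller-side sketch, illustrative only (placeholder
+        # resource name; by default at most 1000 points come back, randomly
+        # sampled, unless max_data_points is set on the request):
+        #
+        #     data = await client.read_tensorboard_time_series_data(
+        #         tensorboard_time_series="projects/p/locations/us-central1"
+        #             "/tensorboards/1/experiments/e/runs/r/timeSeries/5")
+        #     points = data.time_series_data.values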
+        return response
+
+    def read_tensorboard_blob_data(self,
+            request: tensorboard_service.ReadTensorboardBlobDataRequest = None,
+            *,
+            time_series: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> Awaitable[AsyncIterable[tensorboard_service.ReadTensorboardBlobDataResponse]]:
+        r"""Gets bytes of TensorboardBlobs.
+        This is to allow reading blob data stored in the consumer
+        project's Cloud Storage bucket without users having to
+        obtain Cloud Storage access permission.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataRequest`):
+                The request object. Request message for
+                [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData].
+            time_series (:class:`str`):
+                Required. The resource name of the TensorboardTimeSeries
+                to list Blobs. Format:
+                'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}'
+
+                This corresponds to the ``time_series`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            AsyncIterable[google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataResponse]:
+                Response message for
+                [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData].
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([time_series])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = tensorboard_service.ReadTensorboardBlobDataRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if time_series is not None:
+            request.time_series = time_series
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.read_tensorboard_blob_data,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("time_series", request.time_series),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def write_tensorboard_run_data(self,
+            request: tensorboard_service.WriteTensorboardRunDataRequest = None,
+            *,
+            tensorboard_run: str = None,
+            time_series_data: Sequence[tensorboard_data.TimeSeriesData] = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> tensorboard_service.WriteTensorboardRunDataResponse:
+        r"""Write time series data points into multiple
+        TensorboardTimeSeries under a TensorboardRun. If any
+        data fail to be ingested, an error will be returned.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest`):
+                The request object. Request message for
+                [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData].
+            tensorboard_run (:class:`str`):
+                Required. The resource name of the TensorboardRun to
+                write data to. Format:
+                ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}``
+
+                This corresponds to the ``tensorboard_run`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            time_series_data (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesData]`):
+                Required. The TensorboardTimeSeries
+                data to write. Values within a time
+                series are indexed by their step value.
+                Repeated writes to the same step will
+                overwrite the existing value for that
+                step.
+                The upper limit of data points per write
+                request is 5000.
+
+                This corresponds to the ``time_series_data`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataResponse:
+                Response message for
+                [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData].
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([tensorboard_run, time_series_data])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = tensorboard_service.WriteTensorboardRunDataRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if tensorboard_run is not None:
+            request.tensorboard_run = tensorboard_run
+        if time_series_data:
+            request.time_series_data.extend(time_series_data)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.write_tensorboard_run_data,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("tensorboard_run", request.tensorboard_run),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def export_tensorboard_time_series_data(self,
+            request: tensorboard_service.ExportTensorboardTimeSeriesDataRequest = None,
+            *,
+            tensorboard_time_series: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> pagers.ExportTensorboardTimeSeriesDataAsyncPager:
+        r"""Exports a TensorboardTimeSeries' data. Data is
+        returned in paginated responses.
+ + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest`): + The request object. Request message for + [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. + tensorboard_time_series (:class:`str`): + Required. The resource name of the TensorboardTimeSeries + to export data from. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``tensorboard_time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ExportTensorboardTimeSeriesDataAsyncPager: + Response message for + [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_time_series]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_time_series is not None: + request.tensorboard_time_series = tensorboard_time_series + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.export_tensorboard_time_series_data, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_time_series", request.tensorboard_time_series), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ExportTensorboardTimeSeriesDataAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
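+        # A minimal caller-side sketch of draining every exported point
+        # (``time_series_name`` is a hypothetical full resource name):
+        #
+        #     pager = await client.export_tensorboard_time_series_data(
+        #         tensorboard_time_series=time_series_name)
+        #     async for point in pager:
+        #         ...  # TimeSeriesDataPoint items; pages resolve automatically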
+ return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "TensorboardServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py new file mode 100644 index 0000000000..140d9569e3 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py @@ -0,0 +1,2562 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Iterable, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.tensorboard_service import pagers +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard as gca_tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard_data +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment as gca_tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_service +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series as gca_tensorboard_time_series +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO +from 
.transports.grpc import TensorboardServiceGrpcTransport
+from .transports.grpc_asyncio import TensorboardServiceGrpcAsyncIOTransport
+
+
+class TensorboardServiceClientMeta(type):
+    """Metaclass for the TensorboardService client.
+
+    This provides class-level methods for building and retrieving
+    support objects (e.g. transport) without polluting the client instance
+    objects.
+    """
+    _transport_registry = OrderedDict()  # type: Dict[str, Type[TensorboardServiceTransport]]
+    _transport_registry["grpc"] = TensorboardServiceGrpcTransport
+    _transport_registry["grpc_asyncio"] = TensorboardServiceGrpcAsyncIOTransport
+
+    def get_transport_class(cls,
+            label: str = None,
+        ) -> Type[TensorboardServiceTransport]:
+        """Returns an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class TensorboardServiceClient(metaclass=TensorboardServiceClientMeta):
+    """TensorboardService"""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            TensorboardServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            TensorboardServiceClient: The constructed client.
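+
+        A minimal usage sketch (the key-file path is a placeholder):
+
+            client = TensorboardServiceClient.from_service_account_file(
+                "service-account.json")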
+ """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> TensorboardServiceTransport: + """Returns the transport used by the client instance. + + Returns: + TensorboardServiceTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def tensorboard_path(project: str,location: str,tensorboard: str,) -> str: + """Returns a fully-qualified tensorboard string.""" + return "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(project=project, location=location, tensorboard=tensorboard, ) + + @staticmethod + def parse_tensorboard_path(path: str) -> Dict[str,str]: + """Parses a tensorboard path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def tensorboard_experiment_path(project: str,location: str,tensorboard: str,experiment: str,) -> str: + """Returns a fully-qualified tensorboard_experiment string.""" + return "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, ) + + @staticmethod + def parse_tensorboard_experiment_path(path: str) -> Dict[str,str]: + """Parses a tensorboard_experiment path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)/experiments/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def tensorboard_run_path(project: str,location: str,tensorboard: str,experiment: str,run: str,) -> str: + """Returns a fully-qualified tensorboard_run string.""" + return "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, run=run, ) + + @staticmethod + def parse_tensorboard_run_path(path: str) -> Dict[str,str]: + """Parses a tensorboard_run path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)/experiments/(?P.+?)/runs/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def tensorboard_time_series_path(project: str,location: str,tensorboard: str,experiment: str,run: str,time_series: str,) -> str: + """Returns a fully-qualified tensorboard_time_series string.""" + return "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, run=run, time_series=time_series, ) + + @staticmethod + def parse_tensorboard_time_series_path(path: str) -> Dict[str,str]: + """Parses a tensorboard_time_series path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)/experiments/(?P.+?)/runs/(?P.+?)/timeSeries/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account 
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse an organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, TensorboardServiceTransport, None] = None,
+            client_options: Optional[client_options_lib.ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the tensorboard service client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, TensorboardServiceTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present.
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, TensorboardServiceTransport): + # transport is a TensorboardServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_tensorboard(self, + request: tensorboard_service.CreateTensorboardRequest = None, + *, + parent: str = None, + tensorboard: gca_tensorboard.Tensorboard = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a Tensorboard. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateTensorboardRequest): + The request object. Request message for + [TensorboardService.CreateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboard]. + parent (str): + Required. The resource name of the Location to create + the Tensorboard in. 
Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard (google.cloud.aiplatform_v1beta1.types.Tensorboard): + Required. The Tensorboard to create. + This corresponds to the ``tensorboard`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. + A default Tensorboard is provided in each region of a + GCP project. If needed users can also create extra + Tensorboards in their projects. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, tensorboard]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.CreateTensorboardRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.CreateTensorboardRequest): + request = tensorboard_service.CreateTensorboardRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if tensorboard is not None: + request.tensorboard = tensorboard + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_tensorboard] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_tensorboard.Tensorboard, + metadata_type=tensorboard_service.CreateTensorboardOperationMetadata, + ) + + # Done; return the response. + return response + + def get_tensorboard(self, + request: tensorboard_service.GetTensorboardRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard.Tensorboard: + r"""Gets a Tensorboard. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetTensorboardRequest): + The request object. Request message for + [TensorboardService.GetTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboard]. + name (str): + Required. The name of the Tensorboard resource. 
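Because create_tensorboard above returns a long-running operation, callers typically block on result() to obtain the created resource. A sketch with a hypothetical project and location, reusing the client constructed earlier:

    from google.cloud.aiplatform_v1beta1.types import Tensorboard

    operation = client.create_tensorboard(
        parent="projects/my-project/locations/us-central1",
        tensorboard=Tensorboard(display_name="my-tensorboard"),
    )
    tensorboard = operation.result()  # blocks until the LRO completes
    print(tensorboard.name)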
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Tensorboard: + Tensorboard is a physical database + that stores users' training metrics. A + default Tensorboard is provided in each + region of a GCP project. If needed users + can also create extra Tensorboards in + their projects. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.GetTensorboardRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.GetTensorboardRequest): + request = tensorboard_service.GetTensorboardRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_tensorboard] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_tensorboard(self, + request: tensorboard_service.UpdateTensorboardRequest = None, + *, + tensorboard: gca_tensorboard.Tensorboard = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Updates a Tensorboard. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRequest): + The request object. Request message for + [TensorboardService.UpdateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboard]. + tensorboard (google.cloud.aiplatform_v1beta1.types.Tensorboard): + Required. The Tensorboard's ``name`` field is used to + identify the Tensorboard to be updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``tensorboard`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the Tensorboard resource by the update. + The fields specified in the update_mask are relative to + the resource, not the full request. A field will be + overwritten if it is in the mask. 
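The unary get_tensorboard above needs only the resource name. A sketch (resource name hypothetical):

    tensorboard = client.get_tensorboard(
        name="projects/my-project/locations/us-central1/tensorboards/123",
    )
    print(tensorboard.display_name)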
If the user does not + provide a mask then all fields will be overwritten if + new values are specified. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. + A default Tensorboard is provided in each region of a + GCP project. If needed users can also create extra + Tensorboards in their projects. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.UpdateTensorboardRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.UpdateTensorboardRequest): + request = tensorboard_service.UpdateTensorboardRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard is not None: + request.tensorboard = tensorboard + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_tensorboard] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard.name", request.tensorboard.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_tensorboard.Tensorboard, + metadata_type=tensorboard_service.UpdateTensorboardOperationMetadata, + ) + + # Done; return the response. + return response + + def list_tensorboards(self, + request: tensorboard_service.ListTensorboardsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTensorboardsPager: + r"""Lists Tensorboards in a Location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest): + The request object. Request message for + [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. + parent (str): + Required. The resource name of the + Location to list Tensorboards. Format: + 'projects/{project}/locations/{location}' + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
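Since only fields named in the mask are overwritten, a typical update_tensorboard call reads the resource, mutates it, and writes back with a narrow FieldMask. A sketch building on the object fetched above:

    from google.protobuf import field_mask_pb2

    tensorboard.display_name = "renamed-tensorboard"
    operation = client.update_tensorboard(
        tensorboard=tensorboard,
        update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
    )
    tensorboard = operation.result()  # the update is also a long-running operation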
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardsPager: + Response message for + [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.ListTensorboardsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.ListTensorboardsRequest): + request = tensorboard_service.ListTensorboardsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_tensorboards] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListTensorboardsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_tensorboard(self, + request: tensorboard_service.DeleteTensorboardRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a Tensorboard. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRequest): + The request object. Request message for + [TensorboardService.DeleteTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboard]. + name (str): + Required. The name of the Tensorboard to be deleted. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
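The pager returned by list_tensorboards above fetches follow-up pages lazily during iteration, so callers rarely touch page tokens directly. A sketch:

    for tb in client.list_tensorboards(
        parent="projects/my-project/locations/us-central1",
    ):
        print(tb.name)  # additional pages are requested transparently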
+ + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.DeleteTensorboardRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.DeleteTensorboardRequest): + request = tensorboard_service.DeleteTensorboardRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_tensorboard] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def create_tensorboard_experiment(self, + request: tensorboard_service.CreateTensorboardExperimentRequest = None, + *, + parent: str = None, + tensorboard_experiment: gca_tensorboard_experiment.TensorboardExperiment = None, + tensorboard_experiment_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_experiment.TensorboardExperiment: + r"""Creates a TensorboardExperiment. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateTensorboardExperimentRequest): + The request object. Request message for + [TensorboardService.CreateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardExperiment]. + parent (str): + Required. The resource name of the Tensorboard to create + the TensorboardExperiment in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_experiment (google.cloud.aiplatform_v1beta1.types.TensorboardExperiment): + The TensorboardExperiment to create. + This corresponds to the ``tensorboard_experiment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_experiment_id (str): + Required. 
The ID to use for the Tensorboard experiment, + which will become the final component of the Tensorboard + experiment's resource name. + + This value should be 1-128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``tensorboard_experiment_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: + A TensorboardExperiment is a group of + TensorboardRuns, that are typically the + results of a training job run, in a + Tensorboard. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, tensorboard_experiment, tensorboard_experiment_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.CreateTensorboardExperimentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.CreateTensorboardExperimentRequest): + request = tensorboard_service.CreateTensorboardExperimentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if tensorboard_experiment is not None: + request.tensorboard_experiment = tensorboard_experiment + if tensorboard_experiment_id is not None: + request.tensorboard_experiment_id = tensorboard_experiment_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_tensorboard_experiment] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_tensorboard_experiment(self, + request: tensorboard_service.GetTensorboardExperimentRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_experiment.TensorboardExperiment: + r"""Gets a TensorboardExperiment. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetTensorboardExperimentRequest): + The request object. Request message for + [TensorboardService.GetTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardExperiment]. + name (str): + Required. The name of the TensorboardExperiment + resource. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
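Unlike the Tensorboard methods, create_tensorboard_experiment above is unary and requires a caller-chosen ID. A sketch with hypothetical names:

    from google.cloud.aiplatform_v1beta1.types import TensorboardExperiment

    experiment = client.create_tensorboard_experiment(
        parent="projects/my-project/locations/us-central1/tensorboards/123",
        tensorboard_experiment=TensorboardExperiment(display_name="exp-1"),
        tensorboard_experiment_id="exp-1",  # 1-128 chars drawn from [a-z0-9-]
    )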
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: + A TensorboardExperiment is a group of + TensorboardRuns, that are typically the + results of a training job run, in a + Tensorboard. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.GetTensorboardExperimentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.GetTensorboardExperimentRequest): + request = tensorboard_service.GetTensorboardExperimentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_tensorboard_experiment] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_tensorboard_experiment(self, + request: tensorboard_service.UpdateTensorboardExperimentRequest = None, + *, + tensorboard_experiment: gca_tensorboard_experiment.TensorboardExperiment = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_experiment.TensorboardExperiment: + r"""Updates a TensorboardExperiment. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UpdateTensorboardExperimentRequest): + The request object. Request message for + [TensorboardService.UpdateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardExperiment]. + tensorboard_experiment (google.cloud.aiplatform_v1beta1.types.TensorboardExperiment): + Required. The TensorboardExperiment's ``name`` field is + used to identify the TensorboardExperiment to be + updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``tensorboard_experiment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the TensorboardExperiment resource by the + update. The fields specified in the update_mask are + relative to the resource, not the full request. A field + will be overwritten if it is in the mask. 
If the user + does not provide a mask then all fields will be + overwritten if new values are specified. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: + A TensorboardExperiment is a group of + TensorboardRuns, that are typically the + results of a training job run, in a + Tensorboard. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_experiment, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.UpdateTensorboardExperimentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.UpdateTensorboardExperimentRequest): + request = tensorboard_service.UpdateTensorboardExperimentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_experiment is not None: + request.tensorboard_experiment = tensorboard_experiment + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_tensorboard_experiment] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_experiment.name", request.tensorboard_experiment.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_tensorboard_experiments(self, + request: tensorboard_service.ListTensorboardExperimentsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTensorboardExperimentsPager: + r"""Lists TensorboardExperiments in a Location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest): + The request object. Request message for + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]. + parent (str): + Required. The resource name of the + Tensorboard to list + TensorboardExperiments. Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}' + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
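update_tensorboard_experiment above is likewise unary, returning the updated resource directly rather than an operation. A sketch reusing the experiment created earlier:

    from google.protobuf import field_mask_pb2

    experiment.description = "baseline sweep"
    experiment = client.update_tensorboard_experiment(
        tensorboard_experiment=experiment,
        update_mask=field_mask_pb2.FieldMask(paths=["description"]),
    )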
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardExperimentsPager: + Response message for + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.ListTensorboardExperimentsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.ListTensorboardExperimentsRequest): + request = tensorboard_service.ListTensorboardExperimentsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_tensorboard_experiments] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListTensorboardExperimentsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_tensorboard_experiment(self, + request: tensorboard_service.DeleteTensorboardExperimentRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a TensorboardExperiment. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteTensorboardExperimentRequest): + The request object. Request message for + [TensorboardService.DeleteTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardExperiment]. + name (str): + Required. The name of the TensorboardExperiment to be + deleted. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
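The experiments pager behaves like the Tensorboard one. A sketch:

    for exp in client.list_tensorboard_experiments(
        parent="projects/my-project/locations/us-central1/tensorboards/123",
    ):
        print(exp.name)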
+ + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.DeleteTensorboardExperimentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.DeleteTensorboardExperimentRequest): + request = tensorboard_service.DeleteTensorboardExperimentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_tensorboard_experiment] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def create_tensorboard_run(self, + request: tensorboard_service.CreateTensorboardRunRequest = None, + *, + parent: str = None, + tensorboard_run: gca_tensorboard_run.TensorboardRun = None, + tensorboard_run_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_run.TensorboardRun: + r"""Creates a TensorboardRun. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest): + The request object. Request message for + [TensorboardService.CreateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun]. + parent (str): + Required. The resource name of the Tensorboard to create + the TensorboardRun in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_run (google.cloud.aiplatform_v1beta1.types.TensorboardRun): + Required. The TensorboardRun to + create. + + This corresponds to the ``tensorboard_run`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_run_id (str): + Required. 
The ID to use for the Tensorboard run, which + will become the final component of the Tensorboard run's + resource name. + + This value should be 1-128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``tensorboard_run_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardRun: + TensorboardRun maps to a specific + execution of a training job with a given + set of hyperparameter values, model + definition, dataset, etc + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, tensorboard_run, tensorboard_run_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.CreateTensorboardRunRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.CreateTensorboardRunRequest): + request = tensorboard_service.CreateTensorboardRunRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if tensorboard_run is not None: + request.tensorboard_run = tensorboard_run + if tensorboard_run_id is not None: + request.tensorboard_run_id = tensorboard_run_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_tensorboard_run] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_tensorboard_run(self, + request: tensorboard_service.GetTensorboardRunRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_run.TensorboardRun: + r"""Gets a TensorboardRun. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetTensorboardRunRequest): + The request object. Request message for + [TensorboardService.GetTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardRun]. + name (str): + Required. The name of the TensorboardRun resource. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
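create_tensorboard_run above mirrors the experiment-creation shape, including the caller-chosen ID. A sketch with hypothetical names:

    from google.cloud.aiplatform_v1beta1.types import TensorboardRun

    run = client.create_tensorboard_run(
        parent="projects/my-project/locations/us-central1/tensorboards/123/experiments/exp-1",
        tensorboard_run=TensorboardRun(display_name="run-1"),
        tensorboard_run_id="run-1",  # same 1-128 char, [a-z0-9-] rule as experiment IDs
    )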
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardRun: + TensorboardRun maps to a specific + execution of a training job with a given + set of hyperparameter values, model + definition, dataset, etc + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.GetTensorboardRunRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.GetTensorboardRunRequest): + request = tensorboard_service.GetTensorboardRunRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_tensorboard_run] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_tensorboard_run(self, + request: tensorboard_service.UpdateTensorboardRunRequest = None, + *, + tensorboard_run: gca_tensorboard_run.TensorboardRun = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_run.TensorboardRun: + r"""Updates a TensorboardRun. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRunRequest): + The request object. Request message for + [TensorboardService.UpdateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardRun]. + tensorboard_run (google.cloud.aiplatform_v1beta1.types.TensorboardRun): + Required. The TensorboardRun's ``name`` field is used to + identify the TensorboardRun to be updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``tensorboard_run`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the TensorboardRun resource by the + update. The fields specified in the update_mask are + relative to the resource, not the full request. A field + will be overwritten if it is in the mask. If the user + does not provide a mask then all fields will be + overwritten if new values are specified. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
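The routing-header step repeated in every method above collapses to a single x-goog-request-params metadata entry. A sketch of what it produces (the exact percent-encoding depends on the installed google-api-core version):

    from google.api_core.gapic_v1 import routing_header

    md = routing_header.to_grpc_metadata(
        (("name", "projects/my-project/locations/us-central1/tensorboards/123"),)
    )
    # md is an ("x-goog-request-params", "name=projects/my-project/...") pair that
    # is appended to the user-supplied metadata before the RPC is sent.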
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardRun: + TensorboardRun maps to a specific + execution of a training job with a given + set of hyperparameter values, model + definition, dataset, etc + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_run, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.UpdateTensorboardRunRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.UpdateTensorboardRunRequest): + request = tensorboard_service.UpdateTensorboardRunRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_run is not None: + request.tensorboard_run = tensorboard_run + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_tensorboard_run] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_run.name", request.tensorboard_run.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_tensorboard_runs(self, + request: tensorboard_service.ListTensorboardRunsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTensorboardRunsPager: + r"""Lists TensorboardRuns in a Location. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest): + The request object. Request message for + [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]. + parent (str): + Required. The resource name of the + Tensorboard to list TensorboardRuns. + Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}' + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardRunsPager: + Response message for + [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.ListTensorboardRunsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.ListTensorboardRunsRequest): + request = tensorboard_service.ListTensorboardRunsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_tensorboard_runs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListTensorboardRunsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_tensorboard_run(self, + request: tensorboard_service.DeleteTensorboardRunRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a TensorboardRun. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRunRequest): + The request object. Request message for + [TensorboardService.DeleteTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardRun]. + name (str): + Required. The name of the TensorboardRun to be deleted. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.DeleteTensorboardRunRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.DeleteTensorboardRunRequest): + request = tensorboard_service.DeleteTensorboardRunRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_tensorboard_run] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def create_tensorboard_time_series(self, + request: tensorboard_service.CreateTensorboardTimeSeriesRequest = None, + *, + parent: str = None, + tensorboard_time_series: gca_tensorboard_time_series.TensorboardTimeSeries = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_time_series.TensorboardTimeSeries: + r"""Creates a TensorboardTimeSeries. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest): + The request object. Request message for + [TensorboardService.CreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardTimeSeries]. + parent (str): + Required. The resource name of the TensorboardRun to + create the TensorboardTimeSeries in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tensorboard_time_series (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries): + Required. The TensorboardTimeSeries + to create. + + This corresponds to the ``tensorboard_time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries: + TensorboardTimeSeries maps to time + series produced in training runs + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, tensorboard_time_series]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.CreateTensorboardTimeSeriesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.CreateTensorboardTimeSeriesRequest): + request = tensorboard_service.CreateTensorboardTimeSeriesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if tensorboard_time_series is not None: + request.tensorboard_time_series = tensorboard_time_series + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_tensorboard_time_series] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_tensorboard_time_series(self, + request: tensorboard_service.GetTensorboardTimeSeriesRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tensorboard_time_series.TensorboardTimeSeries: + r"""Gets a TensorboardTimeSeries. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetTensorboardTimeSeriesRequest): + The request object. Request message for + [TensorboardService.GetTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardTimeSeries]. + name (str): + Required. The name of the TensorboardTimeSeries + resource. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries: + TensorboardTimeSeries maps to time + series produced in training runs + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.GetTensorboardTimeSeriesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields.
+ if not isinstance(request, tensorboard_service.GetTensorboardTimeSeriesRequest): + request = tensorboard_service.GetTensorboardTimeSeriesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_tensorboard_time_series] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_tensorboard_time_series(self, + request: tensorboard_service.UpdateTensorboardTimeSeriesRequest = None, + *, + tensorboard_time_series: gca_tensorboard_time_series.TensorboardTimeSeries = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tensorboard_time_series.TensorboardTimeSeries: + r"""Updates a TensorboardTimeSeries. + + Args: + request (google.cloud.aiplatform_v1beta1.types.UpdateTensorboardTimeSeriesRequest): + The request object. Request message for + [TensorboardService.UpdateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardTimeSeries]. + tensorboard_time_series (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries): + Required. The TensorboardTimeSeries' ``name`` field is + used to identify the TensorboardTimeSeries to be + updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``tensorboard_time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the TensorboardTimeSeries resource by the + update. The fields specified in the update_mask are + relative to the resource, not the full request. A field + will be overwritten if it is in the mask. If the user + does not provide a mask then all fields will be + overwritten if new values are specified. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries: + TensorboardTimeSeries maps to time + series produced in training runs + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([tensorboard_time_series, update_mask])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a tensorboard_service.UpdateTensorboardTimeSeriesRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, tensorboard_service.UpdateTensorboardTimeSeriesRequest):
+ request = tensorboard_service.UpdateTensorboardTimeSeriesRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if tensorboard_time_series is not None:
+ request.tensorboard_time_series = tensorboard_time_series
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.update_tensorboard_time_series]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("tensorboard_time_series.name", request.tensorboard_time_series.name),
+ )),
+ )
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def list_tensorboard_time_series(self,
+ request: tensorboard_service.ListTensorboardTimeSeriesRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListTensorboardTimeSeriesPager:
+ r"""Lists TensorboardTimeSeries in a Location.
+
+ Args:
+ request (google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest):
+ The request object. Request message for
+ [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries].
+ parent (str):
+ Required. The resource name of the
+ TensorboardRun from which to list
+ TensorboardTimeSeries. Format:
+ ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}``
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardTimeSeriesPager:
+ Response message for
+ [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries].
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
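+ # Illustrative usage sketch (comment only; the parent name is
+ # hypothetical): the returned pager fetches further pages on demand,
+ # so plain iteration walks every TensorboardTimeSeries in the run.
+ #
+ #   parent = "projects/my-project/locations/us-central1/tensorboards/1/experiments/my-exp/runs/my-run"
+ #   for ts in client.list_tensorboard_time_series(parent=parent):
+ #       print(ts.name)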
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a tensorboard_service.ListTensorboardTimeSeriesRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, tensorboard_service.ListTensorboardTimeSeriesRequest):
+ request = tensorboard_service.ListTensorboardTimeSeriesRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.list_tensorboard_time_series]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("parent", request.parent),
+ )),
+ )
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__iter__` convenience method.
+ response = pagers.ListTensorboardTimeSeriesPager(
+ method=rpc,
+ request=request,
+ response=response,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def delete_tensorboard_time_series(self,
+ request: tensorboard_service.DeleteTensorboardTimeSeriesRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> gac_operation.Operation:
+ r"""Deletes a TensorboardTimeSeries.
+
+ Args:
+ request (google.cloud.aiplatform_v1beta1.types.DeleteTensorboardTimeSeriesRequest):
+ The request object. Request message for
+ [TensorboardService.DeleteTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardTimeSeries].
+ name (str):
+ Required. The name of the TensorboardTimeSeries to be
+ deleted. Format:
+ ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}``
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty`, a generic empty message that you can re-use to avoid defining duplicated
+ empty messages in your APIs. A typical example is to
+ use it as the request or the response type of an API
+ method. For instance:
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns
+ (google.protobuf.Empty);
+
+ }
+
+ The JSON representation for Empty is an empty JSON
+ object {}.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
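+ # Illustrative usage sketch (comment only; the resource name is
+ # hypothetical): deletion returns a long-running operation, so block
+ # on `result()` to wait for the server to finish.
+ #
+ #   operation = client.delete_tensorboard_time_series(name=ts.name)
+ #   operation.result()  # resolves to google.protobuf.empty_pb2.Empty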
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a tensorboard_service.DeleteTensorboardTimeSeriesRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, tensorboard_service.DeleteTensorboardTimeSeriesRequest):
+ request = tensorboard_service.DeleteTensorboardTimeSeriesRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.delete_tensorboard_time_series]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("name", request.name),
+ )),
+ )
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = gac_operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ empty_pb2.Empty,
+ metadata_type=gca_operation.DeleteOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def read_tensorboard_time_series_data(self,
+ request: tensorboard_service.ReadTensorboardTimeSeriesDataRequest = None,
+ *,
+ tensorboard_time_series: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> tensorboard_service.ReadTensorboardTimeSeriesDataResponse:
+ r"""Reads a TensorboardTimeSeries' data. Data is returned in
+ paginated responses. By default, if the number of data points
+ stored is less than 1000, all data will be returned. Otherwise,
+ 1000 data points will be randomly selected from this time series
+ and returned. This value can be changed by setting
+ max_data_points.
+
+ Args:
+ request (google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataRequest):
+ The request object. Request message for
+ [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData].
+ tensorboard_time_series (str):
+ Required. The resource name of the TensorboardTimeSeries
+ to read data from. Format:
+ ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}``
+
+ This corresponds to the ``tensorboard_time_series`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataResponse:
+ Response message for
+ [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData].
+
+ """
+ # Create or coerce a protobuf request object.
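+ # Illustrative usage sketch (comment only): `max_data_points` is not a
+ # flattened argument, so raising the 1000-point sampling default
+ # requires the request-object form. Names and values are hypothetical.
+ #
+ #   request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest(
+ #       tensorboard_time_series=ts.name,
+ #       max_data_points=5000,
+ #   )
+ #   data = client.read_tensorboard_time_series_data(request=request)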
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([tensorboard_time_series])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a tensorboard_service.ReadTensorboardTimeSeriesDataRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, tensorboard_service.ReadTensorboardTimeSeriesDataRequest):
+ request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if tensorboard_time_series is not None:
+ request.tensorboard_time_series = tensorboard_time_series
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.read_tensorboard_time_series_data]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("tensorboard_time_series", request.tensorboard_time_series),
+ )),
+ )
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def read_tensorboard_blob_data(self,
+ request: tensorboard_service.ReadTensorboardBlobDataRequest = None,
+ *,
+ time_series: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> Iterable[tensorboard_service.ReadTensorboardBlobDataResponse]:
+ r"""Gets bytes of TensorboardBlobs.
+ This allows reading blob data stored in the consumer
+ project's Cloud Storage bucket without users having to
+ obtain Cloud Storage access permission.
+
+ Args:
+ request (google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataRequest):
+ The request object. Request message for
+ [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData].
+ time_series (str):
+ Required. The resource name of the TensorboardTimeSeries
+ from which to list Blobs. Format:
+ ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}``
+
+ This corresponds to the ``time_series`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ Iterable[google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataResponse]:
+ Response message for
+ [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData].
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
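+ # Illustrative usage sketch (comment only; the names are hypothetical
+ # and the assumed `blobs`/`data` fields follow the published
+ # ReadTensorboardBlobDataResponse/TensorboardBlob types): the call
+ # returns a server-streaming iterable, one response per batch of blobs.
+ #
+ #   for response in client.read_tensorboard_blob_data(time_series=ts.name):
+ #       for blob in response.blobs:
+ #           process(blob.data)  # `process` is a user-supplied callable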
+ has_flattened_params = any([time_series])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a tensorboard_service.ReadTensorboardBlobDataRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, tensorboard_service.ReadTensorboardBlobDataRequest):
+ request = tensorboard_service.ReadTensorboardBlobDataRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if time_series is not None:
+ request.time_series = time_series
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.read_tensorboard_blob_data]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("time_series", request.time_series),
+ )),
+ )
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def write_tensorboard_run_data(self,
+ request: tensorboard_service.WriteTensorboardRunDataRequest = None,
+ *,
+ tensorboard_run: str = None,
+ time_series_data: Sequence[tensorboard_data.TimeSeriesData] = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> tensorboard_service.WriteTensorboardRunDataResponse:
+ r"""Writes time series data points into multiple
+ TensorboardTimeSeries under a TensorboardRun. If any
+ data fail to be ingested, an error will be returned.
+
+ Args:
+ request (google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest):
+ The request object. Request message for
+ [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData].
+ tensorboard_run (str):
+ Required. The resource name of the TensorboardRun to
+ write data to. Format:
+ ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}``
+
+ This corresponds to the ``tensorboard_run`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ time_series_data (Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesData]):
+ Required. The TensorboardTimeSeries
+ data to write. Values within a time
+ series are indexed by their step value.
+ Repeated writes to the same step will
+ overwrite the existing value for that
+ step.
+ The upper limit of data points per write
+ request is 5000.
+
+ This corresponds to the ``time_series_data`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataResponse:
+ Response message for
+ [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData].
+
+ """
+ # Create or coerce a protobuf request object.
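+ # Illustrative usage sketch (comment only; IDs, steps, and values are
+ # hypothetical, and the assumed TimeSeriesData/TimeSeriesDataPoint
+ # field names come from the aiplatform_v1beta1 types): writing one
+ # scalar point to an existing time series.
+ #
+ #   point = tensorboard_data.TimeSeriesDataPoint(
+ #       step=100,
+ #       scalar=tensorboard_data.Scalar(value=0.95),
+ #   )
+ #   client.write_tensorboard_run_data(
+ #       tensorboard_run="projects/my-project/locations/us-central1/tensorboards/1/experiments/my-exp/runs/my-run",
+ #       time_series_data=[
+ #           tensorboard_data.TimeSeriesData(
+ #               tensorboard_time_series_id="2",
+ #               values=[point],
+ #           ),
+ #       ],
+ #   )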
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([tensorboard_run, time_series_data]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.WriteTensorboardRunDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.WriteTensorboardRunDataRequest): + request = tensorboard_service.WriteTensorboardRunDataRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_run is not None: + request.tensorboard_run = tensorboard_run + if time_series_data is not None: + request.time_series_data = time_series_data + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.write_tensorboard_run_data] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_run", request.tensorboard_run), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def export_tensorboard_time_series_data(self, + request: tensorboard_service.ExportTensorboardTimeSeriesDataRequest = None, + *, + tensorboard_time_series: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ExportTensorboardTimeSeriesDataPager: + r"""Exports a TensorboardTimeSeries' data. Data is + returned in paginated responses. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest): + The request object. Request message for + [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. + tensorboard_time_series (str): + Required. The resource name of the TensorboardTimeSeries + to export data from. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + + This corresponds to the ``tensorboard_time_series`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ExportTensorboardTimeSeriesDataPager: + Response message for + [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
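+ # Illustrative usage sketch (comment only; the resource name is
+ # hypothetical): unlike the read call above, export pages through
+ # *all* stored data points, and the pager resolves pages transparently.
+ #
+ #   for point in client.export_tensorboard_time_series_data(
+ #       tensorboard_time_series=ts.name,
+ #   ):
+ #       print(point.step, point.scalar.value)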
+ has_flattened_params = any([tensorboard_time_series]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a tensorboard_service.ExportTensorboardTimeSeriesDataRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, tensorboard_service.ExportTensorboardTimeSeriesDataRequest): + request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if tensorboard_time_series is not None: + request.tensorboard_time_series = tensorboard_time_series + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.export_tensorboard_time_series_data] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("tensorboard_time_series", request.tensorboard_time_series), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ExportTensorboardTimeSeriesDataPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "TensorboardServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/pagers.py new file mode 100644 index 0000000000..d0e307d0f7 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/pagers.py @@ -0,0 +1,633 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.cloud.aiplatform_v1beta1.types import tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard_data +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_service +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series + + +class ListTensorboardsPager: + """A pager for iterating through ``list_tensorboards`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``tensorboards`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTensorboards`` requests and continue to iterate + through the ``tensorboards`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., tensorboard_service.ListTensorboardsResponse], + request: tensorboard_service.ListTensorboardsRequest, + response: tensorboard_service.ListTensorboardsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = tensorboard_service.ListTensorboardsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[tensorboard_service.ListTensorboardsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[tensorboard.Tensorboard]: + for page in self.pages: + yield from page.tensorboards + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTensorboardsAsyncPager: + """A pager for iterating through ``list_tensorboards`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``tensorboards`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListTensorboards`` requests and continue to iterate + through the ``tensorboards`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + def __init__(self, + method: Callable[..., Awaitable[tensorboard_service.ListTensorboardsResponse]], + request: tensorboard_service.ListTensorboardsRequest, + response: tensorboard_service.ListTensorboardsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = tensorboard_service.ListTensorboardsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[tensorboard_service.ListTensorboardsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[tensorboard.Tensorboard]: + async def async_generator(): + async for page in self.pages: + for response in page.tensorboards: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTensorboardExperimentsPager: + """A pager for iterating through ``list_tensorboard_experiments`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``tensorboard_experiments`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTensorboardExperiments`` requests and continue to iterate + through the ``tensorboard_experiments`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., tensorboard_service.ListTensorboardExperimentsResponse], + request: tensorboard_service.ListTensorboardExperimentsRequest, + response: tensorboard_service.ListTensorboardExperimentsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = tensorboard_service.ListTensorboardExperimentsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[tensorboard_service.ListTensorboardExperimentsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[tensorboard_experiment.TensorboardExperiment]: + for page in self.pages: + yield from page.tensorboard_experiments + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTensorboardExperimentsAsyncPager: + """A pager for iterating through ``list_tensorboard_experiments`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``tensorboard_experiments`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListTensorboardExperiments`` requests and continue to iterate + through the ``tensorboard_experiments`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[tensorboard_service.ListTensorboardExperimentsResponse]], + request: tensorboard_service.ListTensorboardExperimentsRequest, + response: tensorboard_service.ListTensorboardExperimentsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = tensorboard_service.ListTensorboardExperimentsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[tensorboard_service.ListTensorboardExperimentsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[tensorboard_experiment.TensorboardExperiment]: + async def async_generator(): + async for page in self.pages: + for response in page.tensorboard_experiments: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTensorboardRunsPager: + """A pager for iterating through ``list_tensorboard_runs`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``tensorboard_runs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTensorboardRuns`` requests and continue to iterate + through the ``tensorboard_runs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., tensorboard_service.ListTensorboardRunsResponse], + request: tensorboard_service.ListTensorboardRunsRequest, + response: tensorboard_service.ListTensorboardRunsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = tensorboard_service.ListTensorboardRunsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[tensorboard_service.ListTensorboardRunsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[tensorboard_run.TensorboardRun]: + for page in self.pages: + yield from page.tensorboard_runs + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTensorboardRunsAsyncPager: + """A pager for iterating through ``list_tensorboard_runs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``tensorboard_runs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListTensorboardRuns`` requests and continue to iterate + through the ``tensorboard_runs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[tensorboard_service.ListTensorboardRunsResponse]], + request: tensorboard_service.ListTensorboardRunsRequest, + response: tensorboard_service.ListTensorboardRunsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest): + The initial request object. 
+ response (google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = tensorboard_service.ListTensorboardRunsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[tensorboard_service.ListTensorboardRunsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[tensorboard_run.TensorboardRun]: + async def async_generator(): + async for page in self.pages: + for response in page.tensorboard_runs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTensorboardTimeSeriesPager: + """A pager for iterating through ``list_tensorboard_time_series`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``tensorboard_time_series`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTensorboardTimeSeries`` requests and continue to iterate + through the ``tensorboard_time_series`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., tensorboard_service.ListTensorboardTimeSeriesResponse], + request: tensorboard_service.ListTensorboardTimeSeriesRequest, + response: tensorboard_service.ListTensorboardTimeSeriesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = tensorboard_service.ListTensorboardTimeSeriesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[tensorboard_service.ListTensorboardTimeSeriesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[tensorboard_time_series.TensorboardTimeSeries]: + for page in self.pages: + yield from page.tensorboard_time_series + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTensorboardTimeSeriesAsyncPager: + """A pager for iterating through ``list_tensorboard_time_series`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``tensorboard_time_series`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListTensorboardTimeSeries`` requests and continue to iterate + through the ``tensorboard_time_series`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[tensorboard_service.ListTensorboardTimeSeriesResponse]], + request: tensorboard_service.ListTensorboardTimeSeriesRequest, + response: tensorboard_service.ListTensorboardTimeSeriesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = tensorboard_service.ListTensorboardTimeSeriesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[tensorboard_service.ListTensorboardTimeSeriesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[tensorboard_time_series.TensorboardTimeSeries]: + async def async_generator(): + async for page in self.pages: + for response in page.tensorboard_time_series: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ExportTensorboardTimeSeriesDataPager: + """A pager for iterating through ``export_tensorboard_time_series_data`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse` object, and + provides an ``__iter__`` method to iterate through its + ``time_series_data_points`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ExportTensorboardTimeSeriesData`` requests and continue to iterate + through the ``time_series_data_points`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., tensorboard_service.ExportTensorboardTimeSeriesDataResponse], + request: tensorboard_service.ExportTensorboardTimeSeriesDataRequest, + response: tensorboard_service.ExportTensorboardTimeSeriesDataResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[tensorboard_data.TimeSeriesDataPoint]: + for page in self.pages: + yield from page.time_series_data_points + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ExportTensorboardTimeSeriesDataAsyncPager: + """A pager for iterating through ``export_tensorboard_time_series_data`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``time_series_data_points`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ExportTensorboardTimeSeriesData`` requests and continue to iterate + through the ``time_series_data_points`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]], + request: tensorboard_service.ExportTensorboardTimeSeriesDataRequest, + response: tensorboard_service.ExportTensorboardTimeSeriesDataResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[tensorboard_data.TimeSeriesDataPoint]: + async def async_generator(): + async for page in self.pages: + for response in page.time_series_data_points: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/__init__.py new file mode 100644 index 0000000000..9565b55932 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import TensorboardServiceTransport +from .grpc import TensorboardServiceGrpcTransport +from .grpc_asyncio import TensorboardServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. 
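+# Illustrative note (comment only): the registry maps the transport name a
+# caller may pass when constructing the client onto a concrete transport
+# class, e.g.:
+#
+#   transport_cls = _transport_registry["grpc"]          # TensorboardServiceGrpcTransport
+#   transport_cls = _transport_registry["grpc_asyncio"]  # TensorboardServiceGrpcAsyncIOTransport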
+_transport_registry = OrderedDict() # type: Dict[str, Type[TensorboardServiceTransport]] +_transport_registry['grpc'] = TensorboardServiceGrpcTransport +_transport_registry['grpc_asyncio'] = TensorboardServiceGrpcAsyncIOTransport + +__all__ = ( + 'TensorboardServiceTransport', + 'TensorboardServiceGrpcTransport', + 'TensorboardServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py new file mode 100644 index 0000000000..292d2c4990 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py @@ -0,0 +1,504 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment as gca_tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_service +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series as gca_tensorboard_time_series +from google.longrunning import operations_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class TensorboardServiceTransport(abc.ABC): + """Abstract transport class for TensorboardService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, 
*,
+ host: str = DEFAULT_HOST,
+ credentials: ga_credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
+ **kwargs,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]):
+ The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+ scopes (Optional[Sequence[str]]): A list of scopes.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+ always_use_jwt_access (Optional[bool]): Whether a self-signed JWT should
+ be used for service account credentials.
+ """
+ # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+ if ':' not in host:
+ host += ':443'
+ self._host = host
+
+ scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
+
+ # Save the scopes.
+ self._scopes = scopes
+
+ # If no credentials are provided, then determine the appropriate
+ # defaults.
+ if credentials and credentials_file:
+ raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
+
+ if credentials_file is not None:
+ credentials, _ = google.auth.load_credentials_from_file(
+ credentials_file,
+ **scopes_kwargs,
+ quota_project_id=quota_project_id
+ )
+
+ elif credentials is None:
+ credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
+
+ # If the credentials are service account credentials, then always try to use self-signed JWT.
+ if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
+ credentials = credentials.with_always_use_jwt_access(True)
+
+ # Save the credentials.
+ self._credentials = credentials
+
+ # TODO(busunkim): This method is in the base transport
+ # to avoid duplicating code across the transport classes. It
+ # should be deleted once the minimum required version of google-auth is increased.
+
+ # TODO: Remove this function once google-auth >= 1.25.0 is required
+ @classmethod
+ def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
+ """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
+
+ scopes_kwargs = {}
+
+ if _GOOGLE_AUTH_VERSION and (
+ packaging.version.parse(_GOOGLE_AUTH_VERSION)
+ >= packaging.version.parse("1.25.0")
+ ):
+ scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
+ else:
+ scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
+
+ return scopes_kwargs
+
+ def _prep_wrapped_messages(self, client_info):
+ # Precompute the wrapped methods.
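+ # Illustrative note (comment only, semantics assumed from the
+ # google.api_core docs): gapic_v1.method.wrap_method wraps each raw
+ # transport callable so that per-call `retry` and `timeout` arguments
+ # and the `client_info` user-agent metadata are honored, conceptually:
+ #
+ #   wrapped = gapic_v1.method.wrap_method(
+ #       self.get_tensorboard, default_timeout=None, client_info=client_info)
+ #   response = wrapped(request, timeout=30.0)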
+ self._wrapped_methods = { + self.create_tensorboard: gapic_v1.method.wrap_method( + self.create_tensorboard, + default_timeout=None, + client_info=client_info, + ), + self.get_tensorboard: gapic_v1.method.wrap_method( + self.get_tensorboard, + default_timeout=None, + client_info=client_info, + ), + self.update_tensorboard: gapic_v1.method.wrap_method( + self.update_tensorboard, + default_timeout=None, + client_info=client_info, + ), + self.list_tensorboards: gapic_v1.method.wrap_method( + self.list_tensorboards, + default_timeout=None, + client_info=client_info, + ), + self.delete_tensorboard: gapic_v1.method.wrap_method( + self.delete_tensorboard, + default_timeout=None, + client_info=client_info, + ), + self.create_tensorboard_experiment: gapic_v1.method.wrap_method( + self.create_tensorboard_experiment, + default_timeout=None, + client_info=client_info, + ), + self.get_tensorboard_experiment: gapic_v1.method.wrap_method( + self.get_tensorboard_experiment, + default_timeout=None, + client_info=client_info, + ), + self.update_tensorboard_experiment: gapic_v1.method.wrap_method( + self.update_tensorboard_experiment, + default_timeout=None, + client_info=client_info, + ), + self.list_tensorboard_experiments: gapic_v1.method.wrap_method( + self.list_tensorboard_experiments, + default_timeout=None, + client_info=client_info, + ), + self.delete_tensorboard_experiment: gapic_v1.method.wrap_method( + self.delete_tensorboard_experiment, + default_timeout=None, + client_info=client_info, + ), + self.create_tensorboard_run: gapic_v1.method.wrap_method( + self.create_tensorboard_run, + default_timeout=None, + client_info=client_info, + ), + self.get_tensorboard_run: gapic_v1.method.wrap_method( + self.get_tensorboard_run, + default_timeout=None, + client_info=client_info, + ), + self.update_tensorboard_run: gapic_v1.method.wrap_method( + self.update_tensorboard_run, + default_timeout=None, + client_info=client_info, + ), + self.list_tensorboard_runs: gapic_v1.method.wrap_method( + self.list_tensorboard_runs, + default_timeout=None, + client_info=client_info, + ), + self.delete_tensorboard_run: gapic_v1.method.wrap_method( + self.delete_tensorboard_run, + default_timeout=None, + client_info=client_info, + ), + self.create_tensorboard_time_series: gapic_v1.method.wrap_method( + self.create_tensorboard_time_series, + default_timeout=None, + client_info=client_info, + ), + self.get_tensorboard_time_series: gapic_v1.method.wrap_method( + self.get_tensorboard_time_series, + default_timeout=None, + client_info=client_info, + ), + self.update_tensorboard_time_series: gapic_v1.method.wrap_method( + self.update_tensorboard_time_series, + default_timeout=None, + client_info=client_info, + ), + self.list_tensorboard_time_series: gapic_v1.method.wrap_method( + self.list_tensorboard_time_series, + default_timeout=None, + client_info=client_info, + ), + self.delete_tensorboard_time_series: gapic_v1.method.wrap_method( + self.delete_tensorboard_time_series, + default_timeout=None, + client_info=client_info, + ), + self.read_tensorboard_time_series_data: gapic_v1.method.wrap_method( + self.read_tensorboard_time_series_data, + default_timeout=None, + client_info=client_info, + ), + self.read_tensorboard_blob_data: gapic_v1.method.wrap_method( + self.read_tensorboard_blob_data, + default_timeout=None, + client_info=client_info, + ), + self.write_tensorboard_run_data: gapic_v1.method.wrap_method( + self.write_tensorboard_run_data, + default_timeout=None, + client_info=client_info, + ), + 
self.export_tensorboard_time_series_data: gapic_v1.method.wrap_method( + self.export_tensorboard_time_series_data, + default_timeout=None, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_tensorboard(self) -> Callable[ + [tensorboard_service.CreateTensorboardRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_tensorboard(self) -> Callable[ + [tensorboard_service.GetTensorboardRequest], + Union[ + tensorboard.Tensorboard, + Awaitable[tensorboard.Tensorboard] + ]]: + raise NotImplementedError() + + @property + def update_tensorboard(self) -> Callable[ + [tensorboard_service.UpdateTensorboardRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def list_tensorboards(self) -> Callable[ + [tensorboard_service.ListTensorboardsRequest], + Union[ + tensorboard_service.ListTensorboardsResponse, + Awaitable[tensorboard_service.ListTensorboardsResponse] + ]]: + raise NotImplementedError() + + @property + def delete_tensorboard(self) -> Callable[ + [tensorboard_service.DeleteTensorboardRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def create_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.CreateTensorboardExperimentRequest], + Union[ + gca_tensorboard_experiment.TensorboardExperiment, + Awaitable[gca_tensorboard_experiment.TensorboardExperiment] + ]]: + raise NotImplementedError() + + @property + def get_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.GetTensorboardExperimentRequest], + Union[ + tensorboard_experiment.TensorboardExperiment, + Awaitable[tensorboard_experiment.TensorboardExperiment] + ]]: + raise NotImplementedError() + + @property + def update_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.UpdateTensorboardExperimentRequest], + Union[ + gca_tensorboard_experiment.TensorboardExperiment, + Awaitable[gca_tensorboard_experiment.TensorboardExperiment] + ]]: + raise NotImplementedError() + + @property + def list_tensorboard_experiments(self) -> Callable[ + [tensorboard_service.ListTensorboardExperimentsRequest], + Union[ + tensorboard_service.ListTensorboardExperimentsResponse, + Awaitable[tensorboard_service.ListTensorboardExperimentsResponse] + ]]: + raise NotImplementedError() + + @property + def delete_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.DeleteTensorboardExperimentRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def create_tensorboard_run(self) -> Callable[ + [tensorboard_service.CreateTensorboardRunRequest], + Union[ + gca_tensorboard_run.TensorboardRun, + Awaitable[gca_tensorboard_run.TensorboardRun] + ]]: + raise NotImplementedError() + + @property + def get_tensorboard_run(self) -> Callable[ + [tensorboard_service.GetTensorboardRunRequest], + Union[ + tensorboard_run.TensorboardRun, + Awaitable[tensorboard_run.TensorboardRun] + ]]: + raise NotImplementedError() + + @property + def update_tensorboard_run(self) -> Callable[ + [tensorboard_service.UpdateTensorboardRunRequest], + Union[ + gca_tensorboard_run.TensorboardRun, + 
Awaitable[gca_tensorboard_run.TensorboardRun] + ]]: + raise NotImplementedError() + + @property + def list_tensorboard_runs(self) -> Callable[ + [tensorboard_service.ListTensorboardRunsRequest], + Union[ + tensorboard_service.ListTensorboardRunsResponse, + Awaitable[tensorboard_service.ListTensorboardRunsResponse] + ]]: + raise NotImplementedError() + + @property + def delete_tensorboard_run(self) -> Callable[ + [tensorboard_service.DeleteTensorboardRunRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def create_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.CreateTensorboardTimeSeriesRequest], + Union[ + gca_tensorboard_time_series.TensorboardTimeSeries, + Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries] + ]]: + raise NotImplementedError() + + @property + def get_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.GetTensorboardTimeSeriesRequest], + Union[ + tensorboard_time_series.TensorboardTimeSeries, + Awaitable[tensorboard_time_series.TensorboardTimeSeries] + ]]: + raise NotImplementedError() + + @property + def update_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.UpdateTensorboardTimeSeriesRequest], + Union[ + gca_tensorboard_time_series.TensorboardTimeSeries, + Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries] + ]]: + raise NotImplementedError() + + @property + def list_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.ListTensorboardTimeSeriesRequest], + Union[ + tensorboard_service.ListTensorboardTimeSeriesResponse, + Awaitable[tensorboard_service.ListTensorboardTimeSeriesResponse] + ]]: + raise NotImplementedError() + + @property + def delete_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.DeleteTensorboardTimeSeriesRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def read_tensorboard_time_series_data(self) -> Callable[ + [tensorboard_service.ReadTensorboardTimeSeriesDataRequest], + Union[ + tensorboard_service.ReadTensorboardTimeSeriesDataResponse, + Awaitable[tensorboard_service.ReadTensorboardTimeSeriesDataResponse] + ]]: + raise NotImplementedError() + + @property + def read_tensorboard_blob_data(self) -> Callable[ + [tensorboard_service.ReadTensorboardBlobDataRequest], + Union[ + tensorboard_service.ReadTensorboardBlobDataResponse, + Awaitable[tensorboard_service.ReadTensorboardBlobDataResponse] + ]]: + raise NotImplementedError() + + @property + def write_tensorboard_run_data(self) -> Callable[ + [tensorboard_service.WriteTensorboardRunDataRequest], + Union[ + tensorboard_service.WriteTensorboardRunDataResponse, + Awaitable[tensorboard_service.WriteTensorboardRunDataResponse] + ]]: + raise NotImplementedError() + + @property + def export_tensorboard_time_series_data(self) -> Callable[ + [tensorboard_service.ExportTensorboardTimeSeriesDataRequest], + Union[ + tensorboard_service.ExportTensorboardTimeSeriesDataResponse, + Awaitable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'TensorboardServiceTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py new file mode 100644 index 0000000000..1f8bd336b4 --- /dev/null +++ 
b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py @@ -0,0 +1,889 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment as gca_tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_service +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series as gca_tensorboard_time_series +from google.longrunning import operations_pb2 # type: ignore +from .base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO + + +class TensorboardServiceGrpcTransport(TensorboardServiceTransport): + """gRPC backend transport for TensorboardService. + + TensorboardService + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. 
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the gRPC channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure a mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self-signed JWT should
+                be used for service account credentials.
+
+        Raises:
+          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+              creation failed for any reason.
+          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+              and ``credentials_file`` are passed.
+        """
+        self._grpc_channel = None
+        self._ssl_channel_credentials = ssl_channel_credentials
+        self._stubs: Dict[str, Callable] = {}
+        self._operations_client = None
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Ignore credentials if a channel was passed.
+            credentials = False
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+
+        else:
+            if api_mtls_endpoint:
+                host = api_mtls_endpoint
+
+                # Create SSL credentials with client_cert_source or application
+                # default SSL credentials.
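+                # A ``client_cert_source`` is any zero-argument callable that
+                # returns ``(cert_bytes, key_bytes)`` in PEM format; for
+                # example (hypothetical file names):
+                #
+                #     def client_cert_source():
+                #         with open("client_cert.pem", "rb") as cert_file, \
+                #                 open("client_key.pem", "rb") as key_file:
+                #             return cert_file.read(), key_file.read()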
+                if client_cert_source:
+                    cert, key = client_cert_source()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+                else:
+                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+            else:
+                if client_cert_source_for_mtls and not ssl_channel_credentials:
+                    cert, key = client_cert_source_for_mtls()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+
+        # The base transport sets the host, credentials, and scopes.
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+        )
+
+        if not self._grpc_channel:
+            self._grpc_channel = type(self).create_channel(
+                self._host,
+                credentials=self._credentials,
+                credentials_file=credentials_file,
+                scopes=self._scopes,
+                ssl_credentials=self._ssl_channel_credentials,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        # Wrap messages. This must be done after self._grpc_channel exists.
+        self._prep_wrapped_messages(client_info)
+
+    @classmethod
+    def create_channel(cls,
+                       host: str = 'aiplatform.googleapis.com',
+                       credentials: ga_credentials.Credentials = None,
+                       credentials_file: str = None,
+                       scopes: Optional[Sequence[str]] = None,
+                       quota_project_id: Optional[str] = None,
+                       **kwargs) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service.
+        """
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Sanity check: Only create a new client if we do not already have one.
+        if self._operations_client is None:
+            self._operations_client = operations_v1.OperationsClient(
+                self.grpc_channel
+            )
+
+        # Return the client from cache.
+ return self._operations_client + + @property + def create_tensorboard(self) -> Callable[ + [tensorboard_service.CreateTensorboardRequest], + operations_pb2.Operation]: + r"""Return a callable for the create tensorboard method over gRPC. + + Creates a Tensorboard. + + Returns: + Callable[[~.CreateTensorboardRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_tensorboard' not in self._stubs: + self._stubs['create_tensorboard'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboard', + request_serializer=tensorboard_service.CreateTensorboardRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_tensorboard'] + + @property + def get_tensorboard(self) -> Callable[ + [tensorboard_service.GetTensorboardRequest], + tensorboard.Tensorboard]: + r"""Return a callable for the get tensorboard method over gRPC. + + Gets a Tensorboard. + + Returns: + Callable[[~.GetTensorboardRequest], + ~.Tensorboard]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_tensorboard' not in self._stubs: + self._stubs['get_tensorboard'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboard', + request_serializer=tensorboard_service.GetTensorboardRequest.serialize, + response_deserializer=tensorboard.Tensorboard.deserialize, + ) + return self._stubs['get_tensorboard'] + + @property + def update_tensorboard(self) -> Callable[ + [tensorboard_service.UpdateTensorboardRequest], + operations_pb2.Operation]: + r"""Return a callable for the update tensorboard method over gRPC. + + Updates a Tensorboard. + + Returns: + Callable[[~.UpdateTensorboardRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_tensorboard' not in self._stubs: + self._stubs['update_tensorboard'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboard', + request_serializer=tensorboard_service.UpdateTensorboardRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_tensorboard'] + + @property + def list_tensorboards(self) -> Callable[ + [tensorboard_service.ListTensorboardsRequest], + tensorboard_service.ListTensorboardsResponse]: + r"""Return a callable for the list tensorboards method over gRPC. + + Lists Tensorboards in a Location. + + Returns: + Callable[[~.ListTensorboardsRequest], + ~.ListTensorboardsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
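+        # Every RPC property on this transport follows this same lazy pattern:
+        # build the stub once on first access via
+        # ``self.grpc_channel.unary_unary(...)`` (or ``unary_stream`` for the
+        # server-streaming blob-read RPC), cache it in ``self._stubs``, and
+        # return the cached callable on every subsequent access.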
+ if 'list_tensorboards' not in self._stubs: + self._stubs['list_tensorboards'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboards', + request_serializer=tensorboard_service.ListTensorboardsRequest.serialize, + response_deserializer=tensorboard_service.ListTensorboardsResponse.deserialize, + ) + return self._stubs['list_tensorboards'] + + @property + def delete_tensorboard(self) -> Callable[ + [tensorboard_service.DeleteTensorboardRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete tensorboard method over gRPC. + + Deletes a Tensorboard. + + Returns: + Callable[[~.DeleteTensorboardRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_tensorboard' not in self._stubs: + self._stubs['delete_tensorboard'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboard', + request_serializer=tensorboard_service.DeleteTensorboardRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_tensorboard'] + + @property + def create_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.CreateTensorboardExperimentRequest], + gca_tensorboard_experiment.TensorboardExperiment]: + r"""Return a callable for the create tensorboard experiment method over gRPC. + + Creates a TensorboardExperiment. + + Returns: + Callable[[~.CreateTensorboardExperimentRequest], + ~.TensorboardExperiment]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_tensorboard_experiment' not in self._stubs: + self._stubs['create_tensorboard_experiment'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardExperiment', + request_serializer=tensorboard_service.CreateTensorboardExperimentRequest.serialize, + response_deserializer=gca_tensorboard_experiment.TensorboardExperiment.deserialize, + ) + return self._stubs['create_tensorboard_experiment'] + + @property + def get_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.GetTensorboardExperimentRequest], + tensorboard_experiment.TensorboardExperiment]: + r"""Return a callable for the get tensorboard experiment method over gRPC. + + Gets a TensorboardExperiment. + + Returns: + Callable[[~.GetTensorboardExperimentRequest], + ~.TensorboardExperiment]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_tensorboard_experiment' not in self._stubs: + self._stubs['get_tensorboard_experiment'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardExperiment', + request_serializer=tensorboard_service.GetTensorboardExperimentRequest.serialize, + response_deserializer=tensorboard_experiment.TensorboardExperiment.deserialize, + ) + return self._stubs['get_tensorboard_experiment'] + + @property + def update_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.UpdateTensorboardExperimentRequest], + gca_tensorboard_experiment.TensorboardExperiment]: + r"""Return a callable for the update tensorboard experiment method over gRPC. + + Updates a TensorboardExperiment. + + Returns: + Callable[[~.UpdateTensorboardExperimentRequest], + ~.TensorboardExperiment]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_tensorboard_experiment' not in self._stubs: + self._stubs['update_tensorboard_experiment'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardExperiment', + request_serializer=tensorboard_service.UpdateTensorboardExperimentRequest.serialize, + response_deserializer=gca_tensorboard_experiment.TensorboardExperiment.deserialize, + ) + return self._stubs['update_tensorboard_experiment'] + + @property + def list_tensorboard_experiments(self) -> Callable[ + [tensorboard_service.ListTensorboardExperimentsRequest], + tensorboard_service.ListTensorboardExperimentsResponse]: + r"""Return a callable for the list tensorboard experiments method over gRPC. + + Lists TensorboardExperiments in a Location. + + Returns: + Callable[[~.ListTensorboardExperimentsRequest], + ~.ListTensorboardExperimentsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_tensorboard_experiments' not in self._stubs: + self._stubs['list_tensorboard_experiments'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardExperiments', + request_serializer=tensorboard_service.ListTensorboardExperimentsRequest.serialize, + response_deserializer=tensorboard_service.ListTensorboardExperimentsResponse.deserialize, + ) + return self._stubs['list_tensorboard_experiments'] + + @property + def delete_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.DeleteTensorboardExperimentRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete tensorboard experiment method over gRPC. + + Deletes a TensorboardExperiment. + + Returns: + Callable[[~.DeleteTensorboardExperimentRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
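+        # The client layer normally reaches these stubs through the wrapped
+        # methods set up in ``_prep_wrapped_messages``, but a stub callable
+        # can also be invoked directly. A hedged sketch (the resource name is
+        # a placeholder; assumes Application Default Credentials are set up):
+        #
+        #     from google.cloud.aiplatform_v1beta1.types import tensorboard_service
+        #
+        #     transport = TensorboardServiceGrpcTransport()
+        #     request = tensorboard_service.GetTensorboardRequest(
+        #         name="projects/my-project/locations/us-central1/tensorboards/123",
+        #     )
+        #     tb = transport.get_tensorboard(request)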
+ if 'delete_tensorboard_experiment' not in self._stubs: + self._stubs['delete_tensorboard_experiment'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardExperiment', + request_serializer=tensorboard_service.DeleteTensorboardExperimentRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_tensorboard_experiment'] + + @property + def create_tensorboard_run(self) -> Callable[ + [tensorboard_service.CreateTensorboardRunRequest], + gca_tensorboard_run.TensorboardRun]: + r"""Return a callable for the create tensorboard run method over gRPC. + + Creates a TensorboardRun. + + Returns: + Callable[[~.CreateTensorboardRunRequest], + ~.TensorboardRun]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_tensorboard_run' not in self._stubs: + self._stubs['create_tensorboard_run'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardRun', + request_serializer=tensorboard_service.CreateTensorboardRunRequest.serialize, + response_deserializer=gca_tensorboard_run.TensorboardRun.deserialize, + ) + return self._stubs['create_tensorboard_run'] + + @property + def get_tensorboard_run(self) -> Callable[ + [tensorboard_service.GetTensorboardRunRequest], + tensorboard_run.TensorboardRun]: + r"""Return a callable for the get tensorboard run method over gRPC. + + Gets a TensorboardRun. + + Returns: + Callable[[~.GetTensorboardRunRequest], + ~.TensorboardRun]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_tensorboard_run' not in self._stubs: + self._stubs['get_tensorboard_run'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardRun', + request_serializer=tensorboard_service.GetTensorboardRunRequest.serialize, + response_deserializer=tensorboard_run.TensorboardRun.deserialize, + ) + return self._stubs['get_tensorboard_run'] + + @property + def update_tensorboard_run(self) -> Callable[ + [tensorboard_service.UpdateTensorboardRunRequest], + gca_tensorboard_run.TensorboardRun]: + r"""Return a callable for the update tensorboard run method over gRPC. + + Updates a TensorboardRun. + + Returns: + Callable[[~.UpdateTensorboardRunRequest], + ~.TensorboardRun]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'update_tensorboard_run' not in self._stubs: + self._stubs['update_tensorboard_run'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardRun', + request_serializer=tensorboard_service.UpdateTensorboardRunRequest.serialize, + response_deserializer=gca_tensorboard_run.TensorboardRun.deserialize, + ) + return self._stubs['update_tensorboard_run'] + + @property + def list_tensorboard_runs(self) -> Callable[ + [tensorboard_service.ListTensorboardRunsRequest], + tensorboard_service.ListTensorboardRunsResponse]: + r"""Return a callable for the list tensorboard runs method over gRPC. + + Lists TensorboardRuns in a Location. + + Returns: + Callable[[~.ListTensorboardRunsRequest], + ~.ListTensorboardRunsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_tensorboard_runs' not in self._stubs: + self._stubs['list_tensorboard_runs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardRuns', + request_serializer=tensorboard_service.ListTensorboardRunsRequest.serialize, + response_deserializer=tensorboard_service.ListTensorboardRunsResponse.deserialize, + ) + return self._stubs['list_tensorboard_runs'] + + @property + def delete_tensorboard_run(self) -> Callable[ + [tensorboard_service.DeleteTensorboardRunRequest], + operations_pb2.Operation]: + r"""Return a callable for the delete tensorboard run method over gRPC. + + Deletes a TensorboardRun. + + Returns: + Callable[[~.DeleteTensorboardRunRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_tensorboard_run' not in self._stubs: + self._stubs['delete_tensorboard_run'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardRun', + request_serializer=tensorboard_service.DeleteTensorboardRunRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_tensorboard_run'] + + @property + def create_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.CreateTensorboardTimeSeriesRequest], + gca_tensorboard_time_series.TensorboardTimeSeries]: + r"""Return a callable for the create tensorboard time series method over gRPC. + + Creates a TensorboardTimeSeries. + + Returns: + Callable[[~.CreateTensorboardTimeSeriesRequest], + ~.TensorboardTimeSeries]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_tensorboard_time_series' not in self._stubs: + self._stubs['create_tensorboard_time_series'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardTimeSeries', + request_serializer=tensorboard_service.CreateTensorboardTimeSeriesRequest.serialize, + response_deserializer=gca_tensorboard_time_series.TensorboardTimeSeries.deserialize, + ) + return self._stubs['create_tensorboard_time_series'] + + @property + def get_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.GetTensorboardTimeSeriesRequest], + tensorboard_time_series.TensorboardTimeSeries]: + r"""Return a callable for the get tensorboard time series method over gRPC. + + Gets a TensorboardTimeSeries. + + Returns: + Callable[[~.GetTensorboardTimeSeriesRequest], + ~.TensorboardTimeSeries]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_tensorboard_time_series' not in self._stubs: + self._stubs['get_tensorboard_time_series'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardTimeSeries', + request_serializer=tensorboard_service.GetTensorboardTimeSeriesRequest.serialize, + response_deserializer=tensorboard_time_series.TensorboardTimeSeries.deserialize, + ) + return self._stubs['get_tensorboard_time_series'] + + @property + def update_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.UpdateTensorboardTimeSeriesRequest], + gca_tensorboard_time_series.TensorboardTimeSeries]: + r"""Return a callable for the update tensorboard time series method over gRPC. + + Updates a TensorboardTimeSeries. + + Returns: + Callable[[~.UpdateTensorboardTimeSeriesRequest], + ~.TensorboardTimeSeries]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_tensorboard_time_series' not in self._stubs: + self._stubs['update_tensorboard_time_series'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardTimeSeries', + request_serializer=tensorboard_service.UpdateTensorboardTimeSeriesRequest.serialize, + response_deserializer=gca_tensorboard_time_series.TensorboardTimeSeries.deserialize, + ) + return self._stubs['update_tensorboard_time_series'] + + @property + def list_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.ListTensorboardTimeSeriesRequest], + tensorboard_service.ListTensorboardTimeSeriesResponse]: + r"""Return a callable for the list tensorboard time series method over gRPC. + + Lists TensorboardTimeSeries in a Location. + + Returns: + Callable[[~.ListTensorboardTimeSeriesRequest], + ~.ListTensorboardTimeSeriesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if 'list_tensorboard_time_series' not in self._stubs:
+            self._stubs['list_tensorboard_time_series'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardTimeSeries',
+                request_serializer=tensorboard_service.ListTensorboardTimeSeriesRequest.serialize,
+                response_deserializer=tensorboard_service.ListTensorboardTimeSeriesResponse.deserialize,
+            )
+        return self._stubs['list_tensorboard_time_series']
+
+    @property
+    def delete_tensorboard_time_series(self) -> Callable[
+            [tensorboard_service.DeleteTensorboardTimeSeriesRequest],
+            operations_pb2.Operation]:
+        r"""Return a callable for the delete tensorboard time series method over gRPC.
+
+        Deletes a TensorboardTimeSeries.
+
+        Returns:
+            Callable[[~.DeleteTensorboardTimeSeriesRequest],
+                    ~.Operation]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'delete_tensorboard_time_series' not in self._stubs:
+            self._stubs['delete_tensorboard_time_series'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardTimeSeries',
+                request_serializer=tensorboard_service.DeleteTensorboardTimeSeriesRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['delete_tensorboard_time_series']
+
+    @property
+    def read_tensorboard_time_series_data(self) -> Callable[
+            [tensorboard_service.ReadTensorboardTimeSeriesDataRequest],
+            tensorboard_service.ReadTensorboardTimeSeriesDataResponse]:
+        r"""Return a callable for the read tensorboard time series
+        data method over gRPC.
+
+        Reads a TensorboardTimeSeries' data. Data is returned in
+        paginated responses. By default, if the number of data points
+        stored is less than 1000, all data will be returned. Otherwise,
+        1000 data points will be randomly selected from this time series
+        and returned. This default can be overridden by setting
+        max_data_points.
+
+        Returns:
+            Callable[[~.ReadTensorboardTimeSeriesDataRequest],
+                    ~.ReadTensorboardTimeSeriesDataResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'read_tensorboard_time_series_data' not in self._stubs:
+            self._stubs['read_tensorboard_time_series_data'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.TensorboardService/ReadTensorboardTimeSeriesData',
+                request_serializer=tensorboard_service.ReadTensorboardTimeSeriesDataRequest.serialize,
+                response_deserializer=tensorboard_service.ReadTensorboardTimeSeriesDataResponse.deserialize,
+            )
+        return self._stubs['read_tensorboard_time_series_data']
+
+    @property
+    def read_tensorboard_blob_data(self) -> Callable[
+            [tensorboard_service.ReadTensorboardBlobDataRequest],
+            tensorboard_service.ReadTensorboardBlobDataResponse]:
+        r"""Return a callable for the read tensorboard blob data method over gRPC.
+
+        Gets bytes of TensorboardBlobs. This allows reading blob data
+        stored in the consumer project's Cloud Storage bucket without
+        users having to obtain Cloud Storage access permission.
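+
+        Note: unlike the unary methods on this transport, this RPC is
+        registered with ``unary_stream`` below, so the returned callable
+        yields a stream of ``ReadTensorboardBlobDataResponse`` messages.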
+
+        Returns:
+            Callable[[~.ReadTensorboardBlobDataRequest],
+                    ~.ReadTensorboardBlobDataResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'read_tensorboard_blob_data' not in self._stubs:
+            self._stubs['read_tensorboard_blob_data'] = self.grpc_channel.unary_stream(
+                '/google.cloud.aiplatform.v1beta1.TensorboardService/ReadTensorboardBlobData',
+                request_serializer=tensorboard_service.ReadTensorboardBlobDataRequest.serialize,
+                response_deserializer=tensorboard_service.ReadTensorboardBlobDataResponse.deserialize,
+            )
+        return self._stubs['read_tensorboard_blob_data']
+
+    @property
+    def write_tensorboard_run_data(self) -> Callable[
+            [tensorboard_service.WriteTensorboardRunDataRequest],
+            tensorboard_service.WriteTensorboardRunDataResponse]:
+        r"""Return a callable for the write tensorboard run data method over gRPC.
+
+        Writes time series data points into multiple
+        TensorboardTimeSeries under a TensorboardRun. If any
+        data fails to be ingested, an error will be returned.
+
+        Returns:
+            Callable[[~.WriteTensorboardRunDataRequest],
+                    ~.WriteTensorboardRunDataResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'write_tensorboard_run_data' not in self._stubs:
+            self._stubs['write_tensorboard_run_data'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.TensorboardService/WriteTensorboardRunData',
+                request_serializer=tensorboard_service.WriteTensorboardRunDataRequest.serialize,
+                response_deserializer=tensorboard_service.WriteTensorboardRunDataResponse.deserialize,
+            )
+        return self._stubs['write_tensorboard_run_data']
+
+    @property
+    def export_tensorboard_time_series_data(self) -> Callable[
+            [tensorboard_service.ExportTensorboardTimeSeriesDataRequest],
+            tensorboard_service.ExportTensorboardTimeSeriesDataResponse]:
+        r"""Return a callable for the export tensorboard time series
+        data method over gRPC.
+
+        Exports a TensorboardTimeSeries' data. Data is
+        returned in paginated responses.
+
+        Returns:
+            Callable[[~.ExportTensorboardTimeSeriesDataRequest],
+                    ~.ExportTensorboardTimeSeriesDataResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if 'export_tensorboard_time_series_data' not in self._stubs: + self._stubs['export_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/ExportTensorboardTimeSeriesData', + request_serializer=tensorboard_service.ExportTensorboardTimeSeriesDataRequest.serialize, + response_deserializer=tensorboard_service.ExportTensorboardTimeSeriesDataResponse.deserialize, + ) + return self._stubs['export_tensorboard_time_series_data'] + + +__all__ = ( + 'TensorboardServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..3acc82eb36 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py @@ -0,0 +1,893 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +import packaging.version + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment as gca_tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_service +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series as gca_tensorboard_time_series +from google.longrunning import operations_pb2 # type: ignore +from .base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import TensorboardServiceGrpcTransport + + +class TensorboardServiceGrpcAsyncIOTransport(TensorboardServiceTransport): + """gRPC AsyncIO backend transport for TensorboardService. + + TensorboardService + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(cls,
+                       host: str = 'aiplatform.googleapis.com',
+                       credentials: ga_credentials.Credentials = None,
+                       credentials_file: Optional[str] = None,
+                       scopes: Optional[Sequence[str]] = None,
+                       quota_project_id: Optional[str] = None,
+                       **kwargs) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    def __init__(self, *,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            channel: aio.Channel = None,
+            api_mtls_endpoint: str = None,
+            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+            ssl_channel_credentials: grpc.ChannelCredentials = None,
+            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+            quota_project_id: Optional[str] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated.
A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the gRPC channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure a mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self-signed JWT should
+                be used for service account credentials.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        self._grpc_channel = None
+        self._ssl_channel_credentials = ssl_channel_credentials
+        self._stubs: Dict[str, Callable] = {}
+        self._operations_client = None
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Ignore credentials if a channel was passed.
+            credentials = False
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+        else:
+            if api_mtls_endpoint:
+                host = api_mtls_endpoint
+
+                # Create SSL credentials with client_cert_source or application
+                # default SSL credentials.
+                if client_cert_source:
+                    cert, key = client_cert_source()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+                else:
+                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+            else:
+                if client_cert_source_for_mtls and not ssl_channel_credentials:
+                    cert, key = client_cert_source_for_mtls()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+
+        # The base transport sets the host, credentials, and scopes.
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+        )
+
+        if not self._grpc_channel:
+            self._grpc_channel = type(self).create_channel(
+                self._host,
+                credentials=self._credentials,
+                credentials_file=credentials_file,
+                scopes=self._scopes,
+                ssl_credentials=self._ssl_channel_credentials,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        # Wrap messages. This must be done after self._grpc_channel exists.
+        self._prep_wrapped_messages(client_info)
+
+    @property
+    def grpc_channel(self) -> aio.Channel:
+        """Create the channel designed to connect to this service.
+
+        This property caches on the instance; repeated calls return
+        the same channel.
+        """
+        # Return the channel from cache.
+ return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_tensorboard(self) -> Callable[ + [tensorboard_service.CreateTensorboardRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create tensorboard method over gRPC. + + Creates a Tensorboard. + + Returns: + Callable[[~.CreateTensorboardRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_tensorboard' not in self._stubs: + self._stubs['create_tensorboard'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboard', + request_serializer=tensorboard_service.CreateTensorboardRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_tensorboard'] + + @property + def get_tensorboard(self) -> Callable[ + [tensorboard_service.GetTensorboardRequest], + Awaitable[tensorboard.Tensorboard]]: + r"""Return a callable for the get tensorboard method over gRPC. + + Gets a Tensorboard. + + Returns: + Callable[[~.GetTensorboardRequest], + Awaitable[~.Tensorboard]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_tensorboard' not in self._stubs: + self._stubs['get_tensorboard'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboard', + request_serializer=tensorboard_service.GetTensorboardRequest.serialize, + response_deserializer=tensorboard.Tensorboard.deserialize, + ) + return self._stubs['get_tensorboard'] + + @property + def update_tensorboard(self) -> Callable[ + [tensorboard_service.UpdateTensorboardRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the update tensorboard method over gRPC. + + Updates a Tensorboard. + + Returns: + Callable[[~.UpdateTensorboardRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
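+        # On this AsyncIO transport each stub returns an awaitable. A hedged
+        # sketch (the resource name is a placeholder; assumes Application
+        # Default Credentials are set up):
+        #
+        #     import asyncio
+        #     from google.cloud.aiplatform_v1beta1.types import tensorboard_service
+        #
+        #     async def main():
+        #         transport = TensorboardServiceGrpcAsyncIOTransport()
+        #         request = tensorboard_service.GetTensorboardRequest(
+        #             name="projects/my-project/locations/us-central1/tensorboards/123",
+        #         )
+        #         tb = await transport.get_tensorboard(request)
+        #
+        #     asyncio.run(main())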
+ if 'update_tensorboard' not in self._stubs: + self._stubs['update_tensorboard'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboard', + request_serializer=tensorboard_service.UpdateTensorboardRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_tensorboard'] + + @property + def list_tensorboards(self) -> Callable[ + [tensorboard_service.ListTensorboardsRequest], + Awaitable[tensorboard_service.ListTensorboardsResponse]]: + r"""Return a callable for the list tensorboards method over gRPC. + + Lists Tensorboards in a Location. + + Returns: + Callable[[~.ListTensorboardsRequest], + Awaitable[~.ListTensorboardsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_tensorboards' not in self._stubs: + self._stubs['list_tensorboards'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboards', + request_serializer=tensorboard_service.ListTensorboardsRequest.serialize, + response_deserializer=tensorboard_service.ListTensorboardsResponse.deserialize, + ) + return self._stubs['list_tensorboards'] + + @property + def delete_tensorboard(self) -> Callable[ + [tensorboard_service.DeleteTensorboardRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete tensorboard method over gRPC. + + Deletes a Tensorboard. + + Returns: + Callable[[~.DeleteTensorboardRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_tensorboard' not in self._stubs: + self._stubs['delete_tensorboard'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboard', + request_serializer=tensorboard_service.DeleteTensorboardRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_tensorboard'] + + @property + def create_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.CreateTensorboardExperimentRequest], + Awaitable[gca_tensorboard_experiment.TensorboardExperiment]]: + r"""Return a callable for the create tensorboard experiment method over gRPC. + + Creates a TensorboardExperiment. + + Returns: + Callable[[~.CreateTensorboardExperimentRequest], + Awaitable[~.TensorboardExperiment]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_tensorboard_experiment' not in self._stubs: + self._stubs['create_tensorboard_experiment'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardExperiment', + request_serializer=tensorboard_service.CreateTensorboardExperimentRequest.serialize, + response_deserializer=gca_tensorboard_experiment.TensorboardExperiment.deserialize, + ) + return self._stubs['create_tensorboard_experiment'] + + @property + def get_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.GetTensorboardExperimentRequest], + Awaitable[tensorboard_experiment.TensorboardExperiment]]: + r"""Return a callable for the get tensorboard experiment method over gRPC. + + Gets a TensorboardExperiment. + + Returns: + Callable[[~.GetTensorboardExperimentRequest], + Awaitable[~.TensorboardExperiment]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_tensorboard_experiment' not in self._stubs: + self._stubs['get_tensorboard_experiment'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardExperiment', + request_serializer=tensorboard_service.GetTensorboardExperimentRequest.serialize, + response_deserializer=tensorboard_experiment.TensorboardExperiment.deserialize, + ) + return self._stubs['get_tensorboard_experiment'] + + @property + def update_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.UpdateTensorboardExperimentRequest], + Awaitable[gca_tensorboard_experiment.TensorboardExperiment]]: + r"""Return a callable for the update tensorboard experiment method over gRPC. + + Updates a TensorboardExperiment. + + Returns: + Callable[[~.UpdateTensorboardExperimentRequest], + Awaitable[~.TensorboardExperiment]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_tensorboard_experiment' not in self._stubs: + self._stubs['update_tensorboard_experiment'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardExperiment', + request_serializer=tensorboard_service.UpdateTensorboardExperimentRequest.serialize, + response_deserializer=gca_tensorboard_experiment.TensorboardExperiment.deserialize, + ) + return self._stubs['update_tensorboard_experiment'] + + @property + def list_tensorboard_experiments(self) -> Callable[ + [tensorboard_service.ListTensorboardExperimentsRequest], + Awaitable[tensorboard_service.ListTensorboardExperimentsResponse]]: + r"""Return a callable for the list tensorboard experiments method over gRPC. + + Lists TensorboardExperiments in a Location. + + Returns: + Callable[[~.ListTensorboardExperimentsRequest], + Awaitable[~.ListTensorboardExperimentsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
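+        # Hedged sketch, not generated code: pagination is manual at the
+        # transport layer; a caller would loop on next_page_token, e.g.
+        #
+        #     req = tensorboard_service.ListTensorboardExperimentsRequest(parent=parent)
+        #     while True:
+        #         resp = await transport.list_tensorboard_experiments(req)
+        #         if not resp.next_page_token:
+        #             break
+        #         req.page_token = resp.next_page_token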
+ if 'list_tensorboard_experiments' not in self._stubs: + self._stubs['list_tensorboard_experiments'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardExperiments', + request_serializer=tensorboard_service.ListTensorboardExperimentsRequest.serialize, + response_deserializer=tensorboard_service.ListTensorboardExperimentsResponse.deserialize, + ) + return self._stubs['list_tensorboard_experiments'] + + @property + def delete_tensorboard_experiment(self) -> Callable[ + [tensorboard_service.DeleteTensorboardExperimentRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete tensorboard experiment method over gRPC. + + Deletes a TensorboardExperiment. + + Returns: + Callable[[~.DeleteTensorboardExperimentRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_tensorboard_experiment' not in self._stubs: + self._stubs['delete_tensorboard_experiment'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardExperiment', + request_serializer=tensorboard_service.DeleteTensorboardExperimentRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_tensorboard_experiment'] + + @property + def create_tensorboard_run(self) -> Callable[ + [tensorboard_service.CreateTensorboardRunRequest], + Awaitable[gca_tensorboard_run.TensorboardRun]]: + r"""Return a callable for the create tensorboard run method over gRPC. + + Creates a TensorboardRun. + + Returns: + Callable[[~.CreateTensorboardRunRequest], + Awaitable[~.TensorboardRun]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_tensorboard_run' not in self._stubs: + self._stubs['create_tensorboard_run'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardRun', + request_serializer=tensorboard_service.CreateTensorboardRunRequest.serialize, + response_deserializer=gca_tensorboard_run.TensorboardRun.deserialize, + ) + return self._stubs['create_tensorboard_run'] + + @property + def get_tensorboard_run(self) -> Callable[ + [tensorboard_service.GetTensorboardRunRequest], + Awaitable[tensorboard_run.TensorboardRun]]: + r"""Return a callable for the get tensorboard run method over gRPC. + + Gets a TensorboardRun. + + Returns: + Callable[[~.GetTensorboardRunRequest], + Awaitable[~.TensorboardRun]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
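+        # Hedged sketch, not generated code: Get requests at this layer take
+        # the full resource name, e.g. (hypothetical IDs)
+        #
+        #     req = tensorboard_service.GetTensorboardRunRequest(
+        #         name="projects/p/locations/l/tensorboards/t/experiments/e/runs/r",
+        #     )
+        #     run = await transport.get_tensorboard_run(req)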
+ if 'get_tensorboard_run' not in self._stubs: + self._stubs['get_tensorboard_run'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardRun', + request_serializer=tensorboard_service.GetTensorboardRunRequest.serialize, + response_deserializer=tensorboard_run.TensorboardRun.deserialize, + ) + return self._stubs['get_tensorboard_run'] + + @property + def update_tensorboard_run(self) -> Callable[ + [tensorboard_service.UpdateTensorboardRunRequest], + Awaitable[gca_tensorboard_run.TensorboardRun]]: + r"""Return a callable for the update tensorboard run method over gRPC. + + Updates a TensorboardRun. + + Returns: + Callable[[~.UpdateTensorboardRunRequest], + Awaitable[~.TensorboardRun]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_tensorboard_run' not in self._stubs: + self._stubs['update_tensorboard_run'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardRun', + request_serializer=tensorboard_service.UpdateTensorboardRunRequest.serialize, + response_deserializer=gca_tensorboard_run.TensorboardRun.deserialize, + ) + return self._stubs['update_tensorboard_run'] + + @property + def list_tensorboard_runs(self) -> Callable[ + [tensorboard_service.ListTensorboardRunsRequest], + Awaitable[tensorboard_service.ListTensorboardRunsResponse]]: + r"""Return a callable for the list tensorboard runs method over gRPC. + + Lists TensorboardRuns in a Location. + + Returns: + Callable[[~.ListTensorboardRunsRequest], + Awaitable[~.ListTensorboardRunsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_tensorboard_runs' not in self._stubs: + self._stubs['list_tensorboard_runs'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardRuns', + request_serializer=tensorboard_service.ListTensorboardRunsRequest.serialize, + response_deserializer=tensorboard_service.ListTensorboardRunsResponse.deserialize, + ) + return self._stubs['list_tensorboard_runs'] + + @property + def delete_tensorboard_run(self) -> Callable[ + [tensorboard_service.DeleteTensorboardRunRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the delete tensorboard run method over gRPC. + + Deletes a TensorboardRun. + + Returns: + Callable[[~.DeleteTensorboardRunRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_tensorboard_run' not in self._stubs: + self._stubs['delete_tensorboard_run'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardRun', + request_serializer=tensorboard_service.DeleteTensorboardRunRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['delete_tensorboard_run'] + + @property + def create_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.CreateTensorboardTimeSeriesRequest], + Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries]]: + r"""Return a callable for the create tensorboard time series method over gRPC. + + Creates a TensorboardTimeSeries. + + Returns: + Callable[[~.CreateTensorboardTimeSeriesRequest], + Awaitable[~.TensorboardTimeSeries]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_tensorboard_time_series' not in self._stubs: + self._stubs['create_tensorboard_time_series'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardTimeSeries', + request_serializer=tensorboard_service.CreateTensorboardTimeSeriesRequest.serialize, + response_deserializer=gca_tensorboard_time_series.TensorboardTimeSeries.deserialize, + ) + return self._stubs['create_tensorboard_time_series'] + + @property + def get_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.GetTensorboardTimeSeriesRequest], + Awaitable[tensorboard_time_series.TensorboardTimeSeries]]: + r"""Return a callable for the get tensorboard time series method over gRPC. + + Gets a TensorboardTimeSeries. + + Returns: + Callable[[~.GetTensorboardTimeSeriesRequest], + Awaitable[~.TensorboardTimeSeries]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_tensorboard_time_series' not in self._stubs: + self._stubs['get_tensorboard_time_series'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardTimeSeries', + request_serializer=tensorboard_service.GetTensorboardTimeSeriesRequest.serialize, + response_deserializer=tensorboard_time_series.TensorboardTimeSeries.deserialize, + ) + return self._stubs['get_tensorboard_time_series'] + + @property + def update_tensorboard_time_series(self) -> Callable[ + [tensorboard_service.UpdateTensorboardTimeSeriesRequest], + Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries]]: + r"""Return a callable for the update tensorboard time series method over gRPC. + + Updates a TensorboardTimeSeries. + + Returns: + Callable[[~.UpdateTensorboardTimeSeriesRequest], + Awaitable[~.TensorboardTimeSeries]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
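+        # Hedged sketch, not generated code: Update requests in this service
+        # pair the resource with a field mask naming the fields to change,
+        # e.g.
+        #
+        #     from google.protobuf import field_mask_pb2
+        #     req = tensorboard_service.UpdateTensorboardTimeSeriesRequest(
+        #         tensorboard_time_series=time_series,
+        #         update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
+        #     )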
+        if 'update_tensorboard_time_series' not in self._stubs:
+            self._stubs['update_tensorboard_time_series'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardTimeSeries',
+                request_serializer=tensorboard_service.UpdateTensorboardTimeSeriesRequest.serialize,
+                response_deserializer=gca_tensorboard_time_series.TensorboardTimeSeries.deserialize,
+            )
+        return self._stubs['update_tensorboard_time_series']
+
+    @property
+    def list_tensorboard_time_series(self) -> Callable[
+            [tensorboard_service.ListTensorboardTimeSeriesRequest],
+            Awaitable[tensorboard_service.ListTensorboardTimeSeriesResponse]]:
+        r"""Return a callable for the list tensorboard time series method over gRPC.
+
+        Lists TensorboardTimeSeries in a Location.
+
+        Returns:
+            Callable[[~.ListTensorboardTimeSeriesRequest],
+                    Awaitable[~.ListTensorboardTimeSeriesResponse]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'list_tensorboard_time_series' not in self._stubs:
+            self._stubs['list_tensorboard_time_series'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardTimeSeries',
+                request_serializer=tensorboard_service.ListTensorboardTimeSeriesRequest.serialize,
+                response_deserializer=tensorboard_service.ListTensorboardTimeSeriesResponse.deserialize,
+            )
+        return self._stubs['list_tensorboard_time_series']
+
+    @property
+    def delete_tensorboard_time_series(self) -> Callable[
+            [tensorboard_service.DeleteTensorboardTimeSeriesRequest],
+            Awaitable[operations_pb2.Operation]]:
+        r"""Return a callable for the delete tensorboard time series method over gRPC.
+
+        Deletes a TensorboardTimeSeries.
+
+        Returns:
+            Callable[[~.DeleteTensorboardTimeSeriesRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'delete_tensorboard_time_series' not in self._stubs:
+            self._stubs['delete_tensorboard_time_series'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardTimeSeries',
+                request_serializer=tensorboard_service.DeleteTensorboardTimeSeriesRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['delete_tensorboard_time_series']
+
+    @property
+    def read_tensorboard_time_series_data(self) -> Callable[
+            [tensorboard_service.ReadTensorboardTimeSeriesDataRequest],
+            Awaitable[tensorboard_service.ReadTensorboardTimeSeriesDataResponse]]:
+        r"""Return a callable for the read tensorboard time series
+        data method over gRPC.
+
+        Reads a TensorboardTimeSeries' data. Data is returned in
+        paginated responses. By default, if the number of data points
+        stored is less than 1000, all data will be returned. Otherwise,
+        1000 data points will be randomly selected from this time series
+        and returned. This default can be overridden by setting
+        ``max_data_points``.
+
+        Returns:
+            Callable[[~.ReadTensorboardTimeSeriesDataRequest],
+                    Awaitable[~.ReadTensorboardTimeSeriesDataResponse]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'read_tensorboard_time_series_data' not in self._stubs:
+            self._stubs['read_tensorboard_time_series_data'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.TensorboardService/ReadTensorboardTimeSeriesData',
+                request_serializer=tensorboard_service.ReadTensorboardTimeSeriesDataRequest.serialize,
+                response_deserializer=tensorboard_service.ReadTensorboardTimeSeriesDataResponse.deserialize,
+            )
+        return self._stubs['read_tensorboard_time_series_data']
+
+    @property
+    def read_tensorboard_blob_data(self) -> Callable[
+            [tensorboard_service.ReadTensorboardBlobDataRequest],
+            Awaitable[tensorboard_service.ReadTensorboardBlobDataResponse]]:
+        r"""Return a callable for the read tensorboard blob data method over gRPC.
+
+        Gets bytes of TensorboardBlobs.
+        This is to allow reading blob data stored in the consumer
+        project's Cloud Storage bucket without users having to
+        obtain Cloud Storage access permission.
+
+        Returns:
+            Callable[[~.ReadTensorboardBlobDataRequest],
+                    Awaitable[~.ReadTensorboardBlobDataResponse]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'read_tensorboard_blob_data' not in self._stubs:
+            self._stubs['read_tensorboard_blob_data'] = self.grpc_channel.unary_stream(
+                '/google.cloud.aiplatform.v1beta1.TensorboardService/ReadTensorboardBlobData',
+                request_serializer=tensorboard_service.ReadTensorboardBlobDataRequest.serialize,
+                response_deserializer=tensorboard_service.ReadTensorboardBlobDataResponse.deserialize,
+            )
+        return self._stubs['read_tensorboard_blob_data']
+
+    @property
+    def write_tensorboard_run_data(self) -> Callable[
+            [tensorboard_service.WriteTensorboardRunDataRequest],
+            Awaitable[tensorboard_service.WriteTensorboardRunDataResponse]]:
+        r"""Return a callable for the write tensorboard run data method over gRPC.
+
+        Writes time series data points into multiple
+        TensorboardTimeSeries under a TensorboardRun. If any
+        data fails to be ingested, an error is returned.
+
+        Returns:
+            Callable[[~.WriteTensorboardRunDataRequest],
+                    Awaitable[~.WriteTensorboardRunDataResponse]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'write_tensorboard_run_data' not in self._stubs:
+            self._stubs['write_tensorboard_run_data'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.TensorboardService/WriteTensorboardRunData',
+                request_serializer=tensorboard_service.WriteTensorboardRunDataRequest.serialize,
+                response_deserializer=tensorboard_service.WriteTensorboardRunDataResponse.deserialize,
+            )
+        return self._stubs['write_tensorboard_run_data']
+
+    @property
+    def export_tensorboard_time_series_data(self) -> Callable[
+            [tensorboard_service.ExportTensorboardTimeSeriesDataRequest],
+            Awaitable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]]:
+        r"""Return a callable for the export tensorboard time series
+        data method over gRPC.
+
+        Exports a TensorboardTimeSeries' data.
Data is + returned in paginated responses. + + Returns: + Callable[[~.ExportTensorboardTimeSeriesDataRequest], + Awaitable[~.ExportTensorboardTimeSeriesDataResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'export_tensorboard_time_series_data' not in self._stubs: + self._stubs['export_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.TensorboardService/ExportTensorboardTimeSeriesData', + request_serializer=tensorboard_service.ExportTensorboardTimeSeriesDataRequest.serialize, + response_deserializer=tensorboard_service.ExportTensorboardTimeSeriesDataResponse.deserialize, + ) + return self._stubs['export_tensorboard_time_series_data'] + + +__all__ = ( + 'TensorboardServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/__init__.py new file mode 100644 index 0000000000..d629499098 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import VizierServiceClient +from .async_client import VizierServiceAsyncClient + +__all__ = ( + 'VizierServiceClient', + 'VizierServiceAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py new file mode 100644 index 0000000000..0846b805df --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py @@ -0,0 +1,1279 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.vizier_service import pagers +from google.cloud.aiplatform_v1beta1.types import study +from google.cloud.aiplatform_v1beta1.types import study as gca_study +from google.cloud.aiplatform_v1beta1.types import vizier_service +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import VizierServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import VizierServiceGrpcAsyncIOTransport +from .client import VizierServiceClient + + +class VizierServiceAsyncClient: + """Vertex Vizier API. + Vizier service is a GCP service to solve blackbox optimization + problems, such as tuning machine learning hyperparameters and + searching over deep learning architectures. + """ + + _client: VizierServiceClient + + DEFAULT_ENDPOINT = VizierServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = VizierServiceClient.DEFAULT_MTLS_ENDPOINT + + custom_job_path = staticmethod(VizierServiceClient.custom_job_path) + parse_custom_job_path = staticmethod(VizierServiceClient.parse_custom_job_path) + study_path = staticmethod(VizierServiceClient.study_path) + parse_study_path = staticmethod(VizierServiceClient.parse_study_path) + trial_path = staticmethod(VizierServiceClient.trial_path) + parse_trial_path = staticmethod(VizierServiceClient.parse_trial_path) + common_billing_account_path = staticmethod(VizierServiceClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(VizierServiceClient.parse_common_billing_account_path) + common_folder_path = staticmethod(VizierServiceClient.common_folder_path) + parse_common_folder_path = staticmethod(VizierServiceClient.parse_common_folder_path) + common_organization_path = staticmethod(VizierServiceClient.common_organization_path) + parse_common_organization_path = staticmethod(VizierServiceClient.parse_common_organization_path) + common_project_path = staticmethod(VizierServiceClient.common_project_path) + parse_common_project_path = staticmethod(VizierServiceClient.parse_common_project_path) + common_location_path = staticmethod(VizierServiceClient.common_location_path) + parse_common_location_path = staticmethod(VizierServiceClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + VizierServiceAsyncClient: The constructed client. 
+ """ + return VizierServiceClient.from_service_account_info.__func__(VizierServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + VizierServiceAsyncClient: The constructed client. + """ + return VizierServiceClient.from_service_account_file.__func__(VizierServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> VizierServiceTransport: + """Returns the transport used by the client instance. + + Returns: + VizierServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(VizierServiceClient).get_transport_class, type(VizierServiceClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, VizierServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the vizier service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.VizierServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = VizierServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_study(self, + request: vizier_service.CreateStudyRequest = None, + *, + parent: str = None, + study: gca_study.Study = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_study.Study: + r"""Creates a Study. A resource name will be generated + after creation of the Study. 
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.CreateStudyRequest`):
+                The request object. Request message for
+                [VizierService.CreateStudy][google.cloud.aiplatform.v1beta1.VizierService.CreateStudy].
+            parent (:class:`str`):
+                Required. The resource name of the Location to create
+                the Study in. Format:
+                ``projects/{project}/locations/{location}``
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            study (:class:`google.cloud.aiplatform_v1beta1.types.Study`):
+                Required. The Study configuration
+                used to create the Study.
+
+                This corresponds to the ``study`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.types.Study:
+                A message representing a Study.
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent, study])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = vizier_service.CreateStudyRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if parent is not None:
+            request.parent = parent
+        if study is not None:
+            request.study = study
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.create_study,
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("parent", request.parent),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def get_study(self,
+            request: vizier_service.GetStudyRequest = None,
+            *,
+            name: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> study.Study:
+        r"""Gets a Study by name.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.GetStudyRequest`):
+                The request object. Request message for
+                [VizierService.GetStudy][google.cloud.aiplatform.v1beta1.VizierService.GetStudy].
+            name (:class:`str`):
+                Required. The name of the Study resource. Format:
+                ``projects/{project}/locations/{location}/studies/{study}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.types.Study:
+                A message representing a Study.
+        """
+        # Create or coerce a protobuf request object.
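+        # Hedged usage sketch, not generated code: the flattened `name`
+        # argument and a full request object are mutually exclusive, e.g.
+        #
+        #     client = VizierServiceAsyncClient()  # default credentials assumed
+        #     study = await client.get_study(
+        #         name="projects/p/locations/l/studies/s",  # hypothetical name
+        #     )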
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = vizier_service.GetStudyRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.get_study,
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def list_studies(self,
+            request: vizier_service.ListStudiesRequest = None,
+            *,
+            parent: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> pagers.ListStudiesAsyncPager:
+        r"""Lists all the studies in a region for an associated
+        project.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.ListStudiesRequest`):
+                The request object. Request message for
+                [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies].
+            parent (:class:`str`):
+                Required. The resource name of the Location to list
+                Studies from. Format:
+                ``projects/{project}/locations/{location}``
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.services.vizier_service.pagers.ListStudiesAsyncPager:
+                Response message for
+                [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies].
+
+                Iterating over this object will yield results and
+                resolve additional pages automatically.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = vizier_service.ListStudiesRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if parent is not None:
+            request.parent = parent
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.list_studies,
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
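+        # Hedged usage sketch, not generated code: the pager constructed
+        # further below supports `async for`, resolving additional pages
+        # transparently, e.g.
+        #
+        #     pager = await client.list_studies(parent=parent)
+        #     async for study in pager:
+        #         print(study.name)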
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListStudiesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_study(self, + request: vizier_service.DeleteStudyRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a Study. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteStudyRequest`): + The request object. Request message for + [VizierService.DeleteStudy][google.cloud.aiplatform.v1beta1.VizierService.DeleteStudy]. + name (:class:`str`): + Required. The name of the Study resource to be deleted. + Format: + ``projects/{project}/locations/{location}/studies/{study}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = vizier_service.DeleteStudyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_study, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def lookup_study(self, + request: vizier_service.LookupStudyRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Study: + r"""Looks a study up using the user-defined display_name field + instead of the fully qualified resource name. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.LookupStudyRequest`): + The request object. Request message for + [VizierService.LookupStudy][google.cloud.aiplatform.v1beta1.VizierService.LookupStudy]. + parent (:class:`str`): + Required. The resource name of the Location to get the + Study from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.types.Study:
+                A message representing a Study.
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = vizier_service.LookupStudyRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if parent is not None:
+            request.parent = parent
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.lookup_study,
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("parent", request.parent),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def suggest_trials(self,
+            request: vizier_service.SuggestTrialsRequest = None,
+            *,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> operation_async.AsyncOperation:
+        r"""Adds one or more Trials to a Study, with parameter values
+        suggested by Vertex Vizier. Returns a long-running operation
+        associated with the generation of Trial suggestions. When this
+        long-running operation succeeds, it will contain a
+        [SuggestTrialsResponse][google.cloud.aiplatform.v1beta1.SuggestTrialsResponse].
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.SuggestTrialsRequest`):
+                The request object. Request message for
+                [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials].
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation_async.AsyncOperation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`google.cloud.aiplatform_v1beta1.types.SuggestTrialsResponse`
+                Response message for
+                [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials].
+
+        """
+        # Create or coerce a protobuf request object.
+        request = vizier_service.SuggestTrialsRequest(request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.suggest_trials,
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("parent", request.parent),
+            )),
+        )
+
+        # Send the request.
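+        # Hedged usage sketch, not generated code: the operation future
+        # returned by this method resolves to a SuggestTrialsResponse, e.g.
+        #
+        #     lro = await client.suggest_trials(request)
+        #     suggested = await lro.result()  # SuggestTrialsResponse
+        #     for trial in suggested.trials:
+        #         ...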
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
+        response = operation_async.from_gapic(
+            response,
+            self._client._transport.operations_client,
+            vizier_service.SuggestTrialsResponse,
+            metadata_type=vizier_service.SuggestTrialsMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def create_trial(self,
+            request: vizier_service.CreateTrialRequest = None,
+            *,
+            parent: str = None,
+            trial: study.Trial = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> study.Trial:
+        r"""Adds a user provided Trial to a Study.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.CreateTrialRequest`):
+                The request object. Request message for
+                [VizierService.CreateTrial][google.cloud.aiplatform.v1beta1.VizierService.CreateTrial].
+            parent (:class:`str`):
+                Required. The resource name of the Study to create the
+                Trial in. Format:
+                ``projects/{project}/locations/{location}/studies/{study}``
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            trial (:class:`google.cloud.aiplatform_v1beta1.types.Trial`):
+                Required. The Trial to create.
+                This corresponds to the ``trial`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.types.Trial:
+                A message representing a Trial. A
+                Trial contains a unique set of
+                Parameters that has been or will be
+                evaluated, along with the objective
+                metrics obtained by running the Trial.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent, trial])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = vizier_service.CreateTrialRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if parent is not None:
+            request.parent = parent
+        if trial is not None:
+            request.trial = trial
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.create_trial,
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("parent", request.parent),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def get_trial(self,
+            request: vizier_service.GetTrialRequest = None,
+            *,
+            name: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> study.Trial:
+        r"""Gets a Trial.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.GetTrialRequest`):
+                The request object. Request message for
+                [VizierService.GetTrial][google.cloud.aiplatform.v1beta1.VizierService.GetTrial].
+            name (:class:`str`):
+                Required. The name of the Trial resource. Format:
+                ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.types.Trial:
+                A message representing a Trial. A
+                Trial contains a unique set of
+                Parameters that has been or will be
+                evaluated, along with the objective
+                metrics obtained by running the Trial.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = vizier_service.GetTrialRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.get_trial,
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def list_trials(self,
+            request: vizier_service.ListTrialsRequest = None,
+            *,
+            parent: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> pagers.ListTrialsAsyncPager:
+        r"""Lists the Trials associated with a Study.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.ListTrialsRequest`):
+                The request object. Request message for
+                [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials].
+            parent (:class:`str`):
+                Required. The resource name of the Study to list
+                Trials from. Format:
+                ``projects/{project}/locations/{location}/studies/{study}``
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.services.vizier_service.pagers.ListTrialsAsyncPager:
+                Response message for
+                [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials].
+
+                Iterating over this object will yield results and
+                resolve additional pages automatically.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = vizier_service.ListTrialsRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if parent is not None:
+            request.parent = parent
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.list_trials,
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("parent", request.parent),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # This method is paged; wrap the response in a pager, which provides
+        # an `__aiter__` convenience method.
+        response = pagers.ListTrialsAsyncPager(
+            method=rpc,
+            request=request,
+            response=response,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def add_trial_measurement(self,
+            request: vizier_service.AddTrialMeasurementRequest = None,
+            *,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> study.Trial:
+        r"""Adds a measurement of the objective metrics to a
+        Trial. This measurement is assumed to have been taken
+        before the Trial is complete.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.AddTrialMeasurementRequest`):
+                The request object. Request message for
+                [VizierService.AddTrialMeasurement][google.cloud.aiplatform.v1beta1.VizierService.AddTrialMeasurement].
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.types.Trial:
+                A message representing a Trial. A
+                Trial contains a unique set of
+                Parameters that has been or will be
+                evaluated, along with the objective
+                metrics obtained by running the Trial.
+
+        """
+        # Create or coerce a protobuf request object.
+        request = vizier_service.AddTrialMeasurementRequest(request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.add_trial_measurement,
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("trial_name", request.trial_name),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
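+        # Hedged usage sketch, not generated code: a caller reports
+        # intermediate metrics against the trial's full resource name, e.g.
+        # (field values are illustrative)
+        #
+        #     req = vizier_service.AddTrialMeasurementRequest(
+        #         trial_name=trial.name,
+        #         measurement=gca_study.Measurement(step_count=1),
+        #     )
+        #     trial = await client.add_trial_measurement(req)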
+        return response
+
+    async def complete_trial(self,
+            request: vizier_service.CompleteTrialRequest = None,
+            *,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> study.Trial:
+        r"""Marks a Trial as complete.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.CompleteTrialRequest`):
+                The request object. Request message for
+                [VizierService.CompleteTrial][google.cloud.aiplatform.v1beta1.VizierService.CompleteTrial].
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.aiplatform_v1beta1.types.Trial:
+                A message representing a Trial. A
+                Trial contains a unique set of
+                Parameters that has been or will be
+                evaluated, along with the objective
+                metrics obtained by running the Trial.
+
+        """
+        # Create or coerce a protobuf request object.
+        request = vizier_service.CompleteTrialRequest(request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.complete_trial,
+            default_timeout=5.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def delete_trial(self,
+            request: vizier_service.DeleteTrialRequest = None,
+            *,
+            name: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> None:
+        r"""Deletes a Trial.
+
+        Args:
+            request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteTrialRequest`):
+                The request object. Request message for
+                [VizierService.DeleteTrial][google.cloud.aiplatform.v1beta1.VizierService.DeleteTrial].
+            name (:class:`str`):
+                Required. The Trial's name. Format:
+                ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}``
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = vizier_service.DeleteTrialRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
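+        # Hedged usage sketch, not generated code: delete_trial returns None;
+        # failures surface as google.api_core exceptions, e.g.
+        #
+        #     try:
+        #         await client.delete_trial(name=trial.name)
+        #     except core_exceptions.NotFound:
+        #         pass  # the Trial was already gone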
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_trial, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def check_trial_early_stopping_state(self, + request: vizier_service.CheckTrialEarlyStoppingStateRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Checks whether a Trial should stop or not. Returns a + long-running operation. When the operation is successful, it + will contain a + [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse]. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.CheckTrialEarlyStoppingStateRequest`): + The request object. Request message for + [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.CheckTrialEarlyStoppingStateResponse` + Response message for + [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState]. + + """ + # Create or coerce a protobuf request object. + request = vizier_service.CheckTrialEarlyStoppingStateRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.check_trial_early_stopping_state, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("trial_name", request.trial_name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + vizier_service.CheckTrialEarlyStoppingStateResponse, + metadata_type=vizier_service.CheckTrialEarlyStoppingStateMetatdata, + ) + + # Done; return the response. + return response + + async def stop_trial(self, + request: vizier_service.StopTrialRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Trial: + r"""Stops a Trial. + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.StopTrialRequest`): + The request object. Request message for + [VizierService.StopTrial][google.cloud.aiplatform.v1beta1.VizierService.StopTrial]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Trial: + A message representing a Trial. A + Trial contains a unique set of + Parameters that has been or will be + evaluated, along with the objective + metrics got by running the Trial. + + """ + # Create or coerce a protobuf request object. + request = vizier_service.StopTrialRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.stop_trial, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_optimal_trials(self, + request: vizier_service.ListOptimalTrialsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> vizier_service.ListOptimalTrialsResponse: + r"""Lists the pareto-optimal Trials for multi-objective Study or the + optimal Trials for single-objective Study. The definition of + pareto-optimal can be checked in wiki page. + https://en.wikipedia.org/wiki/Pareto_efficiency + + Args: + request (:class:`google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsRequest`): + The request object. Request message for + [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials]. + parent (:class:`str`): + Required. The name of the Study that + the optimal Trial belongs to. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsResponse: + Response message for + [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = vizier_service.ListOptimalTrialsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_optimal_trials, + default_timeout=5.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
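(Once a Study has finished Trials, the pareto front can be fetched with the method being defined here; a sketch with the async client, where the resource ids are hypothetical and ``optimal_trials`` is the response field from the v1beta1 protos.)

from google.cloud.aiplatform_v1beta1 import VizierServiceAsyncClient

async def best_trials() -> None:
    client = VizierServiceAsyncClient()
    study = VizierServiceAsyncClient.study_path("my-project", "us-central1", "my-study")
    response = await client.list_optimal_trials(parent=study)
    for trial in response.optimal_trials:
        print(trial.name, trial.final_measurement)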
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "VizierServiceAsyncClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py new file mode 100644 index 0000000000..c41dc2463c --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py @@ -0,0 +1,1489 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.vizier_service import pagers +from google.cloud.aiplatform_v1beta1.types import study +from google.cloud.aiplatform_v1beta1.types import study as gca_study +from google.cloud.aiplatform_v1beta1.types import vizier_service +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import VizierServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import VizierServiceGrpcTransport +from .transports.grpc_asyncio import VizierServiceGrpcAsyncIOTransport + + +class VizierServiceClientMeta(type): + """Metaclass for the VizierService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """
+ _transport_registry = OrderedDict() # type: Dict[str, Type[VizierServiceTransport]]
+ _transport_registry["grpc"] = VizierServiceGrpcTransport
+ _transport_registry["grpc_asyncio"] = VizierServiceGrpcAsyncIOTransport
+
+ def get_transport_class(cls,
+ label: str = None,
+ ) -> Type[VizierServiceTransport]:
+ """Returns an appropriate transport class.
+
+ Args:
+ label: The name of the desired transport. If none is
+ provided, then the first transport in the registry is used.
+
+ Returns:
+ The transport class to use.
+ """
+ # If a specific transport is requested, return that one.
+ if label:
+ return cls._transport_registry[label]
+
+ # No transport is requested; return the default (that is, the first one
+ # in the dictionary).
+ return next(iter(cls._transport_registry.values()))
+
+
+class VizierServiceClient(metaclass=VizierServiceClientMeta):
+ """Vertex Vizier API.
+ Vizier service is a GCP service to solve blackbox optimization
+ problems, such as tuning machine learning hyperparameters and
+ searching over deep learning architectures.
+ """
+
+ @staticmethod
+ def _get_default_mtls_endpoint(api_endpoint):
+ """Converts api endpoint to mTLS endpoint.
+
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+ Args:
+ api_endpoint (Optional[str]): the api endpoint to convert.
+ Returns:
+ str: converted mTLS api endpoint.
+ """
+ if not api_endpoint:
+ return api_endpoint
+
+ mtls_endpoint_re = re.compile(
+ r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ )
+
+ m = mtls_endpoint_re.match(api_endpoint)
+ name, mtls, sandbox, googledomain = m.groups()
+ if mtls or not googledomain:
+ return api_endpoint
+
+ if sandbox:
+ return api_endpoint.replace(
+ "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+ )
+
+ return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+ DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+ DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
+ DEFAULT_ENDPOINT
+ )
+
+ @classmethod
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ info.
+
+ Args:
+ info (dict): The service account private key info.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ VizierServiceClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_info(info)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ @classmethod
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ file.
+
+ Args:
+ filename (str): The path to the service account private key json
+ file.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ VizierServiceClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_file(
+ filename)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ from_service_account_json = from_service_account_file
+
+ @property
+ def transport(self) -> VizierServiceTransport:
+ """Returns the transport used by the client instance.
+
+ Returns:
+ VizierServiceTransport: The transport used by the client
+ instance.
+ """ + return self._transport + + @staticmethod + def custom_job_path(project: str,location: str,custom_job: str,) -> str: + """Returns a fully-qualified custom_job string.""" + return "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) + + @staticmethod + def parse_custom_job_path(path: str) -> Dict[str,str]: + """Parses a custom_job path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/customJobs/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def study_path(project: str,location: str,study: str,) -> str: + """Returns a fully-qualified study string.""" + return "projects/{project}/locations/{location}/studies/{study}".format(project=project, location=location, study=study, ) + + @staticmethod + def parse_study_path(path: str) -> Dict[str,str]: + """Parses a study path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/studies/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def trial_path(project: str,location: str,study: str,trial: str,) -> str: + """Returns a fully-qualified trial string.""" + return "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, ) + + @staticmethod + def parse_trial_path(path: str) -> Dict[str,str]: + """Parses a trial path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/studies/(?P.+?)/trials/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + 
+ def parse_common_location_path(path: str) -> Dict[str,str]:
+ """Parse a location path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ def __init__(self, *,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ transport: Union[str, VizierServiceTransport, None] = None,
+ client_options: Optional[client_options_lib.ClientOptions] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiates the vizier service client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Union[str, VizierServiceTransport]): The
+ transport to use. If set to None, a transport is chosen
+ automatically.
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. It won't take effect if a ``transport`` instance is provided.
+ (1) The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+ environment variable can also be used to override the endpoint:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto switch to the
+ default mTLS endpoint if client certificate is present, this is
+ the default value). However, the ``api_endpoint`` property takes
+ precedence if provided.
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide client certificate for mutual TLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+ if isinstance(client_options, dict):
+ client_options = client_options_lib.from_dict(client_options)
+ if client_options is None:
+ client_options = client_options_lib.ClientOptions()
+
+ # Create SSL credentials for mutual TLS if needed.
+ use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
+
+ client_cert_source_func = None
+ is_mtls = False
+ if use_client_cert:
+ if client_options.client_cert_source:
+ is_mtls = True
+ client_cert_source_func = client_options.client_cert_source
+ else:
+ is_mtls = mtls.has_default_client_cert_source()
+ if is_mtls:
+ client_cert_source_func = mtls.default_client_cert_source()
+ else:
+ client_cert_source_func = None
+
+ # Figure out which api endpoint to use.
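(The branch below honors GOOGLE_API_USE_MTLS_ENDPOINT, but an explicit ``api_endpoint`` always takes precedence; a sketch of pinning a regional endpoint instead, where the hostname follows the usual Vertex regional form and is an assumption, not part of this diff.)

from google.api_core.client_options import ClientOptions
from google.cloud.aiplatform_v1beta1 import VizierServiceClient

client = VizierServiceClient(
    client_options=ClientOptions(api_endpoint="us-central1-aiplatform.googleapis.com"),
)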
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, VizierServiceTransport): + # transport is a VizierServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_study(self, + request: vizier_service.CreateStudyRequest = None, + *, + parent: str = None, + study: gca_study.Study = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_study.Study: + r"""Creates a Study. A resource name will be generated + after creation of the Study. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateStudyRequest): + The request object. Request message for + [VizierService.CreateStudy][google.cloud.aiplatform.v1beta1.VizierService.CreateStudy]. + parent (str): + Required. The resource name of the Location to create + the CustomJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + study (google.cloud.aiplatform_v1beta1.types.Study): + Required. The Study configuration + used to create the Study. + + This corresponds to the ``study`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Study: + A message representing a Study. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, study]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.CreateStudyRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.CreateStudyRequest): + request = vizier_service.CreateStudyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if study is not None: + request.study = study + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_study] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_study(self, + request: vizier_service.GetStudyRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Study: + r"""Gets a Study by name. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetStudyRequest): + The request object. Request message for + [VizierService.GetStudy][google.cloud.aiplatform.v1beta1.VizierService.GetStudy]. + name (str): + Required. The name of the Study resource. Format: + ``projects/{project}/locations/{location}/studies/{study}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Study: + A message representing a Study. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.GetStudyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.GetStudyRequest): + request = vizier_service.GetStudyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_study] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
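(A hedged sketch of the ``create_study`` method above, using a one-metric, one-parameter ``StudySpec``; the field names follow the v1beta1 ``study.proto``, while the display name, ids, and bounds are hypothetical.)

from google.cloud.aiplatform_v1beta1 import VizierServiceClient
from google.cloud.aiplatform_v1beta1.types import StudySpec

client = VizierServiceClient()
study = client.create_study(
    parent="projects/my-project/locations/us-central1",
    study={
        "display_name": "lr-sweep",
        "study_spec": {
            "metrics": [{
                "metric_id": "accuracy",
                "goal": StudySpec.MetricSpec.GoalType.MAXIMIZE,
            }],
            "parameters": [{
                "parameter_id": "learning_rate",
                "double_value_spec": {"min_value": 1e-4, "max_value": 1e-1},
            }],
        },
    },
)
print(study.name)  # the server-generated resource name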
+ return response + + def list_studies(self, + request: vizier_service.ListStudiesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListStudiesPager: + r"""Lists all the studies in a region for an associated + project. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListStudiesRequest): + The request object. Request message for + [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies]. + parent (str): + Required. The resource name of the Location to list the + Study from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.vizier_service.pagers.ListStudiesPager: + Response message for + [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.ListStudiesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.ListStudiesRequest): + request = vizier_service.ListStudiesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_studies] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListStudiesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_study(self, + request: vizier_service.DeleteStudyRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a Study. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteStudyRequest): + The request object. Request message for + [VizierService.DeleteStudy][google.cloud.aiplatform.v1beta1.VizierService.DeleteStudy]. + name (str): + Required. 
The name of the Study resource to be deleted. + Format: + ``projects/{project}/locations/{location}/studies/{study}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.DeleteStudyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.DeleteStudyRequest): + request = vizier_service.DeleteStudyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_study] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def lookup_study(self, + request: vizier_service.LookupStudyRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Study: + r"""Looks a study up using the user-defined display_name field + instead of the fully qualified resource name. + + Args: + request (google.cloud.aiplatform_v1beta1.types.LookupStudyRequest): + The request object. Request message for + [VizierService.LookupStudy][google.cloud.aiplatform.v1beta1.VizierService.LookupStudy]. + parent (str): + Required. The resource name of the Location to get the + Study from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Study: + A message representing a Study. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.LookupStudyRequest. 
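(Because ``display_name`` is not a flattened argument on ``lookup_study``, it travels in the request object; a sketch, with hypothetical names.)

from google.cloud.aiplatform_v1beta1 import VizierServiceClient

client = VizierServiceClient()
study = client.lookup_study(request={
    "parent": "projects/my-project/locations/us-central1",
    "display_name": "lr-sweep",  # the user-assigned name, not the resource id
})
print(study.name)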
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.LookupStudyRequest): + request = vizier_service.LookupStudyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.lookup_study] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def suggest_trials(self, + request: vizier_service.SuggestTrialsRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Adds one or more Trials to a Study, with parameter values + suggested by Vertex Vizier. Returns a long-running operation + associated with the generation of Trial suggestions. When this + long-running operation succeeds, it will contain a + [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. + + Args: + request (google.cloud.aiplatform_v1beta1.types.SuggestTrialsRequest): + The request object. Request message for + [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.SuggestTrialsResponse` + Response message for + [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials]. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.SuggestTrialsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.SuggestTrialsRequest): + request = vizier_service.SuggestTrialsRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.suggest_trials] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + vizier_service.SuggestTrialsResponse, + metadata_type=vizier_service.SuggestTrialsMetadata, + ) + + # Done; return the response. 
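(A sketch of driving the ``suggest_trials`` long-running operation above; ``suggestion_count`` and ``client_id`` are ``SuggestTrialsRequest`` fields, and the concrete values are hypothetical. The returned future's ``result()`` blocks, which is why the async variant awaits it instead.)

from google.cloud.aiplatform_v1beta1 import VizierServiceClient

client = VizierServiceClient()
operation = client.suggest_trials(request={
    "parent": "projects/my-project/locations/us-central1/studies/my-study",
    "suggestion_count": 3,
    "client_id": "worker-0",  # lets Vizier re-issue pending suggestions to this worker
})
response = operation.result()  # blocks until the LRO completes
for trial in response.trials:
    print(trial.name, [(p.parameter_id, p.value) for p in trial.parameters])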
+ return response + + def create_trial(self, + request: vizier_service.CreateTrialRequest = None, + *, + parent: str = None, + trial: study.Trial = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Trial: + r"""Adds a user provided Trial to a Study. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CreateTrialRequest): + The request object. Request message for + [VizierService.CreateTrial][google.cloud.aiplatform.v1beta1.VizierService.CreateTrial]. + parent (str): + Required. The resource name of the Study to create the + Trial in. Format: + ``projects/{project}/locations/{location}/studies/{study}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + trial (google.cloud.aiplatform_v1beta1.types.Trial): + Required. The Trial to create. + This corresponds to the ``trial`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Trial: + A message representing a Trial. A + Trial contains a unique set of + Parameters that has been or will be + evaluated, along with the objective + metrics got by running the Trial. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, trial]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.CreateTrialRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.CreateTrialRequest): + request = vizier_service.CreateTrialRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if trial is not None: + request.trial = trial + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_trial] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_trial(self, + request: vizier_service.GetTrialRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Trial: + r"""Gets a Trial. + + Args: + request (google.cloud.aiplatform_v1beta1.types.GetTrialRequest): + The request object. Request message for + [VizierService.GetTrial][google.cloud.aiplatform.v1beta1.VizierService.GetTrial]. + name (str): + Required. The name of the Trial resource. 
Format: + ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Trial: + A message representing a Trial. A + Trial contains a unique set of + Parameters that has been or will be + evaluated, along with the objective + metrics got by running the Trial. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.GetTrialRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.GetTrialRequest): + request = vizier_service.GetTrialRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_trial] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_trials(self, + request: vizier_service.ListTrialsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTrialsPager: + r"""Lists the Trials associated with a Study. + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListTrialsRequest): + The request object. Request message for + [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials]. + parent (str): + Required. The resource name of the Study to list the + Trial from. Format: + ``projects/{project}/locations/{location}/studies/{study}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.vizier_service.pagers.ListTrialsPager: + Response message for + [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. 
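(The ``trial_path`` helper shown earlier composes the resource name this getter expects; a short sketch with hypothetical ids.)

from google.cloud.aiplatform_v1beta1 import VizierServiceClient

client = VizierServiceClient()
name = VizierServiceClient.trial_path("my-project", "us-central1", "my-study", "1")
trial = client.get_trial(name=name)
print(trial.state)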
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.ListTrialsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.ListTrialsRequest): + request = vizier_service.ListTrialsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_trials] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListTrialsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def add_trial_measurement(self, + request: vizier_service.AddTrialMeasurementRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Trial: + r"""Adds a measurement of the objective metrics to a + Trial. This measurement is assumed to have been taken + before the Trial is complete. + + Args: + request (google.cloud.aiplatform_v1beta1.types.AddTrialMeasurementRequest): + The request object. Request message for + [VizierService.AddTrialMeasurement][google.cloud.aiplatform.v1beta1.VizierService.AddTrialMeasurement]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Trial: + A message representing a Trial. A + Trial contains a unique set of + Parameters that has been or will be + evaluated, along with the objective + metrics got by running the Trial. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.AddTrialMeasurementRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.AddTrialMeasurementRequest): + request = vizier_service.AddTrialMeasurementRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_trial_measurement] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("trial_name", request.trial_name), + )), + ) + + # Send the request. 
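(Iterating the ``ListTrialsPager`` returned above fetches follow-up pages on demand; a sketch with a hypothetical study name.)

from google.cloud.aiplatform_v1beta1 import VizierServiceClient

client = VizierServiceClient()
study = "projects/my-project/locations/us-central1/studies/my-study"

# Item-level iteration; the pager requests the next page transparently.
for trial in client.list_trials(parent=study):
    print(trial.name)

# Page-level iteration is also available via the pager's `pages` property.
for page in client.list_trials(parent=study).pages:
    print(len(page.trials))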
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def complete_trial(self, + request: vizier_service.CompleteTrialRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Trial: + r"""Marks a Trial as complete. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CompleteTrialRequest): + The request object. Request message for + [VizierService.CompleteTrial][google.cloud.aiplatform.v1beta1.VizierService.CompleteTrial]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Trial: + A message representing a Trial. A + Trial contains a unique set of + Parameters that has been or will be + evaluated, along with the objective + metrics got by running the Trial. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.CompleteTrialRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.CompleteTrialRequest): + request = vizier_service.CompleteTrialRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.complete_trial] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_trial(self, + request: vizier_service.DeleteTrialRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a Trial. + + Args: + request (google.cloud.aiplatform_v1beta1.types.DeleteTrialRequest): + The request object. Request message for + [VizierService.DeleteTrial][google.cloud.aiplatform.v1beta1.VizierService.DeleteTrial]. + name (str): + Required. The Trial's name. Format: + ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.DeleteTrialRequest. 
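(Every method here accepts per-call ``retry`` and ``timeout`` overrides; a sketch using ``google.api_core`` primitives, where the retried exception type and the numbers are assumptions rather than defaults from this diff.)

from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.cloud.aiplatform_v1beta1 import VizierServiceClient

client = VizierServiceClient()
custom_retry = retries.Retry(
    initial=1.0, maximum=30.0, multiplier=2.0, deadline=120.0,
    predicate=retries.if_exception_type(core_exceptions.ServiceUnavailable),
)
trial = client.complete_trial(
    request={"name": "projects/my-project/locations/us-central1/studies/my-study/trials/1"},
    retry=custom_retry,
    timeout=60.0,
)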
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.DeleteTrialRequest): + request = vizier_service.DeleteTrialRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_trial] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def check_trial_early_stopping_state(self, + request: vizier_service.CheckTrialEarlyStoppingStateRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Checks whether a Trial should stop or not. Returns a + long-running operation. When the operation is successful, it + will contain a + [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse]. + + Args: + request (google.cloud.aiplatform_v1beta1.types.CheckTrialEarlyStoppingStateRequest): + The request object. Request message for + [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.CheckTrialEarlyStoppingStateResponse` + Response message for + [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState]. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.CheckTrialEarlyStoppingStateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.CheckTrialEarlyStoppingStateRequest): + request = vizier_service.CheckTrialEarlyStoppingStateRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.check_trial_early_stopping_state] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("trial_name", request.trial_name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + vizier_service.CheckTrialEarlyStoppingStateResponse, + metadata_type=vizier_service.CheckTrialEarlyStoppingStateMetatdata, + ) + + # Done; return the response. 
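(The intended polling pattern for the method above: ask whether a Trial should stop, then stop it; ``should_stop`` is the response field, and the resource name is hypothetical.)

from google.cloud.aiplatform_v1beta1 import VizierServiceClient

client = VizierServiceClient()
trial_name = "projects/my-project/locations/us-central1/studies/my-study/trials/1"
state = client.check_trial_early_stopping_state(
    request={"trial_name": trial_name},
).result()  # CheckTrialEarlyStoppingStateResponse
if state.should_stop:
    client.stop_trial(request={"name": trial_name})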
+ return response + + def stop_trial(self, + request: vizier_service.StopTrialRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> study.Trial: + r"""Stops a Trial. + + Args: + request (google.cloud.aiplatform_v1beta1.types.StopTrialRequest): + The request object. Request message for + [VizierService.StopTrial][google.cloud.aiplatform.v1beta1.VizierService.StopTrial]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Trial: + A message representing a Trial. A + Trial contains a unique set of + Parameters that has been or will be + evaluated, along with the objective + metrics got by running the Trial. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.StopTrialRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.StopTrialRequest): + request = vizier_service.StopTrialRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.stop_trial] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_optimal_trials(self, + request: vizier_service.ListOptimalTrialsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> vizier_service.ListOptimalTrialsResponse: + r"""Lists the pareto-optimal Trials for multi-objective Study or the + optimal Trials for single-objective Study. The definition of + pareto-optimal can be checked in wiki page. + https://en.wikipedia.org/wiki/Pareto_efficiency + + Args: + request (google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsRequest): + The request object. Request message for + [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials]. + parent (str): + Required. The name of the Study that + the optimal Trial belongs to. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsResponse: + Response message for + [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a vizier_service.ListOptimalTrialsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vizier_service.ListOptimalTrialsRequest): + request = vizier_service.ListOptimalTrialsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_optimal_trials] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-aiplatform", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "VizierServiceClient", +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/pagers.py new file mode 100644 index 0000000000..caaf517a27 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/pagers.py @@ -0,0 +1,263 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.cloud.aiplatform_v1beta1.types import study +from google.cloud.aiplatform_v1beta1.types import vizier_service + + +class ListStudiesPager: + """A pager for iterating through ``list_studies`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListStudiesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``studies`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListStudies`` requests and continue to iterate + through the ``studies`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListStudiesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
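+
+    A hypothetical usage sketch (assuming ``client`` is an instantiated
+    ``VizierServiceClient`` and ``parent`` names an existing location)::
+
+        pager = client.list_studies(parent=parent)
+        for study in pager:      # fetches further pages transparently
+            print(study.display_name)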
+ """ + def __init__(self, + method: Callable[..., vizier_service.ListStudiesResponse], + request: vizier_service.ListStudiesRequest, + response: vizier_service.ListStudiesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListStudiesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListStudiesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = vizier_service.ListStudiesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[vizier_service.ListStudiesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[study.Study]: + for page in self.pages: + yield from page.studies + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListStudiesAsyncPager: + """A pager for iterating through ``list_studies`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListStudiesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``studies`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListStudies`` requests and continue to iterate + through the ``studies`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListStudiesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[vizier_service.ListStudiesResponse]], + request: vizier_service.ListStudiesRequest, + response: vizier_service.ListStudiesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListStudiesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListStudiesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = vizier_service.ListStudiesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[vizier_service.ListStudiesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[study.Study]: + async def async_generator(): + async for page in self.pages: + for response in page.studies: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTrialsPager: + """A pager for iterating through ``list_trials`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTrialsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``trials`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTrials`` requests and continue to iterate + through the ``trials`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTrialsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., vizier_service.ListTrialsResponse], + request: vizier_service.ListTrialsRequest, + response: vizier_service.ListTrialsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTrialsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTrialsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = vizier_service.ListTrialsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[vizier_service.ListTrialsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[study.Trial]: + for page in self.pages: + yield from page.trials + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTrialsAsyncPager: + """A pager for iterating through ``list_trials`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListTrialsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``trials`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListTrials`` requests and continue to iterate + through the ``trials`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTrialsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[vizier_service.ListTrialsResponse]], + request: vizier_service.ListTrialsRequest, + response: vizier_service.ListTrialsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListTrialsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListTrialsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = vizier_service.ListTrialsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[vizier_service.ListTrialsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[study.Trial]: + async def async_generator(): + async for page in self.pages: + for response in page.trials: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/__init__.py new file mode 100644 index 0000000000..afc70ea68e --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import VizierServiceTransport +from .grpc import VizierServiceGrpcTransport +from .grpc_asyncio import VizierServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. 
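+# The registry maps a transport name to its implementing class, so a
+# transport can be resolved dynamically by name.  A hypothetical lookup
+# sketch (``"grpc"`` selects the synchronous transport registered below):
+#
+#     transport_cls = _transport_registry["grpc"]
+#     transport = transport_cls(host="aiplatform.googleapis.com")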
+_transport_registry = OrderedDict() # type: Dict[str, Type[VizierServiceTransport]] +_transport_registry['grpc'] = VizierServiceGrpcTransport +_transport_registry['grpc_asyncio'] = VizierServiceGrpcAsyncIOTransport + +__all__ = ( + 'VizierServiceTransport', + 'VizierServiceGrpcTransport', + 'VizierServiceGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py new file mode 100644 index 0000000000..bd9a9d9c50 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py @@ -0,0 +1,374 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import study +from google.cloud.aiplatform_v1beta1.types import study as gca_study +from google.cloud.aiplatform_v1beta1.types import vizier_service +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-aiplatform', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class VizierServiceTransport(abc.ABC): + """Abstract transport class for VizierService.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'aiplatform.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. 
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): A list of scopes.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+        """
+        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+        if ':' not in host:
+            host += ':443'
+        self._host = host
+
+        scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
+
+        # Save the scopes.
+        self._scopes = scopes
+
+        # If no credentials are provided, then determine the appropriate
+        # defaults.
+        if credentials and credentials_file:
+            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
+
+        if credentials_file is not None:
+            credentials, _ = google.auth.load_credentials_from_file(
+                                credentials_file,
+                                **scopes_kwargs,
+                                quota_project_id=quota_project_id
+                            )
+
+        elif credentials is None:
+            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
+
+        # If the credentials are service account credentials, then always try to use self-signed JWT.
+        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
+            credentials = credentials.with_always_use_jwt_access(True)
+
+        # Save the credentials.
+        self._credentials = credentials
+
+    # TODO(busunkim): This method is in the base transport
+    # to avoid duplicating code across the transport classes. These functions
+    # should be deleted once the minimum required version of google-auth is increased.
+
+    # TODO: Remove this function once google-auth >= 1.25.0 is required
+    @classmethod
+    def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
+        """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
+
+        scopes_kwargs = {}
+
+        if _GOOGLE_AUTH_VERSION and (
+            packaging.version.parse(_GOOGLE_AUTH_VERSION)
+            >= packaging.version.parse("1.25.0")
+        ):
+            scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
+        else:
+            scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
+
+        return scopes_kwargs
+
+    def _prep_wrapped_messages(self, client_info):
+        # Precompute the wrapped methods.
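+        # Each entry below pairs an RPC with gapic_v1.method.wrap_method,
+        # which layers the default timeout, retry policy, and client-info
+        # user-agent metadata over the raw transport callable.  Conceptually
+        # (a simplified sketch, not the exact wrapper):
+        #
+        #     def wrapped(request, *, timeout=5.0, **kwargs):
+        #         return raw_rpc(request, timeout=timeout, **kwargs)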
+ self._wrapped_methods = { + self.create_study: gapic_v1.method.wrap_method( + self.create_study, + default_timeout=5.0, + client_info=client_info, + ), + self.get_study: gapic_v1.method.wrap_method( + self.get_study, + default_timeout=5.0, + client_info=client_info, + ), + self.list_studies: gapic_v1.method.wrap_method( + self.list_studies, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_study: gapic_v1.method.wrap_method( + self.delete_study, + default_timeout=5.0, + client_info=client_info, + ), + self.lookup_study: gapic_v1.method.wrap_method( + self.lookup_study, + default_timeout=5.0, + client_info=client_info, + ), + self.suggest_trials: gapic_v1.method.wrap_method( + self.suggest_trials, + default_timeout=5.0, + client_info=client_info, + ), + self.create_trial: gapic_v1.method.wrap_method( + self.create_trial, + default_timeout=5.0, + client_info=client_info, + ), + self.get_trial: gapic_v1.method.wrap_method( + self.get_trial, + default_timeout=5.0, + client_info=client_info, + ), + self.list_trials: gapic_v1.method.wrap_method( + self.list_trials, + default_timeout=5.0, + client_info=client_info, + ), + self.add_trial_measurement: gapic_v1.method.wrap_method( + self.add_trial_measurement, + default_timeout=5.0, + client_info=client_info, + ), + self.complete_trial: gapic_v1.method.wrap_method( + self.complete_trial, + default_timeout=5.0, + client_info=client_info, + ), + self.delete_trial: gapic_v1.method.wrap_method( + self.delete_trial, + default_timeout=5.0, + client_info=client_info, + ), + self.check_trial_early_stopping_state: gapic_v1.method.wrap_method( + self.check_trial_early_stopping_state, + default_timeout=5.0, + client_info=client_info, + ), + self.stop_trial: gapic_v1.method.wrap_method( + self.stop_trial, + default_timeout=5.0, + client_info=client_info, + ), + self.list_optimal_trials: gapic_v1.method.wrap_method( + self.list_optimal_trials, + default_timeout=5.0, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_study(self) -> Callable[ + [vizier_service.CreateStudyRequest], + Union[ + gca_study.Study, + Awaitable[gca_study.Study] + ]]: + raise NotImplementedError() + + @property + def get_study(self) -> Callable[ + [vizier_service.GetStudyRequest], + Union[ + study.Study, + Awaitable[study.Study] + ]]: + raise NotImplementedError() + + @property + def list_studies(self) -> Callable[ + [vizier_service.ListStudiesRequest], + Union[ + vizier_service.ListStudiesResponse, + Awaitable[vizier_service.ListStudiesResponse] + ]]: + raise NotImplementedError() + + @property + def delete_study(self) -> Callable[ + [vizier_service.DeleteStudyRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def lookup_study(self) -> Callable[ + [vizier_service.LookupStudyRequest], + Union[ + study.Study, + Awaitable[study.Study] + ]]: + raise NotImplementedError() + + @property + def suggest_trials(self) -> Callable[ + [vizier_service.SuggestTrialsRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def create_trial(self) -> Callable[ + [vizier_service.CreateTrialRequest], + Union[ + study.Trial, + Awaitable[study.Trial] + ]]: + raise NotImplementedError() + + @property + def get_trial(self) -> Callable[ + 
[vizier_service.GetTrialRequest], + Union[ + study.Trial, + Awaitable[study.Trial] + ]]: + raise NotImplementedError() + + @property + def list_trials(self) -> Callable[ + [vizier_service.ListTrialsRequest], + Union[ + vizier_service.ListTrialsResponse, + Awaitable[vizier_service.ListTrialsResponse] + ]]: + raise NotImplementedError() + + @property + def add_trial_measurement(self) -> Callable[ + [vizier_service.AddTrialMeasurementRequest], + Union[ + study.Trial, + Awaitable[study.Trial] + ]]: + raise NotImplementedError() + + @property + def complete_trial(self) -> Callable[ + [vizier_service.CompleteTrialRequest], + Union[ + study.Trial, + Awaitable[study.Trial] + ]]: + raise NotImplementedError() + + @property + def delete_trial(self) -> Callable[ + [vizier_service.DeleteTrialRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def check_trial_early_stopping_state(self) -> Callable[ + [vizier_service.CheckTrialEarlyStoppingStateRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def stop_trial(self) -> Callable[ + [vizier_service.StopTrialRequest], + Union[ + study.Trial, + Awaitable[study.Trial] + ]]: + raise NotImplementedError() + + @property + def list_optimal_trials(self) -> Callable[ + [vizier_service.ListOptimalTrialsRequest], + Union[ + vizier_service.ListOptimalTrialsResponse, + Awaitable[vizier_service.ListOptimalTrialsResponse] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'VizierServiceTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py new file mode 100644 index 0000000000..7212be0372 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py @@ -0,0 +1,657 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import warnings
+from typing import Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import grpc_helpers  # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+import google.auth  # type: ignore
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+
+import grpc  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import study
+from google.cloud.aiplatform_v1beta1.types import study as gca_study
+from google.cloud.aiplatform_v1beta1.types import vizier_service
+from google.longrunning import operations_pb2  # type: ignore
+from google.protobuf import empty_pb2  # type: ignore
+from .base import VizierServiceTransport, DEFAULT_CLIENT_INFO
+
+
+class VizierServiceGrpcTransport(VizierServiceTransport):
+    """gRPC backend transport for VizierService.
+
+    Vertex Vizier API.
+    Vizier service is a GCP service to solve blackbox optimization
+    problems, such as tuning machine learning hyperparameters and
+    searching over deep learning architectures.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+    _stubs: Dict[str, Callable]
+
+    def __init__(self, *,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: str = None,
+            scopes: Sequence[str] = None,
+            channel: grpc.Channel = None,
+            api_mtls_endpoint: str = None,
+            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+            ssl_channel_credentials: grpc.ChannelCredentials = None,
+            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+            quota_project_id: Optional[str] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service.
+        """
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Sanity check: Only create a new client if we do not already have one.
+        if self._operations_client is None:
+            self._operations_client = operations_v1.OperationsClient(
+                self.grpc_channel
+            )
+
+        # Return the client from cache.
+        return self._operations_client
+
+    @property
+    def create_study(self) -> Callable[
+            [vizier_service.CreateStudyRequest],
+            gca_study.Study]:
+        r"""Return a callable for the create study method over gRPC.
+
+        Creates a Study. A resource name will be generated
+        after creation of the Study.
+
+        Returns:
+            Callable[[~.CreateStudyRequest],
+                    ~.Study]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'create_study' not in self._stubs:
+            self._stubs['create_study'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.VizierService/CreateStudy',
+                request_serializer=vizier_service.CreateStudyRequest.serialize,
+                response_deserializer=gca_study.Study.deserialize,
+            )
+        return self._stubs['create_study']
+
+    @property
+    def get_study(self) -> Callable[
+            [vizier_service.GetStudyRequest],
+            study.Study]:
+        r"""Return a callable for the get study method over gRPC.
+
+        Gets a Study by name.
+
+        Returns:
+            Callable[[~.GetStudyRequest],
+                    ~.Study]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
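+        # The stub is created lazily and memoized on self._stubs, so repeated
+        # accesses of this property reuse a single multicallable.  A
+        # hypothetical sketch (``transport`` is an instantiated
+        # VizierServiceGrpcTransport):
+        #
+        #     assert transport.get_study is transport.get_study  # cached stub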
+ if 'get_study' not in self._stubs: + self._stubs['get_study'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/GetStudy', + request_serializer=vizier_service.GetStudyRequest.serialize, + response_deserializer=study.Study.deserialize, + ) + return self._stubs['get_study'] + + @property + def list_studies(self) -> Callable[ + [vizier_service.ListStudiesRequest], + vizier_service.ListStudiesResponse]: + r"""Return a callable for the list studies method over gRPC. + + Lists all the studies in a region for an associated + project. + + Returns: + Callable[[~.ListStudiesRequest], + ~.ListStudiesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_studies' not in self._stubs: + self._stubs['list_studies'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/ListStudies', + request_serializer=vizier_service.ListStudiesRequest.serialize, + response_deserializer=vizier_service.ListStudiesResponse.deserialize, + ) + return self._stubs['list_studies'] + + @property + def delete_study(self) -> Callable[ + [vizier_service.DeleteStudyRequest], + empty_pb2.Empty]: + r"""Return a callable for the delete study method over gRPC. + + Deletes a Study. + + Returns: + Callable[[~.DeleteStudyRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_study' not in self._stubs: + self._stubs['delete_study'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/DeleteStudy', + request_serializer=vizier_service.DeleteStudyRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_study'] + + @property + def lookup_study(self) -> Callable[ + [vizier_service.LookupStudyRequest], + study.Study]: + r"""Return a callable for the lookup study method over gRPC. + + Looks a study up using the user-defined display_name field + instead of the fully qualified resource name. + + Returns: + Callable[[~.LookupStudyRequest], + ~.Study]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'lookup_study' not in self._stubs: + self._stubs['lookup_study'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/LookupStudy', + request_serializer=vizier_service.LookupStudyRequest.serialize, + response_deserializer=study.Study.deserialize, + ) + return self._stubs['lookup_study'] + + @property + def suggest_trials(self) -> Callable[ + [vizier_service.SuggestTrialsRequest], + operations_pb2.Operation]: + r"""Return a callable for the suggest trials method over gRPC. + + Adds one or more Trials to a Study, with parameter values + suggested by Vertex Vizier. Returns a long-running operation + associated with the generation of Trial suggestions. 
When this + long-running operation succeeds, it will contain a + [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. + + Returns: + Callable[[~.SuggestTrialsRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'suggest_trials' not in self._stubs: + self._stubs['suggest_trials'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/SuggestTrials', + request_serializer=vizier_service.SuggestTrialsRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['suggest_trials'] + + @property + def create_trial(self) -> Callable[ + [vizier_service.CreateTrialRequest], + study.Trial]: + r"""Return a callable for the create trial method over gRPC. + + Adds a user provided Trial to a Study. + + Returns: + Callable[[~.CreateTrialRequest], + ~.Trial]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_trial' not in self._stubs: + self._stubs['create_trial'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/CreateTrial', + request_serializer=vizier_service.CreateTrialRequest.serialize, + response_deserializer=study.Trial.deserialize, + ) + return self._stubs['create_trial'] + + @property + def get_trial(self) -> Callable[ + [vizier_service.GetTrialRequest], + study.Trial]: + r"""Return a callable for the get trial method over gRPC. + + Gets a Trial. + + Returns: + Callable[[~.GetTrialRequest], + ~.Trial]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_trial' not in self._stubs: + self._stubs['get_trial'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/GetTrial', + request_serializer=vizier_service.GetTrialRequest.serialize, + response_deserializer=study.Trial.deserialize, + ) + return self._stubs['get_trial'] + + @property + def list_trials(self) -> Callable[ + [vizier_service.ListTrialsRequest], + vizier_service.ListTrialsResponse]: + r"""Return a callable for the list trials method over gRPC. + + Lists the Trials associated with a Study. + + Returns: + Callable[[~.ListTrialsRequest], + ~.ListTrialsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_trials' not in self._stubs: + self._stubs['list_trials'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/ListTrials', + request_serializer=vizier_service.ListTrialsRequest.serialize, + response_deserializer=vizier_service.ListTrialsResponse.deserialize, + ) + return self._stubs['list_trials'] + + @property + def add_trial_measurement(self) -> Callable[ + [vizier_service.AddTrialMeasurementRequest], + study.Trial]: + r"""Return a callable for the add trial measurement method over gRPC. + + Adds a measurement of the objective metrics to a + Trial. This measurement is assumed to have been taken + before the Trial is complete. + + Returns: + Callable[[~.AddTrialMeasurementRequest], + ~.Trial]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'add_trial_measurement' not in self._stubs: + self._stubs['add_trial_measurement'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/AddTrialMeasurement', + request_serializer=vizier_service.AddTrialMeasurementRequest.serialize, + response_deserializer=study.Trial.deserialize, + ) + return self._stubs['add_trial_measurement'] + + @property + def complete_trial(self) -> Callable[ + [vizier_service.CompleteTrialRequest], + study.Trial]: + r"""Return a callable for the complete trial method over gRPC. + + Marks a Trial as complete. + + Returns: + Callable[[~.CompleteTrialRequest], + ~.Trial]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'complete_trial' not in self._stubs: + self._stubs['complete_trial'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/CompleteTrial', + request_serializer=vizier_service.CompleteTrialRequest.serialize, + response_deserializer=study.Trial.deserialize, + ) + return self._stubs['complete_trial'] + + @property + def delete_trial(self) -> Callable[ + [vizier_service.DeleteTrialRequest], + empty_pb2.Empty]: + r"""Return a callable for the delete trial method over gRPC. + + Deletes a Trial. + + Returns: + Callable[[~.DeleteTrialRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_trial' not in self._stubs: + self._stubs['delete_trial'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/DeleteTrial', + request_serializer=vizier_service.DeleteTrialRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_trial'] + + @property + def check_trial_early_stopping_state(self) -> Callable[ + [vizier_service.CheckTrialEarlyStoppingStateRequest], + operations_pb2.Operation]: + r"""Return a callable for the check trial early stopping + state method over gRPC. + + Checks whether a Trial should stop or not. Returns a + long-running operation. 
When the operation is successful, it
+        will contain a
+        [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse].
+
+        Returns:
+            Callable[[~.CheckTrialEarlyStoppingStateRequest],
+                    ~.Operation]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'check_trial_early_stopping_state' not in self._stubs:
+            self._stubs['check_trial_early_stopping_state'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.VizierService/CheckTrialEarlyStoppingState',
+                request_serializer=vizier_service.CheckTrialEarlyStoppingStateRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['check_trial_early_stopping_state']
+
+    @property
+    def stop_trial(self) -> Callable[
+            [vizier_service.StopTrialRequest],
+            study.Trial]:
+        r"""Return a callable for the stop trial method over gRPC.
+
+        Stops a Trial.
+
+        Returns:
+            Callable[[~.StopTrialRequest],
+                    ~.Trial]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'stop_trial' not in self._stubs:
+            self._stubs['stop_trial'] = self.grpc_channel.unary_unary(
+                '/google.cloud.aiplatform.v1beta1.VizierService/StopTrial',
+                request_serializer=vizier_service.StopTrialRequest.serialize,
+                response_deserializer=study.Trial.deserialize,
+            )
+        return self._stubs['stop_trial']
+
+    @property
+    def list_optimal_trials(self) -> Callable[
+            [vizier_service.ListOptimalTrialsRequest],
+            vizier_service.ListOptimalTrialsResponse]:
+        r"""Return a callable for the list optimal trials method over gRPC.
+
+        Lists the Pareto-optimal Trials for a multi-objective Study, or
+        the optimal Trials for a single-objective Study. Pareto
+        optimality is defined at
+        https://en.wikipedia.org/wiki/Pareto_efficiency.
+
+        Returns:
+            Callable[[~.ListOptimalTrialsRequest],
+                    ~.ListOptimalTrialsResponse]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if 'list_optimal_trials' not in self._stubs: + self._stubs['list_optimal_trials'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/ListOptimalTrials', + request_serializer=vizier_service.ListOptimalTrialsRequest.serialize, + response_deserializer=vizier_service.ListOptimalTrialsResponse.deserialize, + ) + return self._stubs['list_optimal_trials'] + + +__all__ = ( + 'VizierServiceGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..bb230b8767 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py @@ -0,0 +1,661 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +import packaging.version + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import study +from google.cloud.aiplatform_v1beta1.types import study as gca_study +from google.cloud.aiplatform_v1beta1.types import vizier_service +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import VizierServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import VizierServiceGrpcTransport + + +class VizierServiceGrpcAsyncIOTransport(VizierServiceTransport): + """gRPC AsyncIO backend transport for VizierService. + + Vertex Vizier API. + Vizier service is a GCP service to solve blackbox optimization + problems, such as tuning machine learning hyperparameters and + searching over deep learning architectures. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'aiplatform.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. 
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    def __init__(self, *,
+            host: str = 'aiplatform.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            channel: aio.Channel = None,
+            api_mtls_endpoint: str = None,
+            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+            ssl_channel_credentials: grpc.ChannelCredentials = None,
+            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+            quota_project_id=None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure mutual TLS channel.
It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        self._grpc_channel = None
+        self._ssl_channel_credentials = ssl_channel_credentials
+        self._stubs: Dict[str, Callable] = {}
+        self._operations_client = None
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Ignore credentials if a channel was passed.
+            credentials = False
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+        else:
+            if api_mtls_endpoint:
+                host = api_mtls_endpoint
+
+                # Create SSL credentials with client_cert_source or application
+                # default SSL credentials.
+                if client_cert_source:
+                    cert, key = client_cert_source()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+                else:
+                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+            else:
+                if client_cert_source_for_mtls and not ssl_channel_credentials:
+                    cert, key = client_cert_source_for_mtls()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+
+        # The base transport sets the host, credentials and scopes
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+        )
+
+        if not self._grpc_channel:
+            self._grpc_channel = type(self).create_channel(
+                self._host,
+                credentials=self._credentials,
+                credentials_file=credentials_file,
+                scopes=self._scopes,
+                ssl_credentials=self._ssl_channel_credentials,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        # Wrap messages. This must be done after self._grpc_channel exists
+        self._prep_wrapped_messages(client_info)
+
+    @property
+    def grpc_channel(self) -> aio.Channel:
+        """Create the channel designed to connect to this service.
+
+        This property caches on the instance; repeated calls return
+        the same channel.
+        """
+        # Return the channel from cache.
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsAsyncClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Sanity check: Only create a new client if we do not already have one.
+        if self._operations_client is None:
+            self._operations_client = operations_v1.OperationsAsyncClient(
+                self.grpc_channel
+            )
+
+        # Return the client from cache.
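+        # The operations client polls google.longrunning.Operations over the
+        # same channel; e.g. an Operation name returned by suggest_trials can
+        # be resolved with (a hypothetical sketch, ``op_name`` assumed):
+        #
+        #     op = await transport.operations_client.get_operation(op_name)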
+ return self._operations_client + + @property + def create_study(self) -> Callable[ + [vizier_service.CreateStudyRequest], + Awaitable[gca_study.Study]]: + r"""Return a callable for the create study method over gRPC. + + Creates a Study. A resource name will be generated + after creation of the Study. + + Returns: + Callable[[~.CreateStudyRequest], + Awaitable[~.Study]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_study' not in self._stubs: + self._stubs['create_study'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/CreateStudy', + request_serializer=vizier_service.CreateStudyRequest.serialize, + response_deserializer=gca_study.Study.deserialize, + ) + return self._stubs['create_study'] + + @property + def get_study(self) -> Callable[ + [vizier_service.GetStudyRequest], + Awaitable[study.Study]]: + r"""Return a callable for the get study method over gRPC. + + Gets a Study by name. + + Returns: + Callable[[~.GetStudyRequest], + Awaitable[~.Study]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_study' not in self._stubs: + self._stubs['get_study'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/GetStudy', + request_serializer=vizier_service.GetStudyRequest.serialize, + response_deserializer=study.Study.deserialize, + ) + return self._stubs['get_study'] + + @property + def list_studies(self) -> Callable[ + [vizier_service.ListStudiesRequest], + Awaitable[vizier_service.ListStudiesResponse]]: + r"""Return a callable for the list studies method over gRPC. + + Lists all the studies in a region for an associated + project. + + Returns: + Callable[[~.ListStudiesRequest], + Awaitable[~.ListStudiesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_studies' not in self._stubs: + self._stubs['list_studies'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/ListStudies', + request_serializer=vizier_service.ListStudiesRequest.serialize, + response_deserializer=vizier_service.ListStudiesResponse.deserialize, + ) + return self._stubs['list_studies'] + + @property + def delete_study(self) -> Callable[ + [vizier_service.DeleteStudyRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the delete study method over gRPC. + + Deletes a Study. + + Returns: + Callable[[~.DeleteStudyRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
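+ # Note: google.protobuf.Empty is a plain well-known type rather than
+ # a proto-plus message, so the response is decoded with
+ # Empty.FromString instead of the .deserialize helper used elsewhere.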
+ if 'delete_study' not in self._stubs: + self._stubs['delete_study'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/DeleteStudy', + request_serializer=vizier_service.DeleteStudyRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_study'] + + @property + def lookup_study(self) -> Callable[ + [vizier_service.LookupStudyRequest], + Awaitable[study.Study]]: + r"""Return a callable for the lookup study method over gRPC. + + Looks a study up using the user-defined display_name field + instead of the fully qualified resource name. + + Returns: + Callable[[~.LookupStudyRequest], + Awaitable[~.Study]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'lookup_study' not in self._stubs: + self._stubs['lookup_study'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/LookupStudy', + request_serializer=vizier_service.LookupStudyRequest.serialize, + response_deserializer=study.Study.deserialize, + ) + return self._stubs['lookup_study'] + + @property + def suggest_trials(self) -> Callable[ + [vizier_service.SuggestTrialsRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the suggest trials method over gRPC. + + Adds one or more Trials to a Study, with parameter values + suggested by Vertex Vizier. Returns a long-running operation + associated with the generation of Trial suggestions. When this + long-running operation succeeds, it will contain a + [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. + + Returns: + Callable[[~.SuggestTrialsRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'suggest_trials' not in self._stubs: + self._stubs['suggest_trials'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/SuggestTrials', + request_serializer=vizier_service.SuggestTrialsRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['suggest_trials'] + + @property + def create_trial(self) -> Callable[ + [vizier_service.CreateTrialRequest], + Awaitable[study.Trial]]: + r"""Return a callable for the create trial method over gRPC. + + Adds a user provided Trial to a Study. + + Returns: + Callable[[~.CreateTrialRequest], + Awaitable[~.Trial]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
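+ # Unlike suggest_trials above, create_trial registers a Trial whose
+ # parameter values were chosen by the caller, so it returns the Trial
+ # directly rather than a long-running Operation.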
+ if 'create_trial' not in self._stubs: + self._stubs['create_trial'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/CreateTrial', + request_serializer=vizier_service.CreateTrialRequest.serialize, + response_deserializer=study.Trial.deserialize, + ) + return self._stubs['create_trial'] + + @property + def get_trial(self) -> Callable[ + [vizier_service.GetTrialRequest], + Awaitable[study.Trial]]: + r"""Return a callable for the get trial method over gRPC. + + Gets a Trial. + + Returns: + Callable[[~.GetTrialRequest], + Awaitable[~.Trial]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_trial' not in self._stubs: + self._stubs['get_trial'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/GetTrial', + request_serializer=vizier_service.GetTrialRequest.serialize, + response_deserializer=study.Trial.deserialize, + ) + return self._stubs['get_trial'] + + @property + def list_trials(self) -> Callable[ + [vizier_service.ListTrialsRequest], + Awaitable[vizier_service.ListTrialsResponse]]: + r"""Return a callable for the list trials method over gRPC. + + Lists the Trials associated with a Study. + + Returns: + Callable[[~.ListTrialsRequest], + Awaitable[~.ListTrialsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_trials' not in self._stubs: + self._stubs['list_trials'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/ListTrials', + request_serializer=vizier_service.ListTrialsRequest.serialize, + response_deserializer=vizier_service.ListTrialsResponse.deserialize, + ) + return self._stubs['list_trials'] + + @property + def add_trial_measurement(self) -> Callable[ + [vizier_service.AddTrialMeasurementRequest], + Awaitable[study.Trial]]: + r"""Return a callable for the add trial measurement method over gRPC. + + Adds a measurement of the objective metrics to a + Trial. This measurement is assumed to have been taken + before the Trial is complete. + + Returns: + Callable[[~.AddTrialMeasurementRequest], + Awaitable[~.Trial]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'add_trial_measurement' not in self._stubs: + self._stubs['add_trial_measurement'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/AddTrialMeasurement', + request_serializer=vizier_service.AddTrialMeasurementRequest.serialize, + response_deserializer=study.Trial.deserialize, + ) + return self._stubs['add_trial_measurement'] + + @property + def complete_trial(self) -> Callable[ + [vizier_service.CompleteTrialRequest], + Awaitable[study.Trial]]: + r"""Return a callable for the complete trial method over gRPC. + + Marks a Trial as complete. + + Returns: + Callable[[~.CompleteTrialRequest], + Awaitable[~.Trial]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'complete_trial' not in self._stubs: + self._stubs['complete_trial'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/CompleteTrial', + request_serializer=vizier_service.CompleteTrialRequest.serialize, + response_deserializer=study.Trial.deserialize, + ) + return self._stubs['complete_trial'] + + @property + def delete_trial(self) -> Callable[ + [vizier_service.DeleteTrialRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the delete trial method over gRPC. + + Deletes a Trial. + + Returns: + Callable[[~.DeleteTrialRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_trial' not in self._stubs: + self._stubs['delete_trial'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/DeleteTrial', + request_serializer=vizier_service.DeleteTrialRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_trial'] + + @property + def check_trial_early_stopping_state(self) -> Callable[ + [vizier_service.CheckTrialEarlyStoppingStateRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the check trial early stopping + state method over gRPC. + + Checks whether a Trial should stop or not. Returns a + long-running operation. When the operation is successful, it + will contain a + [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse]. + + Returns: + Callable[[~.CheckTrialEarlyStoppingStateRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'check_trial_early_stopping_state' not in self._stubs: + self._stubs['check_trial_early_stopping_state'] = self.grpc_channel.unary_unary( + '/google.cloud.aiplatform.v1beta1.VizierService/CheckTrialEarlyStoppingState', + request_serializer=vizier_service.CheckTrialEarlyStoppingStateRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['check_trial_early_stopping_state'] + + @property + def stop_trial(self) -> Callable[ + [vizier_service.StopTrialRequest], + Awaitable[study.Trial]]: + r"""Return a callable for the stop trial method over gRPC. + + Stops a Trial. + + Returns: + Callable[[~.StopTrialRequest], + Awaitable[~.Trial]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'stop_trial' not in self._stubs:
+ self._stubs['stop_trial'] = self.grpc_channel.unary_unary(
+ '/google.cloud.aiplatform.v1beta1.VizierService/StopTrial',
+ request_serializer=vizier_service.StopTrialRequest.serialize,
+ response_deserializer=study.Trial.deserialize,
+ )
+ return self._stubs['stop_trial']
+
+ @property
+ def list_optimal_trials(self) -> Callable[
+ [vizier_service.ListOptimalTrialsRequest],
+ Awaitable[vizier_service.ListOptimalTrialsResponse]]:
+ r"""Return a callable for the list optimal trials method over gRPC.
+
+ Lists the Pareto-optimal Trials for a multi-objective Study, or
+ the optimal Trials for a single-objective Study. The definition
+ of Pareto-optimal can be found at
+ https://en.wikipedia.org/wiki/Pareto_efficiency
+
+ Returns:
+ Callable[[~.ListOptimalTrialsRequest],
+ Awaitable[~.ListOptimalTrialsResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if 'list_optimal_trials' not in self._stubs:
+ self._stubs['list_optimal_trials'] = self.grpc_channel.unary_unary(
+ '/google.cloud.aiplatform.v1beta1.VizierService/ListOptimalTrials',
+ request_serializer=vizier_service.ListOptimalTrialsRequest.serialize,
+ response_deserializer=vizier_service.ListOptimalTrialsResponse.deserialize,
+ )
+ return self._stubs['list_optimal_trials']
+
+
+__all__ = (
+ 'VizierServiceGrpcAsyncIOTransport',
+)
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/__init__.py
new file mode 100644
index 0000000000..d50ced74ac
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/__init__.py
@@ -0,0 +1,947 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
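Taken together, the transport above exposes each RPC as a lazily built, cached stub, and long-running methods such as ``suggest_trials`` hand back a raw ``Operation`` that is polled through the cached operations client. A minimal sketch of that flow, assuming application default credentials are available; the project, study, and client IDs are placeholders, and in normal use the generated VizierServiceAsyncClient wraps all of this (including request routing metadata) for you:

    import asyncio

    from google.cloud.aiplatform_v1beta1.services.vizier_service.transports.grpc_asyncio import (
        VizierServiceGrpcAsyncIOTransport,
    )
    from google.cloud.aiplatform_v1beta1.types import vizier_service


    async def suggest_and_wait() -> None:
        # No credentials passed: the base transport resolves them from the
        # environment via google.auth.default().
        transport = VizierServiceGrpcAsyncIOTransport()

        request = vizier_service.SuggestTrialsRequest(
            parent="projects/my-project/locations/us-central1/studies/123",
            suggestion_count=1,
            client_id="worker-0",
        )
        # First access builds the stub and caches it in self._stubs.
        operation = await transport.suggest_trials(request)

        # Poll the raw longrunning Operation with the cached operations
        # client until the server marks it done.
        while not operation.done:
            await asyncio.sleep(1.0)
            operation = await transport.operations_client.get_operation(operation.name)


    asyncio.run(suggest_and_wait())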
+# +from .annotation import ( + Annotation, +) +from .annotation_spec import ( + AnnotationSpec, +) +from .artifact import ( + Artifact, +) +from .batch_prediction_job import ( + BatchPredictionJob, +) +from .completion_stats import ( + CompletionStats, +) +from .context import ( + Context, +) +from .custom_job import ( + ContainerSpec, + CustomJob, + CustomJobSpec, + PythonPackageSpec, + Scheduling, + WorkerPoolSpec, +) +from .data_item import ( + DataItem, +) +from .data_labeling_job import ( + ActiveLearningConfig, + DataLabelingJob, + SampleConfig, + TrainingConfig, +) +from .dataset import ( + Dataset, + ExportDataConfig, + ImportDataConfig, +) +from .dataset_service import ( + CreateDatasetOperationMetadata, + CreateDatasetRequest, + DeleteDatasetRequest, + ExportDataOperationMetadata, + ExportDataRequest, + ExportDataResponse, + GetAnnotationSpecRequest, + GetDatasetRequest, + ImportDataOperationMetadata, + ImportDataRequest, + ImportDataResponse, + ListAnnotationsRequest, + ListAnnotationsResponse, + ListDataItemsRequest, + ListDataItemsResponse, + ListDatasetsRequest, + ListDatasetsResponse, + UpdateDatasetRequest, +) +from .deployed_index_ref import ( + DeployedIndexRef, +) +from .deployed_model_ref import ( + DeployedModelRef, +) +from .encryption_spec import ( + EncryptionSpec, +) +from .endpoint import ( + DeployedModel, + Endpoint, +) +from .endpoint_service import ( + CreateEndpointOperationMetadata, + CreateEndpointRequest, + DeleteEndpointRequest, + DeployModelOperationMetadata, + DeployModelRequest, + DeployModelResponse, + GetEndpointRequest, + ListEndpointsRequest, + ListEndpointsResponse, + UndeployModelOperationMetadata, + UndeployModelRequest, + UndeployModelResponse, + UpdateEndpointRequest, +) +from .entity_type import ( + EntityType, +) +from .env_var import ( + EnvVar, +) +from .event import ( + Event, +) +from .execution import ( + Execution, +) +from .explanation import ( + Attribution, + Explanation, + ExplanationMetadataOverride, + ExplanationParameters, + ExplanationSpec, + ExplanationSpecOverride, + FeatureNoiseSigma, + IntegratedGradientsAttribution, + ModelExplanation, + SampledShapleyAttribution, + SmoothGradConfig, + XraiAttribution, +) +from .explanation_metadata import ( + ExplanationMetadata, +) +from .feature import ( + Feature, +) +from .feature_monitoring_stats import ( + FeatureStatsAnomaly, +) +from .feature_selector import ( + FeatureSelector, + IdMatcher, +) +from .featurestore import ( + Featurestore, +) +from .featurestore_monitoring import ( + FeaturestoreMonitoringConfig, +) +from .featurestore_online_service import ( + FeatureValue, + FeatureValueList, + ReadFeatureValuesRequest, + ReadFeatureValuesResponse, + StreamingReadFeatureValuesRequest, +) +from .featurestore_service import ( + BatchCreateFeaturesOperationMetadata, + BatchCreateFeaturesRequest, + BatchCreateFeaturesResponse, + BatchReadFeatureValuesOperationMetadata, + BatchReadFeatureValuesRequest, + BatchReadFeatureValuesResponse, + CreateEntityTypeOperationMetadata, + CreateEntityTypeRequest, + CreateFeatureOperationMetadata, + CreateFeatureRequest, + CreateFeaturestoreOperationMetadata, + CreateFeaturestoreRequest, + DeleteEntityTypeRequest, + DeleteFeatureRequest, + DeleteFeaturestoreRequest, + DestinationFeatureSetting, + ExportFeatureValuesOperationMetadata, + ExportFeatureValuesRequest, + ExportFeatureValuesResponse, + FeatureValueDestination, + GetEntityTypeRequest, + GetFeatureRequest, + GetFeaturestoreRequest, + ImportFeatureValuesOperationMetadata, + 
ImportFeatureValuesRequest, + ImportFeatureValuesResponse, + ListEntityTypesRequest, + ListEntityTypesResponse, + ListFeaturesRequest, + ListFeaturesResponse, + ListFeaturestoresRequest, + ListFeaturestoresResponse, + SearchFeaturesRequest, + SearchFeaturesResponse, + UpdateEntityTypeRequest, + UpdateFeatureRequest, + UpdateFeaturestoreOperationMetadata, + UpdateFeaturestoreRequest, +) +from .hyperparameter_tuning_job import ( + HyperparameterTuningJob, +) +from .index import ( + Index, +) +from .index_endpoint import ( + DeployedIndex, + DeployedIndexAuthConfig, + IndexEndpoint, + IndexPrivateEndpoints, +) +from .index_endpoint_service import ( + CreateIndexEndpointOperationMetadata, + CreateIndexEndpointRequest, + DeleteIndexEndpointRequest, + DeployIndexOperationMetadata, + DeployIndexRequest, + DeployIndexResponse, + GetIndexEndpointRequest, + ListIndexEndpointsRequest, + ListIndexEndpointsResponse, + UndeployIndexOperationMetadata, + UndeployIndexRequest, + UndeployIndexResponse, + UpdateIndexEndpointRequest, +) +from .index_service import ( + CreateIndexOperationMetadata, + CreateIndexRequest, + DeleteIndexRequest, + GetIndexRequest, + ListIndexesRequest, + ListIndexesResponse, + NearestNeighborSearchOperationMetadata, + UpdateIndexOperationMetadata, + UpdateIndexRequest, +) +from .io import ( + AvroSource, + BigQueryDestination, + BigQuerySource, + ContainerRegistryDestination, + CsvDestination, + CsvSource, + GcsDestination, + GcsSource, + TFRecordDestination, +) +from .job_service import ( + CancelBatchPredictionJobRequest, + CancelCustomJobRequest, + CancelDataLabelingJobRequest, + CancelHyperparameterTuningJobRequest, + CreateBatchPredictionJobRequest, + CreateCustomJobRequest, + CreateDataLabelingJobRequest, + CreateHyperparameterTuningJobRequest, + CreateModelDeploymentMonitoringJobRequest, + DeleteBatchPredictionJobRequest, + DeleteCustomJobRequest, + DeleteDataLabelingJobRequest, + DeleteHyperparameterTuningJobRequest, + DeleteModelDeploymentMonitoringJobRequest, + GetBatchPredictionJobRequest, + GetCustomJobRequest, + GetDataLabelingJobRequest, + GetHyperparameterTuningJobRequest, + GetModelDeploymentMonitoringJobRequest, + ListBatchPredictionJobsRequest, + ListBatchPredictionJobsResponse, + ListCustomJobsRequest, + ListCustomJobsResponse, + ListDataLabelingJobsRequest, + ListDataLabelingJobsResponse, + ListHyperparameterTuningJobsRequest, + ListHyperparameterTuningJobsResponse, + ListModelDeploymentMonitoringJobsRequest, + ListModelDeploymentMonitoringJobsResponse, + PauseModelDeploymentMonitoringJobRequest, + ResumeModelDeploymentMonitoringJobRequest, + SearchModelDeploymentMonitoringStatsAnomaliesRequest, + SearchModelDeploymentMonitoringStatsAnomaliesResponse, + UpdateModelDeploymentMonitoringJobOperationMetadata, + UpdateModelDeploymentMonitoringJobRequest, +) +from .lineage_subgraph import ( + LineageSubgraph, +) +from .machine_resources import ( + AutomaticResources, + AutoscalingMetricSpec, + BatchDedicatedResources, + DedicatedResources, + DiskSpec, + MachineSpec, + ResourcesConsumed, +) +from .manual_batch_tuning_parameters import ( + ManualBatchTuningParameters, +) +from .metadata_schema import ( + MetadataSchema, +) +from .metadata_service import ( + AddContextArtifactsAndExecutionsRequest, + AddContextArtifactsAndExecutionsResponse, + AddContextChildrenRequest, + AddContextChildrenResponse, + AddExecutionEventsRequest, + AddExecutionEventsResponse, + CreateArtifactRequest, + CreateContextRequest, + CreateExecutionRequest, + CreateMetadataSchemaRequest, + 
CreateMetadataStoreOperationMetadata, + CreateMetadataStoreRequest, + DeleteContextRequest, + DeleteMetadataStoreOperationMetadata, + DeleteMetadataStoreRequest, + GetArtifactRequest, + GetContextRequest, + GetExecutionRequest, + GetMetadataSchemaRequest, + GetMetadataStoreRequest, + ListArtifactsRequest, + ListArtifactsResponse, + ListContextsRequest, + ListContextsResponse, + ListExecutionsRequest, + ListExecutionsResponse, + ListMetadataSchemasRequest, + ListMetadataSchemasResponse, + ListMetadataStoresRequest, + ListMetadataStoresResponse, + QueryArtifactLineageSubgraphRequest, + QueryContextLineageSubgraphRequest, + QueryExecutionInputsAndOutputsRequest, + UpdateArtifactRequest, + UpdateContextRequest, + UpdateExecutionRequest, +) +from .metadata_store import ( + MetadataStore, +) +from .migratable_resource import ( + MigratableResource, +) +from .migration_service import ( + BatchMigrateResourcesOperationMetadata, + BatchMigrateResourcesRequest, + BatchMigrateResourcesResponse, + MigrateResourceRequest, + MigrateResourceResponse, + SearchMigratableResourcesRequest, + SearchMigratableResourcesResponse, +) +from .model import ( + Model, + ModelContainerSpec, + Port, + PredictSchemata, +) +from .model_deployment_monitoring_job import ( + ModelDeploymentMonitoringBigQueryTable, + ModelDeploymentMonitoringJob, + ModelDeploymentMonitoringObjectiveConfig, + ModelDeploymentMonitoringScheduleConfig, + ModelMonitoringStatsAnomalies, + ModelDeploymentMonitoringObjectiveType, +) +from .model_evaluation import ( + ModelEvaluation, +) +from .model_evaluation_slice import ( + ModelEvaluationSlice, +) +from .model_monitoring import ( + ModelMonitoringAlertConfig, + ModelMonitoringObjectiveConfig, + SamplingStrategy, + ThresholdConfig, +) +from .model_service import ( + DeleteModelRequest, + ExportModelOperationMetadata, + ExportModelRequest, + ExportModelResponse, + GetModelEvaluationRequest, + GetModelEvaluationSliceRequest, + GetModelRequest, + ListModelEvaluationSlicesRequest, + ListModelEvaluationSlicesResponse, + ListModelEvaluationsRequest, + ListModelEvaluationsResponse, + ListModelsRequest, + ListModelsResponse, + UpdateModelRequest, + UploadModelOperationMetadata, + UploadModelRequest, + UploadModelResponse, +) +from .operation import ( + DeleteOperationMetadata, + GenericOperationMetadata, +) +from .pipeline_job import ( + PipelineJob, + PipelineJobDetail, + PipelineTaskDetail, + PipelineTaskExecutorDetail, +) +from .pipeline_service import ( + CancelPipelineJobRequest, + CancelTrainingPipelineRequest, + CreatePipelineJobRequest, + CreateTrainingPipelineRequest, + DeletePipelineJobRequest, + DeleteTrainingPipelineRequest, + GetPipelineJobRequest, + GetTrainingPipelineRequest, + ListPipelineJobsRequest, + ListPipelineJobsResponse, + ListTrainingPipelinesRequest, + ListTrainingPipelinesResponse, +) +from .prediction_service import ( + ExplainRequest, + ExplainResponse, + PredictRequest, + PredictResponse, +) +from .specialist_pool import ( + SpecialistPool, +) +from .specialist_pool_service import ( + CreateSpecialistPoolOperationMetadata, + CreateSpecialistPoolRequest, + DeleteSpecialistPoolRequest, + GetSpecialistPoolRequest, + ListSpecialistPoolsRequest, + ListSpecialistPoolsResponse, + UpdateSpecialistPoolOperationMetadata, + UpdateSpecialistPoolRequest, +) +from .study import ( + Measurement, + Study, + StudySpec, + Trial, +) +from .tensorboard import ( + Tensorboard, +) +from .tensorboard_data import ( + Scalar, + TensorboardBlob, + TensorboardBlobSequence, + TensorboardTensor, + 
TimeSeriesData, + TimeSeriesDataPoint, +) +from .tensorboard_experiment import ( + TensorboardExperiment, +) +from .tensorboard_run import ( + TensorboardRun, +) +from .tensorboard_service import ( + CreateTensorboardExperimentRequest, + CreateTensorboardOperationMetadata, + CreateTensorboardRequest, + CreateTensorboardRunRequest, + CreateTensorboardTimeSeriesRequest, + DeleteTensorboardExperimentRequest, + DeleteTensorboardRequest, + DeleteTensorboardRunRequest, + DeleteTensorboardTimeSeriesRequest, + ExportTensorboardTimeSeriesDataRequest, + ExportTensorboardTimeSeriesDataResponse, + GetTensorboardExperimentRequest, + GetTensorboardRequest, + GetTensorboardRunRequest, + GetTensorboardTimeSeriesRequest, + ListTensorboardExperimentsRequest, + ListTensorboardExperimentsResponse, + ListTensorboardRunsRequest, + ListTensorboardRunsResponse, + ListTensorboardsRequest, + ListTensorboardsResponse, + ListTensorboardTimeSeriesRequest, + ListTensorboardTimeSeriesResponse, + ReadTensorboardBlobDataRequest, + ReadTensorboardBlobDataResponse, + ReadTensorboardTimeSeriesDataRequest, + ReadTensorboardTimeSeriesDataResponse, + UpdateTensorboardExperimentRequest, + UpdateTensorboardOperationMetadata, + UpdateTensorboardRequest, + UpdateTensorboardRunRequest, + UpdateTensorboardTimeSeriesRequest, + WriteTensorboardRunDataRequest, + WriteTensorboardRunDataResponse, +) +from .tensorboard_time_series import ( + TensorboardTimeSeries, +) +from .training_pipeline import ( + FilterSplit, + FractionSplit, + InputDataConfig, + PredefinedSplit, + TimestampSplit, + TrainingPipeline, +) +from .types import ( + BoolArray, + DoubleArray, + Int64Array, + StringArray, +) +from .user_action_reference import ( + UserActionReference, +) +from .value import ( + Value, +) +from .vizier_service import ( + AddTrialMeasurementRequest, + CheckTrialEarlyStoppingStateMetatdata, + CheckTrialEarlyStoppingStateRequest, + CheckTrialEarlyStoppingStateResponse, + CompleteTrialRequest, + CreateStudyRequest, + CreateTrialRequest, + DeleteStudyRequest, + DeleteTrialRequest, + GetStudyRequest, + GetTrialRequest, + ListOptimalTrialsRequest, + ListOptimalTrialsResponse, + ListStudiesRequest, + ListStudiesResponse, + ListTrialsRequest, + ListTrialsResponse, + LookupStudyRequest, + StopTrialRequest, + SuggestTrialsMetadata, + SuggestTrialsRequest, + SuggestTrialsResponse, +) + +__all__ = ( + 'AcceleratorType', + 'Annotation', + 'AnnotationSpec', + 'Artifact', + 'BatchPredictionJob', + 'CompletionStats', + 'Context', + 'ContainerSpec', + 'CustomJob', + 'CustomJobSpec', + 'PythonPackageSpec', + 'Scheduling', + 'WorkerPoolSpec', + 'DataItem', + 'ActiveLearningConfig', + 'DataLabelingJob', + 'SampleConfig', + 'TrainingConfig', + 'Dataset', + 'ExportDataConfig', + 'ImportDataConfig', + 'CreateDatasetOperationMetadata', + 'CreateDatasetRequest', + 'DeleteDatasetRequest', + 'ExportDataOperationMetadata', + 'ExportDataRequest', + 'ExportDataResponse', + 'GetAnnotationSpecRequest', + 'GetDatasetRequest', + 'ImportDataOperationMetadata', + 'ImportDataRequest', + 'ImportDataResponse', + 'ListAnnotationsRequest', + 'ListAnnotationsResponse', + 'ListDataItemsRequest', + 'ListDataItemsResponse', + 'ListDatasetsRequest', + 'ListDatasetsResponse', + 'UpdateDatasetRequest', + 'DeployedIndexRef', + 'DeployedModelRef', + 'EncryptionSpec', + 'DeployedModel', + 'Endpoint', + 'CreateEndpointOperationMetadata', + 'CreateEndpointRequest', + 'DeleteEndpointRequest', + 'DeployModelOperationMetadata', + 'DeployModelRequest', + 'DeployModelResponse', + 
'GetEndpointRequest', + 'ListEndpointsRequest', + 'ListEndpointsResponse', + 'UndeployModelOperationMetadata', + 'UndeployModelRequest', + 'UndeployModelResponse', + 'UpdateEndpointRequest', + 'EntityType', + 'EnvVar', + 'Event', + 'Execution', + 'Attribution', + 'Explanation', + 'ExplanationMetadataOverride', + 'ExplanationParameters', + 'ExplanationSpec', + 'ExplanationSpecOverride', + 'FeatureNoiseSigma', + 'IntegratedGradientsAttribution', + 'ModelExplanation', + 'SampledShapleyAttribution', + 'SmoothGradConfig', + 'XraiAttribution', + 'ExplanationMetadata', + 'Feature', + 'FeatureStatsAnomaly', + 'FeatureSelector', + 'IdMatcher', + 'Featurestore', + 'FeaturestoreMonitoringConfig', + 'FeatureValue', + 'FeatureValueList', + 'ReadFeatureValuesRequest', + 'ReadFeatureValuesResponse', + 'StreamingReadFeatureValuesRequest', + 'BatchCreateFeaturesOperationMetadata', + 'BatchCreateFeaturesRequest', + 'BatchCreateFeaturesResponse', + 'BatchReadFeatureValuesOperationMetadata', + 'BatchReadFeatureValuesRequest', + 'BatchReadFeatureValuesResponse', + 'CreateEntityTypeOperationMetadata', + 'CreateEntityTypeRequest', + 'CreateFeatureOperationMetadata', + 'CreateFeatureRequest', + 'CreateFeaturestoreOperationMetadata', + 'CreateFeaturestoreRequest', + 'DeleteEntityTypeRequest', + 'DeleteFeatureRequest', + 'DeleteFeaturestoreRequest', + 'DestinationFeatureSetting', + 'ExportFeatureValuesOperationMetadata', + 'ExportFeatureValuesRequest', + 'ExportFeatureValuesResponse', + 'FeatureValueDestination', + 'GetEntityTypeRequest', + 'GetFeatureRequest', + 'GetFeaturestoreRequest', + 'ImportFeatureValuesOperationMetadata', + 'ImportFeatureValuesRequest', + 'ImportFeatureValuesResponse', + 'ListEntityTypesRequest', + 'ListEntityTypesResponse', + 'ListFeaturesRequest', + 'ListFeaturesResponse', + 'ListFeaturestoresRequest', + 'ListFeaturestoresResponse', + 'SearchFeaturesRequest', + 'SearchFeaturesResponse', + 'UpdateEntityTypeRequest', + 'UpdateFeatureRequest', + 'UpdateFeaturestoreOperationMetadata', + 'UpdateFeaturestoreRequest', + 'HyperparameterTuningJob', + 'Index', + 'DeployedIndex', + 'DeployedIndexAuthConfig', + 'IndexEndpoint', + 'IndexPrivateEndpoints', + 'CreateIndexEndpointOperationMetadata', + 'CreateIndexEndpointRequest', + 'DeleteIndexEndpointRequest', + 'DeployIndexOperationMetadata', + 'DeployIndexRequest', + 'DeployIndexResponse', + 'GetIndexEndpointRequest', + 'ListIndexEndpointsRequest', + 'ListIndexEndpointsResponse', + 'UndeployIndexOperationMetadata', + 'UndeployIndexRequest', + 'UndeployIndexResponse', + 'UpdateIndexEndpointRequest', + 'CreateIndexOperationMetadata', + 'CreateIndexRequest', + 'DeleteIndexRequest', + 'GetIndexRequest', + 'ListIndexesRequest', + 'ListIndexesResponse', + 'NearestNeighborSearchOperationMetadata', + 'UpdateIndexOperationMetadata', + 'UpdateIndexRequest', + 'AvroSource', + 'BigQueryDestination', + 'BigQuerySource', + 'ContainerRegistryDestination', + 'CsvDestination', + 'CsvSource', + 'GcsDestination', + 'GcsSource', + 'TFRecordDestination', + 'CancelBatchPredictionJobRequest', + 'CancelCustomJobRequest', + 'CancelDataLabelingJobRequest', + 'CancelHyperparameterTuningJobRequest', + 'CreateBatchPredictionJobRequest', + 'CreateCustomJobRequest', + 'CreateDataLabelingJobRequest', + 'CreateHyperparameterTuningJobRequest', + 'CreateModelDeploymentMonitoringJobRequest', + 'DeleteBatchPredictionJobRequest', + 'DeleteCustomJobRequest', + 'DeleteDataLabelingJobRequest', + 'DeleteHyperparameterTuningJobRequest', + 'DeleteModelDeploymentMonitoringJobRequest', + 
'GetBatchPredictionJobRequest', + 'GetCustomJobRequest', + 'GetDataLabelingJobRequest', + 'GetHyperparameterTuningJobRequest', + 'GetModelDeploymentMonitoringJobRequest', + 'ListBatchPredictionJobsRequest', + 'ListBatchPredictionJobsResponse', + 'ListCustomJobsRequest', + 'ListCustomJobsResponse', + 'ListDataLabelingJobsRequest', + 'ListDataLabelingJobsResponse', + 'ListHyperparameterTuningJobsRequest', + 'ListHyperparameterTuningJobsResponse', + 'ListModelDeploymentMonitoringJobsRequest', + 'ListModelDeploymentMonitoringJobsResponse', + 'PauseModelDeploymentMonitoringJobRequest', + 'ResumeModelDeploymentMonitoringJobRequest', + 'SearchModelDeploymentMonitoringStatsAnomaliesRequest', + 'SearchModelDeploymentMonitoringStatsAnomaliesResponse', + 'UpdateModelDeploymentMonitoringJobOperationMetadata', + 'UpdateModelDeploymentMonitoringJobRequest', + 'JobState', + 'LineageSubgraph', + 'AutomaticResources', + 'AutoscalingMetricSpec', + 'BatchDedicatedResources', + 'DedicatedResources', + 'DiskSpec', + 'MachineSpec', + 'ResourcesConsumed', + 'ManualBatchTuningParameters', + 'MetadataSchema', + 'AddContextArtifactsAndExecutionsRequest', + 'AddContextArtifactsAndExecutionsResponse', + 'AddContextChildrenRequest', + 'AddContextChildrenResponse', + 'AddExecutionEventsRequest', + 'AddExecutionEventsResponse', + 'CreateArtifactRequest', + 'CreateContextRequest', + 'CreateExecutionRequest', + 'CreateMetadataSchemaRequest', + 'CreateMetadataStoreOperationMetadata', + 'CreateMetadataStoreRequest', + 'DeleteContextRequest', + 'DeleteMetadataStoreOperationMetadata', + 'DeleteMetadataStoreRequest', + 'GetArtifactRequest', + 'GetContextRequest', + 'GetExecutionRequest', + 'GetMetadataSchemaRequest', + 'GetMetadataStoreRequest', + 'ListArtifactsRequest', + 'ListArtifactsResponse', + 'ListContextsRequest', + 'ListContextsResponse', + 'ListExecutionsRequest', + 'ListExecutionsResponse', + 'ListMetadataSchemasRequest', + 'ListMetadataSchemasResponse', + 'ListMetadataStoresRequest', + 'ListMetadataStoresResponse', + 'QueryArtifactLineageSubgraphRequest', + 'QueryContextLineageSubgraphRequest', + 'QueryExecutionInputsAndOutputsRequest', + 'UpdateArtifactRequest', + 'UpdateContextRequest', + 'UpdateExecutionRequest', + 'MetadataStore', + 'MigratableResource', + 'BatchMigrateResourcesOperationMetadata', + 'BatchMigrateResourcesRequest', + 'BatchMigrateResourcesResponse', + 'MigrateResourceRequest', + 'MigrateResourceResponse', + 'SearchMigratableResourcesRequest', + 'SearchMigratableResourcesResponse', + 'Model', + 'ModelContainerSpec', + 'Port', + 'PredictSchemata', + 'ModelDeploymentMonitoringBigQueryTable', + 'ModelDeploymentMonitoringJob', + 'ModelDeploymentMonitoringObjectiveConfig', + 'ModelDeploymentMonitoringScheduleConfig', + 'ModelMonitoringStatsAnomalies', + 'ModelDeploymentMonitoringObjectiveType', + 'ModelEvaluation', + 'ModelEvaluationSlice', + 'ModelMonitoringAlertConfig', + 'ModelMonitoringObjectiveConfig', + 'SamplingStrategy', + 'ThresholdConfig', + 'DeleteModelRequest', + 'ExportModelOperationMetadata', + 'ExportModelRequest', + 'ExportModelResponse', + 'GetModelEvaluationRequest', + 'GetModelEvaluationSliceRequest', + 'GetModelRequest', + 'ListModelEvaluationSlicesRequest', + 'ListModelEvaluationSlicesResponse', + 'ListModelEvaluationsRequest', + 'ListModelEvaluationsResponse', + 'ListModelsRequest', + 'ListModelsResponse', + 'UpdateModelRequest', + 'UploadModelOperationMetadata', + 'UploadModelRequest', + 'UploadModelResponse', + 'DeleteOperationMetadata', + 'GenericOperationMetadata', + 
'PipelineJob', + 'PipelineJobDetail', + 'PipelineTaskDetail', + 'PipelineTaskExecutorDetail', + 'CancelPipelineJobRequest', + 'CancelTrainingPipelineRequest', + 'CreatePipelineJobRequest', + 'CreateTrainingPipelineRequest', + 'DeletePipelineJobRequest', + 'DeleteTrainingPipelineRequest', + 'GetPipelineJobRequest', + 'GetTrainingPipelineRequest', + 'ListPipelineJobsRequest', + 'ListPipelineJobsResponse', + 'ListTrainingPipelinesRequest', + 'ListTrainingPipelinesResponse', + 'PipelineState', + 'ExplainRequest', + 'ExplainResponse', + 'PredictRequest', + 'PredictResponse', + 'SpecialistPool', + 'CreateSpecialistPoolOperationMetadata', + 'CreateSpecialistPoolRequest', + 'DeleteSpecialistPoolRequest', + 'GetSpecialistPoolRequest', + 'ListSpecialistPoolsRequest', + 'ListSpecialistPoolsResponse', + 'UpdateSpecialistPoolOperationMetadata', + 'UpdateSpecialistPoolRequest', + 'Measurement', + 'Study', + 'StudySpec', + 'Trial', + 'Tensorboard', + 'Scalar', + 'TensorboardBlob', + 'TensorboardBlobSequence', + 'TensorboardTensor', + 'TimeSeriesData', + 'TimeSeriesDataPoint', + 'TensorboardExperiment', + 'TensorboardRun', + 'CreateTensorboardExperimentRequest', + 'CreateTensorboardOperationMetadata', + 'CreateTensorboardRequest', + 'CreateTensorboardRunRequest', + 'CreateTensorboardTimeSeriesRequest', + 'DeleteTensorboardExperimentRequest', + 'DeleteTensorboardRequest', + 'DeleteTensorboardRunRequest', + 'DeleteTensorboardTimeSeriesRequest', + 'ExportTensorboardTimeSeriesDataRequest', + 'ExportTensorboardTimeSeriesDataResponse', + 'GetTensorboardExperimentRequest', + 'GetTensorboardRequest', + 'GetTensorboardRunRequest', + 'GetTensorboardTimeSeriesRequest', + 'ListTensorboardExperimentsRequest', + 'ListTensorboardExperimentsResponse', + 'ListTensorboardRunsRequest', + 'ListTensorboardRunsResponse', + 'ListTensorboardsRequest', + 'ListTensorboardsResponse', + 'ListTensorboardTimeSeriesRequest', + 'ListTensorboardTimeSeriesResponse', + 'ReadTensorboardBlobDataRequest', + 'ReadTensorboardBlobDataResponse', + 'ReadTensorboardTimeSeriesDataRequest', + 'ReadTensorboardTimeSeriesDataResponse', + 'UpdateTensorboardExperimentRequest', + 'UpdateTensorboardOperationMetadata', + 'UpdateTensorboardRequest', + 'UpdateTensorboardRunRequest', + 'UpdateTensorboardTimeSeriesRequest', + 'WriteTensorboardRunDataRequest', + 'WriteTensorboardRunDataResponse', + 'TensorboardTimeSeries', + 'FilterSplit', + 'FractionSplit', + 'InputDataConfig', + 'PredefinedSplit', + 'TimestampSplit', + 'TrainingPipeline', + 'BoolArray', + 'DoubleArray', + 'Int64Array', + 'StringArray', + 'UserActionReference', + 'Value', + 'AddTrialMeasurementRequest', + 'CheckTrialEarlyStoppingStateMetatdata', + 'CheckTrialEarlyStoppingStateRequest', + 'CheckTrialEarlyStoppingStateResponse', + 'CompleteTrialRequest', + 'CreateStudyRequest', + 'CreateTrialRequest', + 'DeleteStudyRequest', + 'DeleteTrialRequest', + 'GetStudyRequest', + 'GetTrialRequest', + 'ListOptimalTrialsRequest', + 'ListOptimalTrialsResponse', + 'ListStudiesRequest', + 'ListStudiesResponse', + 'ListTrialsRequest', + 'ListTrialsResponse', + 'LookupStudyRequest', + 'StopTrialRequest', + 'SuggestTrialsMetadata', + 'SuggestTrialsRequest', + 'SuggestTrialsResponse', +) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/accelerator_type.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/accelerator_type.py new file mode 100644 index 0000000000..3e2b8a46f4 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/accelerator_type.py @@ 
-0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package='google.cloud.aiplatform.v1beta1',
+ manifest={
+ 'AcceleratorType',
+ },
+)
+
+
+class AcceleratorType(proto.Enum):
+ r"""Represents a hardware accelerator type."""
+ ACCELERATOR_TYPE_UNSPECIFIED = 0
+ NVIDIA_TESLA_K80 = 1
+ NVIDIA_TESLA_P100 = 2
+ NVIDIA_TESLA_V100 = 3
+ NVIDIA_TESLA_P4 = 4
+ NVIDIA_TESLA_T4 = 5
+ NVIDIA_TESLA_A100 = 8
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation.py
new file mode 100644
index 0000000000..526bcad4d2
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation.py
@@ -0,0 +1,129 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import user_action_reference
+from google.protobuf import struct_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
+
+
+__protobuf__ = proto.module(
+ package='google.cloud.aiplatform.v1beta1',
+ manifest={
+ 'Annotation',
+ },
+)
+
+
+class Annotation(proto.Message):
+ r"""Used to assign a specific AnnotationSpec to a particular area
+ of a DataItem, or to the whole DataItem.
+
+ Attributes:
+ name (str):
+ Output only. Resource name of the Annotation.
+ payload_schema_uri (str):
+ Required. Google Cloud Storage URI that points to a YAML
+ file describing
+ [payload][google.cloud.aiplatform.v1beta1.Annotation.payload].
+ The schema is defined as an `OpenAPI 3.0.2 Schema
+ Object `__.
+ The schema files that can be used here are found in
+ gs://google-cloud-aiplatform/schema/dataset/annotation/,
+ note that the chosen schema must be consistent with the
+ parent Dataset's
+ [metadata][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri].
+ payload (google.protobuf.struct_pb2.Value):
+ Required. The schema of the payload can be found in
+ [payload_schema][google.cloud.aiplatform.v1beta1.Annotation.payload_schema_uri].
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when this Annotation
+ was created.
+ update_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when this Annotation
+ was last updated.
+ etag (str):
+ Optional. Used to perform consistent
+ read-modify-write updates. If not set, a blind
+ "overwrite" update happens.
+ annotation_source (google.cloud.aiplatform_v1beta1.types.UserActionReference):
+ Output only. The source of the Annotation.
+ labels (Sequence[google.cloud.aiplatform_v1beta1.types.Annotation.LabelsEntry]):
+ Optional. The labels with user-defined metadata to organize
+ your Annotations.
+
+ Label keys and values can be no longer than 64 characters
+ (Unicode codepoints), can only contain lowercase letters,
+ numeric characters, underscores and dashes. International
+ characters are allowed. No more than 64 user labels can be
+ associated with one Annotation (System labels are excluded).
+
+ See https://goo.gl/xmQnxf for more information and examples
+ of labels. System reserved label keys are prefixed with
+ "aiplatform.googleapis.com/" and are immutable. Following
+ system labels exist for each Annotation:
+
+ - "aiplatform.googleapis.com/annotation_set_name":
+ optional, name of the UI's annotation set this Annotation
+ belongs to. If not set, the Annotation is not visible in
+ the UI.
+
+ - "aiplatform.googleapis.com/payload_schema": output only,
+ its value is the
+ [payload_schema's][google.cloud.aiplatform.v1beta1.Annotation.payload_schema_uri]
+ title.
+ """
+
+ name = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ payload_schema_uri = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ payload = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=struct_pb2.Value,
+ )
+ create_time = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message=timestamp_pb2.Timestamp,
+ )
+ update_time = proto.Field(
+ proto.MESSAGE,
+ number=7,
+ message=timestamp_pb2.Timestamp,
+ )
+ etag = proto.Field(
+ proto.STRING,
+ number=8,
+ )
+ annotation_source = proto.Field(
+ proto.MESSAGE,
+ number=5,
+ message=user_action_reference.UserActionReference,
+ )
+ labels = proto.MapField(
+ proto.STRING,
+ proto.STRING,
+ number=6,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation_spec.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation_spec.py
new file mode 100644
index 0000000000..a254682a5c
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation_spec.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto # type: ignore
+
+from google.protobuf import timestamp_pb2 # type: ignore
+
+
+__protobuf__ = proto.module(
+ package='google.cloud.aiplatform.v1beta1',
+ manifest={
+ 'AnnotationSpec',
+ },
+)
+
+
+class AnnotationSpec(proto.Message):
+ r"""Identifies a concept with which DataItems may be
+ annotated.
+
+ Attributes:
+ name (str):
+ Output only. Resource name of the
+ AnnotationSpec.
+ display_name (str):
+ Required. The user-defined name of the
+ AnnotationSpec. The name can be up to 128
+ characters long and can consist of any UTF-8
+ characters.
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when this
+ AnnotationSpec was created.
+ update_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when AnnotationSpec
+ was last updated.
+ etag (str):
+ Optional. Used to perform consistent
+ read-modify-write updates. If not set, a blind
+ "overwrite" update happens.
+ """
+
+ name = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ display_name = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ create_time = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=timestamp_pb2.Timestamp,
+ )
+ update_time = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message=timestamp_pb2.Timestamp,
+ )
+ etag = proto.Field(
+ proto.STRING,
+ number=5,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/artifact.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/artifact.py
new file mode 100644
index 0000000000..a7275436e3
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/artifact.py
@@ -0,0 +1,151 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto # type: ignore
+
+from google.protobuf import struct_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
+
+
+__protobuf__ = proto.module(
+ package='google.cloud.aiplatform.v1beta1',
+ manifest={
+ 'Artifact',
+ },
+)
+
+
+class Artifact(proto.Message):
+ r"""Instance of a general artifact.
+ Attributes:
+ name (str):
+ Output only. The resource name of the
+ Artifact.
+ display_name (str):
+ User provided display name of the Artifact.
+ May be up to 128 Unicode characters.
+ uri (str):
+ The uniform resource identifier of the
+ artifact file. May be empty if there is no
+ actual artifact file.
+ etag (str):
+ An eTag used to perform consistent
+ read-modify-write updates. If not set, a blind
+ "overwrite" update happens.
+ labels (Sequence[google.cloud.aiplatform_v1beta1.types.Artifact.LabelsEntry]):
+ The labels with user-defined metadata to
+ organize your Artifacts.
+ Label keys and values can be no longer than 64
+ characters (Unicode codepoints), can only
+ contain lowercase letters, numeric characters,
+ underscores and dashes. International characters
+ are allowed. No more than 64 user labels can be
+ associated with one Artifact (System labels are
+ excluded).
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when this Artifact was
+ created.
+ update_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when this Artifact was
+ last updated.
+ state (google.cloud.aiplatform_v1beta1.types.Artifact.State):
+ The state of this Artifact. This is a
+ property of the Artifact, and does not imply or
+ capture any ongoing process. This property is
+ managed by clients (such as Vertex Pipelines),
+ and the system does not prescribe or check the
+ validity of state transitions.
+ schema_title (str):
+ The title of the schema describing the
+ metadata.
+ Schema title and version are expected to be
+ registered in earlier Create Schema calls, and
+ both are used together as unique identifiers to
+ identify schemas within the local metadata
+ store.
+ schema_version (str):
+ The version of the schema in schema_name to use.
+
+ Schema title and version are expected to be registered in
+ earlier Create Schema calls, and both are used together as
+ unique identifiers to identify schemas within the local
+ metadata store.
+ metadata (google.protobuf.struct_pb2.Struct):
+ Properties of the Artifact.
+ description (str):
+ Description of the Artifact.
+ """
+ class State(proto.Enum):
+ r"""Describes the state of the Artifact."""
+ STATE_UNSPECIFIED = 0
+ PENDING = 1
+ LIVE = 2
+
+ name = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ display_name = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ uri = proto.Field(
+ proto.STRING,
+ number=6,
+ )
+ etag = proto.Field(
+ proto.STRING,
+ number=9,
+ )
+ labels = proto.MapField(
+ proto.STRING,
+ proto.STRING,
+ number=10,
+ )
+ create_time = proto.Field(
+ proto.MESSAGE,
+ number=11,
+ message=timestamp_pb2.Timestamp,
+ )
+ update_time = proto.Field(
+ proto.MESSAGE,
+ number=12,
+ message=timestamp_pb2.Timestamp,
+ )
+ state = proto.Field(
+ proto.ENUM,
+ number=13,
+ enum=State,
+ )
+ schema_title = proto.Field(
+ proto.STRING,
+ number=14,
+ )
+ schema_version = proto.Field(
+ proto.STRING,
+ number=15,
+ )
+ metadata = proto.Field(
+ proto.MESSAGE,
+ number=16,
+ message=struct_pb2.Struct,
+ )
+ description = proto.Field(
+ proto.STRING,
+ number=17,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py
new file mode 100644
index 0000000000..a03181e64d
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py
@@ -0,0 +1,449 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
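The ``Artifact`` message above follows the proto-plus pattern used by every type in this package: a module-level ``__protobuf__`` manifest plus ``proto.Field``/``proto.MapField`` declarations whose ``number=`` arguments pin the protobuf wire-format field numbers. A minimal self-contained sketch of the same pattern; the ``example.demo.v1`` package and ``Widget`` message are invented for illustration:

    import proto

    from google.protobuf import timestamp_pb2


    __protobuf__ = proto.module(
        package='example.demo.v1',
        manifest={
            'Widget',
        },
    )


    class Widget(proto.Message):
        """A toy message mirroring the generated field style."""

        name = proto.Field(
            proto.STRING,
            number=1,
        )
        labels = proto.MapField(
            proto.STRING,
            proto.STRING,
            number=2,
        )
        create_time = proto.Field(
            proto.MESSAGE,
            number=3,
            message=timestamp_pb2.Timestamp,
        )


    # The class-level serialize/deserialize helpers are exactly what the
    # transport stubs plug in as request_serializer/response_deserializer.
    w = Widget(name='w1', labels={'env': 'dev'})
    assert Widget.deserialize(Widget.serialize(w)).name == 'w1'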
+#
+import proto # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import completion_stats as gca_completion_stats
+from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec
+from google.cloud.aiplatform_v1beta1.types import explanation
+from google.cloud.aiplatform_v1beta1.types import io
+from google.cloud.aiplatform_v1beta1.types import job_state
+from google.cloud.aiplatform_v1beta1.types import machine_resources
+from google.cloud.aiplatform_v1beta1.types import manual_batch_tuning_parameters as gca_manual_batch_tuning_parameters
+from google.protobuf import struct_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
+from google.rpc import status_pb2 # type: ignore
+
+
+__protobuf__ = proto.module(
+ package='google.cloud.aiplatform.v1beta1',
+ manifest={
+ 'BatchPredictionJob',
+ },
+)
+
+
+class BatchPredictionJob(proto.Message):
+ r"""A job that uses a
+ [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to
+ produce predictions on multiple [input
+ instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config].
+ If predictions for a significant portion of the instances fail, the
+ job may finish without attempting predictions for all remaining
+ instances.
+
+ Attributes:
+ name (str):
+ Output only. Resource name of the
+ BatchPredictionJob.
+ display_name (str):
+ Required. The user-defined name of this
+ BatchPredictionJob.
+ model (str):
+ Required. The name of the Model that produces
+ the predictions via this job, must share the
+ same ancestor Location. Starting this job has no
+ impact on any existing deployments of the Model
+ and their resources.
+ input_config (google.cloud.aiplatform_v1beta1.types.BatchPredictionJob.InputConfig):
+ Required. Input configuration of the instances on which
+ predictions are performed. The schema of any single instance
+ may be specified via the
+ [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model]
+ [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
+ [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri].
+ model_parameters (google.protobuf.struct_pb2.Value):
+ The parameters that govern the predictions. The schema of
+ the parameters may be specified via the
+ [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model]
+ [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
+ [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri].
+ output_config (google.cloud.aiplatform_v1beta1.types.BatchPredictionJob.OutputConfig):
+ Required. The configuration specifying where output
+ predictions should be written. The schema of any single
+ prediction may be specified as a concatenation of
+ [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model]
+ [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
+ [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]
+ and
+ [prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri].
+ dedicated_resources (google.cloud.aiplatform_v1beta1.types.BatchDedicatedResources):
+ The config of resources used by the Model during the batch
+ prediction.
If the Model
+            [supports][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types]
+            DEDICATED_RESOURCES, this config may be provided (and the job
+            will use these resources); if the Model doesn't support
+            AUTOMATIC_RESOURCES, this config must be provided.
+        manual_batch_tuning_parameters (google.cloud.aiplatform_v1beta1.types.ManualBatchTuningParameters):
+            Immutable. Parameters configuring the batch behavior.
+            Currently only applicable when
+            [dedicated_resources][google.cloud.aiplatform.v1beta1.BatchPredictionJob.dedicated_resources]
+            are used (in other cases Vertex AI does the tuning itself).
+        generate_explanation (bool):
+            Generate explanation with the batch prediction results.
+
+            When set to ``true``, the batch prediction output changes
+            based on the ``predictions_format`` field of the
+            [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config]
+            object:
+
+            -  ``bigquery``: output includes a column named
+               ``explanation``. The value is a struct that conforms to
+               the
+               [Explanation][google.cloud.aiplatform.v1beta1.Explanation]
+               object.
+            -  ``jsonl``: The JSON objects on each line include an
+               additional entry keyed ``explanation``. The value of the
+               entry is a JSON object that conforms to the
+               [Explanation][google.cloud.aiplatform.v1beta1.Explanation]
+               object.
+            -  ``csv``: Generating explanations for CSV format is not
+               supported.
+
+            If this field is set to true, either the
+            [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec]
+            or
+            [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec]
+            must be populated.
+        explanation_spec (google.cloud.aiplatform_v1beta1.types.ExplanationSpec):
+            Explanation configuration for this BatchPredictionJob. Can
+            be specified only if
+            [generate_explanation][google.cloud.aiplatform.v1beta1.BatchPredictionJob.generate_explanation]
+            is set to ``true``.
+
+            This value overrides the value of
+            [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec].
+            All fields of
+            [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec]
+            are optional in the request. If a field of the
+            [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec]
+            object is not populated, the corresponding field of the
+            [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec]
+            object is inherited.
+        output_info (google.cloud.aiplatform_v1beta1.types.BatchPredictionJob.OutputInfo):
+            Output only. Information further describing
+            the output of this job.
+        state (google.cloud.aiplatform_v1beta1.types.JobState):
+            Output only. The detailed state of the job.
+        error (google.rpc.status_pb2.Status):
+            Output only. Only populated when the job's state is
+            JOB_STATE_FAILED or JOB_STATE_CANCELLED.
+        partial_failures (Sequence[google.rpc.status_pb2.Status]):
+            Output only. Partial failures encountered.
+            For example, single files that can't be read.
+            This field never exceeds 20 entries.
+            Status details fields contain standard GCP error
+            details.
+        resources_consumed (google.cloud.aiplatform_v1beta1.types.ResourcesConsumed):
+            Output only. Information about resources that
+            had been consumed by this job. Provided in real
+            time on a best-effort basis, as well as a final
+            value once the job completes.
+
+            Note: This field currently may not be populated
+            for batch predictions that use AutoML Models.
+ completion_stats (google.cloud.aiplatform_v1beta1.types.CompletionStats): + Output only. Statistics on completed and + failed prediction instances. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the BatchPredictionJob + was created. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the BatchPredictionJob for the first + time entered the ``JOB_STATE_RUNNING`` state. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the BatchPredictionJob entered any of + the following states: ``JOB_STATE_SUCCEEDED``, + ``JOB_STATE_FAILED``, ``JOB_STATE_CANCELLED``. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the BatchPredictionJob + was most recently updated. + labels (Sequence[google.cloud.aiplatform_v1beta1.types.BatchPredictionJob.LabelsEntry]): + The labels with user-defined metadata to + organize BatchPredictionJobs. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): + Customer-managed encryption key options for a + BatchPredictionJob. If this is set, then all + resources created by the BatchPredictionJob will + be encrypted with the provided encryption key. + """ + + class InputConfig(proto.Message): + r"""Configures the input to + [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. + See + [Model.supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats] + for Model's supported input formats, and how instances should be + expressed via any of them. + + Attributes: + gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): + The Cloud Storage location for the input + instances. + bigquery_source (google.cloud.aiplatform_v1beta1.types.BigQuerySource): + The BigQuery location of the input table. + The schema of the table should be in the format + described by the given context OpenAPI Schema, + if one is provided. The table may contain + additional columns that are not described by the + schema, and they will be ignored. + instances_format (str): + Required. The format in which instances are given, must be + one of the + [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] + [supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats]. + """ + + gcs_source = proto.Field( + proto.MESSAGE, + number=2, + oneof='source', + message=io.GcsSource, + ) + bigquery_source = proto.Field( + proto.MESSAGE, + number=3, + oneof='source', + message=io.BigQuerySource, + ) + instances_format = proto.Field( + proto.STRING, + number=1, + ) + + class OutputConfig(proto.Message): + r"""Configures the output of + [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. + See + [Model.supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats] + for supported output formats, and how predictions are expressed via + any of them. + + Attributes: + gcs_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination): + The Cloud Storage location of the directory where the output + is to be written to. In the given directory a new directory + is created. 
Its name is
+                ``prediction-<model-display-name>-<job-create-time>``, where
+                timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
+                Inside of it files ``predictions_0001.<extension>``,
+                ``predictions_0002.<extension>``, ...,
+                ``predictions_N.<extension>`` are created where
+                ``<extension>`` depends on chosen
+                [predictions_format][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.predictions_format],
+                and N may equal 0001 and depends on the total number of
+                successfully predicted instances. If the Model has both
+                [instance][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]
+                and
+                [prediction][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]
+                schemata defined then each such file contains predictions as
+                per the
+                [predictions_format][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.predictions_format].
+                If prediction for any instance failed (partially or
+                completely), then an additional ``errors_0001.<extension>``,
+                ``errors_0002.<extension>``, ..., ``errors_N.<extension>``
+                files are created (N depends on total number of failed
+                predictions). These files contain the failed instances, as
+                per their schema, followed by an additional ``error`` field
+                whose value is a ``google.rpc.Status`` message containing
+                only ``code`` and ``message`` fields.
+            bigquery_destination (google.cloud.aiplatform_v1beta1.types.BigQueryDestination):
+                The BigQuery project or dataset location where the output is
+                to be written to. If project is provided, a new dataset is
+                created with name
+                ``prediction_<model-display-name>_<job-create-time>``, where
+                ``<model-display-name>`` is made BigQuery-dataset-name
+                compatible (for example, most special characters become
+                underscores), and timestamp is in
+                YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the
+                dataset two tables will be created, ``predictions``, and
+                ``errors``. If the Model has both
+                [instance][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]
+                and
+                [prediction][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]
+                schemata defined then the tables have columns as follows:
+                the ``predictions`` table contains instances for which the
+                prediction succeeded; it has columns as per a concatenation
+                of the Model's instance and prediction schemata. The
+                ``errors`` table contains rows for which the prediction
+                failed; it has instance columns, as per the instance schema,
+                followed by a single "errors" column, whose values are
+                ``google.rpc.Status`` messages represented as a STRUCT,
+                containing only ``code`` and ``message``.
+            predictions_format (str):
+                Required. The format in which Vertex AI gives the
+                predictions, must be one of the
+                [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model]
+                [supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats].
+        """
+
+        gcs_destination = proto.Field(
+            proto.MESSAGE,
+            number=2,
+            oneof='destination',
+            message=io.GcsDestination,
+        )
+        bigquery_destination = proto.Field(
+            proto.MESSAGE,
+            number=3,
+            oneof='destination',
+            message=io.BigQueryDestination,
+        )
+        predictions_format = proto.Field(
+            proto.STRING,
+            number=1,
+        )
+
+    class OutputInfo(proto.Message):
+        r"""Further describes this job's output. Supplements
+        [output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config].
+
+        Attributes:
+            gcs_output_directory (str):
+                Output only. The full path of the Cloud
+                Storage directory created, into which the
+                prediction output is written.
+            bigquery_output_dataset (str):
+                Output only.
The path of the BigQuery dataset created, in + ``bq://projectId.bqDatasetId`` format, into which the + prediction output is written. + """ + + gcs_output_directory = proto.Field( + proto.STRING, + number=1, + oneof='output_location', + ) + bigquery_output_dataset = proto.Field( + proto.STRING, + number=2, + oneof='output_location', + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + model = proto.Field( + proto.STRING, + number=3, + ) + input_config = proto.Field( + proto.MESSAGE, + number=4, + message=InputConfig, + ) + model_parameters = proto.Field( + proto.MESSAGE, + number=5, + message=struct_pb2.Value, + ) + output_config = proto.Field( + proto.MESSAGE, + number=6, + message=OutputConfig, + ) + dedicated_resources = proto.Field( + proto.MESSAGE, + number=7, + message=machine_resources.BatchDedicatedResources, + ) + manual_batch_tuning_parameters = proto.Field( + proto.MESSAGE, + number=8, + message=gca_manual_batch_tuning_parameters.ManualBatchTuningParameters, + ) + generate_explanation = proto.Field( + proto.BOOL, + number=23, + ) + explanation_spec = proto.Field( + proto.MESSAGE, + number=25, + message=explanation.ExplanationSpec, + ) + output_info = proto.Field( + proto.MESSAGE, + number=9, + message=OutputInfo, + ) + state = proto.Field( + proto.ENUM, + number=10, + enum=job_state.JobState, + ) + error = proto.Field( + proto.MESSAGE, + number=11, + message=status_pb2.Status, + ) + partial_failures = proto.RepeatedField( + proto.MESSAGE, + number=12, + message=status_pb2.Status, + ) + resources_consumed = proto.Field( + proto.MESSAGE, + number=13, + message=machine_resources.ResourcesConsumed, + ) + completion_stats = proto.Field( + proto.MESSAGE, + number=14, + message=gca_completion_stats.CompletionStats, + ) + create_time = proto.Field( + proto.MESSAGE, + number=15, + message=timestamp_pb2.Timestamp, + ) + start_time = proto.Field( + proto.MESSAGE, + number=16, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=17, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=18, + message=timestamp_pb2.Timestamp, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=19, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=24, + message=gca_encryption_spec.EncryptionSpec, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/completion_stats.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/completion_stats.py new file mode 100644 index 0000000000..3d8055f95a --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/completion_stats.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
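The ``completion_stats.py`` module that follows defines a small output-only message. A sketch of how its counts read client-side; not part of the generated sources, and the numbers are fabricated.

```python
from google.cloud.aiplatform_v1beta1 import types

# Output-only in practice; constructed here only to illustrate field semantics.
stats = types.CompletionStats(
    successful_count=980,
    failed_count=20,
    incomplete_count=-1,  # -1: the total entity count was never collected
)

assert stats.successful_count + stats.failed_count == 1000
```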
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1',
+    manifest={
+        'CompletionStats',
+    },
+)
+
+
+class CompletionStats(proto.Message):
+    r"""Success and error statistics of processing multiple entities
+    (for example, DataItems or structured data rows) in batch.
+
+    Attributes:
+        successful_count (int):
+            Output only. The number of entities that had
+            been processed successfully.
+        failed_count (int):
+            Output only. The number of entities for which
+            any error was encountered.
+        incomplete_count (int):
+            Output only. In cases when enough errors are
+            encountered, a job, pipeline, or operation may be
+            failed as a whole. Below is the number of
+            entities for which the processing had not been
+            finished (either in successful or failed state).
+            Set to -1 if the number is unknown (for example,
+            the operation failed before the total entity
+            number could be collected).
+    """
+
+    successful_count = proto.Field(
+        proto.INT64,
+        number=1,
+    )
+    failed_count = proto.Field(
+        proto.INT64,
+        number=2,
+    )
+    incomplete_count = proto.Field(
+        proto.INT64,
+        number=3,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/context.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/context.py
new file mode 100644
index 0000000000..607b44cee0
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/context.py
@@ -0,0 +1,134 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.protobuf import struct_pb2  # type: ignore
+from google.protobuf import timestamp_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1',
+    manifest={
+        'Context',
+    },
+)
+
+
+class Context(proto.Message):
+    r"""Instance of a general context.
+    Attributes:
+        name (str):
+            Output only. The resource name of the
+            Context.
+        display_name (str):
+            User provided display name of the Context.
+            May be up to 128 Unicode characters.
+        etag (str):
+            An eTag used to perform consistent
+            read-modify-write updates. If not set, a blind
+            "overwrite" update happens.
+        labels (Sequence[google.cloud.aiplatform_v1beta1.types.Context.LabelsEntry]):
+            The labels with user-defined metadata to
+            organize your Contexts.
+            Label keys and values can be no longer than 64
+            characters (Unicode codepoints), can only
+            contain lowercase letters, numeric characters,
+            underscores and dashes. International characters
+            are allowed. No more than 64 user labels can be
+            associated with one Context (System labels are
+            excluded).
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this Context was
+            created.
+        update_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this Context was
+            last updated.
+        parent_contexts (Sequence[str]):
+            Output only.
A list of resource names of Contexts that are
+            parents of this Context. A Context may have at most 10
+            parent_contexts.
+        schema_title (str):
+            The title of the schema describing the
+            metadata.
+            Schema title and version are expected to be
+            registered in earlier Create Schema calls, and
+            both are used together as unique identifiers to
+            identify schemas within the local metadata
+            store.
+        schema_version (str):
+            The version of the schema in schema_name to use.
+
+            Schema title and version are expected to be registered in
+            earlier Create Schema calls, and both are used together as
+            unique identifiers to identify schemas within the local
+            metadata store.
+        metadata (google.protobuf.struct_pb2.Struct):
+            Properties of the Context.
+        description (str):
+            Description of the Context.
+    """
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    display_name = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    etag = proto.Field(
+        proto.STRING,
+        number=8,
+    )
+    labels = proto.MapField(
+        proto.STRING,
+        proto.STRING,
+        number=9,
+    )
+    create_time = proto.Field(
+        proto.MESSAGE,
+        number=10,
+        message=timestamp_pb2.Timestamp,
+    )
+    update_time = proto.Field(
+        proto.MESSAGE,
+        number=11,
+        message=timestamp_pb2.Timestamp,
+    )
+    parent_contexts = proto.RepeatedField(
+        proto.STRING,
+        number=12,
+    )
+    schema_title = proto.Field(
+        proto.STRING,
+        number=13,
+    )
+    schema_version = proto.Field(
+        proto.STRING,
+        number=14,
+    )
+    metadata = proto.Field(
+        proto.MESSAGE,
+        number=15,
+        message=struct_pb2.Struct,
+    )
+    description = proto.Field(
+        proto.STRING,
+        number=16,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/custom_job.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/custom_job.py
new file mode 100644
index 0000000000..1d7aef0b77
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/custom_job.py
@@ -0,0 +1,384 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec
+from google.cloud.aiplatform_v1beta1.types import io
+from google.cloud.aiplatform_v1beta1.types import job_state
+from google.cloud.aiplatform_v1beta1.types import machine_resources
+from google.protobuf import duration_pb2  # type: ignore
+from google.protobuf import timestamp_pb2  # type: ignore
+from google.rpc import status_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1',
+    manifest={
+        'CustomJob',
+        'CustomJobSpec',
+        'WorkerPoolSpec',
+        'ContainerSpec',
+        'PythonPackageSpec',
+        'Scheduling',
+    },
+)
+
+
+class CustomJob(proto.Message):
+    r"""Represents a job that runs custom workloads such as a Docker
+    container or a Python package. A CustomJob can have multiple
+    worker pools and each worker pool can have its own machine and
+    input spec.
A CustomJob will be cleaned up once the job enters
+    a terminal state (failed or succeeded).
+
+    Attributes:
+        name (str):
+            Output only. Resource name of a CustomJob.
+        display_name (str):
+            Required. The display name of the CustomJob.
+            The name can be up to 128 characters long and
+            can consist of any UTF-8 characters.
+        job_spec (google.cloud.aiplatform_v1beta1.types.CustomJobSpec):
+            Required. Job spec.
+        state (google.cloud.aiplatform_v1beta1.types.JobState):
+            Output only. The detailed state of the job.
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Time when the CustomJob was
+            created.
+        start_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Time when the CustomJob for the first time
+            entered the ``JOB_STATE_RUNNING`` state.
+        end_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Time when the CustomJob entered any of the
+            following states: ``JOB_STATE_SUCCEEDED``,
+            ``JOB_STATE_FAILED``, ``JOB_STATE_CANCELLED``.
+        update_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Time when the CustomJob was most
+            recently updated.
+        error (google.rpc.status_pb2.Status):
+            Output only. Only populated when the job's state is
+            ``JOB_STATE_FAILED`` or ``JOB_STATE_CANCELLED``.
+        labels (Sequence[google.cloud.aiplatform_v1beta1.types.CustomJob.LabelsEntry]):
+            The labels with user-defined metadata to
+            organize CustomJobs.
+            Label keys and values can be no longer than 64
+            characters (Unicode codepoints), can only
+            contain lowercase letters, numeric characters,
+            underscores and dashes. International characters
+            are allowed.
+            See https://goo.gl/xmQnxf for more information
+            and examples of labels.
+        encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec):
+            Customer-managed encryption key options for a
+            CustomJob. If this is set, then all resources
+            created by the CustomJob will be encrypted with
+            the provided encryption key.
+    """
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    display_name = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    job_spec = proto.Field(
+        proto.MESSAGE,
+        number=4,
+        message='CustomJobSpec',
+    )
+    state = proto.Field(
+        proto.ENUM,
+        number=5,
+        enum=job_state.JobState,
+    )
+    create_time = proto.Field(
+        proto.MESSAGE,
+        number=6,
+        message=timestamp_pb2.Timestamp,
+    )
+    start_time = proto.Field(
+        proto.MESSAGE,
+        number=7,
+        message=timestamp_pb2.Timestamp,
+    )
+    end_time = proto.Field(
+        proto.MESSAGE,
+        number=8,
+        message=timestamp_pb2.Timestamp,
+    )
+    update_time = proto.Field(
+        proto.MESSAGE,
+        number=9,
+        message=timestamp_pb2.Timestamp,
+    )
+    error = proto.Field(
+        proto.MESSAGE,
+        number=10,
+        message=status_pb2.Status,
+    )
+    labels = proto.MapField(
+        proto.STRING,
+        proto.STRING,
+        number=11,
+    )
+    encryption_spec = proto.Field(
+        proto.MESSAGE,
+        number=12,
+        message=gca_encryption_spec.EncryptionSpec,
+    )
+
+
+class CustomJobSpec(proto.Message):
+    r"""Represents the spec of a CustomJob.
+    Attributes:
+        worker_pool_specs (Sequence[google.cloud.aiplatform_v1beta1.types.WorkerPoolSpec]):
+            Required. The spec of the worker pools
+            including machine type and Docker image. All
+            worker pools except the first one are optional
+            and can be skipped by providing an empty value.
+        scheduling (google.cloud.aiplatform_v1beta1.types.Scheduling):
+            Scheduling options for a CustomJob.
+        service_account (str):
+            Specifies the service account for workload run-as account.
+            Users submitting jobs must have act-as permission on this
+            run-as account.
If unspecified, the AI Platform Custom Code
+            Service Agent for the CustomJob's project is
+            used.
+        network (str):
+            The full name of the Compute Engine network
+            to which the Job should be peered. For example,
+            ``projects/12345/global/networks/myVPC``.
+            The format is of the form
+            ``projects/{project}/global/networks/{network}``, where
+            {project} is a project number, as in ``12345``, and
+            {network} is a network name.
+
+            Private services access must already be configured for the
+            network. If left unspecified, the job is not peered with any
+            network.
+        base_output_directory (google.cloud.aiplatform_v1beta1.types.GcsDestination):
+            The Cloud Storage location to store the output of this
+            CustomJob or HyperparameterTuningJob. For
+            HyperparameterTuningJob, the baseOutputDirectory of each
+            child CustomJob backing a Trial is set to a subdirectory of
+            name [id][google.cloud.aiplatform.v1beta1.Trial.id] under
+            its parent HyperparameterTuningJob's baseOutputDirectory.
+
+            The following Vertex AI environment variables will be passed
+            to containers or Python modules when this field is set:
+
+            For CustomJob:
+
+            -  AIP_MODEL_DIR = ``<base_output_directory>/model/``
+            -  AIP_CHECKPOINT_DIR =
+               ``<base_output_directory>/checkpoints/``
+            -  AIP_TENSORBOARD_LOG_DIR =
+               ``<base_output_directory>/logs/``
+
+            For CustomJob backing a Trial of HyperparameterTuningJob:
+
+            -  AIP_MODEL_DIR =
+               ``<base_output_directory>/<trial_id>/model/``
+            -  AIP_CHECKPOINT_DIR =
+               ``<base_output_directory>/<trial_id>/checkpoints/``
+            -  AIP_TENSORBOARD_LOG_DIR =
+               ``<base_output_directory>/<trial_id>/logs/``
+        tensorboard (str):
+            Optional. The name of a Vertex AI
+            [Tensorboard][google.cloud.aiplatform.v1beta1.Tensorboard]
+            resource to which this CustomJob will upload Tensorboard
+            logs. Format:
+            ``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
+    """
+
+    worker_pool_specs = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message='WorkerPoolSpec',
+    )
+    scheduling = proto.Field(
+        proto.MESSAGE,
+        number=3,
+        message='Scheduling',
+    )
+    service_account = proto.Field(
+        proto.STRING,
+        number=4,
+    )
+    network = proto.Field(
+        proto.STRING,
+        number=5,
+    )
+    base_output_directory = proto.Field(
+        proto.MESSAGE,
+        number=6,
+        message=io.GcsDestination,
+    )
+    tensorboard = proto.Field(
+        proto.STRING,
+        number=7,
+    )
+
+
+class WorkerPoolSpec(proto.Message):
+    r"""Represents the spec of a worker pool in a job.
+    Attributes:
+        container_spec (google.cloud.aiplatform_v1beta1.types.ContainerSpec):
+            The custom container task.
+        python_package_spec (google.cloud.aiplatform_v1beta1.types.PythonPackageSpec):
+            The Python packaged task.
+        machine_spec (google.cloud.aiplatform_v1beta1.types.MachineSpec):
+            Optional. Immutable. The specification of a
+            single machine.
+        replica_count (int):
+            Optional. The number of worker replicas to
+            use for this worker pool.
+        disk_spec (google.cloud.aiplatform_v1beta1.types.DiskSpec):
+            Disk spec.
+    """
+
+    container_spec = proto.Field(
+        proto.MESSAGE,
+        number=6,
+        oneof='task',
+        message='ContainerSpec',
+    )
+    python_package_spec = proto.Field(
+        proto.MESSAGE,
+        number=7,
+        oneof='task',
+        message='PythonPackageSpec',
+    )
+    machine_spec = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=machine_resources.MachineSpec,
+    )
+    replica_count = proto.Field(
+        proto.INT64,
+        number=2,
+    )
+    disk_spec = proto.Field(
+        proto.MESSAGE,
+        number=5,
+        message=machine_resources.DiskSpec,
+    )
+
+
+class ContainerSpec(proto.Message):
+    r"""The spec of a Container.
+    Attributes:
+        image_uri (str):
+            Required. The URI of a container image in the
+            Container Registry that is to be run on each
+            worker replica.
+        command (Sequence[str]):
+            The command to be invoked when the container
+            is started. It overrides the entrypoint
+            instruction in the Dockerfile when provided.
+        args (Sequence[str]):
+            The arguments to be passed when starting the
+            container.
+    """
+
+    image_uri = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    command = proto.RepeatedField(
+        proto.STRING,
+        number=2,
+    )
+    args = proto.RepeatedField(
+        proto.STRING,
+        number=3,
+    )
+
+
+class PythonPackageSpec(proto.Message):
+    r"""The spec of Python packaged code.
+    Attributes:
+        executor_image_uri (str):
+            Required. The URI of a container image in Artifact Registry
+            that will run the provided Python package. Vertex AI
+            provides a wide range of executor images with pre-installed
+            packages to meet users' various use cases. See the list of
+            pre-built containers for training; you must use
+            an image from this list.
+        package_uris (Sequence[str]):
+            Required. The Google Cloud Storage location
+            of the Python package files which are the
+            training program and its dependent packages. The
+            maximum number of package URIs is 100.
+        python_module (str):
+            Required. The Python module name to run after
+            installing the packages.
+        args (Sequence[str]):
+            Command line arguments to be passed to the
+            Python task.
+    """
+
+    executor_image_uri = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    package_uris = proto.RepeatedField(
+        proto.STRING,
+        number=2,
+    )
+    python_module = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    args = proto.RepeatedField(
+        proto.STRING,
+        number=4,
+    )
+
+
+class Scheduling(proto.Message):
+    r"""All parameters related to queuing and scheduling of custom
+    jobs.
+
+    Attributes:
+        timeout (google.protobuf.duration_pb2.Duration):
+            The maximum job running time. The default is
+            7 days.
+        restart_job_on_worker_restart (bool):
+            Restarts the entire CustomJob if a worker
+            gets restarted. This feature can be used by
+            distributed training jobs that are not resilient
+            to workers leaving and joining a job.
+    """
+
+    timeout = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=duration_pb2.Duration,
+    )
+    restart_job_on_worker_restart = proto.Field(
+        proto.BOOL,
+        number=3,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_item.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_item.py
new file mode 100644
index 0000000000..c638c0e00d
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_item.py
@@ -0,0 +1,101 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.protobuf import struct_pb2  # type: ignore
+from google.protobuf import timestamp_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1',
+    manifest={
+        'DataItem',
+    },
+)
+
+
+class DataItem(proto.Message):
+    r"""A piece of data in a Dataset.
Could be an image, a video, a
+    document or plain text.
+
+    Attributes:
+        name (str):
+            Output only. The resource name of the
+            DataItem.
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this DataItem was
+            created.
+        update_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this DataItem was
+            last updated.
+        labels (Sequence[google.cloud.aiplatform_v1beta1.types.DataItem.LabelsEntry]):
+            Optional. The labels with user-defined
+            metadata to organize your DataItems.
+            Label keys and values can be no longer than 64
+            characters (Unicode codepoints), can only
+            contain lowercase letters, numeric characters,
+            underscores and dashes. International characters
+            are allowed. No more than 64 user labels can be
+            associated with one DataItem (System labels are
+            excluded).
+
+            See https://goo.gl/xmQnxf for more information
+            and examples of labels. System reserved label
+            keys are prefixed with
+            "aiplatform.googleapis.com/" and are immutable.
+        payload (google.protobuf.struct_pb2.Value):
+            Required. The data that the DataItem represents (for
+            example, an image or a text snippet). The schema of the
+            payload is stored in the parent Dataset's [metadata
+            schema's][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri]
+            dataItemSchemaUri field.
+        etag (str):
+            Optional. Used to perform consistent
+            read-modify-write updates. If not set, a blind
+            "overwrite" update happens.
+    """
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    create_time = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message=timestamp_pb2.Timestamp,
+    )
+    update_time = proto.Field(
+        proto.MESSAGE,
+        number=6,
+        message=timestamp_pb2.Timestamp,
+    )
+    labels = proto.MapField(
+        proto.STRING,
+        proto.STRING,
+        number=3,
+    )
+    payload = proto.Field(
+        proto.MESSAGE,
+        number=4,
+        message=struct_pb2.Value,
+    )
+    etag = proto.Field(
+        proto.STRING,
+        number=7,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py
new file mode 100644
index 0000000000..27f661c920
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py
@@ -0,0 +1,332 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
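A construction sketch for the ``DataLabelingJob`` message defined in the module that follows. This is not part of the generated sources; the resource names and the schema file name are illustrative placeholders, and ``inputs`` is left empty for brevity.

```python
from google.cloud.aiplatform_v1beta1 import types
from google.protobuf import struct_pb2

# Assemble a labeling job over a single Dataset.
job = types.DataLabelingJob(
    display_name="label-flower-images",
    datasets=["projects/12345/locations/us-central1/datasets/111"],
    labeler_count=3,
    instruction_uri="gs://my-bucket/labeling-instructions.pdf",
    inputs_schema_uri=(
        "gs://google-cloud-aiplatform/schema/datalabelingjob/inputs/"
        "image_classification_1.0.0.yaml"  # illustrative schema file name
    ),
    # The schema-specific config payload; empty here for brevity.
    inputs=struct_pb2.Value(struct_value=struct_pb2.Struct()),
)
```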
+#
+import proto  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec
+from google.cloud.aiplatform_v1beta1.types import job_state
+from google.protobuf import struct_pb2  # type: ignore
+from google.protobuf import timestamp_pb2  # type: ignore
+from google.rpc import status_pb2  # type: ignore
+from google.type import money_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1',
+    manifest={
+        'DataLabelingJob',
+        'ActiveLearningConfig',
+        'SampleConfig',
+        'TrainingConfig',
+    },
+)
+
+
+class DataLabelingJob(proto.Message):
+    r"""DataLabelingJob is used to trigger a human labeling job on
+    unlabeled data from the following Dataset:
+
+    Attributes:
+        name (str):
+            Output only. Resource name of the
+            DataLabelingJob.
+        display_name (str):
+            Required. The user-defined name of the
+            DataLabelingJob. The name can be up to 128
+            characters long and can consist of any UTF-8
+            characters.
+        datasets (Sequence[str]):
+            Required. Dataset resource names. Right now we only support
+            labeling from a single Dataset. Format:
+            ``projects/{project}/locations/{location}/datasets/{dataset}``
+        annotation_labels (Sequence[google.cloud.aiplatform_v1beta1.types.DataLabelingJob.AnnotationLabelsEntry]):
+            Labels to assign to annotations generated by
+            this DataLabelingJob.
+            Label keys and values can be no longer than 64
+            characters (Unicode codepoints), can only
+            contain lowercase letters, numeric characters,
+            underscores and dashes. International characters
+            are allowed. See https://goo.gl/xmQnxf for more
+            information and examples of labels. System
+            reserved label keys are prefixed with
+            "aiplatform.googleapis.com/" and are immutable.
+        labeler_count (int):
+            Required. Number of labelers to work on each
+            DataItem.
+        instruction_uri (str):
+            Required. The Google Cloud Storage location
+            of the instruction pdf. This pdf is shared with
+            labelers, and provides detailed description on
+            how to label DataItems in Datasets.
+        inputs_schema_uri (str):
+            Required. Points to a YAML file stored on
+            Google Cloud Storage describing the config for a
+            specific type of DataLabelingJob. The schema
+            files that can be used here are found in the
+            https://storage.googleapis.com/google-cloud-aiplatform
+            bucket in the /schema/datalabelingjob/inputs/
+            folder.
+        inputs (google.protobuf.struct_pb2.Value):
+            Required. Input config parameters for the
+            DataLabelingJob.
+        state (google.cloud.aiplatform_v1beta1.types.JobState):
+            Output only. The detailed state of the job.
+        labeling_progress (int):
+            Output only. Current labeling job progress percentage scaled
+            in interval [0, 100], indicating the percentage of DataItems
+            that have been finished.
+        current_spend (google.type.money_pb2.Money):
+            Output only. Estimated cost (in US dollars)
+            that the DataLabelingJob has incurred to date.
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this
+            DataLabelingJob was created.
+        update_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this
+            DataLabelingJob was updated most recently.
+        error (google.rpc.status_pb2.Status):
+            Output only. DataLabelingJob errors. It is only populated
+            when the job's state is ``JOB_STATE_FAILED`` or
+            ``JOB_STATE_CANCELLED``.
+        labels (Sequence[google.cloud.aiplatform_v1beta1.types.DataLabelingJob.LabelsEntry]):
+            The labels with user-defined metadata to organize your
+            DataLabelingJobs.
+ + Label keys and values can be no longer than 64 characters + (Unicode codepoints), can only contain lowercase letters, + numeric characters, underscores and dashes. International + characters are allowed. + + See https://goo.gl/xmQnxf for more information and examples + of labels. System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. Following + system labels exist for each DataLabelingJob: + + - "aiplatform.googleapis.com/schema": output only, its + value is the + [inputs_schema][google.cloud.aiplatform.v1beta1.DataLabelingJob.inputs_schema_uri]'s + title. + specialist_pools (Sequence[str]): + The SpecialistPools' resource names + associated with this job. + encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): + Customer-managed encryption key spec for a + DataLabelingJob. If set, this DataLabelingJob + will be secured by this key. + Note: Annotations created in the DataLabelingJob + are associated with the EncryptionSpec of the + Dataset they are exported to. + active_learning_config (google.cloud.aiplatform_v1beta1.types.ActiveLearningConfig): + Parameters that configure the active learning + pipeline. Active learning will label the data + incrementally via several iterations. For every + iteration, it will select a batch of data based + on the sampling strategy. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + datasets = proto.RepeatedField( + proto.STRING, + number=3, + ) + annotation_labels = proto.MapField( + proto.STRING, + proto.STRING, + number=12, + ) + labeler_count = proto.Field( + proto.INT32, + number=4, + ) + instruction_uri = proto.Field( + proto.STRING, + number=5, + ) + inputs_schema_uri = proto.Field( + proto.STRING, + number=6, + ) + inputs = proto.Field( + proto.MESSAGE, + number=7, + message=struct_pb2.Value, + ) + state = proto.Field( + proto.ENUM, + number=8, + enum=job_state.JobState, + ) + labeling_progress = proto.Field( + proto.INT32, + number=13, + ) + current_spend = proto.Field( + proto.MESSAGE, + number=14, + message=money_pb2.Money, + ) + create_time = proto.Field( + proto.MESSAGE, + number=9, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=10, + message=timestamp_pb2.Timestamp, + ) + error = proto.Field( + proto.MESSAGE, + number=22, + message=status_pb2.Status, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=11, + ) + specialist_pools = proto.RepeatedField( + proto.STRING, + number=16, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=20, + message=gca_encryption_spec.EncryptionSpec, + ) + active_learning_config = proto.Field( + proto.MESSAGE, + number=21, + message='ActiveLearningConfig', + ) + + +class ActiveLearningConfig(proto.Message): + r"""Parameters that configure the active learning pipeline. + Active learning will label the data incrementally by several + iterations. For every iteration, it will select a batch of data + based on the sampling strategy. + + Attributes: + max_data_item_count (int): + Max number of human labeled DataItems. + max_data_item_percentage (int): + Max percent of total DataItems for human + labeling. + sample_config (google.cloud.aiplatform_v1beta1.types.SampleConfig): + Active learning data sampling config. For + every active learning labeling iteration, it + will select a batch of data based on the + sampling strategy. 
+        training_config (google.cloud.aiplatform_v1beta1.types.TrainingConfig):
+            CMLE training config. For every active
+            learning labeling iteration, the system will
+            train a machine learning model on CMLE. The
+            trained model will be used by the data sampling
+            algorithm to select DataItems.
+    """
+
+    max_data_item_count = proto.Field(
+        proto.INT64,
+        number=1,
+        oneof='human_labeling_budget',
+    )
+    max_data_item_percentage = proto.Field(
+        proto.INT32,
+        number=2,
+        oneof='human_labeling_budget',
+    )
+    sample_config = proto.Field(
+        proto.MESSAGE,
+        number=3,
+        message='SampleConfig',
+    )
+    training_config = proto.Field(
+        proto.MESSAGE,
+        number=4,
+        message='TrainingConfig',
+    )
+
+
+class SampleConfig(proto.Message):
+    r"""Active learning data sampling config. For every active
+    learning labeling iteration, it will select a batch of data
+    based on the sampling strategy.
+
+    Attributes:
+        initial_batch_sample_percentage (int):
+            The percentage of data needed to be labeled
+            in the first batch.
+        following_batch_sample_percentage (int):
+            The percentage of data needed to be labeled
+            in each following batch (except the first
+            batch).
+        sample_strategy (google.cloud.aiplatform_v1beta1.types.SampleConfig.SampleStrategy):
+            Field to choose sampling strategy. Sampling
+            strategy will decide which data should be
+            selected for human labeling in every batch.
+    """
+    class SampleStrategy(proto.Enum):
+        r"""Sample strategy decides which subset of DataItems should be
+        selected for human labeling in every batch.
+        """
+        SAMPLE_STRATEGY_UNSPECIFIED = 0
+        UNCERTAINTY = 1
+
+    initial_batch_sample_percentage = proto.Field(
+        proto.INT32,
+        number=1,
+        oneof='initial_batch_sample_size',
+    )
+    following_batch_sample_percentage = proto.Field(
+        proto.INT32,
+        number=3,
+        oneof='following_batch_sample_size',
+    )
+    sample_strategy = proto.Field(
+        proto.ENUM,
+        number=5,
+        enum=SampleStrategy,
+    )
+
+
+class TrainingConfig(proto.Message):
+    r"""CMLE training config. For every active learning labeling
+    iteration, the system will train a machine learning model on
+    CMLE. The trained model will be used by the data sampling
+    algorithm to select DataItems.
+
+    Attributes:
+        timeout_training_milli_hours (int):
+            The timeout hours for the CMLE training job,
+            expressed in milli hours, i.e. a value of 1,000
+            in this field means 1 hour.
+    """
+
+    timeout_training_milli_hours = proto.Field(
+        proto.INT64,
+        number=1,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset.py
new file mode 100644
index 0000000000..a731843354
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset.py
@@ -0,0 +1,220 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
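A sketch of the ``Dataset`` and ``ImportDataConfig`` messages defined in the module that follows. This is not part of the generated sources; the bucket paths and schema URIs are illustrative placeholders.

```python
from google.cloud.aiplatform_v1beta1 import types
from google.protobuf import struct_pb2

# A Dataset whose metadata schema lives under the public schema bucket.
dataset = types.Dataset(
    display_name="flowers",
    metadata_schema_uri=(
        "gs://google-cloud-aiplatform/schema/dataset/metadata/"
        "image_1.0.0.yaml"  # illustrative schema file name
    ),
    metadata=struct_pb2.Value(struct_value=struct_pb2.Struct()),
)

# An import config pointing at a JSONL index file in Cloud Storage.
import_config = types.ImportDataConfig(
    gcs_source=types.GcsSource(uris=["gs://my-bucket/import_file.jsonl"]),
    import_schema_uri=(
        "gs://google-cloud-aiplatform/schema/dataset/ioformat/"
        "image_classification_single_label_io_format_1.0.0.yaml"  # illustrative
    ),
    data_item_labels={"source": "2021-07-crawl"},
)
```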
+#
+import proto  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec
+from google.cloud.aiplatform_v1beta1.types import io
+from google.protobuf import struct_pb2  # type: ignore
+from google.protobuf import timestamp_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1',
+    manifest={
+        'Dataset',
+        'ImportDataConfig',
+        'ExportDataConfig',
+    },
+)
+
+
+class Dataset(proto.Message):
+    r"""A collection of DataItems and Annotations on them.
+    Attributes:
+        name (str):
+            Output only. The resource name of the
+            Dataset.
+        display_name (str):
+            Required. The user-defined name of the
+            Dataset. The name can be up to 128 characters
+            long and can consist of any UTF-8 characters.
+        metadata_schema_uri (str):
+            Required. Points to a YAML file stored on
+            Google Cloud Storage describing additional
+            information about the Dataset. The schema is
+            defined as an OpenAPI 3.0.2 Schema Object. The
+            schema files that can be used here are found in
+            gs://google-cloud-aiplatform/schema/dataset/metadata/.
+        metadata (google.protobuf.struct_pb2.Value):
+            Required. Additional information about the
+            Dataset.
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this Dataset was
+            created.
+        update_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this Dataset was
+            last updated.
+        etag (str):
+            Used to perform consistent read-modify-write
+            updates. If not set, a blind "overwrite" update
+            happens.
+        labels (Sequence[google.cloud.aiplatform_v1beta1.types.Dataset.LabelsEntry]):
+            The labels with user-defined metadata to organize your
+            Datasets.
+
+            Label keys and values can be no longer than 64 characters
+            (Unicode codepoints), can only contain lowercase letters,
+            numeric characters, underscores and dashes. International
+            characters are allowed. No more than 64 user labels can be
+            associated with one Dataset (System labels are excluded).
+
+            See https://goo.gl/xmQnxf for more information and examples
+            of labels. System reserved label keys are prefixed with
+            "aiplatform.googleapis.com/" and are immutable. Following
+            system labels exist for each Dataset:
+
+            -  "aiplatform.googleapis.com/dataset_metadata_schema":
+               output only, its value is the
+               [metadata_schema's][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri]
+               title.
+        encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec):
+            Customer-managed encryption key spec for a
+            Dataset. If set, this Dataset and all
+            sub-resources of this Dataset will be secured by
+            this key.
+    """
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    display_name = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    metadata_schema_uri = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    metadata = proto.Field(
+        proto.MESSAGE,
+        number=8,
+        message=struct_pb2.Value,
+    )
+    create_time = proto.Field(
+        proto.MESSAGE,
+        number=4,
+        message=timestamp_pb2.Timestamp,
+    )
+    update_time = proto.Field(
+        proto.MESSAGE,
+        number=5,
+        message=timestamp_pb2.Timestamp,
+    )
+    etag = proto.Field(
+        proto.STRING,
+        number=6,
+    )
+    labels = proto.MapField(
+        proto.STRING,
+        proto.STRING,
+        number=7,
+    )
+    encryption_spec = proto.Field(
+        proto.MESSAGE,
+        number=11,
+        message=gca_encryption_spec.EncryptionSpec,
+    )
+
+
+class ImportDataConfig(proto.Message):
+    r"""Describes the location from where we import data into a
+    Dataset, together with the labels that will be applied to the
+    DataItems and the Annotations.
+
+    Attributes:
+        gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource):
+            The Google Cloud Storage location for the
+            input content.
+        data_item_labels (Sequence[google.cloud.aiplatform_v1beta1.types.ImportDataConfig.DataItemLabelsEntry]):
+            Labels that will be applied to newly imported DataItems. If
+            an identical DataItem as one being imported already exists
+            in the Dataset, then these labels will be appended to those
+            of the already existing one, and if a label with an
+            identical key was imported before, the old label value will
+            be overwritten. If two DataItems are identical in the same
+            import data operation, the labels will be combined and if
+            key collision happens in this case, one of the values will
+            be picked randomly. Two DataItems are considered identical
+            if their content bytes are identical (e.g. image bytes or
+            pdf bytes). These labels will be overridden by Annotation
+            labels specified inside index file referenced by
+            [import_schema_uri][google.cloud.aiplatform.v1beta1.ImportDataConfig.import_schema_uri],
+            e.g. jsonl file.
+        import_schema_uri (str):
+            Required. Points to a YAML file stored on Google Cloud
+            Storage describing the import format. Validation will be
+            done against the schema. The schema is defined as an
+            OpenAPI 3.0.2 Schema Object.
+    """
+
+    gcs_source = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        oneof='source',
+        message=io.GcsSource,
+    )
+    data_item_labels = proto.MapField(
+        proto.STRING,
+        proto.STRING,
+        number=2,
+    )
+    import_schema_uri = proto.Field(
+        proto.STRING,
+        number=4,
+    )
+
+
+class ExportDataConfig(proto.Message):
+    r"""Describes what part of the Dataset is to be exported, the
+    destination of the export and how to export.
+
+    Attributes:
+        gcs_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination):
+            The Google Cloud Storage location where the output is to be
+            written to. In the given directory a new directory will be
+            created with name:
+            ``export-data-<dataset-display-name>-<timestamp-of-export-call>``
+            where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601
+            format. All export output will be written into that
+            directory. Inside that directory, annotations with the same
+            schema will be grouped into sub directories which are named
+            with the corresponding annotations' schema title. Inside
+            these sub directories, a schema.yaml will be created to
+            describe the output format.
+        annotations_filter (str):
+            A filter on Annotations of the Dataset. Only Annotations on
+            to-be-exported DataItems (specified by [data_items_filter][])
+            that match this filter will be exported.
The filter syntax + is the same as in + [ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. + """ + + gcs_destination = proto.Field( + proto.MESSAGE, + number=1, + oneof='destination', + message=io.GcsDestination, + ) + annotations_filter = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset_service.py new file mode 100644 index 0000000000..fa0a4f59c2 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset_service.py @@ -0,0 +1,542 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import annotation +from google.cloud.aiplatform_v1beta1.types import data_item +from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset +from google.cloud.aiplatform_v1beta1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'CreateDatasetRequest', + 'CreateDatasetOperationMetadata', + 'GetDatasetRequest', + 'UpdateDatasetRequest', + 'ListDatasetsRequest', + 'ListDatasetsResponse', + 'DeleteDatasetRequest', + 'ImportDataRequest', + 'ImportDataResponse', + 'ImportDataOperationMetadata', + 'ExportDataRequest', + 'ExportDataResponse', + 'ExportDataOperationMetadata', + 'ListDataItemsRequest', + 'ListDataItemsResponse', + 'GetAnnotationSpecRequest', + 'ListAnnotationsRequest', + 'ListAnnotationsResponse', + }, +) + + +class CreateDatasetRequest(proto.Message): + r"""Request message for + [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + Dataset in. Format: + ``projects/{project}/locations/{location}`` + dataset (google.cloud.aiplatform_v1beta1.types.Dataset): + Required. The Dataset to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + dataset = proto.Field( + proto.MESSAGE, + number=2, + message=gca_dataset.Dataset, + ) + + +class CreateDatasetOperationMetadata(proto.Message): + r"""Runtime operation information for + [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class GetDatasetRequest(proto.Message): + r"""Request message for + [DatasetService.GetDataset][google.cloud.aiplatform.v1beta1.DatasetService.GetDataset]. + + Attributes: + name (str): + Required. The name of the Dataset resource. 
+ read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class UpdateDatasetRequest(proto.Message): + r"""Request message for + [DatasetService.UpdateDataset][google.cloud.aiplatform.v1beta1.DatasetService.UpdateDataset]. + + Attributes: + dataset (google.cloud.aiplatform_v1beta1.types.Dataset): + Required. The Dataset which replaces the + resource on the server. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the resource. For the + ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + Updatable fields: + + - ``display_name`` + - ``description`` + - ``labels`` + """ + + dataset = proto.Field( + proto.MESSAGE, + number=1, + message=gca_dataset.Dataset, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class ListDatasetsRequest(proto.Message): + r"""Request message for + [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. + + Attributes: + parent (str): + Required. The name of the Dataset's parent resource. Format: + ``projects/{project}/locations/{location}`` + filter (str): + An expression for filtering the results of the request. For + field names both snake_case and camelCase are supported. + + - ``display_name``: supports = and != + - ``metadata_schema_uri``: supports = and != + - ``labels`` supports general map functions that is: + + - ``labels.key=value`` - key:value equality + - \`labels.key:\* or labels:key - key existence + - A key including a space must be quoted. + ``labels."a key"``. + + Some examples: + + - ``displayName="myDisplayName"`` + - ``labels.myKey="myValue"`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + order_by (str): + A comma-separated list of fields to order by, sorted in + ascending order. Use "desc" after a field name for + descending. Supported fields: + + - ``display_name`` + - ``create_time`` + - ``update_time`` + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + order_by = proto.Field( + proto.STRING, + number=6, + ) + + +class ListDatasetsResponse(proto.Message): + r"""Response message for + [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. + + Attributes: + datasets (Sequence[google.cloud.aiplatform_v1beta1.types.Dataset]): + A list of Datasets that matches the specified + filter in the request. + next_page_token (str): + The standard List next-page token. + """ + + @property + def raw_page(self): + return self + + datasets = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_dataset.Dataset, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteDatasetRequest(proto.Message): + r"""Request message for + [DatasetService.DeleteDataset][google.cloud.aiplatform.v1beta1.DatasetService.DeleteDataset]. + + Attributes: + name (str): + Required. 
The resource name of the Dataset to delete. + Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ImportDataRequest(proto.Message): + r"""Request message for + [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. + + Attributes: + name (str): + Required. The name of the Dataset resource. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + import_configs (Sequence[google.cloud.aiplatform_v1beta1.types.ImportDataConfig]): + Required. The desired input locations. The + contents of all input locations will be imported + in one batch. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + import_configs = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=gca_dataset.ImportDataConfig, + ) + + +class ImportDataResponse(proto.Message): + r"""Response message for + [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. + """ + + +class ImportDataOperationMetadata(proto.Message): + r"""Runtime operation information for + [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The common part of the operation metadata. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class ExportDataRequest(proto.Message): + r"""Request message for + [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. + + Attributes: + name (str): + Required. The name of the Dataset resource. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + export_config (google.cloud.aiplatform_v1beta1.types.ExportDataConfig): + Required. The desired output location. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + export_config = proto.Field( + proto.MESSAGE, + number=2, + message=gca_dataset.ExportDataConfig, + ) + + +class ExportDataResponse(proto.Message): + r"""Response message for + [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. + + Attributes: + exported_files (Sequence[str]): + All of the files that are exported in this + export operation. + """ + + exported_files = proto.RepeatedField( + proto.STRING, + number=1, + ) + + +class ExportDataOperationMetadata(proto.Message): + r"""Runtime operation information for + [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The common part of the operation metadata. + gcs_output_directory (str): + A Google Cloud Storage directory which path + ends with '/'. The exported data is stored in + the directory. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + gcs_output_directory = proto.Field( + proto.STRING, + number=2, + ) + + +class ListDataItemsRequest(proto.Message): + r"""Request message for + [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. + + Attributes: + parent (str): + Required. The resource name of the Dataset to list DataItems + from. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}`` + filter (str): + The standard list filter. + page_size (int): + The standard list page size. 
+ page_token (str): + The standard list page token. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + order_by (str): + A comma-separated list of fields to order by, + sorted in ascending order. Use "desc" after a + field name for descending. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + order_by = proto.Field( + proto.STRING, + number=6, + ) + + +class ListDataItemsResponse(proto.Message): + r"""Response message for + [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. + + Attributes: + data_items (Sequence[google.cloud.aiplatform_v1beta1.types.DataItem]): + A list of DataItems that matches the + specified filter in the request. + next_page_token (str): + The standard List next-page token. + """ + + @property + def raw_page(self): + return self + + data_items = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=data_item.DataItem, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class GetAnnotationSpecRequest(proto.Message): + r"""Request message for + [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1beta1.DatasetService.GetAnnotationSpec]. + + Attributes: + name (str): + Required. The name of the AnnotationSpec resource. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}`` + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class ListAnnotationsRequest(proto.Message): + r"""Request message for + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. + + Attributes: + parent (str): + Required. The resource name of the DataItem to list + Annotations from. Format: + ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}`` + filter (str): + The standard list filter. + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + order_by (str): + A comma-separated list of fields to order by, + sorted in ascending order. Use "desc" after a + field name for descending. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + order_by = proto.Field( + proto.STRING, + number=6, + ) + + +class ListAnnotationsResponse(proto.Message): + r"""Response message for + [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. + + Attributes: + annotations (Sequence[google.cloud.aiplatform_v1beta1.types.Annotation]): + A list of Annotations that matches the + specified filter in the request. + next_page_token (str): + The standard List next-page token. 
+ """ + + @property + def raw_page(self): + return self + + annotations = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=annotation.Annotation, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_index_ref.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_index_ref.py new file mode 100644 index 0000000000..234bfc9b59 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_index_ref.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'DeployedIndexRef', + }, +) + + +class DeployedIndexRef(proto.Message): + r"""Points to a DeployedIndex. + Attributes: + index_endpoint (str): + Immutable. A resource name of the + IndexEndpoint. + deployed_index_id (str): + Immutable. The ID of the DeployedIndex in the + above IndexEndpoint. + """ + + index_endpoint = proto.Field( + proto.STRING, + number=1, + ) + deployed_index_id = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_model_ref.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_model_ref.py new file mode 100644 index 0000000000..2fb07a25bf --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_model_ref.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'DeployedModelRef', + }, +) + + +class DeployedModelRef(proto.Message): + r"""Points to a DeployedModel. + Attributes: + endpoint (str): + Immutable. A resource name of an Endpoint. + deployed_model_id (str): + Immutable. An ID of a DeployedModel in the + above Endpoint. 
+ """ + + endpoint = proto.Field( + proto.STRING, + number=1, + ) + deployed_model_id = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/encryption_spec.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/encryption_spec.py new file mode 100644 index 0000000000..ad7e6df830 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/encryption_spec.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'EncryptionSpec', + }, +) + + +class EncryptionSpec(proto.Message): + r"""Represents a customer-managed encryption key spec that can be + applied to a top-level resource. + + Attributes: + kms_key_name (str): + Required. The Cloud KMS resource identifier of the customer + managed encryption key used to protect a resource. Has the + form: + ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. + The key needs to be in the same region as where the compute + resource is created. + """ + + kms_key_name = proto.Field( + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint.py new file mode 100644 index 0000000000..c555d5e8e4 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint.py @@ -0,0 +1,259 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import machine_resources +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'Endpoint', + 'DeployedModel', + }, +) + + +class Endpoint(proto.Message): + r"""Models are deployed into it, and afterwards Endpoint is + called to obtain predictions and explanations. + + Attributes: + name (str): + Output only. The resource name of the + Endpoint. + display_name (str): + Required. 
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint.py
new file mode 100644
index 0000000000..c555d5e8e4
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint.py
@@ -0,0 +1,259 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec
+from google.cloud.aiplatform_v1beta1.types import explanation
+from google.cloud.aiplatform_v1beta1.types import machine_resources
+from google.protobuf import timestamp_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1',
+    manifest={
+        'Endpoint',
+        'DeployedModel',
+    },
+)
+
+
+class Endpoint(proto.Message):
+    r"""A resource into which Models are deployed, and which is
+    afterwards called to obtain predictions and explanations.
+
+    Attributes:
+        name (str):
+            Output only. The resource name of the
+            Endpoint.
+        display_name (str):
+            Required. The display name of the Endpoint.
+            The name can be up to 128 characters long and
+            can consist of any UTF-8 characters.
+        description (str):
+            The description of the Endpoint.
+        deployed_models (Sequence[google.cloud.aiplatform_v1beta1.types.DeployedModel]):
+            Output only. The models deployed in this Endpoint. To add or
+            remove DeployedModels use
+            [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]
+            and
+            [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]
+            respectively.
+        traffic_split (Sequence[google.cloud.aiplatform_v1beta1.types.Endpoint.TrafficSplitEntry]):
+            A map from a DeployedModel's ID to the
+            percentage of this Endpoint's traffic that
+            should be forwarded to that DeployedModel.
+            If a DeployedModel's ID is not listed in this
+            map, then it receives no traffic.
+
+            The traffic percentage values must add up to
+            100, or the map must be empty if the Endpoint is
+            not to accept any traffic at the moment.
+        etag (str):
+            Used to perform consistent read-modify-write
+            updates. If not set, a blind "overwrite" update
+            happens.
+        labels (Sequence[google.cloud.aiplatform_v1beta1.types.Endpoint.LabelsEntry]):
+            The labels with user-defined metadata to
+            organize your Endpoints.
+            Label keys and values can be no longer than 64
+            characters (Unicode codepoints), can only
+            contain lowercase letters, numeric characters,
+            underscores and dashes. International characters
+            are allowed.
+            See https://goo.gl/xmQnxf for more information
+            and examples of labels.
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this Endpoint was
+            created.
+        update_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this Endpoint was
+            last updated.
+        encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec):
+            Customer-managed encryption key spec for an
+            Endpoint. If set, this Endpoint and all
+            sub-resources of this Endpoint will be secured
+            by this key.
+    """
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    display_name = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    description = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    deployed_models = proto.RepeatedField(
+        proto.MESSAGE,
+        number=4,
+        message='DeployedModel',
+    )
+    traffic_split = proto.MapField(
+        proto.STRING,
+        proto.INT32,
+        number=5,
+    )
+    etag = proto.Field(
+        proto.STRING,
+        number=6,
+    )
+    labels = proto.MapField(
+        proto.STRING,
+        proto.STRING,
+        number=7,
+    )
+    create_time = proto.Field(
+        proto.MESSAGE,
+        number=8,
+        message=timestamp_pb2.Timestamp,
+    )
+    update_time = proto.Field(
+        proto.MESSAGE,
+        number=9,
+        message=timestamp_pb2.Timestamp,
+    )
+    encryption_spec = proto.Field(
+        proto.MESSAGE,
+        number=10,
+        message=gca_encryption_spec.EncryptionSpec,
+    )
+
+
+class DeployedModel(proto.Message):
+    r"""A deployment of a Model. Endpoints contain one or more
+    DeployedModels.
+
+    Attributes:
+        dedicated_resources (google.cloud.aiplatform_v1beta1.types.DedicatedResources):
+            A description of resources that are dedicated
+            to the DeployedModel, and that need a higher
+            degree of manual configuration.
+        automatic_resources (google.cloud.aiplatform_v1beta1.types.AutomaticResources):
+            A description of resources that are decided to
+            a large degree by AI Platform, and require
+            only a modest additional configuration.
+        id (str):
+            Output only. The ID of the DeployedModel.
+        model (str):
+            Required. The name of the Model that this is
+            the deployment of.
Note that the Model may be in + a different location than the DeployedModel's + Endpoint. + display_name (str): + The display name of the DeployedModel. If not provided upon + creation, the Model's display_name is used. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when the DeployedModel + was created. + explanation_spec (google.cloud.aiplatform_v1beta1.types.ExplanationSpec): + Explanation configuration for this DeployedModel. + + When deploying a Model using + [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel], + this value overrides the value of + [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec]. + All fields of + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + are optional in the request. If a field of + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + is not populated, the value of the same field of + [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] + is inherited. If the corresponding + [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] + is not populated, all fields of the + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + will be used for the explanation configuration. + service_account (str): + The service account that the DeployedModel's container runs + as. Specify the email address of the service account. If + this service account is not specified, the container runs as + a service account that doesn't have access to the resource + project. + + Users deploying the Model must have the + ``iam.serviceAccounts.actAs`` permission on this service + account. + enable_container_logging (bool): + If true, the container of the DeployedModel instances will + send ``stderr`` and ``stdout`` streams to Stackdriver + Logging. + + Only supported for custom-trained Models and AutoML Tabular + Models. + enable_access_logging (bool): + These logs are like standard server access + logs, containing information like timestamp and + latency for each prediction request. + Note that Stackdriver logs may incur a cost, + especially if your project receives prediction + requests at a high queries per second rate + (QPS). Estimate your costs before enabling this + option. 
+ """ + + dedicated_resources = proto.Field( + proto.MESSAGE, + number=7, + oneof='prediction_resources', + message=machine_resources.DedicatedResources, + ) + automatic_resources = proto.Field( + proto.MESSAGE, + number=8, + oneof='prediction_resources', + message=machine_resources.AutomaticResources, + ) + id = proto.Field( + proto.STRING, + number=1, + ) + model = proto.Field( + proto.STRING, + number=2, + ) + display_name = proto.Field( + proto.STRING, + number=3, + ) + create_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + explanation_spec = proto.Field( + proto.MESSAGE, + number=9, + message=explanation.ExplanationSpec, + ) + service_account = proto.Field( + proto.STRING, + number=11, + ) + enable_container_logging = proto.Field( + proto.BOOL, + number=12, + ) + enable_access_logging = proto.Field( + proto.BOOL, + number=13, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint_service.py new file mode 100644 index 0000000000..674662697d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint_service.py @@ -0,0 +1,380 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint +from google.cloud.aiplatform_v1beta1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'CreateEndpointRequest', + 'CreateEndpointOperationMetadata', + 'GetEndpointRequest', + 'ListEndpointsRequest', + 'ListEndpointsResponse', + 'UpdateEndpointRequest', + 'DeleteEndpointRequest', + 'DeployModelRequest', + 'DeployModelResponse', + 'DeployModelOperationMetadata', + 'UndeployModelRequest', + 'UndeployModelResponse', + 'UndeployModelOperationMetadata', + }, +) + + +class CreateEndpointRequest(proto.Message): + r"""Request message for + [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + Endpoint in. Format: + ``projects/{project}/locations/{location}`` + endpoint (google.cloud.aiplatform_v1beta1.types.Endpoint): + Required. The Endpoint to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + endpoint = proto.Field( + proto.MESSAGE, + number=2, + message=gca_endpoint.Endpoint, + ) + + +class CreateEndpointOperationMetadata(proto.Message): + r"""Runtime operation information for + [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. 
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class GetEndpointRequest(proto.Message): + r"""Request message for + [EndpointService.GetEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.GetEndpoint] + + Attributes: + name (str): + Required. The name of the Endpoint resource. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListEndpointsRequest(proto.Message): + r"""Request message for + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. + + Attributes: + parent (str): + Required. The resource name of the Location from which to + list the Endpoints. Format: + ``projects/{project}/locations/{location}`` + filter (str): + Optional. An expression for filtering the results of the + request. For field names both snake_case and camelCase are + supported. + + - ``endpoint`` supports = and !=. ``endpoint`` represents + the Endpoint ID, i.e. the last segment of the Endpoint's + [resource + name][google.cloud.aiplatform.v1beta1.Endpoint.name]. + - ``display_name`` supports = and, != + - ``labels`` supports general map functions that is: + + - ``labels.key=value`` - key:value equality + - \`labels.key:\* or labels:key - key existence + - A key including a space must be quoted. + ``labels."a key"``. + + Some examples: + + - ``endpoint=1`` + - ``displayName="myDisplayName"`` + - ``labels.myKey="myValue"`` + page_size (int): + Optional. The standard list page size. + page_token (str): + Optional. The standard list page token. Typically obtained + via + [ListEndpointsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListEndpointsResponse.next_page_token] + of the previous + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. Mask specifying which fields to + read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListEndpointsResponse(proto.Message): + r"""Response message for + [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. + + Attributes: + endpoints (Sequence[google.cloud.aiplatform_v1beta1.types.Endpoint]): + List of Endpoints in the requested page. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListEndpointsRequest.page_token][google.cloud.aiplatform.v1beta1.ListEndpointsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + endpoints = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_endpoint.Endpoint, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateEndpointRequest(proto.Message): + r"""Request message for + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. + + Attributes: + endpoint (google.cloud.aiplatform_v1beta1.types.Endpoint): + Required. The Endpoint which replaces the + resource on the server. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the resource. 
See + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + """ + + endpoint = proto.Field( + proto.MESSAGE, + number=1, + message=gca_endpoint.Endpoint, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class DeleteEndpointRequest(proto.Message): + r"""Request message for + [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeleteEndpoint]. + + Attributes: + name (str): + Required. The name of the Endpoint resource to be deleted. + Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class DeployModelRequest(proto.Message): + r"""Request message for + [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. + + Attributes: + endpoint (str): + Required. The name of the Endpoint resource into which to + deploy a Model. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + deployed_model (google.cloud.aiplatform_v1beta1.types.DeployedModel): + Required. The DeployedModel to be created within the + Endpoint. Note that + [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + must be updated for the DeployedModel to start receiving + traffic, either as part of this call, or via + [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. + traffic_split (Sequence[google.cloud.aiplatform_v1beta1.types.DeployModelRequest.TrafficSplitEntry]): + A map from a DeployedModel's ID to the percentage of this + Endpoint's traffic that should be forwarded to that + DeployedModel. + + If this field is non-empty, then the Endpoint's + [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + will be overwritten with it. To refer to the ID of the just + being deployed Model, a "0" should be used, and the actual + ID of the new DeployedModel will be filled in its place by + this method. The traffic percentage values must add up to + 100. + + If this field is empty, then the Endpoint's + [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] + is not updated. + """ + + endpoint = proto.Field( + proto.STRING, + number=1, + ) + deployed_model = proto.Field( + proto.MESSAGE, + number=2, + message=gca_endpoint.DeployedModel, + ) + traffic_split = proto.MapField( + proto.STRING, + proto.INT32, + number=3, + ) + + +class DeployModelResponse(proto.Message): + r"""Response message for + [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. + + Attributes: + deployed_model (google.cloud.aiplatform_v1beta1.types.DeployedModel): + The DeployedModel that had been deployed in + the Endpoint. + """ + + deployed_model = proto.Field( + proto.MESSAGE, + number=1, + message=gca_endpoint.DeployedModel, + ) + + +class DeployModelOperationMetadata(proto.Message): + r"""Runtime operation information for + [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class UndeployModelRequest(proto.Message): + r"""Request message for + [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. + + Attributes: + endpoint (str): + Required. 
+            The name of the Endpoint resource from which to
+            undeploy a Model. Format:
+            ``projects/{project}/locations/{location}/endpoints/{endpoint}``
+        deployed_model_id (str):
+            Required. The ID of the DeployedModel to be
+            undeployed from the Endpoint.
+        traffic_split (Sequence[google.cloud.aiplatform_v1beta1.types.UndeployModelRequest.TrafficSplitEntry]):
+            If this field is provided, then the Endpoint's
+            [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split]
+            will be overwritten with it. If the last DeployedModel is
+            being undeployed from the Endpoint, the
+            [Endpoint.traffic_split] will always end up empty when this
+            call returns. A DeployedModel will be successfully
+            undeployed only if it doesn't have any traffic assigned to
+            it when this method executes, or if this field unassigns
+            any traffic from it.
+    """
+
+    endpoint = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    deployed_model_id = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    traffic_split = proto.MapField(
+        proto.STRING,
+        proto.INT32,
+        number=3,
+    )
+
+
+class UndeployModelResponse(proto.Message):
+    r"""Response message for
+    [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel].
+    """
+
+
+class UndeployModelOperationMetadata(proto.Message):
+    r"""Runtime operation information for
+    [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel].
+
+    Attributes:
+        generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata):
+            The operation generic information.
+    """
+
+    generic_metadata = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=operation.GenericOperationMetadata,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
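The traffic_split bookkeeping is the subtle part of the two RPCs above; a minimal sketch building both requests, assuming only the generated types in this file (resource names and IDs are placeholders):

    from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint
    from google.cloud.aiplatform_v1beta1.types import endpoint_service

    ENDPOINT = "projects/my-project/locations/us-central1/endpoints/123"  # placeholder

    # Deploy and route all traffic to the new model: the key "0" stands for
    # the DeployedModel created by this very request, and the percentage
    # values must add up to 100.
    deploy = endpoint_service.DeployModelRequest(
        endpoint=ENDPOINT,
        deployed_model=gca_endpoint.DeployedModel(
            model="projects/my-project/locations/us-central1/models/456",  # placeholder
        ),
        traffic_split={"0": 100},
    )

    # Undeploying succeeds only once the model receives no traffic, either
    # already or via the traffic_split passed here.
    undeploy = endpoint_service.UndeployModelRequest(
        endpoint=ENDPOINT,
        deployed_model_id="789",  # placeholder DeployedModel ID
    )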
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/entity_type.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/entity_type.py
new file mode 100644
index 0000000000..550365e621
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/entity_type.py
@@ -0,0 +1,116 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring
+from google.protobuf import timestamp_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1',
+    manifest={
+        'EntityType',
+    },
+)
+
+
+class EntityType(proto.Message):
+    r"""An entity type is a type of object in a system that needs to
+    be modeled and to have information stored about it. For example,
+    driver is an entity type, and driver0 is an instance of the
+    entity type driver.
+
+    Attributes:
+        name (str):
+            Immutable. Name of the EntityType. Format:
+            ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
+
+            The last part entity_type is assigned by the client. The
+            entity_type can be up to 64 characters long and can consist
+            only of ASCII Latin letters A-Z and a-z and underscore(_),
+            and ASCII digits 0-9 starting with a letter. The value will
+            be unique given a featurestore.
+        description (str):
+            Optional. Description of the EntityType.
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this EntityType
+            was created.
+        update_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this EntityType
+            was most recently updated.
+        labels (Sequence[google.cloud.aiplatform_v1beta1.types.EntityType.LabelsEntry]):
+            Optional. The labels with user-defined
+            metadata to organize your EntityTypes.
+            Label keys and values can be no longer than 64
+            characters (Unicode codepoints), can only
+            contain lowercase letters, numeric characters,
+            underscores and dashes. International characters
+            are allowed.
+            See https://goo.gl/xmQnxf for more information
+            on and examples of labels. No more than 64 user
+            labels can be associated with one EntityType
+            (System labels are excluded).
+            System reserved label keys are prefixed with
+            "aiplatform.googleapis.com/" and are immutable.
+        etag (str):
+            Optional. Used to perform consistent
+            read-modify-write updates. If not set, a blind
+            "overwrite" update happens.
+        monitoring_config (google.cloud.aiplatform_v1beta1.types.FeaturestoreMonitoringConfig):
+            Optional. The default monitoring configuration for all
+            Features under this EntityType.
+
+            If this is populated with
+            [FeaturestoreMonitoringConfig.monitoring_interval]
+            specified, snapshot analysis monitoring is enabled.
+            Otherwise, snapshot analysis monitoring is disabled.
+    """
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    description = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    create_time = proto.Field(
+        proto.MESSAGE,
+        number=3,
+        message=timestamp_pb2.Timestamp,
+    )
+    update_time = proto.Field(
+        proto.MESSAGE,
+        number=4,
+        message=timestamp_pb2.Timestamp,
+    )
+    labels = proto.MapField(
+        proto.STRING,
+        proto.STRING,
+        number=6,
+    )
+    etag = proto.Field(
+        proto.STRING,
+        number=7,
+    )
+    monitoring_config = proto.Field(
+        proto.MESSAGE,
+        number=8,
+        message=featurestore_monitoring.FeaturestoreMonitoringConfig,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
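A quick sketch of the message defined above; the featurestore path components and label values are placeholders:

    from google.cloud.aiplatform_v1beta1.types import entity_type

    driver = entity_type.EntityType(
        name=(
            "projects/my-project/locations/us-central1/"
            "featurestores/my-fs/entityTypes/driver"  # last segment <= 64 chars
        ),
        description="Drivers, as in the docstring's example.",
        labels={"team": "mobility"},  # lowercase, <= 64 Unicode codepoints
    )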
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/env_var.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/env_var.py
new file mode 100644
index 0000000000..2775473b9e
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/env_var.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1',
+    manifest={
+        'EnvVar',
+    },
+)
+
+
+class EnvVar(proto.Message):
+    r"""Represents an environment variable present in a Container or
+    Python Module.
+
+    Attributes:
+        name (str):
+            Required. Name of the environment variable.
+            Must be a valid C identifier.
+        value (str):
+            Required. Variables that reference a $(VAR_NAME) are
+            expanded using the previously defined environment variables
+            in the container and any service environment variables. If
+            a variable cannot be resolved, the reference in the input
+            string will be unchanged. The $(VAR_NAME) syntax can be
+            escaped with a double $$, i.e.: $$(VAR_NAME). Escaped
+            references will never be expanded, regardless of whether the
+            variable exists or not.
+    """
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    value = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
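A short sketch of the expansion rules documented above; the variable names are hypothetical:

    from google.cloud.aiplatform_v1beta1.types import env_var

    base = env_var.EnvVar(name="BASE_DIR", value="/models")
    # $(BASE_DIR) is expanded from the previously defined variable.
    path = env_var.EnvVar(name="MODEL_PATH", value="$(BASE_DIR)/v1")
    # $$(BASE_DIR) is escaped and stays literal.
    raw = env_var.EnvVar(name="LITERAL", value="$$(BASE_DIR)")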
+ """ + TYPE_UNSPECIFIED = 0 + INPUT = 1 + OUTPUT = 2 + + artifact = proto.Field( + proto.STRING, + number=1, + ) + execution = proto.Field( + proto.STRING, + number=2, + ) + event_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + type_ = proto.Field( + proto.ENUM, + number=4, + enum=Type, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=5, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/execution.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/execution.py new file mode 100644 index 0000000000..2b2e531a85 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/execution.py @@ -0,0 +1,145 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'Execution', + }, +) + + +class Execution(proto.Message): + r"""Instance of a general execution. + Attributes: + name (str): + Output only. The resource name of the + Execution. + display_name (str): + User provided display name of the Execution. + May be up to 128 Unicode characters. + state (google.cloud.aiplatform_v1beta1.types.Execution.State): + The state of this Execution. This is a + property of the Execution, and does not imply or + capture any ongoing process. This property is + managed by clients (such as Vertex Pipelines) + and the system does not prescribe or check the + validity of state transitions. + etag (str): + An eTag used to perform consistent read- + odify-write updates. If not set, a blind + "overwrite" update happens. + labels (Sequence[google.cloud.aiplatform_v1beta1.types.Execution.LabelsEntry]): + The labels with user-defined metadata to + organize your Executions. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. No more than 64 user labels can be + associated with one Execution (System labels are + excluded). + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Execution + was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Execution + was last updated. + schema_title (str): + The title of the schema describing the + metadata. + Schema title and version is expected to be + registered in earlier Create Schema calls. And + both are used together as unique identifiers to + identify schemas within the local metadata + store. + schema_version (str): + The version of the schema in ``schema_title`` to use. + + Schema title and version is expected to be registered in + earlier Create Schema calls. 
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/execution.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/execution.py
new file mode 100644
index 0000000000..2b2e531a85
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/execution.py
@@ -0,0 +1,145 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.protobuf import struct_pb2  # type: ignore
+from google.protobuf import timestamp_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1',
+    manifest={
+        'Execution',
+    },
+)
+
+
+class Execution(proto.Message):
+    r"""Instance of a general execution.
+
+    Attributes:
+        name (str):
+            Output only. The resource name of the
+            Execution.
+        display_name (str):
+            User provided display name of the Execution.
+            May be up to 128 Unicode characters.
+        state (google.cloud.aiplatform_v1beta1.types.Execution.State):
+            The state of this Execution. This is a
+            property of the Execution, and does not imply or
+            capture any ongoing process. This property is
+            managed by clients (such as Vertex Pipelines)
+            and the system does not prescribe or check the
+            validity of state transitions.
+        etag (str):
+            An eTag used to perform consistent
+            read-modify-write updates. If not set, a blind
+            "overwrite" update happens.
+        labels (Sequence[google.cloud.aiplatform_v1beta1.types.Execution.LabelsEntry]):
+            The labels with user-defined metadata to
+            organize your Executions.
+            Label keys and values can be no longer than 64
+            characters (Unicode codepoints), can only
+            contain lowercase letters, numeric characters,
+            underscores and dashes. International characters
+            are allowed. No more than 64 user labels can be
+            associated with one Execution (System labels are
+            excluded).
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this Execution
+            was created.
+        update_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this Execution
+            was last updated.
+        schema_title (str):
+            The title of the schema describing the
+            metadata.
+            Schema title and version are expected to be
+            registered in earlier Create Schema calls. And
+            both are used together as unique identifiers to
+            identify schemas within the local metadata
+            store.
+        schema_version (str):
+            The version of the schema in ``schema_title`` to use.
+
+            Schema title and version are expected to be registered in
+            earlier Create Schema calls. And both are used together as
+            unique identifiers to identify schemas within the local
+            metadata store.
+        metadata (google.protobuf.struct_pb2.Struct):
+            Properties of the Execution.
+        description (str):
+            Description of the Execution.
+    """
+    class State(proto.Enum):
+        r"""Describes the state of the Execution."""
+        STATE_UNSPECIFIED = 0
+        NEW = 1
+        RUNNING = 2
+        COMPLETE = 3
+        FAILED = 4
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    display_name = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    state = proto.Field(
+        proto.ENUM,
+        number=6,
+        enum=State,
+    )
+    etag = proto.Field(
+        proto.STRING,
+        number=9,
+    )
+    labels = proto.MapField(
+        proto.STRING,
+        proto.STRING,
+        number=10,
+    )
+    create_time = proto.Field(
+        proto.MESSAGE,
+        number=11,
+        message=timestamp_pb2.Timestamp,
+    )
+    update_time = proto.Field(
+        proto.MESSAGE,
+        number=12,
+        message=timestamp_pb2.Timestamp,
+    )
+    schema_title = proto.Field(
+        proto.STRING,
+        number=13,
+    )
+    schema_version = proto.Field(
+        proto.STRING,
+        number=14,
+    )
+    metadata = proto.Field(
+        proto.MESSAGE,
+        number=15,
+        message=struct_pb2.Struct,
+    )
+    description = proto.Field(
+        proto.STRING,
+        number=16,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
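A minimal sketch of the message above; the schema title and version are placeholders that would have been registered in earlier Create Schema calls:

    from google.cloud.aiplatform_v1beta1.types import execution

    run = execution.Execution(
        display_name="train-run-001",
        state=execution.Execution.State.RUNNING,
        schema_title="my.TrainingRun",  # placeholder schema title
        schema_version="0.0.1",         # placeholder schema version
    )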
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation.py
new file mode 100644
index 0000000000..115897a2f9
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation.py
@@ -0,0 +1,644 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import explanation_metadata
+from google.protobuf import struct_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1',
+    manifest={
+        'Explanation',
+        'ModelExplanation',
+        'Attribution',
+        'ExplanationSpec',
+        'ExplanationParameters',
+        'SampledShapleyAttribution',
+        'IntegratedGradientsAttribution',
+        'XraiAttribution',
+        'SmoothGradConfig',
+        'FeatureNoiseSigma',
+        'ExplanationSpecOverride',
+        'ExplanationMetadataOverride',
+    },
+)
+
+
+class Explanation(proto.Message):
+    r"""Explanation of a prediction (provided in
+    [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions])
+    produced by the Model on a given
+    [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances].
+
+    Attributes:
+        attributions (Sequence[google.cloud.aiplatform_v1beta1.types.Attribution]):
+            Output only. Feature attributions grouped by predicted
+            outputs.
+
+            For Models that predict only one output, such as regression
+            Models that predict only one score, there is only one
+            attribution that explains the predicted output. For Models
+            that predict multiple outputs, such as multiclass Models
+            that predict multiple classes, each element explains one
+            specific item.
+            [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]
+            can be used to identify which output this attribution is
+            explaining.
+
+            If users set
+            [ExplanationParameters.top_k][google.cloud.aiplatform.v1beta1.ExplanationParameters.top_k],
+            the attributions are sorted by
+            [instance_output_value][Attribution.instance_output_value]
+            in descending order. If
+            [ExplanationParameters.output_indices][google.cloud.aiplatform.v1beta1.ExplanationParameters.output_indices]
+            is specified, the attributions are stored by
+            [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]
+            in the same order as they appear in the output_indices.
+    """
+
+    attributions = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message='Attribution',
+    )
+
+
+class ModelExplanation(proto.Message):
+    r"""Aggregated explanation metrics for a Model over a set of
+    instances.
+
+    Attributes:
+        mean_attributions (Sequence[google.cloud.aiplatform_v1beta1.types.Attribution]):
+            Output only. Aggregated attributions explaining the Model's
+            prediction outputs over the set of instances. The
+            attributions are grouped by outputs.
+
+            For Models that predict only one output, such as regression
+            Models that predict only one score, there is only one
+            attribution that explains the predicted output. For Models
+            that predict multiple outputs, such as multiclass Models
+            that predict multiple classes, each element explains one
+            specific item.
+            [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]
+            can be used to identify which output this attribution is
+            explaining.
+
+            The
+            [baselineOutputValue][google.cloud.aiplatform.v1beta1.Attribution.baseline_output_value],
+            [instanceOutputValue][google.cloud.aiplatform.v1beta1.Attribution.instance_output_value]
+            and
+            [featureAttributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions]
+            fields are averaged over the test data.
+
+            NOTE: Currently AutoML tabular classification Models produce
+            only one attribution, which averages attributions over all
+            the classes it predicts.
+            [Attribution.approximation_error][google.cloud.aiplatform.v1beta1.Attribution.approximation_error]
+            is not populated.
+    """
+
+    mean_attributions = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message='Attribution',
+    )
+
+
+class Attribution(proto.Message):
+    r"""Attribution that explains a particular prediction output.
+
+    Attributes:
+        baseline_output_value (float):
+            Output only. Model predicted output if the input instance is
+            constructed from the baselines of all the features defined
+            in
+            [ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs].
+            The field name of the output is determined by the key in
+            [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs].
+
+            If the Model's predicted output has multiple dimensions
+            (rank > 1), this is the value in the output located by
+            [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
+
+            If there are multiple baselines, their output values are
+            averaged.
+        instance_output_value (float):
+            Output only. Model predicted output on the corresponding
+            [explanation instance][ExplainRequest.instances]. The field
+            name of the output is determined by the key in
+            [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs].
+
+            If the Model predicted output has multiple dimensions, this
+            is the value in the output located by
+            [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
+        feature_attributions (google.protobuf.struct_pb2.Value):
+            Output only. Attributions of each explained feature.
+            Features are extracted from the [prediction
+            instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]
+            according to [explanation metadata for
+            inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs].
+
+            The value is a struct, whose keys are the names of the
+            features. The values are how much the feature in the
+            [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]
+            contributed to the predicted result.
+
+            The format of the value is determined by the feature's input
+            format:
+
+            -  If the feature is a scalar value, the attribution value
+               is a [floating
+               number][google.protobuf.Value.number_value].
+
+            -  If the feature is an array of scalar values, the
+               attribution value is an
+               [array][google.protobuf.Value.list_value].
+
+            -  If the feature is a struct, the attribution value is a
+               [struct][google.protobuf.Value.struct_value]. The keys in
+               the attribution value struct are the same as the keys in
+               the feature struct. The formats of the values in the
+               attribution struct are determined by the formats of the
+               values in the feature struct.
+
+            The
+            [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1beta1.ExplanationMetadata.feature_attributions_schema_uri]
+            field, pointed to by the
+            [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec]
+            field of the
+            [Endpoint.deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models]
+            object, points to the schema file that describes the
+            features and their attribution values (if it is populated).
+        output_index (Sequence[int]):
+            Output only. The index that locates the explained prediction
+            output.
+
+            If the prediction output is a scalar value, output_index is
+            not populated. If the prediction output has multiple
+            dimensions, the length of the output_index list is the same
+            as the number of dimensions of the output. The i-th element
+            in output_index is the element index of the i-th dimension
+            of the output vector. Indices start from 0.
+        output_display_name (str):
+            Output only. The display name of the output identified by
+            [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
+            For example, the predicted class name by a
+            multi-classification Model.
+
+            This field is populated only if the Model predicts display
+            names as a separate field along with the explained output.
+            The predicted display name must have the same shape as the
+            explained output, and can be located using output_index.
+        approximation_error (float):
+            Output only. Error of
+            [feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions]
+            caused by approximation used in the explanation method.
+            Lower value means more precise attributions.
+
+            -  For Sampled Shapley
+               [attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.sampled_shapley_attribution],
+               increasing
+               [path_count][google.cloud.aiplatform.v1beta1.SampledShapleyAttribution.path_count]
+               might reduce the error.
+            -  For Integrated Gradients
+               [attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution],
+               increasing
+               [step_count][google.cloud.aiplatform.v1beta1.IntegratedGradientsAttribution.step_count]
+               might reduce the error.
+            -  For [XRAI
+               attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution],
+               increasing
+               [step_count][google.cloud.aiplatform.v1beta1.XraiAttribution.step_count]
+               might reduce the error.
+
+            See `this
+            introduction `__
+            for more information.
+        output_name (str):
+            Output only. Name of the explain output. Specified as the
+            key in
+            [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs].
+    """
+
+    baseline_output_value = proto.Field(
+        proto.DOUBLE,
+        number=1,
+    )
+    instance_output_value = proto.Field(
+        proto.DOUBLE,
+        number=2,
+    )
+    feature_attributions = proto.Field(
+        proto.MESSAGE,
+        number=3,
+        message=struct_pb2.Value,
+    )
+    output_index = proto.RepeatedField(
+        proto.INT32,
+        number=4,
+    )
+    output_display_name = proto.Field(
+        proto.STRING,
+        number=5,
+    )
+    approximation_error = proto.Field(
+        proto.DOUBLE,
+        number=6,
+    )
+    output_name = proto.Field(
+        proto.STRING,
+        number=7,
+    )
+
+
+class ExplanationSpec(proto.Message):
+    r"""Specification of Model explanation.
+
+    Attributes:
+        parameters (google.cloud.aiplatform_v1beta1.types.ExplanationParameters):
+            Required. Parameters that configure
+            explaining of the Model's predictions.
+        metadata (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata):
+            Required. Metadata describing the Model's
+            input and output for explanation.
+    """
+
+    parameters = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message='ExplanationParameters',
+    )
+    metadata = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message=explanation_metadata.ExplanationMetadata,
+    )
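The attribution methods compose as a oneof inside ExplanationParameters; a minimal sketch, assuming only the types defined in this file (the required ExplanationMetadata is elided for brevity):

    from google.cloud.aiplatform_v1beta1.types import explanation

    params = explanation.ExplanationParameters(
        integrated_gradients_attribution=explanation.IntegratedGradientsAttribution(
            step_count=50,  # documented starting point; valid range [1, 100]
            smooth_grad_config=explanation.SmoothGradConfig(
                noise_sigma=0.1,       # ~10%-20% of the feature's std dev
                noisy_sample_count=3,  # documented default
            ),
        ),
        top_k=2,  # attributions for the top 2 outputs
    )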
+ output_indices (google.protobuf.struct_pb2.ListValue):
+ If populated, only returns attributions that have
+ [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]
+ contained in output_indices. It must be an ndarray of
+ integers, with the same shape as the output it's explaining.
+
+ If not populated, returns attributions for
+ [top_k][google.cloud.aiplatform.v1beta1.ExplanationParameters.top_k]
+ indices of outputs. If neither top_k nor output_indices is
+ populated, returns the argmax index of the outputs.
+
+ Only applicable to Models that predict multiple outputs
+ (e.g., multi-class Models that predict multiple classes).
+ """
+
+ sampled_shapley_attribution = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ oneof='method',
+ message='SampledShapleyAttribution',
+ )
+ integrated_gradients_attribution = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ oneof='method',
+ message='IntegratedGradientsAttribution',
+ )
+ xrai_attribution = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ oneof='method',
+ message='XraiAttribution',
+ )
+ top_k = proto.Field(
+ proto.INT32,
+ number=4,
+ )
+ output_indices = proto.Field(
+ proto.MESSAGE,
+ number=5,
+ message=struct_pb2.ListValue,
+ )
+
+
+class SampledShapleyAttribution(proto.Message):
+ r"""An attribution method that approximates Shapley values for
+ features that contribute to the label being predicted. A
+ sampling strategy is used to approximate the value rather than
+ considering all subsets of features.
+
+ Attributes:
+ path_count (int):
+ Required. The number of feature permutations to consider
+ when approximating the Shapley values.
+
+ Valid range of its value is [1, 50], inclusively.
+ """
+
+ path_count = proto.Field(
+ proto.INT32,
+ number=1,
+ )
+
+
+class IntegratedGradientsAttribution(proto.Message):
+ r"""An attribution method that computes the Aumann-Shapley value
+ taking advantage of the model's fully differentiable structure.
+ Refer to this paper for more details:
+ https://arxiv.org/abs/1703.01365
+
+ Attributes:
+ step_count (int):
+ Required. The number of steps for approximating the path
+ integral. A good value to start with is 50; gradually
+ increase it until the sum-to-diff property is within the
+ desired error range.
+
+ Valid range of its value is [1, 100], inclusively.
+ smooth_grad_config (google.cloud.aiplatform_v1beta1.types.SmoothGradConfig):
+ Config for SmoothGrad approximation of
+ gradients.
+ When enabled, the gradients are approximated by
+ averaging the gradients from noisy samples in
+ the vicinity of the inputs. Adding noise can
+ help improve the computed gradients. Refer to
+ this paper for more details:
+ https://arxiv.org/pdf/1706.03825.pdf
+ """
+
+ step_count = proto.Field(
+ proto.INT32,
+ number=1,
+ )
+ smooth_grad_config = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message='SmoothGradConfig',
+ )
+
+
+class XraiAttribution(proto.Message):
+ r"""An explanation method that redistributes Integrated Gradients
+ attributions to segmented regions, taking advantage of the
+ model's fully differentiable structure. Refer to this paper for
+ more details: https://arxiv.org/abs/1906.02825
+
+ Supported only by image Models.
+
+ Attributes:
+ step_count (int):
+ Required. The number of steps for approximating the path
+ integral. A good value to start with is 50; gradually
+ increase it until the sum-to-diff property is met within the
+ desired error range.
+
+ Valid range of its value is [1, 100], inclusively.
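As a rough sketch of how the attribution messages above compose; this is illustrative only and not part of the generated diff, and the path_count, step_count, and top_k values are hypothetical choices:

from google.cloud.aiplatform_v1beta1.types import explanation

# Pick exactly one entry of the `method` oneof on ExplanationParameters.
# Sampled Shapley suits non-differentiable (e.g. tabular ensemble) models.
shapley_params = explanation.ExplanationParameters(
    sampled_shapley_attribution=explanation.SampledShapleyAttribution(
        path_count=10,  # must lie in [1, 50]
    ),
)

# Integrated Gradients requires a fully differentiable model.
ig_params = explanation.ExplanationParameters(
    integrated_gradients_attribution=explanation.IntegratedGradientsAttribution(
        step_count=50,  # must lie in [1, 100]
    ),
    top_k=3,  # also return attributions for the top 3 outputs
)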
+ smooth_grad_config (google.cloud.aiplatform_v1beta1.types.SmoothGradConfig): + Config for SmoothGrad approximation of + gradients. + When enabled, the gradients are approximated by + averaging the gradients from noisy samples in + the vicinity of the inputs. Adding noise can + help improve the computed gradients. Refer to + this paper for more details: + https://arxiv.org/pdf/1706.03825.pdf + """ + + step_count = proto.Field( + proto.INT32, + number=1, + ) + smooth_grad_config = proto.Field( + proto.MESSAGE, + number=2, + message='SmoothGradConfig', + ) + + +class SmoothGradConfig(proto.Message): + r"""Config for SmoothGrad approximation of gradients. + When enabled, the gradients are approximated by averaging the + gradients from noisy samples in the vicinity of the inputs. + Adding noise can help improve the computed gradients. Refer to + this paper for more details: + https://arxiv.org/pdf/1706.03825.pdf + + Attributes: + noise_sigma (float): + This is a single float value and will be used to add noise + to all the features. Use this field when all features are + normalized to have the same distribution: scale to range [0, + 1], [-1, 1] or z-scoring, where features are normalized to + have 0-mean and 1-variance. Learn more about + `normalization `__. + + For best results the recommended value is about 10% - 20% of + the standard deviation of the input feature. Refer to + section 3.2 of the SmoothGrad paper: + https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. + + If the distribution is different per feature, set + [feature_noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.feature_noise_sigma] + instead for each feature. + feature_noise_sigma (google.cloud.aiplatform_v1beta1.types.FeatureNoiseSigma): + This is similar to + [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma], + but provides additional flexibility. A separate noise sigma + can be provided for each feature, which is useful if their + distributions are different. No noise is added to features + that are not set. If this field is unset, + [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma] + will be used for all features. + noisy_sample_count (int): + The number of gradient samples to use for approximation. The + higher this number, the more accurate the gradient is, but + the runtime complexity increases by this factor as well. + Valid range of its value is [1, 50]. Defaults to 3. + """ + + noise_sigma = proto.Field( + proto.FLOAT, + number=1, + oneof='GradientNoiseSigma', + ) + feature_noise_sigma = proto.Field( + proto.MESSAGE, + number=2, + oneof='GradientNoiseSigma', + message='FeatureNoiseSigma', + ) + noisy_sample_count = proto.Field( + proto.INT32, + number=3, + ) + + +class FeatureNoiseSigma(proto.Message): + r"""Noise sigma by features. Noise sigma represents the standard + deviation of the gaussian kernel that will be used to add noise + to interpolated inputs prior to computing gradients. + + Attributes: + noise_sigma (Sequence[google.cloud.aiplatform_v1beta1.types.FeatureNoiseSigma.NoiseSigmaForFeature]): + Noise sigma per feature. No noise is added to + features that are not set. + """ + + class NoiseSigmaForFeature(proto.Message): + r"""Noise sigma for a single feature. + Attributes: + name (str): + The name of the input feature for which noise sigma is + provided. The features are defined in [explanation metadata + inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs]. 
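A minimal sketch of the GradientNoiseSigma oneof just defined: either one sigma shared by all features, or a per-feature sigma. The sigma values and the feature name "age" are hypothetical:

from google.cloud.aiplatform_v1beta1.types import explanation

# Variant 1: one sigma for all (identically scaled) features.
uniform = explanation.SmoothGradConfig(
    noise_sigma=0.1,       # roughly 10%-20% of the inputs' standard deviation
    noisy_sample_count=3,  # more samples smooth gradients but add compute
)

# Variant 2: a separate sigma per feature; only one oneof member may be set
# per message, so this goes in a second SmoothGradConfig.
per_feature = explanation.SmoothGradConfig(
    feature_noise_sigma=explanation.FeatureNoiseSigma(
        noise_sigma=[
            explanation.FeatureNoiseSigma.NoiseSigmaForFeature(
                name="age",  # hypothetical feature name
                sigma=0.2,
            ),
        ],
    ),
)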
+ sigma (float): + This represents the standard deviation of the Gaussian + kernel that will be used to add noise to the feature prior + to computing gradients. Similar to + [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma] + but represents the noise added to the current feature. + Defaults to 0.1. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + sigma = proto.Field( + proto.FLOAT, + number=2, + ) + + noise_sigma = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=NoiseSigmaForFeature, + ) + + +class ExplanationSpecOverride(proto.Message): + r"""The + [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec] + entries that can be overridden at [online + explanation][PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain] + time. + + Attributes: + parameters (google.cloud.aiplatform_v1beta1.types.ExplanationParameters): + The parameters to be overridden. Note that the + [method][google.cloud.aiplatform.v1beta1.ExplanationParameters.method] + cannot be changed. If not specified, no parameter is + overridden. + metadata (google.cloud.aiplatform_v1beta1.types.ExplanationMetadataOverride): + The metadata to be overridden. If not + specified, no metadata is overridden. + """ + + parameters = proto.Field( + proto.MESSAGE, + number=1, + message='ExplanationParameters', + ) + metadata = proto.Field( + proto.MESSAGE, + number=2, + message='ExplanationMetadataOverride', + ) + + +class ExplanationMetadataOverride(proto.Message): + r"""The + [ExplanationMetadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata] + entries that can be overridden at [online + explanation][google.cloud.aiplatform.v1beta1.PredictionService.Explain] + time. + + Attributes: + inputs (Sequence[google.cloud.aiplatform_v1beta1.types.ExplanationMetadataOverride.InputsEntry]): + Required. Overrides the [input + metadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs] + of the features. The key is the name of the feature to be + overridden. The keys specified here must exist in the input + metadata to be overridden. If a feature is not specified + here, the corresponding feature's input metadata is not + overridden. + """ + + class InputMetadataOverride(proto.Message): + r"""The [input + metadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata] + entries to be overridden. + + Attributes: + input_baselines (Sequence[google.protobuf.struct_pb2.Value]): + Baseline inputs for this feature. + + This overrides the ``input_baseline`` field of the + [ExplanationMetadata.InputMetadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata] + object of the corresponding feature's input metadata. If + it's not specified, the original baselines are not + overridden. 
+ """ + + input_baselines = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=struct_pb2.Value, + ) + + inputs = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=1, + message=InputMetadataOverride, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py new file mode 100644 index 0000000000..c2f807c34b --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py @@ -0,0 +1,449 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'ExplanationMetadata', + }, +) + + +class ExplanationMetadata(proto.Message): + r"""Metadata describing the Model's input and output for + explanation. + + Attributes: + inputs (Sequence[google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputsEntry]): + Required. Map from feature names to feature input metadata. + Keys are the name of the features. Values are the + specification of the feature. + + An empty InputMetadata is valid. It describes a text feature + which has the name specified as the key in + [ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs]. + The baseline of the empty feature is chosen by Vertex AI. + + For Vertex AI-provided Tensorflow images, the key can be any + friendly name of the feature. Once specified, + [featureAttributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions] + are keyed by this key (if not grouped with another feature). + + For custom images, the key must match with the key in + [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]. + outputs (Sequence[google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.OutputsEntry]): + Required. Map from output names to output + metadata. + For Vertex AI-provided Tensorflow images, keys + can be any user defined string that consists of + any UTF-8 characters. + For custom images, keys are the name of the + output field in the prediction to be explained. + + Currently only one key is allowed. + feature_attributions_schema_uri (str): + Points to a YAML file stored on Google Cloud Storage + describing the format of the [feature + attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions]. + The schema is defined as an OpenAPI 3.0.2 `Schema + Object `__. + AutoML tabular Models always have this field populated by + Vertex AI. Note: The URI given on output may be different, + including the URI scheme, than the one given on input. The + output URI will point to a location where the user only has + a read access. + """ + + class InputMetadata(proto.Message): + r"""Metadata of the input of a feature. 
+ + Fields other than + [InputMetadata.input_baselines][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.input_baselines] + are applicable only for Models that are using Vertex AI-provided + images for Tensorflow. + + Attributes: + input_baselines (Sequence[google.protobuf.struct_pb2.Value]): + Baseline inputs for this feature. + + If no baseline is specified, Vertex AI chooses the baseline + for this feature. If multiple baselines are specified, + Vertex AI returns the average attributions across them in + [Attributions.baseline_attribution][]. + + For Vertex AI-provided Tensorflow images (both 1.x and 2.x), + the shape of each baseline must match the shape of the input + tensor. If a scalar is provided, we broadcast to the same + shape as the input tensor. + + For custom images, the element of the baselines must be in + the same format as the feature's input in the + [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances][]. + The schema of any single instance may be specified via + Endpoint's DeployedModels' + [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. + input_tensor_name (str): + Name of the input tensor for this feature. + Required and is only applicable to Vertex AI- + provided images for Tensorflow. + encoding (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Encoding): + Defines how the feature is encoded into the + input tensor. Defaults to IDENTITY. + modality (str): + Modality of the feature. Valid values are: + numeric, image. Defaults to numeric. + feature_value_domain (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.FeatureValueDomain): + The domain details of the input feature + value. Like min/max, original mean or standard + deviation if normalized. + indices_tensor_name (str): + Specifies the index of the values of the input tensor. + Required when the input tensor is a sparse representation. + Refer to Tensorflow documentation for more details: + https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor. + dense_shape_tensor_name (str): + Specifies the shape of the values of the input if the input + is a sparse representation. Refer to Tensorflow + documentation for more details: + https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor. + index_feature_mapping (Sequence[str]): + A list of feature names for each index in the input tensor. + Required when the input + [InputMetadata.encoding][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.encoding] + is BAG_OF_FEATURES, BAG_OF_FEATURES_SPARSE, INDICATOR. + encoded_tensor_name (str): + Encoded tensor is a transformation of the input tensor. Must + be provided if choosing [Integrated Gradients + attribution][ExplanationParameters.integrated_gradients_attribution] + or [XRAI + attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution] + and the input tensor is not differentiable. + + An encoded tensor is generated if the input tensor is + encoded by a lookup table. + encoded_baselines (Sequence[google.protobuf.struct_pb2.Value]): + A list of baselines for the encoded tensor. + The shape of each baseline should match the + shape of the encoded tensor. If a scalar is + provided, Vertex AI broadcasts to the same shape + as the encoded tensor. 
+ visualization (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization): + Visualization configurations for image + explanation. + group_name (str): + Name of the group that the input belongs to. Features with + the same group name will be treated as one feature when + computing attributions. Features grouped together can have + different shapes in value. If provided, there will be one + single attribution generated in [ + featureAttributions][Attribution.feature_attributions], + keyed by the group name. + """ + class Encoding(proto.Enum): + r"""Defines how the feature is encoded to [encoded_tensor][]. Defaults + to IDENTITY. + """ + ENCODING_UNSPECIFIED = 0 + IDENTITY = 1 + BAG_OF_FEATURES = 2 + BAG_OF_FEATURES_SPARSE = 3 + INDICATOR = 4 + COMBINED_EMBEDDING = 5 + CONCAT_EMBEDDING = 6 + + class FeatureValueDomain(proto.Message): + r"""Domain details of the input feature value. Provides numeric + information about the feature, such as its range (min, max). If the + feature has been pre-processed, for example with z-scoring, then it + provides information about how to recover the original feature. For + example, if the input feature is an image and it has been + pre-processed to obtain 0-mean and stddev = 1 values, then + original_mean, and original_stddev refer to the mean and stddev of + the original feature (e.g. image tensor) from which input feature + (with mean = 0 and stddev = 1) was obtained. + + Attributes: + min_value (float): + The minimum permissible value for this + feature. + max_value (float): + The maximum permissible value for this + feature. + original_mean (float): + If this input feature has been normalized to a mean value of + 0, the original_mean specifies the mean value of the domain + prior to normalization. + original_stddev (float): + If this input feature has been normalized to a standard + deviation of 1.0, the original_stddev specifies the standard + deviation of the domain prior to normalization. + """ + + min_value = proto.Field( + proto.FLOAT, + number=1, + ) + max_value = proto.Field( + proto.FLOAT, + number=2, + ) + original_mean = proto.Field( + proto.FLOAT, + number=3, + ) + original_stddev = proto.Field( + proto.FLOAT, + number=4, + ) + + class Visualization(proto.Message): + r"""Visualization configurations for image explanation. + Attributes: + type_ (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization.Type): + Type of the image visualization. Only applicable to + [Integrated Gradients attribution] + [ExplanationParameters.integrated_gradients_attribution]. + OUTLINES shows regions of attribution, while PIXELS shows + per-pixel attribution. Defaults to OUTLINES. + polarity (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization.Polarity): + Whether to only highlight pixels with + positive contributions, negative or both. + Defaults to POSITIVE. + color_map (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization.ColorMap): + The color scheme used for the highlighted areas. + + Defaults to PINK_GREEN for [Integrated Gradients + attribution][ExplanationParameters.integrated_gradients_attribution], + which shows positive attributions in green and negative in + pink. + + Defaults to VIRIDIS for [XRAI + attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution], + which highlights the most influential regions in yellow and + the least influential in blue. 
+ clip_percent_upperbound (float): + Excludes attributions above the specified percentile from + the highlighted areas. Using the clip_percent_upperbound and + clip_percent_lowerbound together can be useful for filtering + out noise and making it easier to see areas of strong + attribution. Defaults to 99.9. + clip_percent_lowerbound (float): + Excludes attributions below the specified + percentile, from the highlighted areas. Defaults + to 62. + overlay_type (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization.OverlayType): + How the original image is displayed in the + visualization. Adjusting the overlay can help + increase visual clarity if the original image + makes it difficult to view the visualization. + Defaults to NONE. + """ + class Type(proto.Enum): + r"""Type of the image visualization. Only applicable to [Integrated + Gradients attribution] + [ExplanationParameters.integrated_gradients_attribution]. + """ + TYPE_UNSPECIFIED = 0 + PIXELS = 1 + OUTLINES = 2 + + class Polarity(proto.Enum): + r"""Whether to only highlight pixels with positive contributions, + negative or both. Defaults to POSITIVE. + """ + POLARITY_UNSPECIFIED = 0 + POSITIVE = 1 + NEGATIVE = 2 + BOTH = 3 + + class ColorMap(proto.Enum): + r"""The color scheme used for highlighting areas.""" + COLOR_MAP_UNSPECIFIED = 0 + PINK_GREEN = 1 + VIRIDIS = 2 + RED = 3 + GREEN = 4 + RED_GREEN = 6 + PINK_WHITE_GREEN = 5 + + class OverlayType(proto.Enum): + r"""How the original image is displayed in the visualization.""" + OVERLAY_TYPE_UNSPECIFIED = 0 + NONE = 1 + ORIGINAL = 2 + GRAYSCALE = 3 + MASK_BLACK = 4 + + type_ = proto.Field( + proto.ENUM, + number=1, + enum='ExplanationMetadata.InputMetadata.Visualization.Type', + ) + polarity = proto.Field( + proto.ENUM, + number=2, + enum='ExplanationMetadata.InputMetadata.Visualization.Polarity', + ) + color_map = proto.Field( + proto.ENUM, + number=3, + enum='ExplanationMetadata.InputMetadata.Visualization.ColorMap', + ) + clip_percent_upperbound = proto.Field( + proto.FLOAT, + number=4, + ) + clip_percent_lowerbound = proto.Field( + proto.FLOAT, + number=5, + ) + overlay_type = proto.Field( + proto.ENUM, + number=6, + enum='ExplanationMetadata.InputMetadata.Visualization.OverlayType', + ) + + input_baselines = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=struct_pb2.Value, + ) + input_tensor_name = proto.Field( + proto.STRING, + number=2, + ) + encoding = proto.Field( + proto.ENUM, + number=3, + enum='ExplanationMetadata.InputMetadata.Encoding', + ) + modality = proto.Field( + proto.STRING, + number=4, + ) + feature_value_domain = proto.Field( + proto.MESSAGE, + number=5, + message='ExplanationMetadata.InputMetadata.FeatureValueDomain', + ) + indices_tensor_name = proto.Field( + proto.STRING, + number=6, + ) + dense_shape_tensor_name = proto.Field( + proto.STRING, + number=7, + ) + index_feature_mapping = proto.RepeatedField( + proto.STRING, + number=8, + ) + encoded_tensor_name = proto.Field( + proto.STRING, + number=9, + ) + encoded_baselines = proto.RepeatedField( + proto.MESSAGE, + number=10, + message=struct_pb2.Value, + ) + visualization = proto.Field( + proto.MESSAGE, + number=11, + message='ExplanationMetadata.InputMetadata.Visualization', + ) + group_name = proto.Field( + proto.STRING, + number=12, + ) + + class OutputMetadata(proto.Message): + r"""Metadata of the prediction output to be explained. 
+ Attributes:
+ index_display_name_mapping (google.protobuf.struct_pb2.Value):
+ Static mapping between the index and display name.
+
+ Use this if the outputs are a deterministic n-dimensional
+ array, e.g. a list of scores of all the classes in a
+ pre-defined order for a multi-classification Model. It's not
+ feasible if the outputs are non-deterministic, e.g. the
+ Model produces top-k classes or sorts the outputs by their
+ values.
+
+ The shape of the value must be an n-dimensional array of
+ strings. The number of dimensions must match that of the
+ outputs to be explained. The
+ [Attribution.output_display_name][google.cloud.aiplatform.v1beta1.Attribution.output_display_name]
+ is populated by locating in the mapping with
+ [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
+ display_name_mapping_key (str):
+ Specify a field name in the prediction to look for the
+ display name.
+
+ Use this if the prediction contains the display names for
+ the outputs.
+
+ The display names in the prediction must have the same shape
+ as the outputs, so that they can be located by
+ [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]
+ for a specific output.
+ output_tensor_name (str):
+ Name of the output tensor. Required and is
+ only applicable to Vertex AI-provided images
+ for Tensorflow.
+ """
+
+ index_display_name_mapping = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ oneof='display_name_mapping',
+ message=struct_pb2.Value,
+ )
+ display_name_mapping_key = proto.Field(
+ proto.STRING,
+ number=2,
+ oneof='display_name_mapping',
+ )
+ output_tensor_name = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+
+ inputs = proto.MapField(
+ proto.STRING,
+ proto.MESSAGE,
+ number=1,
+ message=InputMetadata,
+ )
+ outputs = proto.MapField(
+ proto.STRING,
+ proto.MESSAGE,
+ number=2,
+ message=OutputMetadata,
+ )
+ feature_attributions_schema_uri = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature.py
new file mode 100644
index 0000000000..1897e74798
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature.py
@@ -0,0 +1,150 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats
+from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring
+from google.protobuf import timestamp_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+ package='google.cloud.aiplatform.v1beta1',
+ manifest={
+ 'Feature',
+ },
+)
+
+
+class Feature(proto.Message):
+ r"""Feature Metadata information that describes an attribute of
+ an entity type. For example, apple is an entity type, and color
+ is a feature that describes apple.
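A short sketch of how ExplanationMetadata's input and output maps might be populated; the map keys and tensor names below are hypothetical, and for custom images the input key must match the key used in the prediction instances:

from google.cloud.aiplatform_v1beta1.types import explanation_metadata

ExplanationMetadata = explanation_metadata.ExplanationMetadata

metadata = ExplanationMetadata(
    inputs={
        "image": ExplanationMetadata.InputMetadata(
            input_tensor_name="input_0",
            modality="image",
        ),
    },
    outputs={
        # Currently only one output key is allowed.
        "scores": ExplanationMetadata.OutputMetadata(
            output_tensor_name="output_0",
        ),
    },
)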
+
+ Attributes:
+ name (str):
+ Immutable. Name of the Feature. Format:
+ ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}``
+
+ The last part, ``feature``, is assigned by the client. The
+ feature can be up to 64 characters long and can consist only
+ of ASCII Latin letters A-Z and a-z, underscore (_), and
+ ASCII digits 0-9, starting with a letter. The value will be
+ unique given an entity type.
+ description (str):
+ Description of the Feature.
+ value_type (google.cloud.aiplatform_v1beta1.types.Feature.ValueType):
+ Required. Immutable. Type of Feature value.
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when this Feature
+ was created.
+ update_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when this Feature
+ was most recently updated.
+ labels (Sequence[google.cloud.aiplatform_v1beta1.types.Feature.LabelsEntry]):
+ Optional. The labels with user-defined
+ metadata to organize your Features.
+ Label keys and values can be no longer than 64
+ characters (Unicode codepoints), can only
+ contain lowercase letters, numeric characters,
+ underscores and dashes. International characters
+ are allowed.
+ See https://goo.gl/xmQnxf for more information
+ on and examples of labels. No more than 64 user
+ labels can be associated with one Feature
+ (System labels are excluded).
+ System reserved label keys are prefixed with
+ "aiplatform.googleapis.com/" and are immutable.
+ etag (str):
+ Used to perform consistent read-modify-
+ write updates. If not set, a blind "overwrite"
+ update happens.
+ monitoring_config (google.cloud.aiplatform_v1beta1.types.FeaturestoreMonitoringConfig):
+ Optional. The custom monitoring configuration for this
+ Feature; if not set, the monitoring_config defined for the
+ EntityType this Feature belongs to is used.
+
+ If this is populated with
+ [FeaturestoreMonitoringConfig.disabled][] = true, snapshot
+ analysis monitoring is disabled; if
+ [FeaturestoreMonitoringConfig.monitoring_interval][] is
+ specified, snapshot analysis monitoring is enabled.
+ Otherwise, the snapshot analysis monitoring config is the
+ same as that of the EntityType this Feature belongs to.
+ monitoring_stats (Sequence[google.cloud.aiplatform_v1beta1.types.FeatureStatsAnomaly]):
+ Output only. A list of historical [Snapshot
+ Analysis][google.cloud.aiplatform.master.FeaturestoreMonitoringConfig.SnapshotAnalysis]
+ stats requested by the user, sorted by
+ [FeatureStatsAnomaly.start_time][google.cloud.aiplatform.v1beta1.FeatureStatsAnomaly.start_time]
+ descending.
+ """ + class ValueType(proto.Enum): + r"""An enum representing the value type of a feature.""" + VALUE_TYPE_UNSPECIFIED = 0 + BOOL = 1 + BOOL_ARRAY = 2 + DOUBLE = 3 + DOUBLE_ARRAY = 4 + INT64 = 9 + INT64_ARRAY = 10 + STRING = 11 + STRING_ARRAY = 12 + BYTES = 13 + + name = proto.Field( + proto.STRING, + number=1, + ) + description = proto.Field( + proto.STRING, + number=2, + ) + value_type = proto.Field( + proto.ENUM, + number=3, + enum=ValueType, + ) + create_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=6, + ) + etag = proto.Field( + proto.STRING, + number=7, + ) + monitoring_config = proto.Field( + proto.MESSAGE, + number=9, + message=featurestore_monitoring.FeaturestoreMonitoringConfig, + ) + monitoring_stats = proto.RepeatedField( + proto.MESSAGE, + number=10, + message=feature_monitoring_stats.FeatureStatsAnomaly, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_monitoring_stats.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_monitoring_stats.py new file mode 100644 index 0000000000..48dd2b3b84 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_monitoring_stats.py @@ -0,0 +1,124 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'FeatureStatsAnomaly', + }, +) + + +class FeatureStatsAnomaly(proto.Message): + r"""Stats and Anomaly generated at specific timestamp for specific + Feature. The start_time and end_time are used to define the time + range of the dataset that current stats belongs to, e.g. prediction + traffic is bucketed into prediction datasets by time window. If the + Dataset is not defined by time window, start_time = end_time. + Timestamp of the stats and anomalies always refers to end_time. Raw + stats and anomalies are stored in stats_uri or anomaly_uri in the + tensorflow defined protos. Field data_stats contains almost + identical information with the raw stats in Vertex AI defined proto, + for UI to display. + + Attributes: + score (float): + Feature importance score, only populated when cross-feature + monitoring is enabled. For now only used to represent + feature attribution score within range [0, 1] for + [ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_SKEW][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_SKEW] + and + [ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_DRIFT][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_DRIFT]. 
+ stats_uri (str):
+ Path of the stats file for current feature values in Cloud
+ Storage bucket. Format:
+ gs:////stats. Example:
+ gs://monitoring_bucket/feature_name/stats. Stats are stored
+ as binary format with Protobuf message
+ `tensorflow.metadata.v0.FeatureNameStatistics `__.
+ anomaly_uri (str):
+ Path of the anomaly file for current feature values in Cloud
+ Storage bucket. Format:
+ gs:////anomalies. Example:
+ gs://monitoring_bucket/feature_name/anomalies. Anomalies are
+ stored as binary format with Protobuf message
+ [tensorflow.metadata.v0.AnomalyInfo]
+ (https://github.com/tensorflow/metadata/blob/master/tensorflow_metadata/proto/v0/anomalies.proto).
+ distribution_deviation (float):
+ Deviation from the current stats to baseline
+ stats. 1. For a categorical feature, the
+ distribution distance is calculated by the
+ L-infinity norm.
+ 2. For a numerical feature, the distribution
+ distance is calculated by the Jensen–Shannon
+ divergence.
+ anomaly_detection_threshold (float):
+ This is the threshold used when detecting anomalies. The
+ threshold can be changed by the user, so this one might be
+ different from
+ [ThresholdConfig.value][google.cloud.aiplatform.v1beta1.ThresholdConfig.value].
+ start_time (google.protobuf.timestamp_pb2.Timestamp):
+ The start timestamp of the window where stats were
+ generated. For objectives where a time window doesn't make
+ sense (e.g. Featurestore Snapshot Monitoring), start_time is
+ only used to indicate the monitoring intervals, so it always
+ equals (end_time - monitoring_interval).
+ end_time (google.protobuf.timestamp_pb2.Timestamp):
+ The end timestamp of the window where stats were generated.
+ For objectives where a time window doesn't make sense (e.g.
+ Featurestore Snapshot Monitoring), end_time indicates the
+ timestamp of the data used to generate stats (e.g. the
+ timestamp when snapshots of feature values are taken).
+ """
+
+ score = proto.Field(
+ proto.DOUBLE,
+ number=1,
+ )
+ stats_uri = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+ anomaly_uri = proto.Field(
+ proto.STRING,
+ number=4,
+ )
+ distribution_deviation = proto.Field(
+ proto.DOUBLE,
+ number=5,
+ )
+ anomaly_detection_threshold = proto.Field(
+ proto.DOUBLE,
+ number=9,
+ )
+ start_time = proto.Field(
+ proto.MESSAGE,
+ number=7,
+ message=timestamp_pb2.Timestamp,
+ )
+ end_time = proto.Field(
+ proto.MESSAGE,
+ number=8,
+ message=timestamp_pb2.Timestamp,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_selector.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_selector.py
new file mode 100644
index 0000000000..3921a7c769
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_selector.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+ package='google.cloud.aiplatform.v1beta1',
+ manifest={
+ 'IdMatcher',
+ 'FeatureSelector',
+ },
+)
+
+
+class IdMatcher(proto.Message):
+ r"""Matcher for Features of an EntityType by Feature ID.
+ Attributes:
+ ids (Sequence[str]):
+ Required. The following are accepted as ``ids``:
+
+ - A single-element list containing only ``*``, which
+ selects all Features in the target EntityType, or
+ - A list containing only Feature IDs, which selects only
+ Features with those IDs in the target EntityType.
+ """
+
+ ids = proto.RepeatedField(
+ proto.STRING,
+ number=1,
+ )
+
+
+class FeatureSelector(proto.Message):
+ r"""Selector for Features of an EntityType.
+ Attributes:
+ id_matcher (google.cloud.aiplatform_v1beta1.types.IdMatcher):
+ Required. Matches Features based on ID.
+ """
+
+ id_matcher = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message='IdMatcher',
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore.py
new file mode 100644
index 0000000000..6d51c0c35b
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore.py
@@ -0,0 +1,125 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.protobuf import timestamp_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+ package='google.cloud.aiplatform.v1beta1',
+ manifest={
+ 'Featurestore',
+ },
+)
+
+
+class Featurestore(proto.Message):
+ r"""Configuration information describing how a Featurestore
+ is configured.
+
+ Attributes:
+ name (str):
+ Output only. Name of the Featurestore. Format:
+ ``projects/{project}/locations/{location}/featurestores/{featurestore}``
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when this Featurestore
+ was created.
+ update_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Timestamp when this Featurestore
+ was last updated.
+ etag (str):
+ Optional. Used to perform consistent read-
+ modify-write updates. If not set, a blind
+ "overwrite" update happens.
+ labels (Sequence[google.cloud.aiplatform_v1beta1.types.Featurestore.LabelsEntry]):
+ Optional. The labels with user-defined
+ metadata to organize your Featurestore.
+ Label keys and values can be no longer than 64
+ characters (Unicode codepoints), can only
+ contain lowercase letters, numeric characters,
+ underscores and dashes. International characters
+ are allowed.
+ See https://goo.gl/xmQnxf for more information
+ on and examples of labels. No more than 64 user
+ labels can be associated with one
+ Featurestore (System labels are excluded).
+ System reserved label keys are prefixed with
+ "aiplatform.googleapis.com/" and are immutable.
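A minimal sketch of the two accepted IdMatcher forms documented above; the concrete Feature IDs are hypothetical:

from google.cloud.aiplatform_v1beta1.types import feature_selector

# Select every Feature of the target EntityType with the wildcard ID...
select_all = feature_selector.FeatureSelector(
    id_matcher=feature_selector.IdMatcher(ids=["*"]),
)

# ...or only specific Features by ID.
select_some = feature_selector.FeatureSelector(
    id_matcher=feature_selector.IdMatcher(ids=["age", "gender"]),
)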
+ online_serving_config (google.cloud.aiplatform_v1beta1.types.Featurestore.OnlineServingConfig):
+ Required. Config for online serving
+ resources.
+ state (google.cloud.aiplatform_v1beta1.types.Featurestore.State):
+ Output only. State of the featurestore.
+ """
+ class State(proto.Enum):
+ r"""Possible states a Featurestore can have."""
+ STATE_UNSPECIFIED = 0
+ STABLE = 1
+ UPDATING = 2
+
+ class OnlineServingConfig(proto.Message):
+ r"""OnlineServingConfig specifies the details for provisioning
+ online serving resources.
+
+ Attributes:
+ fixed_node_count (int):
+ Required. The number of nodes for each
+ cluster. The number of nodes will not scale
+ automatically but can be scaled manually by
+ providing different values when updating.
+ """
+
+ fixed_node_count = proto.Field(
+ proto.INT32,
+ number=2,
+ )
+
+ name = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ create_time = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=timestamp_pb2.Timestamp,
+ )
+ update_time = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message=timestamp_pb2.Timestamp,
+ )
+ etag = proto.Field(
+ proto.STRING,
+ number=5,
+ )
+ labels = proto.MapField(
+ proto.STRING,
+ proto.STRING,
+ number=6,
+ )
+ online_serving_config = proto.Field(
+ proto.MESSAGE,
+ number=7,
+ message=OnlineServingConfig,
+ )
+ state = proto.Field(
+ proto.ENUM,
+ number=8,
+ enum=State,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_monitoring.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_monitoring.py
new file mode 100644
index 0000000000..ba63973bc8
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_monitoring.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.protobuf import duration_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+ package='google.cloud.aiplatform.v1beta1',
+ manifest={
+ 'FeaturestoreMonitoringConfig',
+ },
+)
+
+
+class FeaturestoreMonitoringConfig(proto.Message):
+ r"""Configuration of how features in Featurestore are monitored.
+ Attributes:
+ snapshot_analysis (google.cloud.aiplatform_v1beta1.types.FeaturestoreMonitoringConfig.SnapshotAnalysis):
+ The config for Snapshot Analysis Based
+ Feature Monitoring.
+ """
+
+ class SnapshotAnalysis(proto.Message):
+ r"""Configuration of the Featurestore's Snapshot Analysis Based
+ Monitoring. This type of analysis generates statistics for each
+ Feature based on a snapshot of the latest feature values of each
+ entity, taken at every monitoring_interval.
+
+ Attributes:
+ disabled (bool):
+ The monitoring schedule for snapshot analysis. For an
+ EntityType-level config: leaving this unset or setting
+ disabled = true disables monitoring by default for Features
+ under it; otherwise, snapshot analysis monitoring is enabled
+ by default with monitoring_interval for Features under it.
+ For a Feature-level config: disabled = true disables
+ monitoring regardless of the EntityType-level config;
+ leaving monitoring_interval unset defers to the
+ EntityType-level config; otherwise, snapshot analysis
+ monitoring runs with monitoring_interval regardless of the
+ EntityType-level config. In short, set disabled = true to
+ explicitly disable snapshot analysis based monitoring.
+ monitoring_interval (google.protobuf.duration_pb2.Duration):
+ Configuration of the snapshot analysis based
+ monitoring pipeline running interval. The value
+ is rolled up to a full day.
+ """
+
+ disabled = proto.Field(
+ proto.BOOL,
+ number=1,
+ )
+ monitoring_interval = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=duration_pb2.Duration,
+ )
+
+ snapshot_analysis = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message=SnapshotAnalysis,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py
new file mode 100644
index 0000000000..aeb8c1cd53
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py
@@ -0,0 +1,337 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import feature_selector as gca_feature_selector
+from google.cloud.aiplatform_v1beta1.types import types
+from google.protobuf import timestamp_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+ package='google.cloud.aiplatform.v1beta1',
+ manifest={
+ 'ReadFeatureValuesRequest',
+ 'ReadFeatureValuesResponse',
+ 'StreamingReadFeatureValuesRequest',
+ 'FeatureValue',
+ 'FeatureValueList',
+ },
+)
+
+
+class ReadFeatureValuesRequest(proto.Message):
+ r"""Request message for
+ [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues].
+
+ Attributes:
+ entity_type (str):
+ Required. The resource name of the EntityType for the entity
+ being read. Value format:
+ ``projects/{project}/locations/{location}/featurestores/ {featurestore}/entityTypes/{entityType}``.
+ For example, for a machine learning model predicting user
+ clicks on a website, an EntityType ID could be "user".
+ entity_id (str):
+ Required. ID for a specific entity. For example, for a
+ machine learning model predicting user clicks on a website,
+ an entity ID could be "user_123".
+ feature_selector (google.cloud.aiplatform_v1beta1.types.FeatureSelector):
+ Required. Selector choosing Features of the
+ target EntityType.
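A short sketch of enabling the snapshot analysis config defined above with a daily interval; the interval value is hypothetical, and the service rolls it up to a full day:

from google.protobuf import duration_pb2

from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring

SnapshotAnalysis = (
    featurestore_monitoring.FeaturestoreMonitoringConfig.SnapshotAnalysis
)

config = featurestore_monitoring.FeaturestoreMonitoringConfig(
    snapshot_analysis=SnapshotAnalysis(
        monitoring_interval=duration_pb2.Duration(seconds=86400),  # one day
    ),
)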
+ """ + + entity_type = proto.Field( + proto.STRING, + number=1, + ) + entity_id = proto.Field( + proto.STRING, + number=2, + ) + feature_selector = proto.Field( + proto.MESSAGE, + number=3, + message=gca_feature_selector.FeatureSelector, + ) + + +class ReadFeatureValuesResponse(proto.Message): + r"""Response message for + [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues]. + + Attributes: + header (google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse.Header): + Response header. + entity_view (google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse.EntityView): + Entity view with Feature values. This may be + the entity in the Featurestore if values for all + Features were requested, or a projection of the + entity in the Featurestore if values for only + some Features were requested. + """ + + class FeatureDescriptor(proto.Message): + r"""Metadata for requested Features. + Attributes: + id (str): + Feature ID. + """ + + id = proto.Field( + proto.STRING, + number=1, + ) + + class Header(proto.Message): + r"""Response header with metadata for the requested + [ReadFeatureValuesRequest.entity_type][google.cloud.aiplatform.v1beta1.ReadFeatureValuesRequest.entity_type] + and Features. + + Attributes: + entity_type (str): + The resource name of the EntityType from the + [ReadFeatureValuesRequest][google.cloud.aiplatform.v1beta1.ReadFeatureValuesRequest]. + Value format: + ``projects/{project}/locations/{location}/featurestores/ {featurestore}/entityTypes/{entityType}``. + feature_descriptors (Sequence[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse.FeatureDescriptor]): + List of Feature metadata corresponding to each piece of + [ReadFeatureValuesResponse.data][]. + """ + + entity_type = proto.Field( + proto.STRING, + number=1, + ) + feature_descriptors = proto.RepeatedField( + proto.MESSAGE, + number=2, + message='ReadFeatureValuesResponse.FeatureDescriptor', + ) + + class EntityView(proto.Message): + r"""Entity view with Feature values. + Attributes: + entity_id (str): + ID of the requested entity. + data (Sequence[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse.EntityView.Data]): + Each piece of data holds the k requested values for one + requested Feature. If no values for the requested Feature + exist, the corresponding cell will be empty. This has the + same size and is in the same order as the features from the + header + [ReadFeatureValuesResponse.header][google.cloud.aiplatform.v1beta1.ReadFeatureValuesResponse.header]. + """ + + class Data(proto.Message): + r"""Container to hold value(s), successive in time, for one + Feature from the request. + + Attributes: + value (google.cloud.aiplatform_v1beta1.types.FeatureValue): + Feature value if a single value is requested. + values (google.cloud.aiplatform_v1beta1.types.FeatureValueList): + Feature values list if values, successive in + time, are requested. If the requested number of + values is greater than the number of existing + Feature values, nonexistent values are omitted + instead of being returned as empty. 
+ """ + + value = proto.Field( + proto.MESSAGE, + number=1, + oneof='data', + message='FeatureValue', + ) + values = proto.Field( + proto.MESSAGE, + number=2, + oneof='data', + message='FeatureValueList', + ) + + entity_id = proto.Field( + proto.STRING, + number=1, + ) + data = proto.RepeatedField( + proto.MESSAGE, + number=2, + message='ReadFeatureValuesResponse.EntityView.Data', + ) + + header = proto.Field( + proto.MESSAGE, + number=1, + message=Header, + ) + entity_view = proto.Field( + proto.MESSAGE, + number=2, + message=EntityView, + ) + + +class StreamingReadFeatureValuesRequest(proto.Message): + r"""Request message for + [FeaturestoreOnlineServingService.StreamingFeatureValuesRead][]. + + Attributes: + entity_type (str): + Required. The resource name of the entities' type. Value + format: + ``projects/{project}/locations/{location}/featurestores/ {featurestore}/entityTypes/{entityType}``. + For example, for a machine learning model predicting user + clicks on a website, an EntityType ID could be "user". + entity_ids (Sequence[str]): + Required. IDs of entities to read Feature values of. For + example, for a machine learning model predicting user clicks + on a website, an entity ID could be "user_123". + feature_selector (google.cloud.aiplatform_v1beta1.types.FeatureSelector): + Required. Selector choosing Features of the + target EntityType. + """ + + entity_type = proto.Field( + proto.STRING, + number=1, + ) + entity_ids = proto.RepeatedField( + proto.STRING, + number=2, + ) + feature_selector = proto.Field( + proto.MESSAGE, + number=3, + message=gca_feature_selector.FeatureSelector, + ) + + +class FeatureValue(proto.Message): + r"""Value for a feature. + NEXT ID: 15 + + Attributes: + bool_value (bool): + Bool type feature value. + double_value (float): + Double type feature value. + int64_value (int): + Int64 feature value. + string_value (str): + String feature value. + bool_array_value (google.cloud.aiplatform_v1beta1.types.BoolArray): + A list of bool type feature value. + double_array_value (google.cloud.aiplatform_v1beta1.types.DoubleArray): + A list of double type feature value. + int64_array_value (google.cloud.aiplatform_v1beta1.types.Int64Array): + A list of int64 type feature value. + string_array_value (google.cloud.aiplatform_v1beta1.types.StringArray): + A list of string type feature value. + bytes_value (bytes): + Bytes feature value. + metadata (google.cloud.aiplatform_v1beta1.types.FeatureValue.Metadata): + Output only. Metadata of feature value. + """ + + class Metadata(proto.Message): + r"""Metadata of feature value. + Attributes: + generate_time (google.protobuf.timestamp_pb2.Timestamp): + Feature generation timestamp. Typically, it + is provided by user at feature ingestion time. + If not, feature store will use the system + timestamp when the data is ingested into feature + store. 
+ """ + + generate_time = proto.Field( + proto.MESSAGE, + number=1, + message=timestamp_pb2.Timestamp, + ) + + bool_value = proto.Field( + proto.BOOL, + number=1, + oneof='value', + ) + double_value = proto.Field( + proto.DOUBLE, + number=2, + oneof='value', + ) + int64_value = proto.Field( + proto.INT64, + number=5, + oneof='value', + ) + string_value = proto.Field( + proto.STRING, + number=6, + oneof='value', + ) + bool_array_value = proto.Field( + proto.MESSAGE, + number=7, + oneof='value', + message=types.BoolArray, + ) + double_array_value = proto.Field( + proto.MESSAGE, + number=8, + oneof='value', + message=types.DoubleArray, + ) + int64_array_value = proto.Field( + proto.MESSAGE, + number=11, + oneof='value', + message=types.Int64Array, + ) + string_array_value = proto.Field( + proto.MESSAGE, + number=12, + oneof='value', + message=types.StringArray, + ) + bytes_value = proto.Field( + proto.BYTES, + number=13, + oneof='value', + ) + metadata = proto.Field( + proto.MESSAGE, + number=14, + message=Metadata, + ) + + +class FeatureValueList(proto.Message): + r"""Container for list of values. + Attributes: + values (Sequence[google.cloud.aiplatform_v1beta1.types.FeatureValue]): + A list of feature values. All of them should + be the same data type. + """ + + values = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='FeatureValue', + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_service.py new file mode 100644 index 0000000000..09c8e10fdc --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_service.py @@ -0,0 +1,1500 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import proto  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type
+from google.cloud.aiplatform_v1beta1.types import feature as gca_feature
+from google.cloud.aiplatform_v1beta1.types import feature_selector as gca_feature_selector
+from google.cloud.aiplatform_v1beta1.types import featurestore as gca_featurestore
+from google.cloud.aiplatform_v1beta1.types import io
+from google.cloud.aiplatform_v1beta1.types import operation
+from google.protobuf import field_mask_pb2  # type: ignore
+from google.protobuf import timestamp_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+ package='google.cloud.aiplatform.v1beta1',
+ manifest={
+ 'CreateFeaturestoreRequest',
+ 'GetFeaturestoreRequest',
+ 'ListFeaturestoresRequest',
+ 'ListFeaturestoresResponse',
+ 'UpdateFeaturestoreRequest',
+ 'DeleteFeaturestoreRequest',
+ 'ImportFeatureValuesRequest',
+ 'ImportFeatureValuesResponse',
+ 'BatchReadFeatureValuesRequest',
+ 'ExportFeatureValuesRequest',
+ 'DestinationFeatureSetting',
+ 'FeatureValueDestination',
+ 'ExportFeatureValuesResponse',
+ 'BatchReadFeatureValuesResponse',
+ 'CreateEntityTypeRequest',
+ 'GetEntityTypeRequest',
+ 'ListEntityTypesRequest',
+ 'ListEntityTypesResponse',
+ 'UpdateEntityTypeRequest',
+ 'DeleteEntityTypeRequest',
+ 'CreateFeatureRequest',
+ 'BatchCreateFeaturesRequest',
+ 'BatchCreateFeaturesResponse',
+ 'GetFeatureRequest',
+ 'ListFeaturesRequest',
+ 'ListFeaturesResponse',
+ 'SearchFeaturesRequest',
+ 'SearchFeaturesResponse',
+ 'UpdateFeatureRequest',
+ 'DeleteFeatureRequest',
+ 'CreateFeaturestoreOperationMetadata',
+ 'UpdateFeaturestoreOperationMetadata',
+ 'ImportFeatureValuesOperationMetadata',
+ 'ExportFeatureValuesOperationMetadata',
+ 'BatchReadFeatureValuesOperationMetadata',
+ 'CreateEntityTypeOperationMetadata',
+ 'CreateFeatureOperationMetadata',
+ 'BatchCreateFeaturesOperationMetadata',
+ },
+)
+
+
+class CreateFeaturestoreRequest(proto.Message):
+ r"""Request message for
+ [FeaturestoreService.CreateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeaturestore].
+
+ Attributes:
+ parent (str):
+ Required. The resource name of the Location to create
+ Featurestores. Format:
+ ``projects/{project}/locations/{location}``
+ featurestore (google.cloud.aiplatform_v1beta1.types.Featurestore):
+ Required. The Featurestore to create.
+ featurestore_id (str):
+ Required. The ID to use for this Featurestore, which will
+ become the final component of the Featurestore's resource
+ name.
+
+ This value may be up to 60 characters, and valid characters
+ are ``[a-z0-9_]``. The first character cannot be a number.
+
+ The value must be unique within the project and location.
+ """
+
+ parent = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ featurestore = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=gca_featurestore.Featurestore,
+ )
+ featurestore_id = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+
+
+class GetFeaturestoreRequest(proto.Message):
+ r"""Request message for
+ [FeaturestoreService.GetFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeaturestore].
+
+ Attributes:
+ name (str):
+ Required. The name of the Featurestore
+ resource.
+ """
+
+ name = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+
+
+class ListFeaturestoresRequest(proto.Message):
+ r"""Request message for
+ [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores].
+
+ Attributes:
+ parent (str):
+ Required.
The resource name of the Location to list + Featurestores. Format: + ``projects/{project}/locations/{location}`` + filter (str): + Lists the featurestores that match the filter expression. + The following fields are supported: + + - ``create_time``: Supports =, !=, <, >, <=, and >= + comparisons. Values must be in RFC 3339 format. + - ``update_time``: Supports =, !=, <, >, <=, and >= + comparisons. Values must be in RFC 3339 format. + - ``online_serving_config.fixed_node_count``: Supports =, + !=, <, >, <=, and >= comparisons. + - ``labels``: Supports key-value equality and key presence. + + Examples: + + - ``create_time > "2020-01-01" OR update_time > "2020-01-01"`` + Featurestores created or updated after 2020-01-01. + - ``labels.env = "prod"`` Featurestores with label "env" + set to "prod". + page_size (int): + The maximum number of Featurestores to + return. The service may return fewer than this + value. If unspecified, at most 100 Featurestores + will be returned. The maximum value is 100; any + value greater than 100 will be coerced to 100. + page_token (str): + A page token, received from a previous + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores] + call. Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores] + must match the call that provided the page token. + order_by (str): + A comma-separated list of fields to order by, sorted in + ascending order. Use "desc" after a field name for + descending. Supported Fields: + + - ``create_time`` + - ``update_time`` + - ``online_serving_config.fixed_node_count`` + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + order_by = proto.Field( + proto.STRING, + number=5, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=6, + message=field_mask_pb2.FieldMask, + ) + + +class ListFeaturestoresResponse(proto.Message): + r"""Response message for + [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]. + + Attributes: + featurestores (Sequence[google.cloud.aiplatform_v1beta1.types.Featurestore]): + The Featurestores matching the request. + next_page_token (str): + A token, which can be sent as + [ListFeaturestoresRequest.page_token][google.cloud.aiplatform.v1beta1.ListFeaturestoresRequest.page_token] + to retrieve the next page. If this field is omitted, there + are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + featurestores = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_featurestore.Featurestore, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateFeaturestoreRequest(proto.Message): + r"""Request message for + [FeaturestoreService.UpdateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeaturestore]. + + Attributes: + featurestore (google.cloud.aiplatform_v1beta1.types.Featurestore): + Required. The Featurestore's ``name`` field is used to + identify the Featurestore to be updated. 
Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Field mask is used to specify the fields to be overwritten + in the Featurestore resource by the update. The fields + specified in the update_mask are relative to the resource, + not the full request. A field will be overwritten if it is + in the mask. If the user does not provide a mask then only + the non-empty fields present in the request will be + overwritten. Set the update_mask to ``*`` to override all + fields. + + Updatable fields: + + - ``labels`` + - ``online_serving_config.fixed_node_count`` + """ + + featurestore = proto.Field( + proto.MESSAGE, + number=1, + message=gca_featurestore.Featurestore, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class DeleteFeaturestoreRequest(proto.Message): + r"""Request message for + [FeaturestoreService.DeleteFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeaturestore]. + + Attributes: + name (str): + Required. The name of the Featurestore to be deleted. + Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + force (bool): + If set to true, any EntityTypes and Features + for this Featurestore will also be deleted. + (Otherwise, the request will only work if the + Featurestore has no EntityTypes.) + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + force = proto.Field( + proto.BOOL, + number=2, + ) + + +class ImportFeatureValuesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. + + Attributes: + avro_source (google.cloud.aiplatform_v1beta1.types.AvroSource): + + bigquery_source (google.cloud.aiplatform_v1beta1.types.BigQuerySource): + + csv_source (google.cloud.aiplatform_v1beta1.types.CsvSource): + + feature_time_field (str): + Source column that holds the Feature + timestamp for all Feature values in each entity. + feature_time (google.protobuf.timestamp_pb2.Timestamp): + Single Feature timestamp for all entities + being imported. The timestamp must not have + higher than millisecond precision. + entity_type (str): + Required. The resource name of the EntityType grouping the + Features for which values are being imported. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`` + entity_id_field (str): + Source column that holds entity IDs. If not provided, entity + IDs are extracted from the column named ``entity_id``. + feature_specs (Sequence[google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesRequest.FeatureSpec]): + Required. Specifications defining which Feature values to + import from the entity. The request fails if no + feature_specs are provided, and having multiple + feature_specs for one Feature is not allowed. + disable_online_serving (bool): + If set, data will not be imported for online + serving. This is typically used for backfilling, + where Feature generation timestamps are not in + the timestamp range needed for online serving. + worker_count (int): + Specifies the number of workers that are used + to write data to the Featurestore. Consider the + online serving capacity that you require to + achieve the desired import throughput without + interfering with online serving. The value must + be positive, and less than or equal to 100. If + not set, defaults to using 1 worker. 
The low + count ensures minimal impact on online serving + performance. + """ + + class FeatureSpec(proto.Message): + r"""Defines the Feature value(s) to import. + Attributes: + id (str): + Required. ID of the Feature to import values + of. This Feature must exist in the target + EntityType, or the request will fail. + source_field (str): + Source column to get the Feature values from. + If not set, uses the column with the same name + as the Feature ID. + """ + + id = proto.Field( + proto.STRING, + number=1, + ) + source_field = proto.Field( + proto.STRING, + number=2, + ) + + avro_source = proto.Field( + proto.MESSAGE, + number=2, + oneof='source', + message=io.AvroSource, + ) + bigquery_source = proto.Field( + proto.MESSAGE, + number=3, + oneof='source', + message=io.BigQuerySource, + ) + csv_source = proto.Field( + proto.MESSAGE, + number=4, + oneof='source', + message=io.CsvSource, + ) + feature_time_field = proto.Field( + proto.STRING, + number=6, + oneof='feature_time_source', + ) + feature_time = proto.Field( + proto.MESSAGE, + number=7, + oneof='feature_time_source', + message=timestamp_pb2.Timestamp, + ) + entity_type = proto.Field( + proto.STRING, + number=1, + ) + entity_id_field = proto.Field( + proto.STRING, + number=5, + ) + feature_specs = proto.RepeatedField( + proto.MESSAGE, + number=8, + message=FeatureSpec, + ) + disable_online_serving = proto.Field( + proto.BOOL, + number=9, + ) + worker_count = proto.Field( + proto.INT32, + number=11, + ) + + +class ImportFeatureValuesResponse(proto.Message): + r"""Response message for + [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. + + Attributes: + imported_entity_count (int): + Number of entities that have been imported by + the operation. + imported_feature_value_count (int): + Number of Feature values that have been + imported by the operation. + invalid_row_count (int): + The number of rows in input source that weren't imported due + to either + + - Not having any featureValues. + - Having a null entityId. + - Having a null timestamp. + - Not being parsable (applicable for CSV sources). + """ + + imported_entity_count = proto.Field( + proto.INT64, + number=1, + ) + imported_feature_value_count = proto.Field( + proto.INT64, + number=2, + ) + invalid_row_count = proto.Field( + proto.INT64, + number=6, + ) + + +class BatchReadFeatureValuesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. + (- Next Id: 6 -) + + Attributes: + csv_read_instances (google.cloud.aiplatform_v1beta1.types.CsvSource): + Each read instance consists of exactly one read timestamp + and one or more entity IDs identifying entities of the + corresponding EntityTypes whose Features are requested. + + Each output instance contains Feature values of requested + entities concatenated together as of the read time. + + An example read instance may be + ``foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z``. + + An example output instance may be + ``foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z, foo_entity_feature1_value, bar_entity_feature2_value``. + + Timestamp in each read instance must be millisecond-aligned. + + ``csv_read_instances`` are read instances stored in a + plain-text CSV file. The header should be: + [ENTITY_TYPE_ID1], [ENTITY_TYPE_ID2], ..., timestamp + + The columns can be in any order. 
+
+            Values in the timestamp column must use the RFC 3339 format,
+            e.g. ``2012-07-30T10:43:17.123Z``.
+        featurestore (str):
+            Required. The resource name of the Featurestore from which
+            to query Feature values. Format:
+            ``projects/{project}/locations/{location}/featurestores/{featurestore}``
+        destination (google.cloud.aiplatform_v1beta1.types.FeatureValueDestination):
+            Required. Specifies output location and
+            format.
+        entity_type_specs (Sequence[google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest.EntityTypeSpec]):
+            Required. Specifies EntityType grouping Features to read
+            values of and settings. Each EntityType referenced in
+            [BatchReadFeatureValuesRequest.entity_type_specs] must have
+            a column specifying entity IDs in that EntityType in
+            [BatchReadFeatureValuesRequest.request][].
+    """
+
+    class EntityTypeSpec(proto.Message):
+        r"""Selects Features of an EntityType to read values of and
+        specifies read settings.
+
+        Attributes:
+            entity_type_id (str):
+                Required. ID of the EntityType to select Features. The
+                EntityType id is the
+                [entity_type_id][google.cloud.aiplatform.v1beta1.CreateEntityTypeRequest.entity_type_id]
+                specified during EntityType creation.
+            feature_selector (google.cloud.aiplatform_v1beta1.types.FeatureSelector):
+                Required. Selectors choosing which Feature
+                values to read from the EntityType.
+            settings (Sequence[google.cloud.aiplatform_v1beta1.types.DestinationFeatureSetting]):
+                Per-Feature settings for the batch read.
+        """
+
+        entity_type_id = proto.Field(
+            proto.STRING,
+            number=1,
+        )
+        feature_selector = proto.Field(
+            proto.MESSAGE,
+            number=2,
+            message=gca_feature_selector.FeatureSelector,
+        )
+        settings = proto.RepeatedField(
+            proto.MESSAGE,
+            number=3,
+            message='DestinationFeatureSetting',
+        )
+
+    csv_read_instances = proto.Field(
+        proto.MESSAGE,
+        number=3,
+        oneof='read_option',
+        message=io.CsvSource,
+    )
+    featurestore = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    destination = proto.Field(
+        proto.MESSAGE,
+        number=4,
+        message='FeatureValueDestination',
+    )
+    entity_type_specs = proto.RepeatedField(
+        proto.MESSAGE,
+        number=7,
+        message=EntityTypeSpec,
+    )
+
+
+class ExportFeatureValuesRequest(proto.Message):
+    r"""Request message for
+    [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues].
+
+    Attributes:
+        snapshot_export (google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest.SnapshotExport):
+            Exports Feature values of all entities of the
+            EntityType as of a snapshot time.
+        entity_type (str):
+            Required. The resource name of the EntityType from which to
+            export Feature values. Format:
+            ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
+        destination (google.cloud.aiplatform_v1beta1.types.FeatureValueDestination):
+            Required. Specifies destination location and
+            format.
+        feature_selector (google.cloud.aiplatform_v1beta1.types.FeatureSelector):
+            Required. Selects Features to export values
+            of.
+        settings (Sequence[google.cloud.aiplatform_v1beta1.types.DestinationFeatureSetting]):
+            Per-Feature export settings.
+    """
+
+    class SnapshotExport(proto.Message):
+        r"""Describes exporting Feature values as of the snapshot
+        timestamp.
+
+        Attributes:
+            snapshot_time (google.protobuf.timestamp_pb2.Timestamp):
+                Exports Feature values as of this timestamp.
+                If not set, retrieve values as of now.
+                Timestamp, if present, must not have higher than
+                millisecond precision.
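+
+        For illustration only, a minimal sketch of building an export
+        request pinned to a snapshot time (resource names are
+        placeholders; the required ``destination`` and
+        ``feature_selector`` fields are elided)::
+
+            from google.cloud import aiplatform_v1beta1
+            from google.protobuf import timestamp_pb2
+
+            # Snapshot times may have at most millisecond precision.
+            snapshot_time = timestamp_pb2.Timestamp()
+            snapshot_time.FromJsonString("2021-07-01T00:00:00.000Z")
+
+            request = aiplatform_v1beta1.ExportFeatureValuesRequest(
+                entity_type="projects/my-project/locations/us-central1/featurestores/my-store/entityTypes/users",
+                snapshot_export={"snapshot_time": snapshot_time},
+            )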
+        """
+
+        snapshot_time = proto.Field(
+            proto.MESSAGE,
+            number=1,
+            message=timestamp_pb2.Timestamp,
+        )
+
+    snapshot_export = proto.Field(
+        proto.MESSAGE,
+        number=3,
+        oneof='mode',
+        message=SnapshotExport,
+    )
+    entity_type = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    destination = proto.Field(
+        proto.MESSAGE,
+        number=4,
+        message='FeatureValueDestination',
+    )
+    feature_selector = proto.Field(
+        proto.MESSAGE,
+        number=5,
+        message=gca_feature_selector.FeatureSelector,
+    )
+    settings = proto.RepeatedField(
+        proto.MESSAGE,
+        number=6,
+        message='DestinationFeatureSetting',
+    )
+
+
+class DestinationFeatureSetting(proto.Message):
+    r"""
+    Attributes:
+        feature_id (str):
+            Required. The ID of the Feature to apply the
+            setting to.
+        destination_field (str):
+            Specify the field name in the export
+            destination. If not specified, Feature ID is
+            used.
+    """
+
+    feature_id = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    destination_field = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+class FeatureValueDestination(proto.Message):
+    r"""A destination location for Feature values and format.
+    Attributes:
+        bigquery_destination (google.cloud.aiplatform_v1beta1.types.BigQueryDestination):
+            Output in BigQuery format.
+            [BigQueryDestination.output_uri][google.cloud.aiplatform.v1beta1.BigQueryDestination.output_uri]
+            in
+            [FeatureValueDestination.bigquery_destination][google.cloud.aiplatform.v1beta1.FeatureValueDestination.bigquery_destination]
+            must refer to a table.
+        tfrecord_destination (google.cloud.aiplatform_v1beta1.types.TFRecordDestination):
+            Output in TFRecord format.
+
+            Below is the mapping from Feature value type in the
+            Featurestore to Feature value type in TFRecord:
+
+            ::
+
+                Value type in Featurestore     | Value type in TFRecord
+                DOUBLE, DOUBLE_ARRAY           | FLOAT_LIST
+                INT64, INT64_ARRAY             | INT64_LIST
+                STRING, STRING_ARRAY, BYTES    | BYTES_LIST
+                BOOL, BOOL_ARRAY (true, false) | BYTES_LIST
+                    (true -> byte_string("true"),
+                     false -> byte_string("false"))
+        csv_destination (google.cloud.aiplatform_v1beta1.types.CsvDestination):
+            Output in CSV format. Array Feature value
+            types are not allowed in CSV format.
+    """
+
+    bigquery_destination = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        oneof='destination',
+        message=io.BigQueryDestination,
+    )
+    tfrecord_destination = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        oneof='destination',
+        message=io.TFRecordDestination,
+    )
+    csv_destination = proto.Field(
+        proto.MESSAGE,
+        number=3,
+        oneof='destination',
+        message=io.CsvDestination,
+    )
+
+
+class ExportFeatureValuesResponse(proto.Message):
+    r"""Response message for
+    [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues].
+    """
+
+
+class BatchReadFeatureValuesResponse(proto.Message):
+    r"""Response message for
+    [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues].
+    """
+
+
+class CreateEntityTypeRequest(proto.Message):
+    r"""Request message for
+    [FeaturestoreService.CreateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateEntityType].
+
+    Attributes:
+        parent (str):
+            Required. The resource name of the Featurestore to create
+            EntityTypes. Format:
+            ``projects/{project}/locations/{location}/featurestores/{featurestore}``
+        entity_type (google.cloud.aiplatform_v1beta1.types.EntityType):
+            The EntityType to create.
+        entity_type_id (str):
+            Required.
The ID to use for the EntityType, which will + become the final component of the EntityType's resource + name. + + This value may be up to 60 characters, and valid characters + are ``[a-z0-9_]``. The first character cannot be a number. + + The value must be unique within a featurestore. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + entity_type = proto.Field( + proto.MESSAGE, + number=2, + message=gca_entity_type.EntityType, + ) + entity_type_id = proto.Field( + proto.STRING, + number=3, + ) + + +class GetEntityTypeRequest(proto.Message): + r"""Request message for + [FeaturestoreService.GetEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetEntityType]. + + Attributes: + name (str): + Required. The name of the EntityType resource. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListEntityTypesRequest(proto.Message): + r"""Request message for + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. + + Attributes: + parent (str): + Required. The resource name of the Featurestore to list + EntityTypes. Format: + ``projects/{project}/locations/{location}/featurestores/{featurestore}`` + filter (str): + Lists the EntityTypes that match the filter expression. The + following filters are supported: + + - ``create_time``: Supports =, !=, <, >, >=, and <= + comparisons. Values must be in RFC 3339 format. + - ``update_time``: Supports =, !=, <, >, >=, and <= + comparisons. Values must be in RFC 3339 format. + - ``labels``: Supports key-value equality as well as key + presence. + + Examples: + + - ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"`` + --> EntityTypes created or updated after + 2020-01-31T15:30:00.000000Z. + - ``labels.active = yes AND labels.env = prod`` --> + EntityTypes having both (active: yes) and (env: prod) + labels. + - ``labels.env: *`` --> Any EntityType which has a label + with 'env' as the key. + page_size (int): + The maximum number of EntityTypes to return. + The service may return fewer than this value. If + unspecified, at most 1000 EntityTypes will be + returned. The maximum value is 1000; any value + greater than 1000 will be coerced to 1000. + page_token (str): + A page token, received from a previous + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes] + call. Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes] + must match the call that provided the page token. + order_by (str): + A comma-separated list of fields to order by, sorted in + ascending order. Use "desc" after a field name for + descending. + + Supported fields: + + - ``entity_type_id`` + - ``create_time`` + - ``update_time`` + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. 
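+
+    For illustration only, a sketch of a request combining a label
+    filter with descending ordering (resource names are placeholders)::
+
+        from google.cloud import aiplatform_v1beta1
+
+        request = aiplatform_v1beta1.ListEntityTypesRequest(
+            parent="projects/my-project/locations/us-central1/featurestores/my-store",
+            filter="labels.env = prod",
+            order_by="create_time desc",
+        )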
+    """
+
+    parent = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    filter = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    page_size = proto.Field(
+        proto.INT32,
+        number=3,
+    )
+    page_token = proto.Field(
+        proto.STRING,
+        number=4,
+    )
+    order_by = proto.Field(
+        proto.STRING,
+        number=5,
+    )
+    read_mask = proto.Field(
+        proto.MESSAGE,
+        number=6,
+        message=field_mask_pb2.FieldMask,
+    )
+
+
+class ListEntityTypesResponse(proto.Message):
+    r"""Response message for
+    [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes].
+
+    Attributes:
+        entity_types (Sequence[google.cloud.aiplatform_v1beta1.types.EntityType]):
+            The EntityTypes matching the request.
+        next_page_token (str):
+            A token, which can be sent as
+            [ListEntityTypesRequest.page_token][google.cloud.aiplatform.v1beta1.ListEntityTypesRequest.page_token]
+            to retrieve the next page. If this field is omitted, there
+            are no subsequent pages.
+    """
+
+    @property
+    def raw_page(self):
+        return self
+
+    entity_types = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message=gca_entity_type.EntityType,
+    )
+    next_page_token = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+class UpdateEntityTypeRequest(proto.Message):
+    r"""Request message for
+    [FeaturestoreService.UpdateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateEntityType].
+
+    Attributes:
+        entity_type (google.cloud.aiplatform_v1beta1.types.EntityType):
+            Required. The EntityType's ``name`` field is used to
+            identify the EntityType to be updated. Format:
+            ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
+        update_mask (google.protobuf.field_mask_pb2.FieldMask):
+            Field mask is used to specify the fields to be overwritten
+            in the EntityType resource by the update. The fields
+            specified in the update_mask are relative to the resource,
+            not the full request. A field will be overwritten if it is
+            in the mask. If the user does not provide a mask then only
+            the non-empty fields present in the request will be
+            overwritten. Set the update_mask to ``*`` to override all
+            fields.
+
+            Updatable fields:
+
+            - ``description``
+            - ``labels``
+            - ``monitoring_config.snapshot_analysis.disabled``
+            - ``monitoring_config.snapshot_analysis.monitoring_interval``
+    """
+
+    entity_type = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=gca_entity_type.EntityType,
+    )
+    update_mask = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message=field_mask_pb2.FieldMask,
+    )
+
+
+class DeleteEntityTypeRequest(proto.Message):
+    r"""Request message for
+    [FeaturestoreService.DeleteEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteEntityType].
+    Attributes:
+        name (str):
+            Required. The name of the EntityType to be deleted. Format:
+            ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
+        force (bool):
+            If set to true, any Features for this
+            EntityType will also be deleted. (Otherwise, the
+            request will only work if the EntityType has no
+            Features.)
+    """
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    force = proto.Field(
+        proto.BOOL,
+        number=2,
+    )
+
+
+class CreateFeatureRequest(proto.Message):
+    r"""Request message for
+    [FeaturestoreService.CreateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeature].
+
+    Attributes:
+        parent (str):
+            Required. The resource name of the EntityType to create a
+            Feature. Format:
+            ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
+        feature (google.cloud.aiplatform_v1beta1.types.Feature):
+            Required. The Feature to create.
+        feature_id (str):
+            Required. The ID to use for the Feature, which will become
+            the final component of the Feature's resource name.
+
+            This value may be up to 60 characters, and valid characters
+            are ``[a-z0-9_]``. The first character cannot be a number.
+
+            The value must be unique within an EntityType.
+    """
+
+    parent = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    feature = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message=gca_feature.Feature,
+    )
+    feature_id = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+
+
+class BatchCreateFeaturesRequest(proto.Message):
+    r"""Request message for
+    [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures].
+
+    Attributes:
+        parent (str):
+            Required. The resource name of the EntityType to create the
+            batch of Features under. Format:
+            ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
+        requests (Sequence[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest]):
+            Required. The request message specifying the Features to
+            create. All Features must be created under the same parent
+            EntityType. The ``parent`` field in each child request
+            message can be omitted. If ``parent`` is set in a child
+            request, then the value must match the ``parent`` value in
+            this request message.
+    """
+
+    parent = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    requests = proto.RepeatedField(
+        proto.MESSAGE,
+        number=2,
+        message='CreateFeatureRequest',
+    )
+
+
+class BatchCreateFeaturesResponse(proto.Message):
+    r"""Response message for
+    [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures].
+
+    Attributes:
+        features (Sequence[google.cloud.aiplatform_v1beta1.types.Feature]):
+            The Features created.
+    """
+
+    features = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message=gca_feature.Feature,
+    )
+
+
+class GetFeatureRequest(proto.Message):
+    r"""Request message for
+    [FeaturestoreService.GetFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeature].
+
+    Attributes:
+        name (str):
+            Required. The name of the Feature resource. Format:
+            ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}``
+    """
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class ListFeaturesRequest(proto.Message):
+    r"""Request message for
+    [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures].
+
+    Attributes:
+        parent (str):
+            Required. The resource name of the EntityType to list
+            Features. Format:
+            ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
+        filter (str):
+            Lists the Features that match the filter expression. The
+            following filters are supported:
+
+            - ``value_type``: Supports = and != comparisons.
+            - ``create_time``: Supports =, !=, <, >, >=, and <=
+              comparisons. Values must be in RFC 3339 format.
+            - ``update_time``: Supports =, !=, <, >, >=, and <=
+              comparisons. Values must be in RFC 3339 format.
+            - ``labels``: Supports key-value equality as well as key
+              presence.
+
+            Examples:
+
+            - ``value_type = DOUBLE`` --> Features whose type is
+              DOUBLE.
+            - ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"``
+              --> Features created or updated after
+              2020-01-31T15:30:00.000000Z.
+            - ``labels.active = yes AND labels.env = prod`` -->
+              Features having both (active: yes) and (env: prod)
+              labels.
+            - ``labels.env: *`` --> Any Feature which has a label with
+              'env' as the key.
+        page_size (int):
+            The maximum number of Features to return. The
+            service may return fewer than this value. If
+            unspecified, at most 1000 Features will be
+            returned. The maximum value is 1000; any value
+            greater than 1000 will be coerced to 1000.
+        page_token (str):
+            A page token, received from a previous
+            [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]
+            call. Provide this to retrieve the subsequent page.
+
+            When paginating, all other parameters provided to
+            [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]
+            must match the call that provided the page token.
+        order_by (str):
+            A comma-separated list of fields to order by, sorted in
+            ascending order. Use "desc" after a field name for
+            descending. Supported fields:
+
+            - ``feature_id``
+            - ``value_type``
+            - ``create_time``
+            - ``update_time``
+        read_mask (google.protobuf.field_mask_pb2.FieldMask):
+            Mask specifying which fields to read.
+        latest_stats_count (int):
+            If set, return the most recent
+            [ListFeaturesRequest.latest_stats_count][google.cloud.aiplatform.v1beta1.ListFeaturesRequest.latest_stats_count]
+            of stats for each Feature in response. Valid values are in
+            the range [0, 10]. If fewer stats than
+            [ListFeaturesRequest.latest_stats_count][google.cloud.aiplatform.v1beta1.ListFeaturesRequest.latest_stats_count]
+            exist, all existing stats are returned.
+    """
+
+    parent = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    filter = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    page_size = proto.Field(
+        proto.INT32,
+        number=3,
+    )
+    page_token = proto.Field(
+        proto.STRING,
+        number=4,
+    )
+    order_by = proto.Field(
+        proto.STRING,
+        number=5,
+    )
+    read_mask = proto.Field(
+        proto.MESSAGE,
+        number=6,
+        message=field_mask_pb2.FieldMask,
+    )
+    latest_stats_count = proto.Field(
+        proto.INT32,
+        number=7,
+    )
+
+
+class ListFeaturesResponse(proto.Message):
+    r"""Response message for
+    [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures].
+
+    Attributes:
+        features (Sequence[google.cloud.aiplatform_v1beta1.types.Feature]):
+            The Features matching the request.
+        next_page_token (str):
+            A token, which can be sent as
+            [ListFeaturesRequest.page_token][google.cloud.aiplatform.v1beta1.ListFeaturesRequest.page_token]
+            to retrieve the next page. If this field is omitted, there
+            are no subsequent pages.
+    """
+
+    @property
+    def raw_page(self):
+        return self
+
+    features = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message=gca_feature.Feature,
+    )
+    next_page_token = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+class SearchFeaturesRequest(proto.Message):
+    r"""Request message for
+    [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures].
+
+    Attributes:
+        location (str):
+            Required. The resource name of the Location to search
+            Features. Format:
+            ``projects/{project}/locations/{location}``
+        query (str):
+            Query string that is a conjunction of field-restricted
+            queries and/or field-restricted filters.
+            Field-restricted queries and filters can be combined using
+            ``AND`` to form a conjunction.
+
+            A field query is in the form FIELD:QUERY. This implicitly
+            checks if QUERY exists as a substring within Feature's
+            FIELD. The QUERY and the FIELD are converted to a sequence
+            of words (i.e. tokens) for comparison. This is done by:
+
+            - Removing leading/trailing whitespace and tokenizing the
+              search value. Characters that are not one of alphanumeric
+              [a-zA-Z0-9], underscore [_], or asterisk [*] are treated
+              as delimiters for tokens. (*) is treated as a wildcard
+              that matches characters within a token.
+            - Ignoring case.
+            - Prepending an asterisk to the first and appending an
+              asterisk to the last token in QUERY.
+
+            A QUERY must be either a singular token or a phrase. A
+            phrase is one or multiple words enclosed in double quotation
+            marks ("). With phrases, the order of the words is
+            important. Words in the phrase must match in order and
+            consecutively.
+
+            Supported FIELDs for field-restricted queries:
+
+            - ``feature_id``
+            - ``description``
+            - ``entity_type_id``
+
+            Examples:
+
+            - ``feature_id: foo`` --> Matches a Feature with ID
+              containing the substring ``foo`` (e.g. ``foo``,
+              ``foofeature``, ``barfoo``).
+            - ``feature_id: foo*feature`` --> Matches a Feature with ID
+              containing the substring ``foo*feature`` (e.g.
+              ``foobarfeature``).
+            - ``feature_id: foo AND description: bar`` --> Matches a
+              Feature with ID containing the substring ``foo`` and
+              description containing the substring ``bar``.
+
+            Besides field queries, the following exact-match filters are
+            supported. The exact-match filters do not support wildcards.
+            Unlike field-restricted queries, exact-match filters are
+            case-sensitive.
+
+            - ``feature_id``: Supports = comparisons.
+            - ``description``: Supports = comparisons. Multi-token
+              filters should be enclosed in quotes.
+            - ``entity_type_id``: Supports = comparisons.
+            - ``value_type``: Supports = and != comparisons.
+            - ``labels``: Supports key-value equality as well as key
+              presence.
+            - ``featurestore_id``: Supports = comparisons.
+
+            Examples:
+
+            - ``description = "foo bar"`` --> Any Feature with
+              description exactly equal to ``foo bar``
+            - ``value_type = DOUBLE`` --> Features whose type is
+              DOUBLE.
+            - ``labels.active = yes AND labels.env = prod`` -->
+              Features having both (active: yes) and (env: prod)
+              labels.
+            - ``labels.env: *`` --> Any Feature which has a label with
+              ``env`` as the key.
+        page_size (int):
+            The maximum number of Features to return. The
+            service may return fewer than this value. If
+            unspecified, at most 100 Features will be
+            returned. The maximum value is 100; any value
+            greater than 100 will be coerced to 100.
+        page_token (str):
+            A page token, received from a previous
+            [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]
+            call. Provide this to retrieve the subsequent page.
+
+            When paginating, all other parameters provided to
+            [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures],
+            except ``page_size``, must match the call that provided the
+            page token.
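+
+    For illustration only, a sketch combining a field-restricted query
+    with an exact-match filter (resource names are placeholders)::
+
+        from google.cloud import aiplatform_v1beta1
+
+        request = aiplatform_v1beta1.SearchFeaturesRequest(
+            location="projects/my-project/locations/us-central1",
+            query="feature_id: temperature AND value_type = DOUBLE",
+        )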
+    """
+
+    location = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    query = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    page_size = proto.Field(
+        proto.INT32,
+        number=4,
+    )
+    page_token = proto.Field(
+        proto.STRING,
+        number=5,
+    )
+
+
+class SearchFeaturesResponse(proto.Message):
+    r"""Response message for
+    [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures].
+
+    Attributes:
+        features (Sequence[google.cloud.aiplatform_v1beta1.types.Feature]):
+            The Features matching the request.
+
+            Fields returned:
+
+            - ``name``
+            - ``description``
+            - ``labels``
+            - ``create_time``
+            - ``update_time``
+        next_page_token (str):
+            A token, which can be sent as
+            [SearchFeaturesRequest.page_token][google.cloud.aiplatform.v1beta1.SearchFeaturesRequest.page_token]
+            to retrieve the next page. If this field is omitted, there
+            are no subsequent pages.
+    """
+
+    @property
+    def raw_page(self):
+        return self
+
+    features = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message=gca_feature.Feature,
+    )
+    next_page_token = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+class UpdateFeatureRequest(proto.Message):
+    r"""Request message for
+    [FeaturestoreService.UpdateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeature].
+
+    Attributes:
+        feature (google.cloud.aiplatform_v1beta1.types.Feature):
+            Required. The Feature's ``name`` field is used to identify
+            the Feature to be updated. Format:
+            ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}``
+        update_mask (google.protobuf.field_mask_pb2.FieldMask):
+            Field mask is used to specify the fields to be overwritten
+            in the Feature resource by the update. The fields specified
+            in the update_mask are relative to the resource, not the
+            full request. A field will be overwritten if it is in the
+            mask. If the user does not provide a mask then only the
+            non-empty fields present in the request will be overwritten.
+            Set the update_mask to ``*`` to override all fields.
+
+            Updatable fields:
+
+            - ``description``
+            - ``labels``
+            - ``monitoring_config.snapshot_analysis.disabled``
+            - ``monitoring_config.snapshot_analysis.monitoring_interval``
+    """
+
+    feature = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=gca_feature.Feature,
+    )
+    update_mask = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message=field_mask_pb2.FieldMask,
+    )
+
+
+class DeleteFeatureRequest(proto.Message):
+    r"""Request message for
+    [FeaturestoreService.DeleteFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeature].
+
+    Attributes:
+        name (str):
+            Required. The name of the Feature to be deleted. Format:
+            ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}``
+    """
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class CreateFeaturestoreOperationMetadata(proto.Message):
+    r"""Details of operations that create a Featurestore.
+    Attributes:
+        generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata):
+            Operation metadata for Featurestore.
+    """
+
+    generic_metadata = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=operation.GenericOperationMetadata,
+    )
+
+
+class UpdateFeaturestoreOperationMetadata(proto.Message):
+    r"""Details of operations that update a Featurestore.
+    Attributes:
+        generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata):
+            Operation metadata for Featurestore.
+    """
+
+    generic_metadata = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=operation.GenericOperationMetadata,
+    )
+
+
+class ImportFeatureValuesOperationMetadata(proto.Message):
+    r"""Details of operations that import Feature values.
+    Attributes:
+        generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata):
+            Operation metadata for Featurestore import
+            Feature values.
+        imported_entity_count (int):
+            Number of entities that have been imported by
+            the operation.
+        imported_feature_value_count (int):
+            Number of Feature values that have been
+            imported by the operation.
+        invalid_row_count (int):
+            The number of rows in input source that weren't imported due
+            to either
+
+            - Not having any featureValues.
+            - Having a null entityId.
+            - Having a null timestamp.
+            - Not being parsable (applicable for CSV sources).
+    """
+
+    generic_metadata = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=operation.GenericOperationMetadata,
+    )
+    imported_entity_count = proto.Field(
+        proto.INT64,
+        number=2,
+    )
+    imported_feature_value_count = proto.Field(
+        proto.INT64,
+        number=3,
+    )
+    invalid_row_count = proto.Field(
+        proto.INT64,
+        number=6,
+    )
+
+
+class ExportFeatureValuesOperationMetadata(proto.Message):
+    r"""Details of operations that export Feature values.
+    Attributes:
+        generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata):
+            Operation metadata for Featurestore export
+            Feature values.
+    """
+
+    generic_metadata = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=operation.GenericOperationMetadata,
+    )
+
+
+class BatchReadFeatureValuesOperationMetadata(proto.Message):
+    r"""Details of operations that batch read Feature values.
+    Attributes:
+        generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata):
+            Operation metadata for Featurestore batch
+            read of Feature values.
+    """
+
+    generic_metadata = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=operation.GenericOperationMetadata,
+    )
+
+
+class CreateEntityTypeOperationMetadata(proto.Message):
+    r"""Details of operations that create an EntityType.
+    Attributes:
+        generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata):
+            Operation metadata for EntityType.
+    """
+
+    generic_metadata = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=operation.GenericOperationMetadata,
+    )
+
+
+class CreateFeatureOperationMetadata(proto.Message):
+    r"""Details of operations that create a Feature.
+    Attributes:
+        generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata):
+            Operation metadata for Feature.
+    """
+
+    generic_metadata = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=operation.GenericOperationMetadata,
+    )
+
+
+class BatchCreateFeaturesOperationMetadata(proto.Message):
+    r"""Details of operations that batch create Features.
+    Attributes:
+        generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata):
+            Operation metadata for Feature.
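+
+    For illustration only, a sketch of how this metadata surfaces on
+    the long-running operation returned by the service (``client`` and
+    ``request`` are placeholders)::
+
+        # The call returns a google.api_core.operation.Operation.
+        operation = client.batch_create_features(request=request)
+        metadata = operation.metadata
+        print(metadata.generic_metadata.create_time)
+        response = operation.result()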
+    """
+
+    generic_metadata = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=operation.GenericOperationMetadata,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py
new file mode 100644
index 0000000000..138d0c58e4
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py
@@ -0,0 +1,182 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import custom_job
+from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec
+from google.cloud.aiplatform_v1beta1.types import job_state
+from google.cloud.aiplatform_v1beta1.types import study
+from google.protobuf import timestamp_pb2  # type: ignore
+from google.rpc import status_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1',
+    manifest={
+        'HyperparameterTuningJob',
+    },
+)
+
+
+class HyperparameterTuningJob(proto.Message):
+    r"""Represents a HyperparameterTuningJob. A
+    HyperparameterTuningJob has a Study specification and multiple
+    CustomJobs with identical CustomJob specification.
+
+    Attributes:
+        name (str):
+            Output only. Resource name of the
+            HyperparameterTuningJob.
+        display_name (str):
+            Required. The display name of the
+            HyperparameterTuningJob. The name can be up to
+            128 characters long and can consist of any
+            UTF-8 characters.
+        study_spec (google.cloud.aiplatform_v1beta1.types.StudySpec):
+            Required. Study configuration of the
+            HyperparameterTuningJob.
+        max_trial_count (int):
+            Required. The desired total number of Trials.
+        parallel_trial_count (int):
+            Required. The desired number of Trials to run
+            in parallel.
+        max_failed_trial_count (int):
+            The number of failed Trials that need to be
+            seen before failing the HyperparameterTuningJob.
+            If set to 0, Vertex AI decides how many Trials
+            must fail before the whole job fails.
+        trial_job_spec (google.cloud.aiplatform_v1beta1.types.CustomJobSpec):
+            Required. The spec of a trial job. The same
+            spec applies to the CustomJobs created in all
+            the trials.
+        trials (Sequence[google.cloud.aiplatform_v1beta1.types.Trial]):
+            Output only. Trials of the
+            HyperparameterTuningJob.
+        state (google.cloud.aiplatform_v1beta1.types.JobState):
+            Output only. The detailed state of the job.
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Time when the
+            HyperparameterTuningJob was created.
+        start_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Time when the HyperparameterTuningJob for the
+            first time entered the ``JOB_STATE_RUNNING`` state.
+        end_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only.
Time when the HyperparameterTuningJob entered + any of the following states: ``JOB_STATE_SUCCEEDED``, + ``JOB_STATE_FAILED``, ``JOB_STATE_CANCELLED``. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the + HyperparameterTuningJob was most recently + updated. + error (google.rpc.status_pb2.Status): + Output only. Only populated when job's state is + JOB_STATE_FAILED or JOB_STATE_CANCELLED. + labels (Sequence[google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob.LabelsEntry]): + The labels with user-defined metadata to + organize HyperparameterTuningJobs. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): + Customer-managed encryption key options for a + HyperparameterTuningJob. If this is set, then + all resources created by the + HyperparameterTuningJob will be encrypted with + the provided encryption key. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + study_spec = proto.Field( + proto.MESSAGE, + number=4, + message=study.StudySpec, + ) + max_trial_count = proto.Field( + proto.INT32, + number=5, + ) + parallel_trial_count = proto.Field( + proto.INT32, + number=6, + ) + max_failed_trial_count = proto.Field( + proto.INT32, + number=7, + ) + trial_job_spec = proto.Field( + proto.MESSAGE, + number=8, + message=custom_job.CustomJobSpec, + ) + trials = proto.RepeatedField( + proto.MESSAGE, + number=9, + message=study.Trial, + ) + state = proto.Field( + proto.ENUM, + number=10, + enum=job_state.JobState, + ) + create_time = proto.Field( + proto.MESSAGE, + number=11, + message=timestamp_pb2.Timestamp, + ) + start_time = proto.Field( + proto.MESSAGE, + number=12, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=13, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=14, + message=timestamp_pb2.Timestamp, + ) + error = proto.Field( + proto.MESSAGE, + number=15, + message=status_pb2.Status, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=16, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=17, + message=gca_encryption_spec.EncryptionSpec, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index.py new file mode 100644 index 0000000000..fef443aa5e --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index.py @@ -0,0 +1,142 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import proto  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import deployed_index_ref
+from google.protobuf import struct_pb2  # type: ignore
+from google.protobuf import timestamp_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1',
+    manifest={
+        'Index',
+    },
+)
+
+
+class Index(proto.Message):
+    r"""A representation of a collection of database items organized
+    in a way that allows for approximate nearest neighbor (a.k.a.
+    ANN) search.
+
+    Attributes:
+        name (str):
+            Output only. The resource name of the Index.
+        display_name (str):
+            Required. The display name of the Index.
+            The name can be up to 128 characters long and
+            can consist of any UTF-8 characters.
+        description (str):
+            The description of the Index.
+        metadata_schema_uri (str):
+            Immutable. Points to a YAML file stored on Google Cloud
+            Storage describing additional information about the Index
+            that is specific to it. Unset if the Index does not have any
+            additional information. The schema is defined as an OpenAPI
+            3.0.2 `Schema
+            Object `__.
+            Note: The URI given on output will be immutable and probably
+            different, including the URI scheme, than the one given on
+            input. The output URI will point to a location where the
+            user only has read access.
+        metadata (google.protobuf.struct_pb2.Value):
+            Additional information about the Index; the schema of the
+            metadata can be found in
+            [metadata_schema][google.cloud.aiplatform.v1beta1.Index.metadata_schema_uri].
+        deployed_indexes (Sequence[google.cloud.aiplatform_v1beta1.types.DeployedIndexRef]):
+            Output only. The pointers to DeployedIndexes
+            created from this Index. An Index can only be
+            deleted if all of its DeployedIndexes have been
+            undeployed first.
+        etag (str):
+            Used to perform consistent read-modify-write
+            updates. If not set, a blind "overwrite" update
+            happens.
+        labels (Sequence[google.cloud.aiplatform_v1beta1.types.Index.LabelsEntry]):
+            The labels with user-defined metadata to
+            organize your Indexes.
+            Label keys and values can be no longer than 64
+            characters (Unicode codepoints), can only
+            contain lowercase letters, numeric characters,
+            underscores and dashes. International characters
+            are allowed.
+            See https://goo.gl/xmQnxf for more information
+            and examples of labels.
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this Index was
+            created.
+        update_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this Index was most recently
+            updated. This also includes any update to the contents of
+            the Index. Note that Operations working on this Index may
+            have their
+            [Operations.metadata.generic_metadata.update_time]
+            [google.cloud.aiplatform.v1beta1.GenericOperationMetadata.update_time]
+            set a little after the value of this timestamp; that does
+            not mean their results are not already reflected in the
+            Index. The result of any successfully completed Operation
+            on the Index is reflected in it.
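+
+    For illustration only, a minimal construction sketch (the display
+    name, description, and schema URI are placeholders)::
+
+        from google.cloud import aiplatform_v1beta1
+
+        index = aiplatform_v1beta1.Index(
+            display_name="my-ann-index",
+            description="Embeddings for approximate nearest neighbor search",
+            metadata_schema_uri="gs://my-bucket/index_metadata_schema.yaml",
+        )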
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + description = proto.Field( + proto.STRING, + number=3, + ) + metadata_schema_uri = proto.Field( + proto.STRING, + number=4, + ) + metadata = proto.Field( + proto.MESSAGE, + number=6, + message=struct_pb2.Value, + ) + deployed_indexes = proto.RepeatedField( + proto.MESSAGE, + number=7, + message=deployed_index_ref.DeployedIndexRef, + ) + etag = proto.Field( + proto.STRING, + number=8, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=9, + ) + create_time = proto.Field( + proto.MESSAGE, + number=10, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=11, + message=timestamp_pb2.Timestamp, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint.py new file mode 100644 index 0000000000..d496901da2 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint.py @@ -0,0 +1,308 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import machine_resources +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'IndexEndpoint', + 'DeployedIndex', + 'DeployedIndexAuthConfig', + 'IndexPrivateEndpoints', + }, +) + + +class IndexEndpoint(proto.Message): + r"""Indexes are deployed into it. An IndexEndpoint can have + multiple DeployedIndexes. + + Attributes: + name (str): + Output only. The resource name of the + IndexEndpoint. + display_name (str): + Required. The display name of the + IndexEndpoint. The name can be up to 128 + characters long and can consist of any UTF-8 + characters. + description (str): + The description of the IndexEndpoint. + deployed_indexes (Sequence[google.cloud.aiplatform_v1beta1.types.DeployedIndex]): + Output only. The indexes deployed in this + endpoint. + etag (str): + Used to perform consistent read-modify-write + updates. If not set, a blind "overwrite" update + happens. + labels (Sequence[google.cloud.aiplatform_v1beta1.types.IndexEndpoint.LabelsEntry]): + The labels with user-defined metadata to + organize your IndexEndpoints. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + IndexEndpoint was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + IndexEndpoint was last updated. 
+            This timestamp is not updated when the endpoint's
+            DeployedIndexes are updated, e.g. due to updates
+            of the original Indexes they are the deployments
+            of.
+        network (str):
+            Required. Immutable. The full name of the Google Compute
+            Engine
+            `network `__
+            to which the IndexEndpoint should be peered.
+
+            Private services access must already be configured for the
+            network. If left unspecified, the Endpoint is not peered
+            with any network.
+
+            `Format `__:
+            projects/{project}/global/networks/{network}, where
+            {project} is a project number, as in '12345', and {network}
+            is the network name.
+    """
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    display_name = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    description = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    deployed_indexes = proto.RepeatedField(
+        proto.MESSAGE,
+        number=4,
+        message='DeployedIndex',
+    )
+    etag = proto.Field(
+        proto.STRING,
+        number=5,
+    )
+    labels = proto.MapField(
+        proto.STRING,
+        proto.STRING,
+        number=6,
+    )
+    create_time = proto.Field(
+        proto.MESSAGE,
+        number=7,
+        message=timestamp_pb2.Timestamp,
+    )
+    update_time = proto.Field(
+        proto.MESSAGE,
+        number=8,
+        message=timestamp_pb2.Timestamp,
+    )
+    network = proto.Field(
+        proto.STRING,
+        number=9,
+    )
+
+
+class DeployedIndex(proto.Message):
+    r"""A deployment of an Index. IndexEndpoints contain one or more
+    DeployedIndexes.
+
+    Attributes:
+        id (str):
+            Required. The user-specified ID of the
+            DeployedIndex. The ID can be up to 128
+            characters long and must start with a letter and
+            only contain letters, numbers, and underscores.
+            The ID must be unique within the project it is
+            created in.
+        index (str):
+            Required. The name of the Index this is the
+            deployment of. We may refer to this Index as the
+            DeployedIndex's "original" Index.
+        display_name (str):
+            The display name of the DeployedIndex. If not provided upon
+            creation, the Index's display_name is used.
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when the DeployedIndex
+            was created.
+        private_endpoints (google.cloud.aiplatform_v1beta1.types.IndexPrivateEndpoints):
+            Output only. Provides paths for users to send requests
+            directly to the deployed index services running on Cloud via
+            private services access. This field is populated if
+            [network][google.cloud.aiplatform.v1beta1.IndexEndpoint.network]
+            is configured.
+        index_sync_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. The DeployedIndex may depend on various data on
+            its original Index. Additionally, when certain changes to
+            the original Index are being made (e.g. when the contents of
+            the Index change), the DeployedIndex may be asynchronously
+            updated in the background to reflect these changes. If this
+            timestamp's value is at least the
+            [Index.update_time][google.cloud.aiplatform.v1beta1.Index.update_time]
+            of the original Index, it means that this DeployedIndex and
+            the original Index are in sync. If this timestamp is older,
+            then to see which updates this DeployedIndex already
+            contains (and which it does not), one must
+            [list][Operations.ListOperations] [Operations][Operation]
+            [working][Operation.name] on the original Index. Only the
+            successfully completed Operations with
+            [Operations.metadata.generic_metadata.update_time]
+            [google.cloud.aiplatform.v1beta1.GenericOperationMetadata.update_time]
+            equal or before this sync time are contained in this
+            DeployedIndex.
+ automatic_resources (google.cloud.aiplatform_v1beta1.types.AutomaticResources):
+ Optional. A description of resources that the DeployedIndex
+ uses, which to a large degree are decided by Vertex AI, and
+ optionally allows only a modest additional configuration. If
+ min_replica_count is not set, the default value is 1. If
+ max_replica_count is not set, the default value is
+ min_replica_count. The max allowed replica count is 1000.
+
+ The user is billed for the resources (at least their minimal
+ amount) even if the DeployedIndex receives no traffic.
+ enable_access_logging (bool):
+ Optional. If true, the private endpoint's
+ access logs are sent to Stackdriver Logging.
+ These logs are like standard server access logs,
+ containing information like timestamp and
+ latency for each MatchRequest.
+ Note that Stackdriver logs may incur a cost,
+ especially if the deployed index receives a high
+ queries per second (QPS) rate. Estimate your
+ costs before enabling this option.
+ deployed_index_auth_config (google.cloud.aiplatform_v1beta1.types.DeployedIndexAuthConfig):
+ Optional. If set, authentication is
+ enabled for the private endpoint.
+ """
+
+ id = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ index = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ display_name = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+ create_time = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message=timestamp_pb2.Timestamp,
+ )
+ private_endpoints = proto.Field(
+ proto.MESSAGE,
+ number=5,
+ message='IndexPrivateEndpoints',
+ )
+ index_sync_time = proto.Field(
+ proto.MESSAGE,
+ number=6,
+ message=timestamp_pb2.Timestamp,
+ )
+ automatic_resources = proto.Field(
+ proto.MESSAGE,
+ number=7,
+ message=machine_resources.AutomaticResources,
+ )
+ enable_access_logging = proto.Field(
+ proto.BOOL,
+ number=8,
+ )
+ deployed_index_auth_config = proto.Field(
+ proto.MESSAGE,
+ number=9,
+ message='DeployedIndexAuthConfig',
+ )
+
+
+class DeployedIndexAuthConfig(proto.Message):
+ r"""Used to set up authentication on the DeployedIndex's private
+ endpoint.
+
+ Attributes:
+ auth_provider (google.cloud.aiplatform_v1beta1.types.DeployedIndexAuthConfig.AuthProvider):
+ Defines the authentication provider that the
+ DeployedIndex uses.
+ """
+
+ class AuthProvider(proto.Message):
+ r"""Configuration for an authentication provider, including support for
+ `JSON Web Token
+ (JWT) <https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32>`__.
+
+ Attributes:
+ audiences (Sequence[str]):
+ The list of JWT
+ `audiences <https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3>`__
+ that are allowed to access. A JWT containing any of these
+ audiences will be accepted.
+ allowed_issuers (Sequence[str]):
+ A list of allowed JWT issuers. Each entry must be a valid
+ Google service account, in the following format:
+
+ ``service-account-name@project-id.iam.gserviceaccount.com``
+ """
+
+ audiences = proto.RepeatedField(
+ proto.STRING,
+ number=1,
+ )
+ allowed_issuers = proto.RepeatedField(
+ proto.STRING,
+ number=2,
+ )
+
+ auth_provider = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message=AuthProvider,
+ )
+
+
+class IndexPrivateEndpoints(proto.Message):
+ r"""IndexPrivateEndpoints is used to provide paths for users to
+ send requests via private services access.
+
+ Attributes:
+ match_grpc_address (str):
+ Output only. The IP address used to send
+ match gRPC requests.
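+
+ Example (an illustrative sketch, not generated output; assumes
+ ``deployed`` is a ``DeployedIndex`` message whose IndexEndpoint has
+ private services access configured)::
+
+ grpc_address = deployed.private_endpoints.match_grpc_address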
+ """ + + match_grpc_address = proto.Field( + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py new file mode 100644 index 0000000000..fbf4cd9f7d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py @@ -0,0 +1,343 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint +from google.cloud.aiplatform_v1beta1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'CreateIndexEndpointRequest', + 'CreateIndexEndpointOperationMetadata', + 'GetIndexEndpointRequest', + 'ListIndexEndpointsRequest', + 'ListIndexEndpointsResponse', + 'UpdateIndexEndpointRequest', + 'DeleteIndexEndpointRequest', + 'DeployIndexRequest', + 'DeployIndexResponse', + 'DeployIndexOperationMetadata', + 'UndeployIndexRequest', + 'UndeployIndexResponse', + 'UndeployIndexOperationMetadata', + }, +) + + +class CreateIndexEndpointRequest(proto.Message): + r"""Request message for + [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + IndexEndpoint in. Format: + ``projects/{project}/locations/{location}`` + index_endpoint (google.cloud.aiplatform_v1beta1.types.IndexEndpoint): + Required. The IndexEndpoint to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + index_endpoint = proto.Field( + proto.MESSAGE, + number=2, + message=gca_index_endpoint.IndexEndpoint, + ) + + +class CreateIndexEndpointOperationMetadata(proto.Message): + r"""Runtime operation information for + [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class GetIndexEndpointRequest(proto.Message): + r"""Request message for + [IndexEndpointService.GetIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.GetIndexEndpoint] + + Attributes: + name (str): + Required. The name of the IndexEndpoint resource. 
Format:
+ ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}``
+ """
+
+ name = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+
+
+class ListIndexEndpointsRequest(proto.Message):
+ r"""Request message for
+ [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints].
+
+ Attributes:
+ parent (str):
+ Required. The resource name of the Location from which to
+ list the IndexEndpoints. Format:
+ ``projects/{project}/locations/{location}``
+ filter (str):
+ Optional. An expression for filtering the results of the
+ request. For field names both snake_case and camelCase are
+ supported.
+
+ - ``index_endpoint`` supports = and !=. ``index_endpoint``
+ represents the IndexEndpoint ID, i.e. the last segment of
+ the IndexEndpoint's
+ [resource name][google.cloud.aiplatform.v1beta1.IndexEndpoint.name].
+ - ``display_name`` supports =, != and regex() (uses
+ `re2 <https://github.com/google/re2/wiki/Syntax>`__
+ syntax)
+ - ``labels`` supports general map functions, that is:
+
+ - ``labels.key=value`` - key:value equality
+ - ``labels.key:*`` or ``labels:key`` - key existence
+ - A key including a space must be quoted:
+ ``labels."a key"``.
+
+ Some examples:
+
+ - ``index_endpoint="1"``
+ - ``display_name="myDisplayName"``
+ - ``regex(display_name, "^A")`` -> The display name starts
+ with an A.
+ - ``labels.myKey="myValue"``
+ page_size (int):
+ Optional. The standard list page size.
+ page_token (str):
+ Optional. The standard list page token. Typically obtained
+ via
+ [ListIndexEndpointsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListIndexEndpointsResponse.next_page_token]
+ of the previous
+ [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]
+ call.
+ read_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Optional. Mask specifying which fields to
+ read.
+ """
+
+ parent = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ filter = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ page_size = proto.Field(
+ proto.INT32,
+ number=3,
+ )
+ page_token = proto.Field(
+ proto.STRING,
+ number=4,
+ )
+ read_mask = proto.Field(
+ proto.MESSAGE,
+ number=5,
+ message=field_mask_pb2.FieldMask,
+ )
+
+
+class ListIndexEndpointsResponse(proto.Message):
+ r"""Response message for
+ [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints].
+
+ Attributes:
+ index_endpoints (Sequence[google.cloud.aiplatform_v1beta1.types.IndexEndpoint]):
+ List of IndexEndpoints in the requested page.
+ next_page_token (str):
+ A token to retrieve the next page of results. Pass to
+ [ListIndexEndpointsRequest.page_token][google.cloud.aiplatform.v1beta1.ListIndexEndpointsRequest.page_token]
+ to obtain that page.
+ """
+
+ @property
+ def raw_page(self):
+ return self
+
+ index_endpoints = proto.RepeatedField(
+ proto.MESSAGE,
+ number=1,
+ message=gca_index_endpoint.IndexEndpoint,
+ )
+ next_page_token = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+
+
+class UpdateIndexEndpointRequest(proto.Message):
+ r"""Request message for
+ [IndexEndpointService.UpdateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.UpdateIndexEndpoint].
+
+ Attributes:
+ index_endpoint (google.cloud.aiplatform_v1beta1.types.IndexEndpoint):
+ Required. The IndexEndpoint which replaces
+ the resource on the server.
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Required. The update mask applies to the resource. See
+ [google.protobuf.FieldMask][google.protobuf.FieldMask].
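+
+ Example (an illustrative sketch, not generated output; assumes the
+ generated ``aiplatform_v1beta1`` package is importable and
+ ``endpoint`` is a populated ``IndexEndpoint`` message)::
+
+ from google.protobuf import field_mask_pb2
+ from google.cloud import aiplatform_v1beta1
+
+ # Only the fields named in update_mask are overwritten.
+ request = aiplatform_v1beta1.UpdateIndexEndpointRequest(
+ index_endpoint=endpoint,
+ update_mask=field_mask_pb2.FieldMask(paths=['display_name']),
+ )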
+ """ + + index_endpoint = proto.Field( + proto.MESSAGE, + number=1, + message=gca_index_endpoint.IndexEndpoint, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class DeleteIndexEndpointRequest(proto.Message): + r"""Request message for + [IndexEndpointService.DeleteIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeleteIndexEndpoint]. + + Attributes: + name (str): + Required. The name of the IndexEndpoint resource to be + deleted. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class DeployIndexRequest(proto.Message): + r"""Request message for + [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. + + Attributes: + index_endpoint (str): + Required. The name of the IndexEndpoint resource into which + to deploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex): + Required. The DeployedIndex to be created + within the IndexEndpoint. + """ + + index_endpoint = proto.Field( + proto.STRING, + number=1, + ) + deployed_index = proto.Field( + proto.MESSAGE, + number=2, + message=gca_index_endpoint.DeployedIndex, + ) + + +class DeployIndexResponse(proto.Message): + r"""Response message for + [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. + + Attributes: + deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex): + The DeployedIndex that had been deployed in + the IndexEndpoint. + """ + + deployed_index = proto.Field( + proto.MESSAGE, + number=1, + message=gca_index_endpoint.DeployedIndex, + ) + + +class DeployIndexOperationMetadata(proto.Message): + r"""Runtime operation information for + [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class UndeployIndexRequest(proto.Message): + r"""Request message for + [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. + + Attributes: + index_endpoint (str): + Required. The name of the IndexEndpoint resource from which + to undeploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + deployed_index_id (str): + Required. The ID of the DeployedIndex to be + undeployed from the IndexEndpoint. + """ + + index_endpoint = proto.Field( + proto.STRING, + number=1, + ) + deployed_index_id = proto.Field( + proto.STRING, + number=2, + ) + + +class UndeployIndexResponse(proto.Message): + r"""Response message for + [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. + """ + + +class UndeployIndexOperationMetadata(proto.Message): + r"""Runtime operation information for + [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. 
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_service.py new file mode 100644 index 0000000000..01bca02406 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_service.py @@ -0,0 +1,354 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import index as gca_index +from google.cloud.aiplatform_v1beta1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'CreateIndexRequest', + 'CreateIndexOperationMetadata', + 'GetIndexRequest', + 'ListIndexesRequest', + 'ListIndexesResponse', + 'UpdateIndexRequest', + 'UpdateIndexOperationMetadata', + 'DeleteIndexRequest', + 'NearestNeighborSearchOperationMetadata', + }, +) + + +class CreateIndexRequest(proto.Message): + r"""Request message for + [IndexService.CreateIndex][google.cloud.aiplatform.v1beta1.IndexService.CreateIndex]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + Index in. Format: + ``projects/{project}/locations/{location}`` + index (google.cloud.aiplatform_v1beta1.types.Index): + Required. The Index to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + index = proto.Field( + proto.MESSAGE, + number=2, + message=gca_index.Index, + ) + + +class CreateIndexOperationMetadata(proto.Message): + r"""Runtime operation information for + [IndexService.CreateIndex][google.cloud.aiplatform.v1beta1.IndexService.CreateIndex]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. + nearest_neighbor_search_operation_metadata (google.cloud.aiplatform_v1beta1.types.NearestNeighborSearchOperationMetadata): + The operation metadata with regard to + Matching Engine Index operation. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + nearest_neighbor_search_operation_metadata = proto.Field( + proto.MESSAGE, + number=2, + message='NearestNeighborSearchOperationMetadata', + ) + + +class GetIndexRequest(proto.Message): + r"""Request message for + [IndexService.GetIndex][google.cloud.aiplatform.v1beta1.IndexService.GetIndex] + + Attributes: + name (str): + Required. The name of the Index resource. 
Format: + ``projects/{project}/locations/{location}/indexes/{index}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListIndexesRequest(proto.Message): + r"""Request message for + [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]. + + Attributes: + parent (str): + Required. The resource name of the Location from which to + list the Indexes. Format: + ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListIndexesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListIndexesResponse.next_page_token] + of the previous + [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListIndexesResponse(proto.Message): + r"""Response message for + [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]. + + Attributes: + indexes (Sequence[google.cloud.aiplatform_v1beta1.types.Index]): + List of indexes in the requested page. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListIndexesRequest.page_token][google.cloud.aiplatform.v1beta1.ListIndexesRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + indexes = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_index.Index, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateIndexRequest(proto.Message): + r"""Request message for + [IndexService.UpdateIndex][google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex]. + + Attributes: + index (google.cloud.aiplatform_v1beta1.types.Index): + Required. The Index which updates the + resource on the server. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + The update mask applies to the resource. For the + ``FieldMask`` definition, see + [google.protobuf.FieldMask][google.protobuf.FieldMask]. + """ + + index = proto.Field( + proto.MESSAGE, + number=1, + message=gca_index.Index, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class UpdateIndexOperationMetadata(proto.Message): + r"""Runtime operation information for + [IndexService.UpdateIndex][google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. + nearest_neighbor_search_operation_metadata (google.cloud.aiplatform_v1beta1.types.NearestNeighborSearchOperationMetadata): + The operation metadata with regard to + Matching Engine Index operation. 
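+
+ Example (an illustrative sketch, not generated output; assumes
+ ``operation`` is the long-running operation returned by
+ ``IndexServiceClient.update_index``)::
+
+ metadata = operation.metadata
+ nn_metadata = metadata.nearest_neighbor_search_operation_metadata
+ # Per-file validation stats for the updated index content.
+ for stats in nn_metadata.content_validation_stats:
+ print(stats.source_gcs_uri, stats.invalid_record_count)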
+ """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + nearest_neighbor_search_operation_metadata = proto.Field( + proto.MESSAGE, + number=2, + message='NearestNeighborSearchOperationMetadata', + ) + + +class DeleteIndexRequest(proto.Message): + r"""Request message for + [IndexService.DeleteIndex][google.cloud.aiplatform.v1beta1.IndexService.DeleteIndex]. + + Attributes: + name (str): + Required. The name of the Index resource to be deleted. + Format: + ``projects/{project}/locations/{location}/indexes/{index}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class NearestNeighborSearchOperationMetadata(proto.Message): + r"""Runtime operation metadata with regard to Matching Engine + Index. + + Attributes: + content_validation_stats (Sequence[google.cloud.aiplatform_v1beta1.types.NearestNeighborSearchOperationMetadata.ContentValidationStats]): + The validation stats of the content (per file) to be + inserted or updated on the Matching Engine Index resource. + Populated if contentsDeltaUri is provided as part of + [Index.metadata][google.cloud.aiplatform.v1beta1.Index.metadata]. + Please note that, currently for those files that are broken + or has unsupported file format, we will not have the stats + for those files. + """ + + class RecordError(proto.Message): + r""" + Attributes: + error_type (google.cloud.aiplatform_v1beta1.types.NearestNeighborSearchOperationMetadata.RecordError.RecordErrorType): + The error type of this record. + error_message (str): + A human-readable message that is shown to the user to help + them fix the error. Note that this message may change from + time to time, your code should check against error_type as + the source of truth. + source_gcs_uri (str): + Cloud Storage URI pointing to the original + file in user's bucket. + embedding_id (str): + Empty if the embedding id is failed to parse. + raw_record (str): + The original content of this record. + """ + class RecordErrorType(proto.Enum): + r"""""" + ERROR_TYPE_UNSPECIFIED = 0 + EMPTY_LINE = 1 + INVALID_JSON_SYNTAX = 2 + INVALID_CSV_SYNTAX = 3 + INVALID_AVRO_SYNTAX = 4 + INVALID_EMBEDDING_ID = 5 + EMBEDDING_SIZE_MISMATCH = 6 + NAMESPACE_MISSING = 7 + + error_type = proto.Field( + proto.ENUM, + number=1, + enum='NearestNeighborSearchOperationMetadata.RecordError.RecordErrorType', + ) + error_message = proto.Field( + proto.STRING, + number=2, + ) + source_gcs_uri = proto.Field( + proto.STRING, + number=3, + ) + embedding_id = proto.Field( + proto.STRING, + number=4, + ) + raw_record = proto.Field( + proto.STRING, + number=5, + ) + + class ContentValidationStats(proto.Message): + r""" + Attributes: + source_gcs_uri (str): + Cloud Storage URI pointing to the original + file in user's bucket. + valid_record_count (int): + Number of records in this file that were + successfully processed. + invalid_record_count (int): + Number of records in this file we skipped due + to validate errors. + partial_errors (Sequence[google.cloud.aiplatform_v1beta1.types.NearestNeighborSearchOperationMetadata.RecordError]): + The detail information of the partial + failures encountered for those invalid records + that couldn't be parsed. Up to 50 partial errors + will be reported. 
+ """ + + source_gcs_uri = proto.Field( + proto.STRING, + number=1, + ) + valid_record_count = proto.Field( + proto.INT64, + number=2, + ) + invalid_record_count = proto.Field( + proto.INT64, + number=3, + ) + partial_errors = proto.RepeatedField( + proto.MESSAGE, + number=4, + message='NearestNeighborSearchOperationMetadata.RecordError', + ) + + content_validation_stats = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=ContentValidationStats, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/io.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/io.py new file mode 100644 index 0000000000..c9dc988e79 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/io.py @@ -0,0 +1,189 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'AvroSource', + 'CsvSource', + 'GcsSource', + 'GcsDestination', + 'BigQuerySource', + 'BigQueryDestination', + 'CsvDestination', + 'TFRecordDestination', + 'ContainerRegistryDestination', + }, +) + + +class AvroSource(proto.Message): + r"""The storage details for Avro input content. + Attributes: + gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): + Required. Google Cloud Storage location. + """ + + gcs_source = proto.Field( + proto.MESSAGE, + number=1, + message='GcsSource', + ) + + +class CsvSource(proto.Message): + r"""The storage details for CSV input content. + Attributes: + gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): + Required. Google Cloud Storage location. + """ + + gcs_source = proto.Field( + proto.MESSAGE, + number=1, + message='GcsSource', + ) + + +class GcsSource(proto.Message): + r"""The Google Cloud Storage location for the input content. + Attributes: + uris (Sequence[str]): + Required. Google Cloud Storage URI(-s) to the + input file(s). May contain wildcards. For more + information on wildcards, see + https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. + """ + + uris = proto.RepeatedField( + proto.STRING, + number=1, + ) + + +class GcsDestination(proto.Message): + r"""The Google Cloud Storage location where the output is to be + written to. + + Attributes: + output_uri_prefix (str): + Required. Google Cloud Storage URI to output + directory. If the uri doesn't end with '/', a + '/' will be automatically appended. The + directory is created if it doesn't exist. + """ + + output_uri_prefix = proto.Field( + proto.STRING, + number=1, + ) + + +class BigQuerySource(proto.Message): + r"""The BigQuery location for the input content. + Attributes: + input_uri (str): + Required. BigQuery URI to a table, up to 2000 characters + long. Accepted forms: + + - BigQuery path. For example: + ``bq://projectId.bqDatasetId.bqTableId``. 
+ """ + + input_uri = proto.Field( + proto.STRING, + number=1, + ) + + +class BigQueryDestination(proto.Message): + r"""The BigQuery location for the output content. + Attributes: + output_uri (str): + Required. BigQuery URI to a project or table, up to 2000 + characters long. + + When only the project is specified, the Dataset and Table is + created. When the full table reference is specified, the + Dataset must exist and table must not exist. + + Accepted forms: + + - BigQuery path. For example: ``bq://projectId`` or + ``bq://projectId.bqDatasetId.bqTableId``. + """ + + output_uri = proto.Field( + proto.STRING, + number=1, + ) + + +class CsvDestination(proto.Message): + r"""The storage details for CSV output content. + Attributes: + gcs_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination): + Required. Google Cloud Storage location. + """ + + gcs_destination = proto.Field( + proto.MESSAGE, + number=1, + message='GcsDestination', + ) + + +class TFRecordDestination(proto.Message): + r"""The storage details for TFRecord output content. + Attributes: + gcs_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination): + Required. Google Cloud Storage location. + """ + + gcs_destination = proto.Field( + proto.MESSAGE, + number=1, + message='GcsDestination', + ) + + +class ContainerRegistryDestination(proto.Message): + r"""The Container Registry location for the container image. + Attributes: + output_uri (str): + Required. Container Registry URI of a container image. Only + Google Container Registry and Artifact Registry are + supported now. Accepted forms: + + - Google Container Registry path. For example: + ``gcr.io/projectId/imageName:tag``. + + - Artifact Registry path. For example: + ``us-central1-docker.pkg.dev/projectId/repoName/imageName:tag``. + + If a tag is not specified, "latest" will be used as the + default tag. + """ + + output_uri = proto.Field( + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_service.py new file mode 100644 index 0000000000..49932da1d8 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_service.py @@ -0,0 +1,1068 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import batch_prediction_job as gca_batch_prediction_job +from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job +from google.cloud.aiplatform_v1beta1.types import data_labeling_job as gca_data_labeling_job +from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job +from google.cloud.aiplatform_v1beta1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'CreateCustomJobRequest', + 'GetCustomJobRequest', + 'ListCustomJobsRequest', + 'ListCustomJobsResponse', + 'DeleteCustomJobRequest', + 'CancelCustomJobRequest', + 'CreateDataLabelingJobRequest', + 'GetDataLabelingJobRequest', + 'ListDataLabelingJobsRequest', + 'ListDataLabelingJobsResponse', + 'DeleteDataLabelingJobRequest', + 'CancelDataLabelingJobRequest', + 'CreateHyperparameterTuningJobRequest', + 'GetHyperparameterTuningJobRequest', + 'ListHyperparameterTuningJobsRequest', + 'ListHyperparameterTuningJobsResponse', + 'DeleteHyperparameterTuningJobRequest', + 'CancelHyperparameterTuningJobRequest', + 'CreateBatchPredictionJobRequest', + 'GetBatchPredictionJobRequest', + 'ListBatchPredictionJobsRequest', + 'ListBatchPredictionJobsResponse', + 'DeleteBatchPredictionJobRequest', + 'CancelBatchPredictionJobRequest', + 'CreateModelDeploymentMonitoringJobRequest', + 'SearchModelDeploymentMonitoringStatsAnomaliesRequest', + 'SearchModelDeploymentMonitoringStatsAnomaliesResponse', + 'GetModelDeploymentMonitoringJobRequest', + 'ListModelDeploymentMonitoringJobsRequest', + 'ListModelDeploymentMonitoringJobsResponse', + 'UpdateModelDeploymentMonitoringJobRequest', + 'DeleteModelDeploymentMonitoringJobRequest', + 'PauseModelDeploymentMonitoringJobRequest', + 'ResumeModelDeploymentMonitoringJobRequest', + 'UpdateModelDeploymentMonitoringJobOperationMetadata', + }, +) + + +class CreateCustomJobRequest(proto.Message): + r"""Request message for + [JobService.CreateCustomJob][google.cloud.aiplatform.v1beta1.JobService.CreateCustomJob]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + CustomJob in. Format: + ``projects/{project}/locations/{location}`` + custom_job (google.cloud.aiplatform_v1beta1.types.CustomJob): + Required. The CustomJob to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + custom_job = proto.Field( + proto.MESSAGE, + number=2, + message=gca_custom_job.CustomJob, + ) + + +class GetCustomJobRequest(proto.Message): + r"""Request message for + [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob]. + + Attributes: + name (str): + Required. The name of the CustomJob resource. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListCustomJobsRequest(proto.Message): + r"""Request message for + [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + CustomJobs from. Format: + ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. 
+ + Supported fields: + + - ``display_name`` supports = and !=. + + - ``state`` supports = and !=. + + Some examples of using the filter are: + + - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` + + - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` + + - ``NOT display_name="my_job"`` + + - ``state="JOB_STATE_FAILED"`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListCustomJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListCustomJobsResponse.next_page_token] + of the previous + [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListCustomJobsResponse(proto.Message): + r"""Response message for + [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs] + + Attributes: + custom_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.CustomJob]): + List of CustomJobs in the requested page. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListCustomJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListCustomJobsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + custom_jobs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_custom_job.CustomJob, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteCustomJobRequest(proto.Message): + r"""Request message for + [JobService.DeleteCustomJob][google.cloud.aiplatform.v1beta1.JobService.DeleteCustomJob]. + + Attributes: + name (str): + Required. The name of the CustomJob resource to be deleted. + Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CancelCustomJobRequest(proto.Message): + r"""Request message for + [JobService.CancelCustomJob][google.cloud.aiplatform.v1beta1.JobService.CancelCustomJob]. + + Attributes: + name (str): + Required. The name of the CustomJob to cancel. Format: + ``projects/{project}/locations/{location}/customJobs/{custom_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateDataLabelingJobRequest(proto.Message): + r"""Request message for + [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CreateDataLabelingJob]. + + Attributes: + parent (str): + Required. The parent of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}`` + data_labeling_job (google.cloud.aiplatform_v1beta1.types.DataLabelingJob): + Required. The DataLabelingJob to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + data_labeling_job = proto.Field( + proto.MESSAGE, + number=2, + message=gca_data_labeling_job.DataLabelingJob, + ) + + +class GetDataLabelingJobRequest(proto.Message): + r"""Request message for + [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.GetDataLabelingJob]. + + Attributes: + name (str): + Required. The name of the DataLabelingJob. 
Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListDataLabelingJobsRequest(proto.Message): + r"""Request message for + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. + + Attributes: + parent (str): + Required. The parent of the DataLabelingJob. Format: + ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. + + Supported fields: + + - ``display_name`` supports = and !=. + + - ``state`` supports = and !=. + + Some examples of using the filter are: + + - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` + + - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` + + - ``NOT display_name="my_job"`` + + - ``state="JOB_STATE_FAILED"`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. FieldMask represents a + set of symbolic field paths. For example, the mask can be + ``paths: "name"``. The "name" here is a field in + DataLabelingJob. If this field is not set, all fields of the + DataLabelingJob are returned. + order_by (str): + A comma-separated list of fields to order by, sorted in + ascending order by default. Use ``desc`` after a field name + for descending. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + order_by = proto.Field( + proto.STRING, + number=6, + ) + + +class ListDataLabelingJobsResponse(proto.Message): + r"""Response message for + [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. + + Attributes: + data_labeling_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.DataLabelingJob]): + A list of DataLabelingJobs that matches the + specified filter in the request. + next_page_token (str): + The standard List next-page token. + """ + + @property + def raw_page(self): + return self + + data_labeling_jobs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_data_labeling_job.DataLabelingJob, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteDataLabelingJobRequest(proto.Message): + r"""Request message for + [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.DeleteDataLabelingJob]. + + Attributes: + name (str): + Required. The name of the DataLabelingJob to be deleted. + Format: + ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CancelDataLabelingJobRequest(proto.Message): + r"""Request message for + [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CancelDataLabelingJob]. + + Attributes: + name (str): + Required. The name of the DataLabelingJob. 
Format:
+ ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}``
+ """
+
+ name = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+
+
+class CreateHyperparameterTuningJobRequest(proto.Message):
+ r"""Request message for
+ [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CreateHyperparameterTuningJob].
+
+ Attributes:
+ parent (str):
+ Required. The resource name of the Location to create the
+ HyperparameterTuningJob in. Format:
+ ``projects/{project}/locations/{location}``
+ hyperparameter_tuning_job (google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob):
+ Required. The HyperparameterTuningJob to
+ create.
+ """
+
+ parent = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ hyperparameter_tuning_job = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=gca_hyperparameter_tuning_job.HyperparameterTuningJob,
+ )
+
+
+class GetHyperparameterTuningJobRequest(proto.Message):
+ r"""Request message for
+ [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob].
+
+ Attributes:
+ name (str):
+ Required. The name of the HyperparameterTuningJob resource.
+ Format:
+ ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}``
+ """
+
+ name = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+
+
+class ListHyperparameterTuningJobsRequest(proto.Message):
+ r"""Request message for
+ [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs].
+
+ Attributes:
+ parent (str):
+ Required. The resource name of the Location to list the
+ HyperparameterTuningJobs from. Format:
+ ``projects/{project}/locations/{location}``
+ filter (str):
+ The standard list filter.
+
+ Supported fields:
+
+ - ``display_name`` supports = and !=.
+
+ - ``state`` supports = and !=.
+
+ Some examples of using the filter are:
+
+ - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"``
+
+ - ``state="JOB_STATE_RUNNING" OR display_name="my_job"``
+
+ - ``NOT display_name="my_job"``
+
+ - ``state="JOB_STATE_FAILED"``
+ page_size (int):
+ The standard list page size.
+ page_token (str):
+ The standard list page token. Typically obtained via
+ [ListHyperparameterTuningJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListHyperparameterTuningJobsResponse.next_page_token]
+ of the previous
+ [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs]
+ call.
+ read_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Mask specifying which fields to read.
+ """
+
+ parent = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ filter = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ page_size = proto.Field(
+ proto.INT32,
+ number=3,
+ )
+ page_token = proto.Field(
+ proto.STRING,
+ number=4,
+ )
+ read_mask = proto.Field(
+ proto.MESSAGE,
+ number=5,
+ message=field_mask_pb2.FieldMask,
+ )
+
+
+class ListHyperparameterTuningJobsResponse(proto.Message):
+ r"""Response message for
+ [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs]
+
+ Attributes:
+ hyperparameter_tuning_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob]):
+ List of HyperparameterTuningJobs in the requested page.
+ [HyperparameterTuningJob.trials][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.trials]
+ of the jobs will not be returned.
+ next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListHyperparameterTuningJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListHyperparameterTuningJobsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + hyperparameter_tuning_jobs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_hyperparameter_tuning_job.HyperparameterTuningJob, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteHyperparameterTuningJobRequest(proto.Message): + r"""Request message for + [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.DeleteHyperparameterTuningJob]. + + Attributes: + name (str): + Required. The name of the HyperparameterTuningJob resource + to be deleted. Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CancelHyperparameterTuningJobRequest(proto.Message): + r"""Request message for + [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CancelHyperparameterTuningJob]. + + Attributes: + name (str): + Required. The name of the HyperparameterTuningJob to cancel. + Format: + ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateBatchPredictionJobRequest(proto.Message): + r"""Request message for + [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CreateBatchPredictionJob]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + BatchPredictionJob in. Format: + ``projects/{project}/locations/{location}`` + batch_prediction_job (google.cloud.aiplatform_v1beta1.types.BatchPredictionJob): + Required. The BatchPredictionJob to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + batch_prediction_job = proto.Field( + proto.MESSAGE, + number=2, + message=gca_batch_prediction_job.BatchPredictionJob, + ) + + +class GetBatchPredictionJobRequest(proto.Message): + r"""Request message for + [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob]. + + Attributes: + name (str): + Required. The name of the BatchPredictionJob resource. + Format: + ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListBatchPredictionJobsRequest(proto.Message): + r"""Request message for + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + BatchPredictionJobs from. Format: + ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. + + Supported fields: + + - ``display_name`` supports = and !=. + + - ``state`` supports = and !=. + + - ``model_display_name`` supports = and != + + Some examples of using the filter are: + + - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` + + - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` + + - ``NOT display_name="my_job"`` + + - ``state="JOB_STATE_FAILED"`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. 
Typically obtained via + [ListBatchPredictionJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListBatchPredictionJobsResponse.next_page_token] + of the previous + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListBatchPredictionJobsResponse(proto.Message): + r"""Response message for + [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs] + + Attributes: + batch_prediction_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.BatchPredictionJob]): + List of BatchPredictionJobs in the requested + page. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListBatchPredictionJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListBatchPredictionJobsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + batch_prediction_jobs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_batch_prediction_job.BatchPredictionJob, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteBatchPredictionJobRequest(proto.Message): + r"""Request message for + [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.DeleteBatchPredictionJob]. + + Attributes: + name (str): + Required. The name of the BatchPredictionJob resource to be + deleted. Format: + ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CancelBatchPredictionJobRequest(proto.Message): + r"""Request message for + [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CancelBatchPredictionJob]. + + Attributes: + name (str): + Required. The name of the BatchPredictionJob to cancel. + Format: + ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateModelDeploymentMonitoringJobRequest(proto.Message): + r"""Request message for + [JobService.CreateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.CreateModelDeploymentMonitoringJob]. + + Attributes: + parent (str): + Required. The parent of the ModelDeploymentMonitoringJob. + Format: ``projects/{project}/locations/{location}`` + model_deployment_monitoring_job (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob): + Required. The ModelDeploymentMonitoringJob to + create + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + model_deployment_monitoring_job = proto.Field( + proto.MESSAGE, + number=2, + message=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + ) + + +class SearchModelDeploymentMonitoringStatsAnomaliesRequest(proto.Message): + r"""Request message for + [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. + + Attributes: + model_deployment_monitoring_job (str): + Required. 
ModelDeploymentMonitoringJob resource name.
+ Format:
+ ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}``
+ deployed_model_id (str):
+ Required. The DeployedModel ID of the
+ [google.cloud.aiplatform.master.ModelDeploymentMonitoringObjectiveConfig.deployed_model_id].
+ feature_display_name (str):
+ The feature display name. If specified, only return the
+ stats belonging to this feature. Format:
+ [ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.feature_display_name][google.cloud.aiplatform.v1beta1.ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.feature_display_name],
+ example: "user_destination".
+ objectives (Sequence[google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest.StatsAnomaliesObjective]):
+ Required. Objectives of the stats to
+ retrieve.
+ page_size (int):
+ The standard list page size.
+ page_token (str):
+ A page token received from a previous
+ [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]
+ call.
+ start_time (google.protobuf.timestamp_pb2.Timestamp):
+ The earliest timestamp of stats being
+ generated. If not set, indicates fetching stats
+ until the earliest possible one.
+ end_time (google.protobuf.timestamp_pb2.Timestamp):
+ The latest timestamp of stats being
+ generated. If not set, indicates fetching stats
+ until the latest possible one.
+ """
+
+ class StatsAnomaliesObjective(proto.Message):
+ r"""Stats requested for a specific objective.
+ Attributes:
+ type_ (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringObjectiveType):
+
+ top_feature_count (int):
+ If set, all attribution scores between
+ [SearchModelDeploymentMonitoringStatsAnomaliesRequest.start_time][google.cloud.aiplatform.v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest.start_time]
+ and
+ [SearchModelDeploymentMonitoringStatsAnomaliesRequest.end_time][google.cloud.aiplatform.v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest.end_time]
+ are fetched, and the page token doesn't take effect in this
+ case. Only used to retrieve the attribution scores of the
+ top Features, which have the highest attribution score in
+ the latest monitoring run.
+ """
+
+ type_ = proto.Field(
+ proto.ENUM,
+ number=1,
+ enum=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringObjectiveType,
+ )
+ top_feature_count = proto.Field(
+ proto.INT32,
+ number=4,
+ )
+
+ model_deployment_monitoring_job = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ deployed_model_id = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ feature_display_name = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+ objectives = proto.RepeatedField(
+ proto.MESSAGE,
+ number=4,
+ message=StatsAnomaliesObjective,
+ )
+ page_size = proto.Field(
+ proto.INT32,
+ number=5,
+ )
+ page_token = proto.Field(
+ proto.STRING,
+ number=6,
+ )
+ start_time = proto.Field(
+ proto.MESSAGE,
+ number=7,
+ message=timestamp_pb2.Timestamp,
+ )
+ end_time = proto.Field(
+ proto.MESSAGE,
+ number=8,
+ message=timestamp_pb2.Timestamp,
+ )
+
+
+class SearchModelDeploymentMonitoringStatsAnomaliesResponse(proto.Message):
+ r"""Response message for
+ [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies].
+ + Attributes: + monitoring_stats (Sequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringStatsAnomalies]): + Stats retrieved for requested objectives. There are at most + 1000 + [ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.prediction_stats][google.cloud.aiplatform.v1beta1.ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.prediction_stats] + in the response. + next_page_token (str): + The page token that can be used by the next + [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies] + call. + """ + + @property + def raw_page(self): + return self + + monitoring_stats = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class GetModelDeploymentMonitoringJobRequest(proto.Message): + r"""Request message for + [JobService.GetModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.GetModelDeploymentMonitoringJob]. + + Attributes: + name (str): + Required. The resource name of the + ModelDeploymentMonitoringJob. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListModelDeploymentMonitoringJobsRequest(proto.Message): + r"""Request message for + [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. + + Attributes: + parent (str): + Required. The parent of the ModelDeploymentMonitoringJob. + Format: ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListModelDeploymentMonitoringJobsResponse(proto.Message): + r"""Response message for + [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. + + Attributes: + model_deployment_monitoring_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob]): + A list of ModelDeploymentMonitoringJobs that + matches the specified filter in the request. + next_page_token (str): + The standard List next-page token. + """ + + @property + def raw_page(self): + return self + + model_deployment_monitoring_jobs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateModelDeploymentMonitoringJobRequest(proto.Message): + r"""Request message for + [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob]. + + Attributes: + model_deployment_monitoring_job (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob): + Required. 
The model monitoring configuration + which replaces the resource on the server. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The update mask applies to the + resource. + """ + + model_deployment_monitoring_job = proto.Field( + proto.MESSAGE, + number=1, + message=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class DeleteModelDeploymentMonitoringJobRequest(proto.Message): + r"""Request message for + [JobService.DeleteModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.DeleteModelDeploymentMonitoringJob]. + + Attributes: + name (str): + Required. The resource name of the model monitoring job to + delete. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class PauseModelDeploymentMonitoringJobRequest(proto.Message): + r"""Request message for + [JobService.PauseModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.PauseModelDeploymentMonitoringJob]. + + Attributes: + name (str): + Required. The resource name of the + ModelDeploymentMonitoringJob to pause. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ResumeModelDeploymentMonitoringJobRequest(proto.Message): + r"""Request message for + [JobService.ResumeModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.ResumeModelDeploymentMonitoringJob]. + + Attributes: + name (str): + Required. The resource name of the + ModelDeploymentMonitoringJob to resume. Format: + ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class UpdateModelDeploymentMonitoringJobOperationMetadata(proto.Message): + r"""Runtime operation information for + [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_state.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_state.py new file mode 100644 index 0000000000..677ba3b002 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_state.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'JobState', + }, +) + + +class JobState(proto.Enum): + r"""Describes the state of a job.""" + JOB_STATE_UNSPECIFIED = 0 + JOB_STATE_QUEUED = 1 + JOB_STATE_PENDING = 2 + JOB_STATE_RUNNING = 3 + JOB_STATE_SUCCEEDED = 4 + JOB_STATE_FAILED = 5 + JOB_STATE_CANCELLING = 6 + JOB_STATE_CANCELLED = 7 + JOB_STATE_PAUSED = 8 + JOB_STATE_EXPIRED = 9 + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/lineage_subgraph.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/lineage_subgraph.py new file mode 100644 index 0000000000..597674dbab --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/lineage_subgraph.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import artifact +from google.cloud.aiplatform_v1beta1.types import event +from google.cloud.aiplatform_v1beta1.types import execution + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'LineageSubgraph', + }, +) + + +class LineageSubgraph(proto.Message): + r"""A subgraph of the overall lineage graph. Event edges connect + Artifact and Execution nodes. + + Attributes: + artifacts (Sequence[google.cloud.aiplatform_v1beta1.types.Artifact]): + The Artifact nodes in the subgraph. + executions (Sequence[google.cloud.aiplatform_v1beta1.types.Execution]): + The Execution nodes in the subgraph. + events (Sequence[google.cloud.aiplatform_v1beta1.types.Event]): + The Event edges between Artifacts and + Executions in the subgraph. + """ + + artifacts = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=artifact.Artifact, + ) + executions = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=execution.Execution, + ) + events = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=event.Event, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/machine_resources.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/machine_resources.py new file mode 100644 index 0000000000..fa6b9b1e1b --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/machine_resources.py @@ -0,0 +1,308 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import accelerator_type as gca_accelerator_type
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1',
+    manifest={
+        'MachineSpec',
+        'DedicatedResources',
+        'AutomaticResources',
+        'BatchDedicatedResources',
+        'ResourcesConsumed',
+        'DiskSpec',
+        'AutoscalingMetricSpec',
+    },
+)
+
+
+class MachineSpec(proto.Message):
+    r"""Specification of a single machine.
+    Attributes:
+        machine_type (str):
+            Immutable. The type of the machine.
+
+            See the `list of machine types supported for
+            prediction `__
+
+            See the `list of machine types supported for custom
+            training `__.
+
+            For
+            [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel]
+            this field is optional, and the default value is
+            ``n1-standard-2``. For
+            [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]
+            or as part of
+            [WorkerPoolSpec][google.cloud.aiplatform.v1beta1.WorkerPoolSpec]
+            this field is required.
+        accelerator_type (google.cloud.aiplatform_v1beta1.types.AcceleratorType):
+            Immutable. The type of accelerator(s) that may be attached
+            to the machine as per
+            [accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count].
+        accelerator_count (int):
+            The number of accelerators to attach to the
+            machine.
+    """
+
+    machine_type = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    accelerator_type = proto.Field(
+        proto.ENUM,
+        number=2,
+        enum=gca_accelerator_type.AcceleratorType,
+    )
+    accelerator_count = proto.Field(
+        proto.INT32,
+        number=3,
+    )
+
+
+class DedicatedResources(proto.Message):
+    r"""A description of resources that are dedicated to a
+    DeployedModel, and that need a higher degree of manual
+    configuration.
+
+    Attributes:
+        machine_spec (google.cloud.aiplatform_v1beta1.types.MachineSpec):
+            Required. Immutable. The specification of a
+            single machine used by the prediction.
+        min_replica_count (int):
+            Required. Immutable. The minimum number of machine replicas
+            this DeployedModel will always be deployed on. If traffic
+            against it increases, it may dynamically be deployed onto
+            more replicas, and as traffic decreases, some of these extra
+            replicas may be freed. Note: if
+            [machine_spec.accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count]
+            is above 0, currently the model will always be deployed on
+            precisely
+            [min_replica_count][google.cloud.aiplatform.v1beta1.DedicatedResources.min_replica_count].
+        max_replica_count (int):
+            Immutable. The maximum number of replicas this DeployedModel
+            may be deployed on when the traffic against it increases. If
+            the requested value is too large, the deployment will error,
+            but if deployment succeeds then the ability to scale the
+            model to that many replicas is guaranteed (barring service
+            outages). If traffic against the DeployedModel increases
+            beyond what its replicas at maximum may handle, a portion of
+            the traffic will be dropped. If this value is not provided,
+            [min_replica_count][google.cloud.aiplatform.v1beta1.DedicatedResources.min_replica_count]
+            is used as the default value.
+        autoscaling_metric_specs (Sequence[google.cloud.aiplatform_v1beta1.types.AutoscalingMetricSpec]):
+            Immutable. The metric specifications that override a
+            resource utilization metric (CPU utilization, accelerator's
+            duty cycle, and so on) target value (defaults to 60 if not
+            set). At most one entry is allowed per metric.
+
+            If
+            [machine_spec.accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count]
+            is above 0, the autoscaling will be based on both CPU
+            utilization and accelerator's duty cycle metrics: it scales
+            up when either metric exceeds its target value, and scales
+            down only if both metrics are under their target values. The
+            default target value is 60 for both metrics.
+
+            If
+            [machine_spec.accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count]
+            is 0, the autoscaling will be based on the CPU utilization
+            metric only, with a default target value of 60 if not
+            explicitly set.
+
+            For example, in the case of Online Prediction, if you want
+            to override the target CPU utilization to 80, you should set
+            [autoscaling_metric_specs.metric_name][google.cloud.aiplatform.v1beta1.AutoscalingMetricSpec.metric_name]
+            to
+            ``aiplatform.googleapis.com/prediction/online/cpu/utilization``
+            and
+            [autoscaling_metric_specs.target][google.cloud.aiplatform.v1beta1.AutoscalingMetricSpec.target]
+            to ``80``.
+    """
+
+    machine_spec = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message='MachineSpec',
+    )
+    min_replica_count = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+    max_replica_count = proto.Field(
+        proto.INT32,
+        number=3,
+    )
+    autoscaling_metric_specs = proto.RepeatedField(
+        proto.MESSAGE,
+        number=4,
+        message='AutoscalingMetricSpec',
+    )
+
+
+class AutomaticResources(proto.Message):
+    r"""A description of resources that are, to a large degree,
+    decided by Vertex AI, and that require only a modest additional
+    configuration. Each Model supporting these resources documents
+    its specific guidelines.
+
+    Attributes:
+        min_replica_count (int):
+            Immutable. The minimum number of replicas this DeployedModel
+            will always be deployed on. If traffic against it increases,
+            it may dynamically be deployed onto more replicas up to
+            [max_replica_count][google.cloud.aiplatform.v1beta1.AutomaticResources.max_replica_count],
+            and as traffic decreases, some of these extra replicas may
+            be freed. If the requested value is too large, the
+            deployment will error.
+        max_replica_count (int):
+            Immutable. The maximum number of replicas
+            this DeployedModel may be deployed on when the
+            traffic against it increases. If the requested
+            value is too large, the deployment will error,
+            but if deployment succeeds then the ability to
+            scale the model to that many replicas is
+            guaranteed (barring service outages). If traffic
+            against the DeployedModel increases beyond what
+            its replicas at maximum may handle, a portion of
+            the traffic will be dropped. If this value is
+            not provided, no upper bound for scaling under
+            heavy traffic will be assumed, though Vertex AI
+            may be unable to scale beyond a certain replica
+            number.
+    """
+
+    min_replica_count = proto.Field(
+        proto.INT32,
+        number=1,
+    )
+    max_replica_count = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+
+
+class BatchDedicatedResources(proto.Message):
+    r"""A description of resources that are used for performing batch
+    operations, are dedicated to a Model, and need manual
+    configuration.
+
+    Attributes:
+        machine_spec (google.cloud.aiplatform_v1beta1.types.MachineSpec):
+            Required. Immutable. The specification of a
+            single machine.
+        starting_replica_count (int):
+            Immutable. The number of machine replicas used at the start
+            of the batch operation. If not set, Vertex AI decides the
+            starting number, not greater than
+            [max_replica_count][google.cloud.aiplatform.v1beta1.BatchDedicatedResources.max_replica_count]
+        max_replica_count (int):
+            Immutable. The maximum number of machine
+            replicas the batch operation may be scaled to.
+            The default value is 10.
+    """
+
+    machine_spec = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message='MachineSpec',
+    )
+    starting_replica_count = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+    max_replica_count = proto.Field(
+        proto.INT32,
+        number=3,
+    )
+
+
+class ResourcesConsumed(proto.Message):
+    r"""Statistics information about resource consumption.
+    Attributes:
+        replica_hours (float):
+            Output only. The number of replica hours
+            used. Note that many replicas may run in
+            parallel, and additionally any given work may be
+            queued for some time. Therefore this value is
+            not strictly related to wall time.
+    """
+
+    replica_hours = proto.Field(
+        proto.DOUBLE,
+        number=1,
+    )
+
+
+class DiskSpec(proto.Message):
+    r"""Represents the spec of disk options.
+    Attributes:
+        boot_disk_type (str):
+            Type of the boot disk (default is "pd-ssd").
+            Valid values: "pd-ssd" (Persistent Disk Solid
+            State Drive) or "pd-standard" (Persistent Disk
+            Hard Disk Drive).
+        boot_disk_size_gb (int):
+            Size in GB of the boot disk (default is
+            100GB).
+    """
+
+    boot_disk_type = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    boot_disk_size_gb = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+
+
+class AutoscalingMetricSpec(proto.Message):
+    r"""The metric specification that defines the target resource
+    utilization (CPU utilization, accelerator's duty cycle, and so
+    on) for calculating the desired replica count.
+
+    Attributes:
+        metric_name (str):
+            Required. The resource metric name. Supported metrics:
+
+            -  For Online Prediction:
+            -  ``aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle``
+            -  ``aiplatform.googleapis.com/prediction/online/cpu/utilization``
+        target (int):
+            The target resource utilization in percentage
+            (1% - 100%) for the given metric; once the real
+            usage deviates from the target by a certain
+            percentage, the machine replicas change. The
+            default value is 60 (representing 60%) if not
+            provided.
+    """
+
+    metric_name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    target = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/manual_batch_tuning_parameters.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/manual_batch_tuning_parameters.py
new file mode 100644
index 0000000000..d0c6cfc111
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/manual_batch_tuning_parameters.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1',
+    manifest={
+        'ManualBatchTuningParameters',
+    },
+)
+
+
+class ManualBatchTuningParameters(proto.Message):
+    r"""Manual batch tuning parameters.
+    Attributes:
+        batch_size (int):
+            Immutable. The number of records (e.g.
+            instances) of the operation given in each batch
+            to a machine replica. The machine type and the
+            size of a single record should be considered
+            when setting this parameter: a higher value
+            speeds up the batch operation's execution, but a
+            value that is too high will result in a whole
+            batch not fitting in a machine's memory, and the
+            whole operation will fail.
+            The default value is 4.
+    """
+
+    batch_size = proto.Field(
+        proto.INT32,
+        number=1,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_schema.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_schema.py
new file mode 100644
index 0000000000..01e89e428e
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_schema.py
@@ -0,0 +1,94 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.protobuf import timestamp_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1',
+    manifest={
+        'MetadataSchema',
+    },
+)
+
+
+class MetadataSchema(proto.Message):
+    r"""Instance of a general MetadataSchema.
+    Attributes:
+        name (str):
+            Output only. The resource name of the
+            MetadataSchema.
+        schema_version (str):
+            The version of the MetadataSchema. The version's format must
+            match the following regular expression:
+            ``^[0-9]+[.][0-9]+[.][0-9]+$``, which allows ordering and
+            comparing different versions. Example: 1.0.0, 1.0.1, etc.
+        schema (str):
+            Required. The raw YAML string representation of the
+            MetadataSchema. The combination of [MetadataSchema.version]
+            and the schema name given by ``title`` in
+            [MetadataSchema.schema] must be unique within a
+            MetadataStore.
+
+            The schema is defined as an OpenAPI 3.0.2 `MetadataSchema
+            Object `__
+        schema_type (google.cloud.aiplatform_v1beta1.types.MetadataSchema.MetadataSchemaType):
+            The type of the MetadataSchema. This is a
+            property that identifies which metadata types
+            will use the MetadataSchema.
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this
+            MetadataSchema was created.
+ description (str): + Description of the Metadata Schema + """ + class MetadataSchemaType(proto.Enum): + r"""Describes the type of the MetadataSchema.""" + METADATA_SCHEMA_TYPE_UNSPECIFIED = 0 + ARTIFACT_TYPE = 1 + EXECUTION_TYPE = 2 + CONTEXT_TYPE = 3 + + name = proto.Field( + proto.STRING, + number=1, + ) + schema_version = proto.Field( + proto.STRING, + number=2, + ) + schema = proto.Field( + proto.STRING, + number=3, + ) + schema_type = proto.Field( + proto.ENUM, + number=4, + enum=MetadataSchemaType, + ) + create_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + description = proto.Field( + proto.STRING, + number=6, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_service.py new file mode 100644 index 0000000000..2f9484546d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_service.py @@ -0,0 +1,1197 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1beta1.types import context as gca_context +from google.cloud.aiplatform_v1beta1.types import event +from google.cloud.aiplatform_v1beta1.types import execution as gca_execution +from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_store as gca_metadata_store +from google.cloud.aiplatform_v1beta1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'CreateMetadataStoreRequest', + 'CreateMetadataStoreOperationMetadata', + 'GetMetadataStoreRequest', + 'ListMetadataStoresRequest', + 'ListMetadataStoresResponse', + 'DeleteMetadataStoreRequest', + 'DeleteMetadataStoreOperationMetadata', + 'CreateArtifactRequest', + 'GetArtifactRequest', + 'ListArtifactsRequest', + 'ListArtifactsResponse', + 'UpdateArtifactRequest', + 'CreateContextRequest', + 'GetContextRequest', + 'ListContextsRequest', + 'ListContextsResponse', + 'UpdateContextRequest', + 'DeleteContextRequest', + 'AddContextArtifactsAndExecutionsRequest', + 'AddContextArtifactsAndExecutionsResponse', + 'AddContextChildrenRequest', + 'AddContextChildrenResponse', + 'QueryContextLineageSubgraphRequest', + 'CreateExecutionRequest', + 'GetExecutionRequest', + 'ListExecutionsRequest', + 'ListExecutionsResponse', + 'UpdateExecutionRequest', + 'AddExecutionEventsRequest', + 'AddExecutionEventsResponse', + 'QueryExecutionInputsAndOutputsRequest', + 'CreateMetadataSchemaRequest', + 'GetMetadataSchemaRequest', + 'ListMetadataSchemasRequest', + 'ListMetadataSchemasResponse', + 'QueryArtifactLineageSubgraphRequest', + }, +) + + +class 
CreateMetadataStoreRequest(proto.Message): + r"""Request message for + [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore]. + + Attributes: + parent (str): + Required. The resource name of the Location + where the MetadataStore should be created. + Format: projects/{project}/locations/{location}/ + metadata_store (google.cloud.aiplatform_v1beta1.types.MetadataStore): + Required. The MetadataStore to create. + metadata_store_id (str): + The {metadatastore} portion of the resource name with the + format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + If not provided, the MetadataStore's ID will be a UUID + generated by the service. Must be 4-128 characters in + length. Valid characters are /[a-z][0-9]-/. Must be unique + across all MetadataStores in the parent Location. (Otherwise + the request will fail with ALREADY_EXISTS, or + PERMISSION_DENIED if the caller can't view the preexisting + MetadataStore.) + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + metadata_store = proto.Field( + proto.MESSAGE, + number=2, + message=gca_metadata_store.MetadataStore, + ) + metadata_store_id = proto.Field( + proto.STRING, + number=3, + ) + + +class CreateMetadataStoreOperationMetadata(proto.Message): + r"""Details of operations that perform + [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for creating a + MetadataStore. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class GetMetadataStoreRequest(proto.Message): + r"""Request message for + [MetadataService.GetMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataStore]. + + Attributes: + name (str): + Required. The resource name of the + MetadataStore to retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListMetadataStoresRequest(proto.Message): + r"""Request message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. + + Attributes: + parent (str): + Required. The Location whose MetadataStores + should be listed. Format: + projects/{project}/locations/{location} + page_size (int): + The maximum number of Metadata Stores to + return. The service may return fewer. + Must be in range 1-1000, inclusive. Defaults to + 100. + page_token (str): + A page token, received from a previous + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores] + call. Provide this to retrieve the subsequent page. + + When paginating, all other provided parameters must match + the call that provided the page token. (Otherwise the + request will fail with INVALID_ARGUMENT error.) + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + page_size = proto.Field( + proto.INT32, + number=2, + ) + page_token = proto.Field( + proto.STRING, + number=3, + ) + + +class ListMetadataStoresResponse(proto.Message): + r"""Response message for + [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. + + Attributes: + metadata_stores (Sequence[google.cloud.aiplatform_v1beta1.types.MetadataStore]): + The MetadataStores found for the Location. 
+ next_page_token (str): + A token, which can be sent as + [ListMetadataStoresRequest.page_token][google.cloud.aiplatform.v1beta1.ListMetadataStoresRequest.page_token] + to retrieve the next page. If this field is not populated, + there are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + metadata_stores = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_metadata_store.MetadataStore, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteMetadataStoreRequest(proto.Message): + r"""Request message for + [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore]. + + Attributes: + name (str): + Required. The resource name of the + MetadataStore to delete. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + force (bool): + If set to true, any child resources of this MetadataStore + will be deleted. (Otherwise, the request will fail with a + FAILED_PRECONDITION error if the MetadataStore has any child + resources.) + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + force = proto.Field( + proto.BOOL, + number=2, + ) + + +class DeleteMetadataStoreOperationMetadata(proto.Message): + r"""Details of operations that perform + [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for deleting a + MetadataStore. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class CreateArtifactRequest(proto.Message): + r"""Request message for + [MetadataService.CreateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.CreateArtifact]. + + Attributes: + parent (str): + Required. The resource name of the + MetadataStore where the Artifact should be + created. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + artifact (google.cloud.aiplatform_v1beta1.types.Artifact): + Required. The Artifact to create. + artifact_id (str): + The {artifact} portion of the resource name with the format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + If not provided, the Artifact's ID will be a UUID generated + by the service. Must be 4-128 characters in length. Valid + characters are /[a-z][0-9]-/. Must be unique across all + Artifacts in the parent MetadataStore. (Otherwise the + request will fail with ALREADY_EXISTS, or PERMISSION_DENIED + if the caller can't view the preexisting Artifact.) + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + artifact = proto.Field( + proto.MESSAGE, + number=2, + message=gca_artifact.Artifact, + ) + artifact_id = proto.Field( + proto.STRING, + number=3, + ) + + +class GetArtifactRequest(proto.Message): + r"""Request message for + [MetadataService.GetArtifact][google.cloud.aiplatform.v1beta1.MetadataService.GetArtifact]. + + Attributes: + name (str): + Required. The resource name of the Artifact + to retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListArtifactsRequest(proto.Message): + r"""Request message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. + + Attributes: + parent (str): + Required. 
The MetadataStore whose Artifacts + should be listed. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + page_size (int): + The maximum number of Artifacts to return. + The service may return fewer. Must be in range + 1-1000, inclusive. Defaults to 100. + page_token (str): + A page token, received from a previous + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts] + call. Provide this to retrieve the subsequent page. + + When paginating, all other provided parameters must match + the call that provided the page token. (Otherwise the + request will fail with INVALID_ARGUMENT error.) + filter (str): + Filter specifying the boolean condition for the Artifacts to + satisfy in order to be part of the result set. The syntax to + define filter query is based on https://google.aip.dev/160. + The supported set of filters include the following: + + - **Attribute filtering**: For example: + ``display_name = "test"``. Supported fields include: + ``name``, ``display_name``, ``uri``, ``state``, + ``schema_title``, ``create_time``, and ``update_time``. + Time fields, such as ``create_time`` and ``update_time``, + require values specified in RFC-3339 format. For example: + ``create_time = "2020-11-19T11:30:00-04:00"`` + - **Metadata field**: To filter on metadata fields use + traversal operation as follows: + ``metadata..``. For example: + ``metadata.field_1.number_value = 10.0`` + - **Context based filtering**: To filter Artifacts based on + the contexts to which they belong, use the function + operator with the full resource name + ``in_context()``. For example: + ``in_context("projects//locations//metadataStores//contexts/")`` + + Each of the above supported filter types can be combined + together using logical operators (``AND`` & ``OR``). + + For example: + ``display_name = "test" AND metadata.field1.bool_value = true``. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + page_size = proto.Field( + proto.INT32, + number=2, + ) + page_token = proto.Field( + proto.STRING, + number=3, + ) + filter = proto.Field( + proto.STRING, + number=4, + ) + + +class ListArtifactsResponse(proto.Message): + r"""Response message for + [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. + + Attributes: + artifacts (Sequence[google.cloud.aiplatform_v1beta1.types.Artifact]): + The Artifacts retrieved from the + MetadataStore. + next_page_token (str): + A token, which can be sent as + [ListArtifactsRequest.page_token][google.cloud.aiplatform.v1beta1.ListArtifactsRequest.page_token] + to retrieve the next page. If this field is not populated, + there are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + artifacts = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_artifact.Artifact, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateArtifactRequest(proto.Message): + r"""Request message for + [MetadataService.UpdateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.UpdateArtifact]. + + Attributes: + artifact (google.cloud.aiplatform_v1beta1.types.Artifact): + Required. The Artifact containing updates. The Artifact's + [Artifact.name][google.cloud.aiplatform.v1beta1.Artifact.name] + field is used to identify the Artifact to be updated. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. 
A FieldMask indicating which fields + should be updated. + allow_missing (bool): + If set to true, and the + [Artifact][google.cloud.aiplatform.v1beta1.Artifact] is not + found, a new + [Artifact][google.cloud.aiplatform.v1beta1.Artifact] will be + created. In this situation, ``update_mask`` is ignored. + """ + + artifact = proto.Field( + proto.MESSAGE, + number=1, + message=gca_artifact.Artifact, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + allow_missing = proto.Field( + proto.BOOL, + number=3, + ) + + +class CreateContextRequest(proto.Message): + r"""Request message for + [MetadataService.CreateContext][google.cloud.aiplatform.v1beta1.MetadataService.CreateContext]. + + Attributes: + parent (str): + Required. The resource name of the + MetadataStore where the Context should be + created. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + context (google.cloud.aiplatform_v1beta1.types.Context): + Required. The Context to create. + context_id (str): + The {context} portion of the resource name with the format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}. + If not provided, the Context's ID will be a UUID generated + by the service. Must be 4-128 characters in length. Valid + characters are /[a-z][0-9]-/. Must be unique across all + Contexts in the parent MetadataStore. (Otherwise the request + will fail with ALREADY_EXISTS, or PERMISSION_DENIED if the + caller can't view the preexisting Context.) + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + context = proto.Field( + proto.MESSAGE, + number=2, + message=gca_context.Context, + ) + context_id = proto.Field( + proto.STRING, + number=3, + ) + + +class GetContextRequest(proto.Message): + r"""Request message for + [MetadataService.GetContext][google.cloud.aiplatform.v1beta1.MetadataService.GetContext]. + + Attributes: + name (str): + Required. The resource name of the Context to + retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListContextsRequest(proto.Message): + r"""Request message for + [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts] + + Attributes: + parent (str): + Required. The MetadataStore whose Contexts + should be listed. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + page_size (int): + The maximum number of Contexts to return. The + service may return fewer. Must be in range + 1-1000, inclusive. Defaults to 100. + page_token (str): + A page token, received from a previous + [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts] + call. Provide this to retrieve the subsequent page. + + When paginating, all other provided parameters must match + the call that provided the page token. (Otherwise the + request will fail with INVALID_ARGUMENT error.) + filter (str): + Filter specifying the boolean condition for the Contexts to + satisfy in order to be part of the result set. The syntax to + define filter query is based on https://google.aip.dev/160. + Following are the supported set of filters: + + - **Attribute filtering**: For example: + ``display_name = "test"``. Supported fields include: + ``name``, ``display_name``, ``schema_title``, + ``create_time``, and ``update_time``. 
Time fields, such
+               as ``create_time`` and ``update_time``, require values
+               specified in RFC-3339 format. For example:
+               ``create_time = "2020-11-19T11:30:00-04:00"``.
+
+            -  **Metadata field**: To filter on metadata fields use
+               traversal operation as follows:
+               ``metadata..``. For example:
+               ``metadata.field_1.number_value = 10.0``.
+
+            -  **Parent Child filtering**: To filter Contexts based on
+               parent-child relationship use the HAS operator as
+               follows:
+
+               ::
+
+                  parent_contexts:
+                  "projects//locations//metadataStores//contexts/"
+                  child_contexts:
+                  "projects//locations//metadataStores//contexts/"
+
+            Each of the above supported filters can be combined together
+            using logical operators (``AND`` & ``OR``).
+
+            For example:
+            ``display_name = "test" AND metadata.field1.bool_value = true``.
+    """
+
+    parent = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    page_size = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+    page_token = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    filter = proto.Field(
+        proto.STRING,
+        number=4,
+    )
+
+
+class ListContextsResponse(proto.Message):
+    r"""Response message for
+    [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts].
+
+    Attributes:
+        contexts (Sequence[google.cloud.aiplatform_v1beta1.types.Context]):
+            The Contexts retrieved from the
+            MetadataStore.
+        next_page_token (str):
+            A token, which can be sent as
+            [ListContextsRequest.page_token][google.cloud.aiplatform.v1beta1.ListContextsRequest.page_token]
+            to retrieve the next page. If this field is not populated,
+            there are no subsequent pages.
+    """
+
+    @property
+    def raw_page(self):
+        return self
+
+    contexts = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message=gca_context.Context,
+    )
+    next_page_token = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+class UpdateContextRequest(proto.Message):
+    r"""Request message for
+    [MetadataService.UpdateContext][google.cloud.aiplatform.v1beta1.MetadataService.UpdateContext].
+
+    Attributes:
+        context (google.cloud.aiplatform_v1beta1.types.Context):
+            Required. The Context containing updates. The Context's
+            [Context.name][google.cloud.aiplatform.v1beta1.Context.name]
+            field is used to identify the Context to be updated. Format:
+            projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}
+        update_mask (google.protobuf.field_mask_pb2.FieldMask):
+            Required. A FieldMask indicating which fields
+            should be updated.
+        allow_missing (bool):
+            If set to true, and the
+            [Context][google.cloud.aiplatform.v1beta1.Context] is not
+            found, a new
+            [Context][google.cloud.aiplatform.v1beta1.Context] will be
+            created. In this situation, ``update_mask`` is ignored.
+    """
+
+    context = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=gca_context.Context,
+    )
+    update_mask = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message=field_mask_pb2.FieldMask,
+    )
+    allow_missing = proto.Field(
+        proto.BOOL,
+        number=3,
+    )
+
+
+class DeleteContextRequest(proto.Message):
+    r"""Request message for
+    [MetadataService.DeleteContext][google.cloud.aiplatform.v1beta1.MetadataService.DeleteContext].
+
+    Attributes:
+        name (str):
+            Required. The resource name of the Context to
+            delete. Format:
+            projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}
+        force (bool):
+            If set to true, any child resources of this Context will be
+            deleted.
(Otherwise, the request will fail with a + FAILED_PRECONDITION error if the Context has any child + resources, such as another Context, Artifact, or Execution). + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + force = proto.Field( + proto.BOOL, + number=2, + ) + + +class AddContextArtifactsAndExecutionsRequest(proto.Message): + r"""Request message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. + + Attributes: + context (str): + Required. The resource name of the Context + that the Artifacts and Executions belong to. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + artifacts (Sequence[str]): + The resource names of the Artifacts to + attribute to the Context. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} + executions (Sequence[str]): + The resource names of the Executions to + associate with the Context. + + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + """ + + context = proto.Field( + proto.STRING, + number=1, + ) + artifacts = proto.RepeatedField( + proto.STRING, + number=2, + ) + executions = proto.RepeatedField( + proto.STRING, + number=3, + ) + + +class AddContextArtifactsAndExecutionsResponse(proto.Message): + r"""Response message for + [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. + """ + + +class AddContextChildrenRequest(proto.Message): + r"""Request message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. + + Attributes: + context (str): + Required. The resource name of the parent + Context. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + child_contexts (Sequence[str]): + The resource names of the child Contexts. + """ + + context = proto.Field( + proto.STRING, + number=1, + ) + child_contexts = proto.RepeatedField( + proto.STRING, + number=2, + ) + + +class AddContextChildrenResponse(proto.Message): + r"""Response message for + [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. + """ + + +class QueryContextLineageSubgraphRequest(proto.Message): + r"""Request message for + [MetadataService.QueryContextLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryContextLineageSubgraph]. + + Attributes: + context (str): + Required. The resource name of the Context whose Artifacts + and Executions should be retrieved as a LineageSubgraph. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} + + The request may error with FAILED_PRECONDITION if the number + of Artifacts, the number of Executions, or the number of + Events that would be returned for the Context exceeds 1000. + """ + + context = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateExecutionRequest(proto.Message): + r"""Request message for + [MetadataService.CreateExecution][google.cloud.aiplatform.v1beta1.MetadataService.CreateExecution]. + + Attributes: + parent (str): + Required. The resource name of the + MetadataStore where the Execution should be + created. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + execution (google.cloud.aiplatform_v1beta1.types.Execution): + Required. 
The Execution to create. + execution_id (str): + The {execution} portion of the resource name with the + format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + If not provided, the Execution's ID will be a UUID generated + by the service. Must be 4-128 characters in length. Valid + characters are /[a-z][0-9]-/. Must be unique across all + Executions in the parent MetadataStore. (Otherwise the + request will fail with ALREADY_EXISTS, or PERMISSION_DENIED + if the caller can't view the preexisting Execution.) + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + execution = proto.Field( + proto.MESSAGE, + number=2, + message=gca_execution.Execution, + ) + execution_id = proto.Field( + proto.STRING, + number=3, + ) + + +class GetExecutionRequest(proto.Message): + r"""Request message for + [MetadataService.GetExecution][google.cloud.aiplatform.v1beta1.MetadataService.GetExecution]. + + Attributes: + name (str): + Required. The resource name of the Execution + to retrieve. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListExecutionsRequest(proto.Message): + r"""Request message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. + + Attributes: + parent (str): + Required. The MetadataStore whose Executions + should be listed. Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore} + page_size (int): + The maximum number of Executions to return. + The service may return fewer. Must be in range + 1-1000, inclusive. Defaults to 100. + page_token (str): + A page token, received from a previous + [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions] + call. Provide this to retrieve the subsequent page. + + When paginating, all other provided parameters must match + the call that provided the page token. (Otherwise the + request will fail with an INVALID_ARGUMENT error.) + filter (str): + Filter specifying the boolean condition for the Executions + to satisfy in order to be part of the result set. The syntax + to define filter query is based on + https://google.aip.dev/160. Following are the supported set + of filters: + + - **Attribute filtering**: For example: + ``display_name = "test"``. Supported fields include: + ``name``, ``display_name``, ``state``, ``schema_title``, + ``create_time``, and ``update_time``. Time fields, such + as ``create_time`` and ``update_time``, require values + specified in RFC-3339 format. For example: + ``create_time = "2020-11-19T11:30:00-04:00"``. + - **Metadata field**: To filter on metadata fields use + traversal operation as follows: + ``metadata..`` For example: + ``metadata.field_1.number_value = 10.0`` + - **Context based filtering**: To filter Executions based + on the contexts to which they belong use the function + operator with the full resource name: + ``in_context()``. For example: + ``in_context("projects//locations//metadataStores//contexts/")`` + + Each of the above supported filters can be combined together + using logical operators (``AND`` & ``OR``). For example: + ``display_name = "test" AND metadata.field1.bool_value = true``. 
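+
+            A hedged usage sketch for passing such a filter (assumes
+            the generated ``MetadataServiceClient`` exported by
+            ``google.cloud.aiplatform_v1beta1``; the parent resource
+            name is illustrative)::
+
+                from google.cloud import aiplatform_v1beta1
+
+                client = aiplatform_v1beta1.MetadataServiceClient()
+                request = aiplatform_v1beta1.ListExecutionsRequest(
+                    parent=("projects/my-project/locations/us-central1/"
+                            "metadataStores/default"),
+                    filter='display_name = "test" AND metadata.field1.bool_value = true',
+                )
+                # The returned pager follows next_page_token transparently.
+                for execution in client.list_executions(request=request):
+                    print(execution.name)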
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + page_size = proto.Field( + proto.INT32, + number=2, + ) + page_token = proto.Field( + proto.STRING, + number=3, + ) + filter = proto.Field( + proto.STRING, + number=4, + ) + + +class ListExecutionsResponse(proto.Message): + r"""Response message for + [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. + + Attributes: + executions (Sequence[google.cloud.aiplatform_v1beta1.types.Execution]): + The Executions retrieved from the + MetadataStore. + next_page_token (str): + A token, which can be sent as + [ListExecutionsRequest.page_token][google.cloud.aiplatform.v1beta1.ListExecutionsRequest.page_token] + to retrieve the next page. If this field is not populated, + there are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + executions = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_execution.Execution, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateExecutionRequest(proto.Message): + r"""Request message for + [MetadataService.UpdateExecution][google.cloud.aiplatform.v1beta1.MetadataService.UpdateExecution]. + + Attributes: + execution (google.cloud.aiplatform_v1beta1.types.Execution): + Required. The Execution containing updates. The Execution's + [Execution.name][google.cloud.aiplatform.v1beta1.Execution.name] + field is used to identify the Execution to be updated. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. A FieldMask indicating which fields + should be updated. + allow_missing (bool): + If set to true, and the + [Execution][google.cloud.aiplatform.v1beta1.Execution] is + not found, a new + [Execution][google.cloud.aiplatform.v1beta1.Execution] will + be created. In this situation, ``update_mask`` is ignored. + """ + + execution = proto.Field( + proto.MESSAGE, + number=1, + message=gca_execution.Execution, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + allow_missing = proto.Field( + proto.BOOL, + number=3, + ) + + +class AddExecutionEventsRequest(proto.Message): + r"""Request message for + [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. + + Attributes: + execution (str): + Required. The resource name of the Execution + that the Events connect Artifacts with. + Format: + projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} + events (Sequence[google.cloud.aiplatform_v1beta1.types.Event]): + The Events to create and add. + """ + + execution = proto.Field( + proto.STRING, + number=1, + ) + events = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=event.Event, + ) + + +class AddExecutionEventsResponse(proto.Message): + r"""Response message for + [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. + """ + + +class QueryExecutionInputsAndOutputsRequest(proto.Message): + r"""Request message for + [MetadataService.QueryExecutionInputsAndOutputs][google.cloud.aiplatform.v1beta1.MetadataService.QueryExecutionInputsAndOutputs]. + + Attributes: + execution (str): + Required. The resource name of the Execution + whose input and output Artifacts should be + retrieved as a LineageSubgraph. 
Format:
+            projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}
+    """
+
+    execution = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class CreateMetadataSchemaRequest(proto.Message):
+    r"""Request message for
+    [MetadataService.CreateMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataSchema].
+
+    Attributes:
+        parent (str):
+            Required. The resource name of the
+            MetadataStore where the MetadataSchema should be
+            created. Format:
+            projects/{project}/locations/{location}/metadataStores/{metadatastore}
+        metadata_schema (google.cloud.aiplatform_v1beta1.types.MetadataSchema):
+            Required. The MetadataSchema to create.
+        metadata_schema_id (str):
+            The {metadata_schema} portion of the resource name with the
+            format:
+            projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}
+            If not provided, the MetadataSchema's ID will be a UUID
+            generated by the service. Must be 4-128 characters in
+            length. Valid characters are /[a-z][0-9]-/. Must be unique
+            across all MetadataSchemas in the parent Location.
+            (Otherwise the request will fail with ALREADY_EXISTS, or
+            PERMISSION_DENIED if the caller can't view the preexisting
+            MetadataSchema.)
+    """
+
+    parent = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    metadata_schema = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message=gca_metadata_schema.MetadataSchema,
+    )
+    metadata_schema_id = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+
+
+class GetMetadataSchemaRequest(proto.Message):
+    r"""Request message for
+    [MetadataService.GetMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataSchema].
+
+    Attributes:
+        name (str):
+            Required. The resource name of the
+            MetadataSchema to retrieve. Format:
+            projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}
+    """
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class ListMetadataSchemasRequest(proto.Message):
+    r"""Request message for
+    [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas].
+
+    Attributes:
+        parent (str):
+            Required. The MetadataStore whose
+            MetadataSchemas should be listed. Format:
+            projects/{project}/locations/{location}/metadataStores/{metadatastore}
+        page_size (int):
+            The maximum number of MetadataSchemas to
+            return. The service may return fewer.
+            Must be in range 1-1000, inclusive. Defaults to
+            100.
+        page_token (str):
+            A page token, received from a previous
+            [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]
+            call. Provide this to retrieve the next page.
+
+            When paginating, all other provided parameters must match
+            the call that provided the page token. (Otherwise the
+            request will fail with INVALID_ARGUMENT error.)
+        filter (str):
+            A query to filter available MetadataSchemas
+            for matching results.
+    """
+
+    parent = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    page_size = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+    page_token = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    filter = proto.Field(
+        proto.STRING,
+        number=4,
+    )
+
+
+class ListMetadataSchemasResponse(proto.Message):
+    r"""Response message for
+    [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas].
+
+    Attributes:
+        metadata_schemas (Sequence[google.cloud.aiplatform_v1beta1.types.MetadataSchema]):
+            The MetadataSchemas found for the
+            MetadataStore.
+        next_page_token (str):
+            A token, which can be sent as
+            [ListMetadataSchemasRequest.page_token][google.cloud.aiplatform.v1beta1.ListMetadataSchemasRequest.page_token]
+            to retrieve the next page. If this field is not populated,
+            there are no subsequent pages.
+    """
+
+    @property
+    def raw_page(self):
+        return self
+
+    metadata_schemas = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message=gca_metadata_schema.MetadataSchema,
+    )
+    next_page_token = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+class QueryArtifactLineageSubgraphRequest(proto.Message):
+    r"""Request message for
+    [MetadataService.QueryArtifactLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryArtifactLineageSubgraph].
+
+    Attributes:
+        artifact (str):
+            Required. The resource name of the Artifact whose Lineage
+            needs to be retrieved as a LineageSubgraph. Format:
+            projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}
+
+            The request may error with FAILED_PRECONDITION if the number
+            of Artifacts, the number of Executions, or the number of
+            Events that would be returned for the Context exceeds 1000.
+        max_hops (int):
+            Specifies the size of the lineage graph in terms of the
+            number of hops from the specified artifact.
+            Negative value: an INVALID_ARGUMENT error is returned.
+            0: Only the input artifact is returned.
+            No value: Transitive closure is performed to
+            return the complete graph.
+        filter (str):
+            Filter specifying the boolean condition for the Artifacts to
+            satisfy in order to be part of the Lineage Subgraph. The
+            syntax to define filter query is based on
+            https://google.aip.dev/160. The supported set of filters
+            include the following:
+
+            -  **Attribute filtering**: For example:
+               ``display_name = "test"`` Supported fields include:
+               ``name``, ``display_name``, ``uri``, ``state``,
+               ``schema_title``, ``create_time``, and ``update_time``.
+               Time fields, such as ``create_time`` and ``update_time``,
+               require values specified in RFC-3339 format. For example:
+               ``create_time = "2020-11-19T11:30:00-04:00"``
+            -  **Metadata field**: To filter on metadata fields use
+               traversal operation as follows:
+               ``metadata..``. For example:
+               ``metadata.field_1.number_value = 10.0``
+
+            Each of the above supported filter types can be combined
+            together using logical operators (``AND`` & ``OR``).
+
+            For example:
+            ``display_name = "test" AND metadata.field1.bool_value = true``.
+    """
+
+    artifact = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    max_hops = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+    filter = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_store.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_store.py
new file mode 100644
index 0000000000..1594416c19
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_store.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'MetadataStore', + }, +) + + +class MetadataStore(proto.Message): + r"""Instance of a metadata store. Contains a set of metadata that + can be queried. + + Attributes: + name (str): + Output only. The resource name of the + MetadataStore instance. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + MetadataStore was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + MetadataStore was last updated. + encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): + Customer-managed encryption key spec for a + Metadata Store. If set, this Metadata Store and + all sub-resources of this Metadata Store are + secured using this key. + description (str): + Description of the MetadataStore. + state (google.cloud.aiplatform_v1beta1.types.MetadataStore.MetadataStoreState): + Output only. State information of the + MetadataStore. + """ + + class MetadataStoreState(proto.Message): + r"""Represents state information for a MetadataStore. + Attributes: + disk_utilization_bytes (int): + The disk utilization of the MetadataStore in + bytes. + """ + + disk_utilization_bytes = proto.Field( + proto.INT64, + number=1, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + create_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=5, + message=gca_encryption_spec.EncryptionSpec, + ) + description = proto.Field( + proto.STRING, + number=6, + ) + state = proto.Field( + proto.MESSAGE, + number=7, + message=MetadataStoreState, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migratable_resource.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migratable_resource.py new file mode 100644 index 0000000000..0817f504dd --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migratable_resource.py @@ -0,0 +1,209 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'MigratableResource', + }, +) + + +class MigratableResource(proto.Message): + r"""Represents one resource that exists in automl.googleapis.com, + datalabeling.googleapis.com or ml.googleapis.com. 
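+
+    As a purely illustrative sketch (not part of the generated API
+    surface), a caller holding an already-fetched ``MigratableResource``
+    could check which variant of its ``resource`` oneof is populated:
+
+    .. code:: python
+
+        # Hypothetical usage; ``res`` is assumed to be a fetched
+        # MigratableResource message.
+        kind = MigratableResource.pb(res).WhichOneof('resource')
+        if kind == 'automl_model':
+            print(res.automl_model.model)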
+
+    Attributes:
+        ml_engine_model_version (google.cloud.aiplatform_v1beta1.types.MigratableResource.MlEngineModelVersion):
+            Output only. Represents one Version in
+            ml.googleapis.com.
+        automl_model (google.cloud.aiplatform_v1beta1.types.MigratableResource.AutomlModel):
+            Output only. Represents one Model in
+            automl.googleapis.com.
+        automl_dataset (google.cloud.aiplatform_v1beta1.types.MigratableResource.AutomlDataset):
+            Output only. Represents one Dataset in
+            automl.googleapis.com.
+        data_labeling_dataset (google.cloud.aiplatform_v1beta1.types.MigratableResource.DataLabelingDataset):
+            Output only. Represents one Dataset in
+            datalabeling.googleapis.com.
+        last_migrate_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when the last
+            migration attempt on this MigratableResource
+            started. Will not be set if there's no migration
+            attempt on this MigratableResource.
+        last_update_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this
+            MigratableResource was last updated.
+    """
+
+    class MlEngineModelVersion(proto.Message):
+        r"""Represents one model Version in ml.googleapis.com.
+        Attributes:
+            endpoint (str):
+                The ml.googleapis.com endpoint that this model Version
+                currently lives in. Example values:
+
+                -  ml.googleapis.com
+                -  us-central1-ml.googleapis.com
+                -  europe-west4-ml.googleapis.com
+                -  asia-east1-ml.googleapis.com
+            version (str):
+                Full resource name of ml engine model Version. Format:
+                ``projects/{project}/models/{model}/versions/{version}``.
+        """
+
+        endpoint = proto.Field(
+            proto.STRING,
+            number=1,
+        )
+        version = proto.Field(
+            proto.STRING,
+            number=2,
+        )
+
+    class AutomlModel(proto.Message):
+        r"""Represents one Model in automl.googleapis.com.
+        Attributes:
+            model (str):
+                Full resource name of automl Model. Format:
+                ``projects/{project}/locations/{location}/models/{model}``.
+            model_display_name (str):
+                The Model's display name in
+                automl.googleapis.com.
+        """
+
+        model = proto.Field(
+            proto.STRING,
+            number=1,
+        )
+        model_display_name = proto.Field(
+            proto.STRING,
+            number=3,
+        )
+
+    class AutomlDataset(proto.Message):
+        r"""Represents one Dataset in automl.googleapis.com.
+        Attributes:
+            dataset (str):
+                Full resource name of automl Dataset. Format:
+                ``projects/{project}/locations/{location}/datasets/{dataset}``.
+            dataset_display_name (str):
+                The Dataset's display name in
+                automl.googleapis.com.
+        """
+
+        dataset = proto.Field(
+            proto.STRING,
+            number=1,
+        )
+        dataset_display_name = proto.Field(
+            proto.STRING,
+            number=4,
+        )
+
+    class DataLabelingDataset(proto.Message):
+        r"""Represents one Dataset in datalabeling.googleapis.com.
+        Attributes:
+            dataset (str):
+                Full resource name of data labeling Dataset. Format:
+                ``projects/{project}/datasets/{dataset}``.
+            dataset_display_name (str):
+                The Dataset's display name in
+                datalabeling.googleapis.com.
+            data_labeling_annotated_datasets (Sequence[google.cloud.aiplatform_v1beta1.types.MigratableResource.DataLabelingDataset.DataLabelingAnnotatedDataset]):
+                The migratable AnnotatedDatasets in
+                datalabeling.googleapis.com that belong to the
+                data labeling Dataset.
+        """
+
+        class DataLabelingAnnotatedDataset(proto.Message):
+            r"""Represents one AnnotatedDataset in
+            datalabeling.googleapis.com.
+
+            Attributes:
+                annotated_dataset (str):
+                    Full resource name of data labeling AnnotatedDataset.
+                    Format:
+                    ``projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}``.
+ annotated_dataset_display_name (str): + The AnnotatedDataset's display name in + datalabeling.googleapis.com. + """ + + annotated_dataset = proto.Field( + proto.STRING, + number=1, + ) + annotated_dataset_display_name = proto.Field( + proto.STRING, + number=3, + ) + + dataset = proto.Field( + proto.STRING, + number=1, + ) + dataset_display_name = proto.Field( + proto.STRING, + number=4, + ) + data_labeling_annotated_datasets = proto.RepeatedField( + proto.MESSAGE, + number=3, + message='MigratableResource.DataLabelingDataset.DataLabelingAnnotatedDataset', + ) + + ml_engine_model_version = proto.Field( + proto.MESSAGE, + number=1, + oneof='resource', + message=MlEngineModelVersion, + ) + automl_model = proto.Field( + proto.MESSAGE, + number=2, + oneof='resource', + message=AutomlModel, + ) + automl_dataset = proto.Field( + proto.MESSAGE, + number=3, + oneof='resource', + message=AutomlDataset, + ) + data_labeling_dataset = proto.Field( + proto.MESSAGE, + number=4, + oneof='resource', + message=DataLabelingDataset, + ) + last_migrate_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + last_update_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migration_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migration_service.py new file mode 100644 index 0000000000..c851edc95d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migration_service.py @@ -0,0 +1,439 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import migratable_resource as gca_migratable_resource +from google.cloud.aiplatform_v1beta1.types import operation +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'SearchMigratableResourcesRequest', + 'SearchMigratableResourcesResponse', + 'BatchMigrateResourcesRequest', + 'MigrateResourceRequest', + 'BatchMigrateResourcesResponse', + 'MigrateResourceResponse', + 'BatchMigrateResourcesOperationMetadata', + }, +) + + +class SearchMigratableResourcesRequest(proto.Message): + r"""Request message for + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. + + Attributes: + parent (str): + Required. The location that the migratable resources should + be searched from. It's the Vertex AI location that the + resources can be migrated to, not the resources' original + location. Format: + ``projects/{project}/locations/{location}`` + page_size (int): + The standard page size. + The default and maximum value is 100. + page_token (str): + The standard page token. + filter (str): + A filter for your search. 
You can use the following types of + filters: + + - Resource type filters. The following strings filter for a + specific type of + [MigratableResource][google.cloud.aiplatform.v1beta1.MigratableResource]: + + - ``ml_engine_model_version:*`` + - ``automl_model:*`` + - ``automl_dataset:*`` + - ``data_labeling_dataset:*`` + + - "Migrated or not" filters. The following strings filter + for resources that either have or have not already been + migrated: + + - ``last_migrate_time:*`` filters for migrated + resources. + - ``NOT last_migrate_time:*`` filters for not yet + migrated resources. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + page_size = proto.Field( + proto.INT32, + number=2, + ) + page_token = proto.Field( + proto.STRING, + number=3, + ) + filter = proto.Field( + proto.STRING, + number=4, + ) + + +class SearchMigratableResourcesResponse(proto.Message): + r"""Response message for + [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. + + Attributes: + migratable_resources (Sequence[google.cloud.aiplatform_v1beta1.types.MigratableResource]): + All migratable resources that can be migrated + to the location specified in the request. + next_page_token (str): + The standard next-page token. The migratable_resources may + not fill page_size in SearchMigratableResourcesRequest even + when there are subsequent pages. + """ + + @property + def raw_page(self): + return self + + migratable_resources = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_migratable_resource.MigratableResource, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class BatchMigrateResourcesRequest(proto.Message): + r"""Request message for + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. + + Attributes: + parent (str): + Required. The location of the migrated resource will live + in. Format: ``projects/{project}/locations/{location}`` + migrate_resource_requests (Sequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest]): + Required. The request messages specifying the + resources to migrate. They must be in the same + location as the destination. Up to 50 resources + can be migrated in one batch. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + migrate_resource_requests = proto.RepeatedField( + proto.MESSAGE, + number=2, + message='MigrateResourceRequest', + ) + + +class MigrateResourceRequest(proto.Message): + r"""Config of migrating one resource from automl.googleapis.com, + datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. + + Attributes: + migrate_ml_engine_model_version_config (google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest.MigrateMlEngineModelVersionConfig): + Config for migrating Version in + ml.googleapis.com to Vertex AI's Model. + migrate_automl_model_config (google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest.MigrateAutomlModelConfig): + Config for migrating Model in + automl.googleapis.com to Vertex AI's Model. + migrate_automl_dataset_config (google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest.MigrateAutomlDatasetConfig): + Config for migrating Dataset in + automl.googleapis.com to Vertex AI's Dataset. + migrate_data_labeling_dataset_config (google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest.MigrateDataLabelingDatasetConfig): + Config for migrating Dataset in + datalabeling.googleapis.com to Vertex AI's + Dataset. 
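+
+    A minimal construction sketch (the resource names below are
+    placeholders, not real resources); exactly one of the ``migrate_*``
+    configs may be set, since they share the ``request`` oneof:
+
+    .. code:: python
+
+        request = MigrateResourceRequest(
+            migrate_automl_model_config=MigrateResourceRequest.MigrateAutomlModelConfig(
+                model='projects/my-project/locations/us-central1/models/123',
+                model_display_name='my-migrated-model',
+            ),
+        )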
+    """
+
+    class MigrateMlEngineModelVersionConfig(proto.Message):
+        r"""Config for migrating version in ml.googleapis.com to Vertex
+        AI's Model.
+
+        Attributes:
+            endpoint (str):
+                Required. The ml.googleapis.com endpoint that this model
+                version should be migrated from. Example values:
+
+                -  ml.googleapis.com
+
+                -  us-central1-ml.googleapis.com
+
+                -  europe-west4-ml.googleapis.com
+
+                -  asia-east1-ml.googleapis.com
+            model_version (str):
+                Required. Full resource name of ml engine model version.
+                Format:
+                ``projects/{project}/models/{model}/versions/{version}``.
+            model_display_name (str):
+                Required. Display name of the model in Vertex
+                AI.
+        """
+
+        endpoint = proto.Field(
+            proto.STRING,
+            number=1,
+        )
+        model_version = proto.Field(
+            proto.STRING,
+            number=2,
+        )
+        model_display_name = proto.Field(
+            proto.STRING,
+            number=3,
+        )
+
+    class MigrateAutomlModelConfig(proto.Message):
+        r"""Config for migrating Model in automl.googleapis.com to Vertex
+        AI's Model.
+
+        Attributes:
+            model (str):
+                Required. Full resource name of automl Model. Format:
+                ``projects/{project}/locations/{location}/models/{model}``.
+            model_display_name (str):
+                Optional. Display name of the model in Vertex
+                AI. System will pick a display name if
+                unspecified.
+        """
+
+        model = proto.Field(
+            proto.STRING,
+            number=1,
+        )
+        model_display_name = proto.Field(
+            proto.STRING,
+            number=2,
+        )
+
+    class MigrateAutomlDatasetConfig(proto.Message):
+        r"""Config for migrating Dataset in automl.googleapis.com to
+        Vertex AI's Dataset.
+
+        Attributes:
+            dataset (str):
+                Required. Full resource name of automl Dataset. Format:
+                ``projects/{project}/locations/{location}/datasets/{dataset}``.
+            dataset_display_name (str):
+                Required. Display name of the Dataset in
+                Vertex AI.
+        """
+
+        dataset = proto.Field(
+            proto.STRING,
+            number=1,
+        )
+        dataset_display_name = proto.Field(
+            proto.STRING,
+            number=2,
+        )
+
+    class MigrateDataLabelingDatasetConfig(proto.Message):
+        r"""Config for migrating Dataset in datalabeling.googleapis.com
+        to Vertex AI's Dataset.
+
+        Attributes:
+            dataset (str):
+                Required. Full resource name of data labeling Dataset.
+                Format: ``projects/{project}/datasets/{dataset}``.
+            dataset_display_name (str):
+                Optional. Display name of the Dataset in
+                Vertex AI. System will pick a display name if
+                unspecified.
+            migrate_data_labeling_annotated_dataset_configs (Sequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest.MigrateDataLabelingDatasetConfig.MigrateDataLabelingAnnotatedDatasetConfig]):
+                Optional. Configs for migrating
+                AnnotatedDataset in datalabeling.googleapis.com
+                to Vertex AI's SavedQuery. The specified
+                AnnotatedDatasets have to belong to the
+                datalabeling Dataset.
+        """
+
+        class MigrateDataLabelingAnnotatedDatasetConfig(proto.Message):
+            r"""Config for migrating AnnotatedDataset in
+            datalabeling.googleapis.com to Vertex AI's SavedQuery.
+
+            Attributes:
+                annotated_dataset (str):
+                    Required. Full resource name of data labeling
+                    AnnotatedDataset. Format:
+                    ``projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}``.
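+
+            For illustration only (the resource name is a
+            placeholder), such a config could be built as:
+
+            .. code:: python
+
+                cfg = (
+                    MigrateResourceRequest.MigrateDataLabelingDatasetConfig
+                    .MigrateDataLabelingAnnotatedDatasetConfig(
+                        annotated_dataset='projects/my-project/datasets/111/annotatedDatasets/222',
+                    )
+                )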
+ """ + + annotated_dataset = proto.Field( + proto.STRING, + number=1, + ) + + dataset = proto.Field( + proto.STRING, + number=1, + ) + dataset_display_name = proto.Field( + proto.STRING, + number=2, + ) + migrate_data_labeling_annotated_dataset_configs = proto.RepeatedField( + proto.MESSAGE, + number=3, + message='MigrateResourceRequest.MigrateDataLabelingDatasetConfig.MigrateDataLabelingAnnotatedDatasetConfig', + ) + + migrate_ml_engine_model_version_config = proto.Field( + proto.MESSAGE, + number=1, + oneof='request', + message=MigrateMlEngineModelVersionConfig, + ) + migrate_automl_model_config = proto.Field( + proto.MESSAGE, + number=2, + oneof='request', + message=MigrateAutomlModelConfig, + ) + migrate_automl_dataset_config = proto.Field( + proto.MESSAGE, + number=3, + oneof='request', + message=MigrateAutomlDatasetConfig, + ) + migrate_data_labeling_dataset_config = proto.Field( + proto.MESSAGE, + number=4, + oneof='request', + message=MigrateDataLabelingDatasetConfig, + ) + + +class BatchMigrateResourcesResponse(proto.Message): + r"""Response message for + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. + + Attributes: + migrate_resource_responses (Sequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceResponse]): + Successfully migrated resources. + """ + + migrate_resource_responses = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='MigrateResourceResponse', + ) + + +class MigrateResourceResponse(proto.Message): + r"""Describes a successfully migrated resource. + Attributes: + dataset (str): + Migrated Dataset's resource name. + model (str): + Migrated Model's resource name. + migratable_resource (google.cloud.aiplatform_v1beta1.types.MigratableResource): + Before migration, the identifier in + ml.googleapis.com, automl.googleapis.com or + datalabeling.googleapis.com. + """ + + dataset = proto.Field( + proto.STRING, + number=1, + oneof='migrated_resource', + ) + model = proto.Field( + proto.STRING, + number=2, + oneof='migrated_resource', + ) + migratable_resource = proto.Field( + proto.MESSAGE, + number=3, + message=gca_migratable_resource.MigratableResource, + ) + + +class BatchMigrateResourcesOperationMetadata(proto.Message): + r"""Runtime operation information for + [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The common part of the operation metadata. + partial_results (Sequence[google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesOperationMetadata.PartialResult]): + Partial results that reflect the latest + migration operation progress. + """ + + class PartialResult(proto.Message): + r"""Represents a partial result in batch migration operation for one + [MigrateResourceRequest][google.cloud.aiplatform.v1beta1.MigrateResourceRequest]. + + Attributes: + error (google.rpc.status_pb2.Status): + The error result of the migration request in + case of failure. + model (str): + Migrated model resource name. + dataset (str): + Migrated dataset resource name. + request (google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest): + It's the same as the value in + [MigrateResourceRequest.migrate_resource_requests][]. 
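+
+        A small inspection sketch (``metadata`` below is assumed to be
+        an already-decoded BatchMigrateResourcesOperationMetadata
+        message):
+
+        .. code:: python
+
+            for pr in metadata.partial_results:
+                # The 'result' oneof carries either an error or the
+                # migrated resource name.
+                outcome = type(pr).pb(pr).WhichOneof('result')
+                if outcome == 'error':
+                    print(pr.error.message)
+                else:
+                    print(pr.model or pr.dataset)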
+        """
+
+        error = proto.Field(
+            proto.MESSAGE,
+            number=2,
+            oneof='result',
+            message=status_pb2.Status,
+        )
+        model = proto.Field(
+            proto.STRING,
+            number=3,
+            oneof='result',
+        )
+        dataset = proto.Field(
+            proto.STRING,
+            number=4,
+            oneof='result',
+        )
+        request = proto.Field(
+            proto.MESSAGE,
+            number=1,
+            message='MigrateResourceRequest',
+        )
+
+    generic_metadata = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=operation.GenericOperationMetadata,
+    )
+    partial_results = proto.RepeatedField(
+        proto.MESSAGE,
+        number=2,
+        message=PartialResult,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model.py
new file mode 100644
index 0000000000..1a068335a6
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model.py
@@ -0,0 +1,752 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import deployed_model_ref
+from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec
+from google.cloud.aiplatform_v1beta1.types import env_var
+from google.cloud.aiplatform_v1beta1.types import explanation
+from google.protobuf import struct_pb2  # type: ignore
+from google.protobuf import timestamp_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1',
+    manifest={
+        'Model',
+        'PredictSchemata',
+        'ModelContainerSpec',
+        'Port',
+    },
+)
+
+
+class Model(proto.Message):
+    r"""A trained machine learning Model.
+    Attributes:
+        name (str):
+            The resource name of the Model.
+        display_name (str):
+            Required. The display name of the Model.
+            The name can be up to 128 characters long and
+            can consist of any UTF-8 characters.
+        description (str):
+            The description of the Model.
+        predict_schemata (google.cloud.aiplatform_v1beta1.types.PredictSchemata):
+            The schemata that describe formats of the Model's
+            predictions and explanations as given and returned via
+            [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]
+            and
+            [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain].
+        metadata_schema_uri (str):
+            Immutable. Points to a YAML file stored on Google Cloud
+            Storage describing additional information about the Model
+            that is specific to it. Unset if the Model does not have any
+            additional information. The schema is defined as an OpenAPI
+            3.0.2 `Schema
+            Object `__.
+            AutoML Models always have this field populated by Vertex AI;
+            if no additional metadata is needed, this field is set to an
+            empty string. Note: The URI given on output will be
+            immutable and probably different, including the URI scheme,
+            than the one given on input. The output URI will point to a
+            location where the user only has read access.
+        metadata (google.protobuf.struct_pb2.Value):
+            Immutable. Additional information about the Model; the
+            schema of the metadata can be found in
+            [metadata_schema][google.cloud.aiplatform.v1beta1.Model.metadata_schema_uri].
+            Unset if the Model does not have any additional information.
+        supported_export_formats (Sequence[google.cloud.aiplatform_v1beta1.types.Model.ExportFormat]):
+            Output only. The formats in which this Model
+            may be exported. If empty, this Model is not
+            available for export.
+        training_pipeline (str):
+            Output only. The resource name of the
+            TrainingPipeline that uploaded this Model, if
+            any.
+        container_spec (google.cloud.aiplatform_v1beta1.types.ModelContainerSpec):
+            Input only. The specification of the container that is to be
+            used when deploying this Model. The specification is
+            ingested upon
+            [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel],
+            and all binaries it contains are copied and stored
+            internally by Vertex AI. Not present for AutoML Models.
+        artifact_uri (str):
+            Immutable. The path to the directory
+            containing the Model artifact and any of its
+            supporting files. Not present for AutoML Models.
+        supported_deployment_resources_types (Sequence[google.cloud.aiplatform_v1beta1.types.Model.DeploymentResourcesType]):
+            Output only. When this Model is deployed, its prediction
+            resources are described by the ``prediction_resources``
+            field of the
+            [Endpoint.deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models]
+            object. Because not all Models support all resource
+            configuration types, the configuration types this Model
+            supports are listed here. If no configuration types are
+            listed, the Model cannot be deployed to an
+            [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] and
+            does not support online predictions
+            ([PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]
+            or
+            [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]).
+            Such a Model can serve predictions by using a
+            [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob],
+            if it has at least one entry each in
+            [supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats]
+            and
+            [supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats].
+        supported_input_storage_formats (Sequence[str]):
+            Output only. The formats this Model supports in
+            [BatchPredictionJob.input_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config].
+            If
+            [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]
+            exists, the instances should be given as per that schema.
+
+            The possible formats are:
+
+            -  ``jsonl`` The JSON Lines format, where each instance is a
+               single line. Uses
+               [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source].
+
+            -  ``csv`` The CSV format, where each instance is a single
+               comma-separated line. The first line in the file is the
+               header, containing comma-separated field names. Uses
+               [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source].
+
+            -  ``tf-record`` The TFRecord format, where each instance is
+               a single record in tfrecord syntax. Uses
+               [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source].
+
+            -  ``tf-record-gzip`` Similar to ``tf-record``, but the file
+               is gzipped.
Uses + [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. + + - ``bigquery`` Each instance is a single row in BigQuery. + Uses + [BigQuerySource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.bigquery_source]. + + - ``file-list`` Each line of the file is the location of an + instance to process, uses ``gcs_source`` field of the + [InputConfig][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig] + object. + + If this Model doesn't support any of these formats it means + it cannot be used with a + [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. + However, if it has + [supported_deployment_resources_types][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types], + it could serve online predictions by using + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] + or + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. + supported_output_storage_formats (Sequence[str]): + Output only. The formats this Model supports in + [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config]. + If both + [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] + and + [PredictSchemata.prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri] + exist, the predictions are returned together with their + instances. In other words, the prediction has the original + instance data first, followed by the actual prediction + content (as per the schema). + + The possible formats are: + + - ``jsonl`` The JSON Lines format, where each prediction is + a single line. Uses + [GcsDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.gcs_destination]. + + - ``csv`` The CSV format, where each prediction is a single + comma-separated line. The first line in the file is the + header, containing comma-separated field names. Uses + [GcsDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.gcs_destination]. + + - ``bigquery`` Each prediction is a single row in a + BigQuery table, uses + [BigQueryDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.bigquery_destination] + . + + If this Model doesn't support any of these formats it means + it cannot be used with a + [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. + However, if it has + [supported_deployment_resources_types][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types], + it could serve online predictions by using + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] + or + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Model was + uploaded into Vertex AI. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Model was + most recently updated. + deployed_models (Sequence[google.cloud.aiplatform_v1beta1.types.DeployedModelRef]): + Output only. The pointers to DeployedModels + created from this Model. Note that Model could + have been deployed to Endpoints in different + Locations. + explanation_spec (google.cloud.aiplatform_v1beta1.types.ExplanationSpec): + The default explanation specification for this Model. 
+ + The Model can be used for [requesting + explanation][PredictionService.Explain] after being + [deployed][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel] + if it is populated. The Model can be used for [batch + explanation][BatchPredictionJob.generate_explanation] if it + is populated. + + All fields of the explanation_spec can be overridden by + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + of + [DeployModelRequest.deployed_model][google.cloud.aiplatform.v1beta1.DeployModelRequest.deployed_model], + or + [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] + of + [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. + + If the default explanation specification is not set for this + Model, this Model can still be used for [requesting + explanation][PredictionService.Explain] by setting + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + of + [DeployModelRequest.deployed_model][google.cloud.aiplatform.v1beta1.DeployModelRequest.deployed_model] + and for [batch + explanation][BatchPredictionJob.generate_explanation] by + setting + [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] + of + [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. + etag (str): + Used to perform consistent read-modify-write + updates. If not set, a blind "overwrite" update + happens. + labels (Sequence[google.cloud.aiplatform_v1beta1.types.Model.LabelsEntry]): + The labels with user-defined metadata to + organize your Models. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): + Customer-managed encryption key spec for a + Model. If set, this Model and all sub-resources + of this Model will be secured by this key. + """ + class DeploymentResourcesType(proto.Enum): + r"""Identifies a type of Model's prediction resources.""" + DEPLOYMENT_RESOURCES_TYPE_UNSPECIFIED = 0 + DEDICATED_RESOURCES = 1 + AUTOMATIC_RESOURCES = 2 + + class ExportFormat(proto.Message): + r"""Represents export format supported by the Model. + All formats export to Google Cloud Storage. + + Attributes: + id (str): + Output only. The ID of the export format. The possible + format IDs are: + + - ``tflite`` Used for Android mobile devices. + + - ``edgetpu-tflite`` Used for `Edge + TPU `__ devices. + + - ``tf-saved-model`` A tensorflow model in SavedModel + format. + + - ``tf-js`` A + `TensorFlow.js `__ model + that can be used in the browser and in Node.js using + JavaScript. + + - ``core-ml`` Used for iOS mobile devices. + + - ``custom-trained`` A Model that was uploaded or trained + by custom code. + exportable_contents (Sequence[google.cloud.aiplatform_v1beta1.types.Model.ExportFormat.ExportableContent]): + Output only. The content of this Model that + may be exported. 
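+
+        For illustration only (``model`` is assumed to be an
+        already-fetched Model message), the supported formats can be
+        read as:
+
+        .. code:: python
+
+            # Each entry pairs a format ID with what can be exported.
+            for export_format in model.supported_export_formats:
+                print(export_format.id,
+                      list(export_format.exportable_contents))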
+ """ + class ExportableContent(proto.Enum): + r"""The Model content that can be exported.""" + EXPORTABLE_CONTENT_UNSPECIFIED = 0 + ARTIFACT = 1 + IMAGE = 2 + + id = proto.Field( + proto.STRING, + number=1, + ) + exportable_contents = proto.RepeatedField( + proto.ENUM, + number=2, + enum='Model.ExportFormat.ExportableContent', + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + description = proto.Field( + proto.STRING, + number=3, + ) + predict_schemata = proto.Field( + proto.MESSAGE, + number=4, + message='PredictSchemata', + ) + metadata_schema_uri = proto.Field( + proto.STRING, + number=5, + ) + metadata = proto.Field( + proto.MESSAGE, + number=6, + message=struct_pb2.Value, + ) + supported_export_formats = proto.RepeatedField( + proto.MESSAGE, + number=20, + message=ExportFormat, + ) + training_pipeline = proto.Field( + proto.STRING, + number=7, + ) + container_spec = proto.Field( + proto.MESSAGE, + number=9, + message='ModelContainerSpec', + ) + artifact_uri = proto.Field( + proto.STRING, + number=26, + ) + supported_deployment_resources_types = proto.RepeatedField( + proto.ENUM, + number=10, + enum=DeploymentResourcesType, + ) + supported_input_storage_formats = proto.RepeatedField( + proto.STRING, + number=11, + ) + supported_output_storage_formats = proto.RepeatedField( + proto.STRING, + number=12, + ) + create_time = proto.Field( + proto.MESSAGE, + number=13, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=14, + message=timestamp_pb2.Timestamp, + ) + deployed_models = proto.RepeatedField( + proto.MESSAGE, + number=15, + message=deployed_model_ref.DeployedModelRef, + ) + explanation_spec = proto.Field( + proto.MESSAGE, + number=23, + message=explanation.ExplanationSpec, + ) + etag = proto.Field( + proto.STRING, + number=16, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=17, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=24, + message=gca_encryption_spec.EncryptionSpec, + ) + + +class PredictSchemata(proto.Message): + r"""Contains the schemata used in Model's predictions and explanations + via + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict], + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain] + and + [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. + + Attributes: + instance_schema_uri (str): + Immutable. Points to a YAML file stored on Google Cloud + Storage describing the format of a single instance, which + are used in + [PredictRequest.instances][google.cloud.aiplatform.v1beta1.PredictRequest.instances], + [ExplainRequest.instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances] + and + [BatchPredictionJob.input_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. + The schema is defined as an OpenAPI 3.0.2 `Schema + Object `__. + AutoML Models always have this field populated by Vertex AI. + Note: The URI given on output will be immutable and probably + different, including the URI scheme, than the one given on + input. The output URI will point to a location where the + user only has a read access. + parameters_schema_uri (str): + Immutable. 
Points to a YAML file stored on Google Cloud + Storage describing the parameters of prediction and + explanation via + [PredictRequest.parameters][google.cloud.aiplatform.v1beta1.PredictRequest.parameters], + [ExplainRequest.parameters][google.cloud.aiplatform.v1beta1.ExplainRequest.parameters] + and + [BatchPredictionJob.model_parameters][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model_parameters]. + The schema is defined as an OpenAPI 3.0.2 `Schema + Object `__. + AutoML Models always have this field populated by Vertex AI, + if no parameters are supported, then it is set to an empty + string. Note: The URI given on output will be immutable and + probably different, including the URI scheme, than the one + given on input. The output URI will point to a location + where the user only has a read access. + prediction_schema_uri (str): + Immutable. Points to a YAML file stored on Google Cloud + Storage describing the format of a single prediction + produced by this Model, which are returned via + [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions], + [ExplainResponse.explanations][google.cloud.aiplatform.v1beta1.ExplainResponse.explanations], + and + [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config]. + The schema is defined as an OpenAPI 3.0.2 `Schema + Object `__. + AutoML Models always have this field populated by Vertex AI. + Note: The URI given on output will be immutable and probably + different, including the URI scheme, than the one given on + input. The output URI will point to a location where the + user only has a read access. + """ + + instance_schema_uri = proto.Field( + proto.STRING, + number=1, + ) + parameters_schema_uri = proto.Field( + proto.STRING, + number=2, + ) + prediction_schema_uri = proto.Field( + proto.STRING, + number=3, + ) + + +class ModelContainerSpec(proto.Message): + r"""Specification of a container for serving predictions. Some fields in + this message correspond to fields in the `Kubernetes Container v1 + core + specification `__. + + Attributes: + image_uri (str): + Required. Immutable. URI of the Docker image to be used as + the custom container for serving predictions. This URI must + identify an image in Artifact Registry or Container + Registry. Learn more about the `container publishing + requirements `__, + including permissions requirements for the AI Platform + Service Agent. + + The container image is ingested upon + [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel], + stored internally, and this original path is afterwards not + used. + + To learn about the requirements for the Docker image itself, + see `Custom container + requirements `__. + + You can use the URI to one of Vertex AI's `pre-built + container images for + prediction `__ + in this field. + command (Sequence[str]): + Immutable. Specifies the command that runs when the + container starts. This overrides the container's + `ENTRYPOINT `__. + Specify this field as an array of executable and arguments, + similar to a Docker ``ENTRYPOINT``'s "exec" form, not its + "shell" form. + + If you do not specify this field, then the container's + ``ENTRYPOINT`` runs, in conjunction with the + [args][google.cloud.aiplatform.v1beta1.ModelContainerSpec.args] + field or the container's + ```CMD`` `__, + if either exists. 
If this field is not specified and the + container does not have an ``ENTRYPOINT``, then refer to the + Docker documentation about `how ``CMD`` and ``ENTRYPOINT`` + interact `__. + + If you specify this field, then you can also specify the + ``args`` field to provide additional arguments for this + command. However, if you specify this field, then the + container's ``CMD`` is ignored. See the `Kubernetes + documentation about how the ``command`` and ``args`` fields + interact with a container's ``ENTRYPOINT`` and + ``CMD`` `__. + + In this field, you can reference `environment variables set + by Vertex + AI `__ + and environment variables set in the + [env][google.cloud.aiplatform.v1beta1.ModelContainerSpec.env] + field. You cannot reference environment variables set in the + Docker image. In order for environment variables to be + expanded, reference them by using the following syntax: + $(VARIABLE_NAME) Note that this differs from Bash variable + expansion, which does not use parentheses. If a variable + cannot be resolved, the reference in the input string is + used unchanged. To avoid variable expansion, you can escape + this syntax with ``$$``; for example: $$(VARIABLE_NAME) This + field corresponds to the ``command`` field of the Kubernetes + Containers `v1 core + API `__. + args (Sequence[str]): + Immutable. Specifies arguments for the command that runs + when the container starts. This overrides the container's + ```CMD`` `__. + Specify this field as an array of executable and arguments, + similar to a Docker ``CMD``'s "default parameters" form. + + If you don't specify this field but do specify the + [command][google.cloud.aiplatform.v1beta1.ModelContainerSpec.command] + field, then the command from the ``command`` field runs + without any additional arguments. See the `Kubernetes + documentation about how the ``command`` and ``args`` fields + interact with a container's ``ENTRYPOINT`` and + ``CMD`` `__. + + If you don't specify this field and don't specify the + ``command`` field, then the container's + ```ENTRYPOINT`` `__ + and ``CMD`` determine what runs based on their default + behavior. See the Docker documentation about `how ``CMD`` + and ``ENTRYPOINT`` + interact `__. + + In this field, you can reference `environment variables set + by Vertex + AI `__ + and environment variables set in the + [env][google.cloud.aiplatform.v1beta1.ModelContainerSpec.env] + field. You cannot reference environment variables set in the + Docker image. In order for environment variables to be + expanded, reference them by using the following syntax: + $(VARIABLE_NAME) Note that this differs from Bash variable + expansion, which does not use parentheses. If a variable + cannot be resolved, the reference in the input string is + used unchanged. To avoid variable expansion, you can escape + this syntax with ``$$``; for example: $$(VARIABLE_NAME) This + field corresponds to the ``args`` field of the Kubernetes + Containers `v1 core + API `__. + env (Sequence[google.cloud.aiplatform_v1beta1.types.EnvVar]): + Immutable. List of environment variables to set in the + container. After the container starts running, code running + in the container can read these environment variables. + + Additionally, the + [command][google.cloud.aiplatform.v1beta1.ModelContainerSpec.command] + and + [args][google.cloud.aiplatform.v1beta1.ModelContainerSpec.args] + fields can reference these variables. Later entries in this + list can also reference earlier entries. 
For example, the
+            following example sets the variable ``VAR_2`` to have the
+            value ``foo bar``:
+
+            .. code:: json
+
+                [
+                    {
+                        "name": "VAR_1",
+                        "value": "foo"
+                    },
+                    {
+                        "name": "VAR_2",
+                        "value": "$(VAR_1) bar"
+                    }
+                ]
+
+            If you switch the order of the variables in the example,
+            then the expansion does not occur.
+
+            This field corresponds to the ``env`` field of the
+            Kubernetes Containers `v1 core
+            API `__.
+        ports (Sequence[google.cloud.aiplatform_v1beta1.types.Port]):
+            Immutable. List of ports to expose from the container.
+            Vertex AI sends any prediction requests that it receives to
+            the first port on this list. Vertex AI also sends
+            `liveness and health
+            checks `__
+            to this port.
+
+            If you do not specify this field, it defaults to the
+            following value:
+
+            .. code:: json
+
+                [
+                    {
+                        "containerPort": 8080
+                    }
+                ]
+
+            Vertex AI does not use ports other than the first one
+            listed. This field corresponds to the ``ports`` field of the
+            Kubernetes Containers `v1 core
+            API `__.
+        predict_route (str):
+            Immutable. HTTP path on the container to send prediction
+            requests to. Vertex AI forwards requests sent using
+            [projects.locations.endpoints.predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]
+            to this path on the container's IP address and port. Vertex
+            AI then returns the container's response in the API
+            response.
+
+            For example, if you set this field to ``/foo``, then when
+            Vertex AI receives a prediction request, it forwards the
+            request body in a POST request to the ``/foo`` path on the
+            port of your container specified by the first value of this
+            ``ModelContainerSpec``'s
+            [ports][google.cloud.aiplatform.v1beta1.ModelContainerSpec.ports]
+            field.
+
+            If you don't specify this field, it defaults to the
+            following value when you [deploy this Model to an
+            Endpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]:
+            /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict
+            The placeholders in this value are replaced as follows:
+
+            -  ENDPOINT: The last segment (following ``endpoints/``) of
+               the [Endpoint.name][] field of the Endpoint where this
+               Model has been deployed. (Vertex AI makes this value
+               available to your container code as the
+               ```AIP_ENDPOINT_ID`` environment
+               variable `__.)
+
+            -  DEPLOYED_MODEL:
+               [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id]
+               of the ``DeployedModel``. (Vertex AI makes this value
+               available to your container code as the
+               ```AIP_DEPLOYED_MODEL_ID`` environment
+               variable `__.)
+        health_route (str):
+            Immutable. HTTP path on the container to send health checks
+            to. Vertex AI intermittently sends GET requests to this path
+            on the container's IP address and port to check that the
+            container is healthy. Read more about `health
+            checks `__.
+
+            For example, if you set this field to ``/bar``, then Vertex
+            AI intermittently sends a GET request to the ``/bar`` path
+            on the port of your container specified by the first value
+            of this ``ModelContainerSpec``'s
+            [ports][google.cloud.aiplatform.v1beta1.ModelContainerSpec.ports]
+            field.
+
+            If you don't specify this field, it defaults to the
+            following value when you [deploy this Model to an
+            Endpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]:
+            /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict
+            The placeholders in this value are replaced as follows:
+
+            -  ENDPOINT: The last segment (following ``endpoints/``) of
+               the [Endpoint.name][] field of the Endpoint where this
+               Model has been deployed.
(Vertex AI makes this value + available to your container code as the + ```AIP_ENDPOINT_ID`` environment + variable `__.) + + - DEPLOYED_MODEL: + [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] + of the ``DeployedModel``. (Vertex AI makes this value + available to your container code as the + ```AIP_DEPLOYED_MODEL_ID`` environment + variable `__.) + """ + + image_uri = proto.Field( + proto.STRING, + number=1, + ) + command = proto.RepeatedField( + proto.STRING, + number=2, + ) + args = proto.RepeatedField( + proto.STRING, + number=3, + ) + env = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=env_var.EnvVar, + ) + ports = proto.RepeatedField( + proto.MESSAGE, + number=5, + message='Port', + ) + predict_route = proto.Field( + proto.STRING, + number=6, + ) + health_route = proto.Field( + proto.STRING, + number=7, + ) + + +class Port(proto.Message): + r"""Represents a network port in a container. + Attributes: + container_port (int): + The number of the port to expose on the pod's + IP address. Must be a valid port number, between + 1 and 65535 inclusive. + """ + + container_port = proto.Field( + proto.INT32, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py new file mode 100644 index 0000000000..db9cad318c --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py @@ -0,0 +1,407 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import job_state +from google.cloud.aiplatform_v1beta1.types import model_monitoring +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'ModelDeploymentMonitoringObjectiveType', + 'ModelDeploymentMonitoringJob', + 'ModelDeploymentMonitoringBigQueryTable', + 'ModelDeploymentMonitoringObjectiveConfig', + 'ModelDeploymentMonitoringScheduleConfig', + 'ModelMonitoringStatsAnomalies', + }, +) + + +class ModelDeploymentMonitoringObjectiveType(proto.Enum): + r"""The Model Monitoring Objective types.""" + MODEL_DEPLOYMENT_MONITORING_OBJECTIVE_TYPE_UNSPECIFIED = 0 + RAW_FEATURE_SKEW = 1 + RAW_FEATURE_DRIFT = 2 + FEATURE_ATTRIBUTION_SKEW = 3 + FEATURE_ATTRIBUTION_DRIFT = 4 + + +class ModelDeploymentMonitoringJob(proto.Message): + r"""Represents a job that runs periodically to monitor the + deployed models in an endpoint. 
It will analyze the logged
+    training & prediction data to detect any abnormal behaviors.
+
+    Attributes:
+        name (str):
+            Output only. Resource name of a
+            ModelDeploymentMonitoringJob.
+        display_name (str):
+            Required. The user-defined name of the
+            ModelDeploymentMonitoringJob. The name can be up
+            to 128 characters long and can consist of any
+            UTF-8 characters.
+        endpoint (str):
+            Required. Endpoint resource name. Format:
+            ``projects/{project}/locations/{location}/endpoints/{endpoint}``
+        state (google.cloud.aiplatform_v1beta1.types.JobState):
+            Output only. The detailed state of the
+            monitoring job. While the job is still being
+            created, the state will be 'PENDING'. Once the
+            job is successfully created, the state will be
+            'RUNNING'. If the job is paused, the state will
+            be 'PAUSED'; once resumed, it will return to
+            'RUNNING'.
+        schedule_state (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob.MonitoringScheduleState):
+            Output only. Schedule state when the
+            monitoring job is in Running state.
+        model_deployment_monitoring_objective_configs (Sequence[google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringObjectiveConfig]):
+            Required. The config for monitoring
+            objectives. This is a per DeployedModel config.
+            Each DeployedModel needs to be configured
+            separately.
+        model_deployment_monitoring_schedule_config (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringScheduleConfig):
+            Required. Schedule config for running the
+            monitoring job.
+        logging_sampling_strategy (google.cloud.aiplatform_v1beta1.types.SamplingStrategy):
+            Required. Sampling strategy for logging.
+        model_monitoring_alert_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringAlertConfig):
+            Alert config for model monitoring.
+        predict_instance_schema_uri (str):
+            YAML schema file URI describing the format of
+            a single instance, which is given to format this
+            Endpoint's prediction (and explanation). If not
+            set, we will generate the predict schema from
+            collected predict requests.
+        sample_predict_instance (google.protobuf.struct_pb2.Value):
+            Sample Predict instance, same format as
+            [PredictRequest.instances][google.cloud.aiplatform.v1beta1.PredictRequest.instances],
+            this can be set as a replacement of
+            [ModelDeploymentMonitoringJob.predict_instance_schema_uri][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.predict_instance_schema_uri].
+            If not set, we will generate the predict schema from
+            collected predict requests.
+        analysis_instance_schema_uri (str):
+            YAML schema file URI describing the format of a single
+            instance that you want Tensorflow Data Validation (TFDV) to
+            analyze.
+
+            If this field is empty, all the feature data types are
+            inferred from
+            [predict_instance_schema_uri][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.predict_instance_schema_uri],
+            meaning that TFDV will use the data in the exact format
+            (data type) as prediction request/response. If there are any
+            data type differences between predict instance and TFDV
+            instance, this field can be used to override the schema.
+            For models trained with Vertex AI, this field must be set as
+            all the fields in predict instance formatted as string.
+        bigquery_tables (Sequence[google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringBigQueryTable]):
+            Output only. The created BigQuery tables for
+            the job under the customer project. Customers
+            could do their own query & analysis.
There could be 4 + log tables in maximum: + 1. Training data logging predict + request/response 2. Serving data logging predict + request/response + log_ttl (google.protobuf.duration_pb2.Duration): + The TTL of BigQuery tables in user projects + which stores logs. A day is the basic unit of + the TTL and we take the ceil of TTL/86400(a + day). e.g. { second: 3600} indicates ttl = 1 + day. + labels (Sequence[google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob.LabelsEntry]): + The labels with user-defined metadata to + organize your ModelDeploymentMonitoringJob. + + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + ModelDeploymentMonitoringJob was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + ModelDeploymentMonitoringJob was updated most + recently. + next_schedule_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this monitoring + pipeline will be scheduled to run for the next + round. + stats_anomalies_base_directory (google.cloud.aiplatform_v1beta1.types.GcsDestination): + Stats anomalies base folder path. + """ + class MonitoringScheduleState(proto.Enum): + r"""The state to Specify the monitoring pipeline.""" + MONITORING_SCHEDULE_STATE_UNSPECIFIED = 0 + PENDING = 1 + OFFLINE = 2 + RUNNING = 3 + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + endpoint = proto.Field( + proto.STRING, + number=3, + ) + state = proto.Field( + proto.ENUM, + number=4, + enum=job_state.JobState, + ) + schedule_state = proto.Field( + proto.ENUM, + number=5, + enum=MonitoringScheduleState, + ) + model_deployment_monitoring_objective_configs = proto.RepeatedField( + proto.MESSAGE, + number=6, + message='ModelDeploymentMonitoringObjectiveConfig', + ) + model_deployment_monitoring_schedule_config = proto.Field( + proto.MESSAGE, + number=7, + message='ModelDeploymentMonitoringScheduleConfig', + ) + logging_sampling_strategy = proto.Field( + proto.MESSAGE, + number=8, + message=model_monitoring.SamplingStrategy, + ) + model_monitoring_alert_config = proto.Field( + proto.MESSAGE, + number=15, + message=model_monitoring.ModelMonitoringAlertConfig, + ) + predict_instance_schema_uri = proto.Field( + proto.STRING, + number=9, + ) + sample_predict_instance = proto.Field( + proto.MESSAGE, + number=19, + message=struct_pb2.Value, + ) + analysis_instance_schema_uri = proto.Field( + proto.STRING, + number=16, + ) + bigquery_tables = proto.RepeatedField( + proto.MESSAGE, + number=10, + message='ModelDeploymentMonitoringBigQueryTable', + ) + log_ttl = proto.Field( + proto.MESSAGE, + number=17, + message=duration_pb2.Duration, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=11, + ) + create_time = proto.Field( + proto.MESSAGE, + number=12, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=13, + message=timestamp_pb2.Timestamp, + ) + next_schedule_time = proto.Field( + proto.MESSAGE, + number=14, + message=timestamp_pb2.Timestamp, + ) + stats_anomalies_base_directory = proto.Field( + proto.MESSAGE, + number=20, + message=io.GcsDestination, + ) + + +class 
+class ModelDeploymentMonitoringBigQueryTable(proto.Message):
+    r"""ModelDeploymentMonitoringBigQueryTable specifies the BigQuery
+    table name as well as some information about the logs stored in
+    this table.
+
+    Attributes:
+        log_source (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringBigQueryTable.LogSource):
+            The source of the log.
+        log_type (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringBigQueryTable.LogType):
+            The type of the log.
+        bigquery_table_path (str):
+            The created BigQuery table to store logs. Customers can run
+            their own queries and analysis. Format:
+            ``bq://<project_id>.model_deployment_monitoring_<endpoint_id>.<tolower(log_source)>_<tolower(log_type)>``
+    """
+    class LogSource(proto.Enum):
+        r"""Indicates where the log comes from."""
+        LOG_SOURCE_UNSPECIFIED = 0
+        TRAINING = 1
+        SERVING = 2
+
+    class LogType(proto.Enum):
+        r"""Indicates what type of traffic the log belongs to."""
+        LOG_TYPE_UNSPECIFIED = 0
+        PREDICT = 1
+        EXPLAIN = 2
+
+    log_source = proto.Field(
+        proto.ENUM,
+        number=1,
+        enum=LogSource,
+    )
+    log_type = proto.Field(
+        proto.ENUM,
+        number=2,
+        enum=LogType,
+    )
+    bigquery_table_path = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+
+
+class ModelDeploymentMonitoringObjectiveConfig(proto.Message):
+    r"""ModelDeploymentMonitoringObjectiveConfig contains the pair of
+    deployed_model_id to ModelMonitoringObjectiveConfig.
+
+    Attributes:
+        deployed_model_id (str):
+            The DeployedModel ID of the objective config.
+        objective_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig):
+            The objective config for the model
+            monitoring job of this deployed model.
+    """
+
+    deployed_model_id = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    objective_config = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message=model_monitoring.ModelMonitoringObjectiveConfig,
+    )
+
+
+class ModelDeploymentMonitoringScheduleConfig(proto.Message):
+    r"""The config for scheduling the monitoring job.
+    Attributes:
+        monitor_interval (google.protobuf.duration_pb2.Duration):
+            Required. The model monitoring job running
+            interval. It will be rounded up to the next full
+            hour.
+    """
+
+    monitor_interval = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=duration_pb2.Duration,
+    )
+
+
+class ModelMonitoringStatsAnomalies(proto.Message):
+    r"""Statistics and anomalies generated by Model Monitoring.
+    Attributes:
+        objective (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringObjectiveType):
+            The model monitoring objective that these
+            stats and anomalies belong to.
+        deployed_model_id (str):
+            Deployed Model ID.
+        anomaly_count (int):
+            Number of anomalies within all stats.
+        feature_stats (Sequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies]):
+            A list of historical Stats and Anomalies
+            generated for all Features.
+    """
+
+    class FeatureHistoricStatsAnomalies(proto.Message):
+        r"""Historical Stats (and Anomalies) for a specific Feature.
+        Attributes:
+            feature_display_name (str):
+                Display Name of the Feature.
+            threshold (google.cloud.aiplatform_v1beta1.types.ThresholdConfig):
+                Threshold for anomaly detection.
+            training_stats (google.cloud.aiplatform_v1beta1.types.FeatureStatsAnomaly):
+                Stats calculated for the Training Dataset.
+            prediction_stats (Sequence[google.cloud.aiplatform_v1beta1.types.FeatureStatsAnomaly]):
+                A list of historical stats generated for
+                different time windows of the Prediction
+                Dataset.
+        """
+
+        feature_display_name = proto.Field(
+            proto.STRING,
+            number=1,
+        )
+        threshold = proto.Field(
+            proto.MESSAGE,
+            number=3,
+            message=model_monitoring.ThresholdConfig,
+        )
+        training_stats = proto.Field(
+            proto.MESSAGE,
+            number=4,
+            message=feature_monitoring_stats.FeatureStatsAnomaly,
+        )
+        prediction_stats = proto.RepeatedField(
+            proto.MESSAGE,
+            number=5,
+            message=feature_monitoring_stats.FeatureStatsAnomaly,
+        )
+
+    objective = proto.Field(
+        proto.ENUM,
+        number=1,
+        enum='ModelDeploymentMonitoringObjectiveType',
+    )
+    deployed_model_id = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    anomaly_count = proto.Field(
+        proto.INT32,
+        number=3,
+    )
+    feature_stats = proto.RepeatedField(
+        proto.MESSAGE,
+        number=4,
+        message=FeatureHistoricStatsAnomalies,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
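+
+
+# NOTE: Hand-written illustrative sketch, not generated code. It summarizes
+# the output-only stats message defined above using only fields from this
+# module; how the message is obtained from the service is out of scope here.
+def _example_summarize_stats(stats: ModelMonitoringStatsAnomalies) -> None:
+    print(f'deployed model {stats.deployed_model_id}: '
+          f'{stats.anomaly_count} anomalies')
+    for feature in stats.feature_stats:
+        print(f'  {feature.feature_display_name}: '
+              f'threshold={feature.threshold.value}')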
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation.py
new file mode 100644
index 0000000000..e2f6641aba
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation.py
@@ -0,0 +1,133 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import explanation
+from google.protobuf import struct_pb2  # type: ignore
+from google.protobuf import timestamp_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1',
+    manifest={
+        'ModelEvaluation',
+    },
+)
+
+
+class ModelEvaluation(proto.Message):
+    r"""A collection of metrics calculated by comparing a Model's
+    predictions on all of the test data against annotations from the
+    test data.
+
+    Attributes:
+        name (str):
+            Output only. The resource name of the
+            ModelEvaluation.
+        metrics_schema_uri (str):
+            Output only. Points to a YAML file stored on Google Cloud
+            Storage describing the
+            [metrics][google.cloud.aiplatform.v1beta1.ModelEvaluation.metrics]
+            of this ModelEvaluation. The schema is defined as an OpenAPI
+            3.0.2 `Schema
+            Object <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#schemaObject>`__.
+        metrics (google.protobuf.struct_pb2.Value):
+            Output only. Evaluation metrics of the Model. The schema of
+            the metrics is stored in
+            [metrics_schema_uri][google.cloud.aiplatform.v1beta1.ModelEvaluation.metrics_schema_uri].
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this
+            ModelEvaluation was created.
+        slice_dimensions (Sequence[str]):
+            Output only. All possible
+            [dimensions][ModelEvaluationSlice.slice.dimension] of
+            ModelEvaluationSlices. The dimensions can be used as the
+            filter of the
+            [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]
+            request, in the form of ``slice.dimension = <dimension>``.
+        model_explanation (google.cloud.aiplatform_v1beta1.types.ModelExplanation):
+            Output only.
Aggregated explanation metrics + for the Model's prediction output over the data + this ModelEvaluation uses. This field is + populated only if the Model is evaluated with + explanations, and only for AutoML tabular + Models. + explanation_specs (Sequence[google.cloud.aiplatform_v1beta1.types.ModelEvaluation.ModelEvaluationExplanationSpec]): + Output only. Describes the values of + [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec] + that are used for explaining the predicted values on the + evaluated data. + """ + + class ModelEvaluationExplanationSpec(proto.Message): + r""" + Attributes: + explanation_type (str): + Explanation type. + + For AutoML Image Classification models, possible values are: + + - ``image-integrated-gradients`` + - ``image-xrai`` + explanation_spec (google.cloud.aiplatform_v1beta1.types.ExplanationSpec): + Explanation spec details. + """ + + explanation_type = proto.Field( + proto.STRING, + number=1, + ) + explanation_spec = proto.Field( + proto.MESSAGE, + number=2, + message=explanation.ExplanationSpec, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + metrics_schema_uri = proto.Field( + proto.STRING, + number=2, + ) + metrics = proto.Field( + proto.MESSAGE, + number=3, + message=struct_pb2.Value, + ) + create_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + slice_dimensions = proto.RepeatedField( + proto.STRING, + number=5, + ) + model_explanation = proto.Field( + proto.MESSAGE, + number=8, + message=explanation.ModelExplanation, + ) + explanation_specs = proto.RepeatedField( + proto.MESSAGE, + number=9, + message=ModelEvaluationExplanationSpec, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py new file mode 100644 index 0000000000..ffa3738946 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py @@ -0,0 +1,109 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'ModelEvaluationSlice', + }, +) + + +class ModelEvaluationSlice(proto.Message): + r"""A collection of metrics calculated by comparing Model's + predictions on a slice of the test data against ground truth + annotations. + + Attributes: + name (str): + Output only. The resource name of the + ModelEvaluationSlice. + slice_ (google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice.Slice): + Output only. The slice of the test data that + is used to evaluate the Model. + metrics_schema_uri (str): + Output only. 
Points to a YAML file stored on Google Cloud
+            Storage describing the
+            [metrics][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.metrics]
+            of this ModelEvaluationSlice. The schema is defined as an
+            OpenAPI 3.0.2 `Schema
+            Object <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#schemaObject>`__.
+        metrics (google.protobuf.struct_pb2.Value):
+            Output only. Sliced evaluation metrics of the Model. The
+            schema of the metrics is stored in
+            [metrics_schema_uri][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.metrics_schema_uri].
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this
+            ModelEvaluationSlice was created.
+    """
+
+    class Slice(proto.Message):
+        r"""Definition of a slice.
+        Attributes:
+            dimension (str):
+                Output only. The dimension of the slice. Well-known
+                dimensions are:
+
+                -  ``annotationSpec``: This slice is on the test data that
+                   has either ground truth or prediction with
+                   [AnnotationSpec.display_name][google.cloud.aiplatform.v1beta1.AnnotationSpec.display_name]
+                   equal to
+                   [value][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.Slice.value].
+            value (str):
+                Output only. The value of the dimension in
+                this slice.
+        """
+
+        dimension = proto.Field(
+            proto.STRING,
+            number=1,
+        )
+        value = proto.Field(
+            proto.STRING,
+            number=2,
+        )
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    slice_ = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message=Slice,
+    )
+    metrics_schema_uri = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    metrics = proto.Field(
+        proto.MESSAGE,
+        number=4,
+        message=struct_pb2.Value,
+    )
+    create_time = proto.Field(
+        proto.MESSAGE,
+        number=5,
+        message=timestamp_pb2.Timestamp,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_monitoring.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_monitoring.py
new file mode 100644
index 0000000000..897ec7a0b6
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_monitoring.py
@@ -0,0 +1,251 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import io
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1',
+    manifest={
+        'ModelMonitoringObjectiveConfig',
+        'ModelMonitoringAlertConfig',
+        'ThresholdConfig',
+        'SamplingStrategy',
+    },
+)
+
+
+class ModelMonitoringObjectiveConfig(proto.Message):
+    r"""Next ID: 6
+    Attributes:
+        training_dataset (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.TrainingDataset):
+            Training dataset for models. This field has
+            to be set only if
+            TrainingPredictionSkewDetectionConfig is
+            specified.
+        training_prediction_skew_detection_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig):
+            The config for skew between training data and
+            prediction data.
+        prediction_drift_detection_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig):
+            The config for drift of prediction data.
+    """
+
+    class TrainingDataset(proto.Message):
+        r"""Training Dataset information.
+        Attributes:
+            dataset (str):
+                The resource name of the Dataset used to
+                train this Model.
+            gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource):
+                The Google Cloud Storage uri of the unmanaged
+                Dataset used to train this Model.
+            bigquery_source (google.cloud.aiplatform_v1beta1.types.BigQuerySource):
+                The BigQuery table of the unmanaged Dataset
+                used to train this Model.
+            data_format (str):
+                Data format of the dataset, only applicable
+                if the input is from Google Cloud Storage.
+                The possible formats are:
+
+                "tf-record"
+                The source file is a TFRecord file.
+
+                "csv"
+                The source file is a CSV file.
+            target_field (str):
+                The target field name the model is to
+                predict. This field will be excluded when doing
+                Predict and (or) Explain for the training data.
+            logging_sampling_strategy (google.cloud.aiplatform_v1beta1.types.SamplingStrategy):
+                Strategy to sample data from the Training
+                Dataset. If not set, the whole dataset is
+                processed.
+        """
+
+        dataset = proto.Field(
+            proto.STRING,
+            number=3,
+            oneof='data_source',
+        )
+        gcs_source = proto.Field(
+            proto.MESSAGE,
+            number=4,
+            oneof='data_source',
+            message=io.GcsSource,
+        )
+        bigquery_source = proto.Field(
+            proto.MESSAGE,
+            number=5,
+            oneof='data_source',
+            message=io.BigQuerySource,
+        )
+        data_format = proto.Field(
+            proto.STRING,
+            number=2,
+        )
+        target_field = proto.Field(
+            proto.STRING,
+            number=6,
+        )
+        logging_sampling_strategy = proto.Field(
+            proto.MESSAGE,
+            number=7,
+            message='SamplingStrategy',
+        )
+
+    class TrainingPredictionSkewDetectionConfig(proto.Message):
+        r"""The config for Training & Prediction data skew detection. It
+        specifies the training dataset sources and the skew detection
+        parameters.
+
+        Attributes:
+            skew_thresholds (Sequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig.SkewThresholdsEntry]):
+                Key is the feature name and value is the
+                threshold. If a feature needs to be monitored
+                for skew, a value threshold must be configured
+                for that feature. The threshold here is against
+                feature distribution distance between the
+                training and prediction features.
+        """
+
+        skew_thresholds = proto.MapField(
+            proto.STRING,
+            proto.MESSAGE,
+            number=1,
+            message='ThresholdConfig',
+        )
+
+    class PredictionDriftDetectionConfig(proto.Message):
+        r"""The config for Prediction data drift detection.
+        Attributes:
+            drift_thresholds (Sequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig.DriftThresholdsEntry]):
+                Key is the feature name and value is the
+                threshold. If a feature needs to be monitored
+                for drift, a value threshold must be configured
+                for that feature. The threshold here is against
+                feature distribution distance between different
+                time windows.
+        """
+
+        drift_thresholds = proto.MapField(
+            proto.STRING,
+            proto.MESSAGE,
+            number=1,
+            message='ThresholdConfig',
+        )
+
+    training_dataset = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=TrainingDataset,
+    )
+    training_prediction_skew_detection_config = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message=TrainingPredictionSkewDetectionConfig,
+    )
+    prediction_drift_detection_config = proto.Field(
+        proto.MESSAGE,
+        number=3,
+        message=PredictionDriftDetectionConfig,
+    )
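+
+
+# NOTE: Hand-written illustrative sketch, not generated code. It shows how a
+# skew-detection objective might be assembled from the messages above; the
+# dataset name, feature name and threshold value are placeholders.
+def _example_objective_config_sketch():
+    return ModelMonitoringObjectiveConfig(
+        training_dataset=ModelMonitoringObjectiveConfig.TrainingDataset(
+            dataset='projects/my-project/locations/us-central1/datasets/123',
+            target_field='churned',
+        ),
+        training_prediction_skew_detection_config=(
+            ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig(
+                skew_thresholds={'age': ThresholdConfig(value=0.1)},
+            )
+        ),
+    )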
+
+
+class ModelMonitoringAlertConfig(proto.Message):
+    r"""Next ID: 2
+    Attributes:
+        email_alert_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringAlertConfig.EmailAlertConfig):
+            Email alert config.
+    """
+
+    class EmailAlertConfig(proto.Message):
+        r"""The config for email alerts.
+        Attributes:
+            user_emails (Sequence[str]):
+                The email addresses to send the alert to.
+        """
+
+        user_emails = proto.RepeatedField(
+            proto.STRING,
+            number=1,
+        )
+
+    email_alert_config = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        oneof='alert',
+        message=EmailAlertConfig,
+    )
+
+
+class ThresholdConfig(proto.Message):
+    r"""The config for feature monitoring threshold.
+    Next ID: 3
+
+    Attributes:
+        value (float):
+            Specify a threshold value that can trigger
+            the alert. If this threshold config is for
+            feature distribution distance:
+
+            1. For categorical features, the distribution
+               distance is calculated by L-infinity norm.
+            2. For numerical features, the distribution
+               distance is calculated by Jensen–Shannon
+               divergence.
+
+            Each feature must have a non-zero threshold if
+            it is to be monitored; otherwise no alert will
+            be triggered for that feature.
+    """
+
+    value = proto.Field(
+        proto.DOUBLE,
+        number=1,
+        oneof='threshold',
+    )
+
+
+class SamplingStrategy(proto.Message):
+    r"""Sampling strategy for logging; it can apply to both the
+    training and prediction datasets.
+    Next ID: 2
+
+    Attributes:
+        random_sample_config (google.cloud.aiplatform_v1beta1.types.SamplingStrategy.RandomSampleConfig):
+            Random sample config. More sampling
+            strategies will be supported later.
+    """
+
+    class RandomSampleConfig(proto.Message):
+        r"""Requests are randomly selected.
+        Attributes:
+            sample_rate (float):
+                Sample rate in the range (0, 1].
+        """
+
+        sample_rate = proto.Field(
+            proto.DOUBLE,
+            number=1,
+        )
+
+    random_sample_config = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=RandomSampleConfig,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_service.py
new file mode 100644
index 0000000000..32300b287a
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_service.py
@@ -0,0 +1,569 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import model as gca_model +from google.cloud.aiplatform_v1beta1.types import model_evaluation +from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice +from google.cloud.aiplatform_v1beta1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'UploadModelRequest', + 'UploadModelOperationMetadata', + 'UploadModelResponse', + 'GetModelRequest', + 'ListModelsRequest', + 'ListModelsResponse', + 'UpdateModelRequest', + 'DeleteModelRequest', + 'ExportModelRequest', + 'ExportModelOperationMetadata', + 'ExportModelResponse', + 'GetModelEvaluationRequest', + 'ListModelEvaluationsRequest', + 'ListModelEvaluationsResponse', + 'GetModelEvaluationSliceRequest', + 'ListModelEvaluationSlicesRequest', + 'ListModelEvaluationSlicesResponse', + }, +) + + +class UploadModelRequest(proto.Message): + r"""Request message for + [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel]. + + Attributes: + parent (str): + Required. The resource name of the Location into which to + upload the Model. Format: + ``projects/{project}/locations/{location}`` + model (google.cloud.aiplatform_v1beta1.types.Model): + Required. The Model to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + model = proto.Field( + proto.MESSAGE, + number=2, + message=gca_model.Model, + ) + + +class UploadModelOperationMetadata(proto.Message): + r"""Details of + [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] + operation. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The common part of the operation metadata. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class UploadModelResponse(proto.Message): + r"""Response message of + [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] + operation. + + Attributes: + model (str): + The name of the uploaded Model resource. Format: + ``projects/{project}/locations/{location}/models/{model}`` + """ + + model = proto.Field( + proto.STRING, + number=1, + ) + + +class GetModelRequest(proto.Message): + r"""Request message for + [ModelService.GetModel][google.cloud.aiplatform.v1beta1.ModelService.GetModel]. + + Attributes: + name (str): + Required. The name of the Model resource. Format: + ``projects/{project}/locations/{location}/models/{model}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListModelsRequest(proto.Message): + r"""Request message for + [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + Models from. Format: + ``projects/{project}/locations/{location}`` + filter (str): + An expression for filtering the results of the request. For + field names both snake_case and camelCase are supported. + + - ``model`` supports = and !=. ``model`` represents the + Model ID, i.e. the last segment of the Model's [resource + name][google.cloud.aiplatform.v1beta1.Model.name]. 
+            - ``display_name`` supports = and !=
+            - ``labels`` supports general map functions, that is:
+
+              - ``labels.key=value`` - key:value equality
+              - ``labels.key:*`` or ``labels:key`` - key existence
+              - A key including a space must be quoted.
+                ``labels."a key"``.
+
+            Some examples:
+
+            - ``model=1234``
+            - ``displayName="myDisplayName"``
+            - ``labels.myKey="myValue"``
+        page_size (int):
+            The standard list page size.
+        page_token (str):
+            The standard list page token. Typically obtained via
+            [ListModelsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelsResponse.next_page_token]
+            of the previous
+            [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels]
+            call.
+        read_mask (google.protobuf.field_mask_pb2.FieldMask):
+            Mask specifying which fields to read.
+    """
+
+    parent = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    filter = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    page_size = proto.Field(
+        proto.INT32,
+        number=3,
+    )
+    page_token = proto.Field(
+        proto.STRING,
+        number=4,
+    )
+    read_mask = proto.Field(
+        proto.MESSAGE,
+        number=5,
+        message=field_mask_pb2.FieldMask,
+    )
+
+
+class ListModelsResponse(proto.Message):
+    r"""Response message for
+    [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels]
+
+    Attributes:
+        models (Sequence[google.cloud.aiplatform_v1beta1.types.Model]):
+            List of Models in the requested page.
+        next_page_token (str):
+            A token to retrieve the next page of results. Pass to
+            [ListModelsRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelsRequest.page_token]
+            to obtain that page.
+    """
+
+    @property
+    def raw_page(self):
+        return self
+
+    models = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message=gca_model.Model,
+    )
+    next_page_token = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+class UpdateModelRequest(proto.Message):
+    r"""Request message for
+    [ModelService.UpdateModel][google.cloud.aiplatform.v1beta1.ModelService.UpdateModel].
+
+    Attributes:
+        model (google.cloud.aiplatform_v1beta1.types.Model):
+            Required. The Model which replaces the
+            resource on the server.
+        update_mask (google.protobuf.field_mask_pb2.FieldMask):
+            Required. The update mask applies to the resource. For the
+            ``FieldMask`` definition, see
+            [google.protobuf.FieldMask][google.protobuf.FieldMask].
+    """
+
+    model = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=gca_model.Model,
+    )
+    update_mask = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message=field_mask_pb2.FieldMask,
+    )
+
+
+class DeleteModelRequest(proto.Message):
+    r"""Request message for
+    [ModelService.DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel].
+
+    Attributes:
+        name (str):
+            Required. The name of the Model resource to be deleted.
+            Format:
+            ``projects/{project}/locations/{location}/models/{model}``
+    """
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class ExportModelRequest(proto.Message):
+    r"""Request message for
+    [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel].
+
+    Attributes:
+        name (str):
+            Required. The resource name of the Model to export. Format:
+            ``projects/{project}/locations/{location}/models/{model}``
+        output_config (google.cloud.aiplatform_v1beta1.types.ExportModelRequest.OutputConfig):
+            Required. The desired output location and
+            configuration.
+    """
+
+    class OutputConfig(proto.Message):
+        r"""Output configuration for the Model export.
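+
+        A hand-written illustrative sketch (not part of the generated
+        documentation): a request exporting a Model's artifacts to Cloud
+        Storage might look like the following, where the format ID and
+        bucket are placeholders::
+
+            request = ExportModelRequest(
+                name='projects/my-project/locations/us-central1/models/123',
+                output_config=ExportModelRequest.OutputConfig(
+                    export_format_id='tf-saved-model',
+                    artifact_destination=io.GcsDestination(
+                        output_uri_prefix='gs://my-bucket/exports/',
+                    ),
+                ),
+            )
+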
+        Attributes:
+            export_format_id (str):
+                The ID of the format in which the Model must be exported.
+                Each Model lists the [export formats it
+                supports][google.cloud.aiplatform.v1beta1.Model.supported_export_formats].
+                If no value is provided here, then the first from the list
+                of the Model's supported formats is used by default.
+            artifact_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination):
+                The Cloud Storage location where the Model artifact is to be
+                written. Under the directory given as the destination a new
+                one with name
+                "``model-export-<model-display-name>-<timestamp-of-export-call>``",
+                where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601
+                format, will be created. Inside, the Model and any of its
+                supporting files will be written. This field should only be
+                set when the ``exportableContent`` field of the
+                [Model.supported_export_formats] object contains
+                ``ARTIFACT``.
+            image_destination (google.cloud.aiplatform_v1beta1.types.ContainerRegistryDestination):
+                The Google Container Registry or Artifact Registry uri where
+                the Model container image will be copied to. This field
+                should only be set when the ``exportableContent`` field of
+                the [Model.supported_export_formats] object contains
+                ``IMAGE``.
+        """
+
+        export_format_id = proto.Field(
+            proto.STRING,
+            number=1,
+        )
+        artifact_destination = proto.Field(
+            proto.MESSAGE,
+            number=3,
+            message=io.GcsDestination,
+        )
+        image_destination = proto.Field(
+            proto.MESSAGE,
+            number=4,
+            message=io.ContainerRegistryDestination,
+        )
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    output_config = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message=OutputConfig,
+    )
+
+
+class ExportModelOperationMetadata(proto.Message):
+    r"""Details of
+    [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel]
+    operation.
+
+    Attributes:
+        generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata):
+            The common part of the operation metadata.
+        output_info (google.cloud.aiplatform_v1beta1.types.ExportModelOperationMetadata.OutputInfo):
+            Output only. Information further describing
+            the output of this Model export.
+    """
+
+    class OutputInfo(proto.Message):
+        r"""Further describes the output of the ExportModel. Supplements
+        [ExportModelRequest.OutputConfig][google.cloud.aiplatform.v1beta1.ExportModelRequest.OutputConfig].
+
+        Attributes:
+            artifact_output_uri (str):
+                Output only. If the Model artifact is being
+                exported to Google Cloud Storage this is the
+                full path of the directory created, into which
+                the Model files are being written.
+            image_output_uri (str):
+                Output only. If the Model image is being
+                exported to Google Container Registry or
+                Artifact Registry this is the full path of the
+                image created.
+        """
+
+        artifact_output_uri = proto.Field(
+            proto.STRING,
+            number=2,
+        )
+        image_output_uri = proto.Field(
+            proto.STRING,
+            number=3,
+        )
+
+    generic_metadata = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=operation.GenericOperationMetadata,
+    )
+    output_info = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message=OutputInfo,
+    )
+
+
+class ExportModelResponse(proto.Message):
+    r"""Response message of
+    [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel]
+    operation.
+    """
+
+
+class GetModelEvaluationRequest(proto.Message):
+    r"""Request message for
+    [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation].
+
+    Attributes:
+        name (str):
+            Required. The name of the ModelEvaluation resource.
Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListModelEvaluationsRequest(proto.Message): + r"""Request message for + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. + + Attributes: + parent (str): + Required. The resource name of the Model to list the + ModelEvaluations from. Format: + ``projects/{project}/locations/{location}/models/{model}`` + filter (str): + The standard list filter. + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListModelEvaluationsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationsResponse.next_page_token] + of the previous + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListModelEvaluationsResponse(proto.Message): + r"""Response message for + [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. + + Attributes: + model_evaluations (Sequence[google.cloud.aiplatform_v1beta1.types.ModelEvaluation]): + List of ModelEvaluations in the requested + page. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListModelEvaluationsRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + model_evaluations = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=model_evaluation.ModelEvaluation, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class GetModelEvaluationSliceRequest(proto.Message): + r"""Request message for + [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluationSlice]. + + Attributes: + name (str): + Required. The name of the ModelEvaluationSlice resource. + Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListModelEvaluationSlicesRequest(proto.Message): + r"""Request message for + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. + + Attributes: + parent (str): + Required. The resource name of the ModelEvaluation to list + the ModelEvaluationSlices from. Format: + ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` + filter (str): + The standard list filter. + + - ``slice.dimension`` - for =. + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListModelEvaluationSlicesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationSlicesResponse.next_page_token] + of the previous + [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices] + call. 
+        read_mask (google.protobuf.field_mask_pb2.FieldMask):
+            Mask specifying which fields to read.
+    """
+
+    parent = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    filter = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    page_size = proto.Field(
+        proto.INT32,
+        number=3,
+    )
+    page_token = proto.Field(
+        proto.STRING,
+        number=4,
+    )
+    read_mask = proto.Field(
+        proto.MESSAGE,
+        number=5,
+        message=field_mask_pb2.FieldMask,
+    )
+
+
+class ListModelEvaluationSlicesResponse(proto.Message):
+    r"""Response message for
+    [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices].
+
+    Attributes:
+        model_evaluation_slices (Sequence[google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice]):
+            List of ModelEvaluationSlices in the
+            requested page.
+        next_page_token (str):
+            A token to retrieve the next page of results. Pass to
+            [ListModelEvaluationSlicesRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationSlicesRequest.page_token]
+            to obtain that page.
+    """
+
+    @property
+    def raw_page(self):
+        return self
+
+    model_evaluation_slices = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message=model_evaluation_slice.ModelEvaluationSlice,
+    )
+    next_page_token = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/operation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/operation.py
new file mode 100644
index 0000000000..c047e3c60c
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/operation.py
@@ -0,0 +1,81 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.protobuf import timestamp_pb2  # type: ignore
+from google.rpc import status_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1',
+    manifest={
+        'GenericOperationMetadata',
+        'DeleteOperationMetadata',
+    },
+)
+
+
+class GenericOperationMetadata(proto.Message):
+    r"""Generic Metadata shared by all operations.
+    Attributes:
+        partial_failures (Sequence[google.rpc.status_pb2.Status]):
+            Output only. Partial failures encountered.
+            E.g. single files that couldn't be read.
+            This field should never exceed 20 entries.
+            The status details field will contain standard
+            GCP error details.
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Time when the operation was
+            created.
+        update_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Time when the operation was
+            updated for the last time. If the operation has
+            finished (successfully or not), this is the
+            finish time.
+    """
+
+    partial_failures = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message=status_pb2.Status,
+    )
+    create_time = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message=timestamp_pb2.Timestamp,
+    )
+    update_time = proto.Field(
+        proto.MESSAGE,
+        number=3,
+        message=timestamp_pb2.Timestamp,
+    )
+
+
+class DeleteOperationMetadata(proto.Message):
+    r"""Details of operations that perform deletes of any entities.
+    Attributes:
+        generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata):
+            The common part of the operation metadata.
+    """
+
+    generic_metadata = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message='GenericOperationMetadata',
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_job.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_job.py
new file mode 100644
index 0000000000..fff759149d
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_job.py
@@ -0,0 +1,436 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import artifact
+from google.cloud.aiplatform_v1beta1.types import context
+from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec
+from google.cloud.aiplatform_v1beta1.types import execution as gca_execution
+from google.cloud.aiplatform_v1beta1.types import pipeline_state
+from google.cloud.aiplatform_v1beta1.types import value as gca_value
+from google.protobuf import struct_pb2  # type: ignore
+from google.protobuf import timestamp_pb2  # type: ignore
+from google.rpc import status_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1',
+    manifest={
+        'PipelineJob',
+        'PipelineJobDetail',
+        'PipelineTaskDetail',
+        'PipelineTaskExecutorDetail',
+    },
+)
+
+
+class PipelineJob(proto.Message):
+    r"""An instance of a machine learning PipelineJob.
+    Attributes:
+        name (str):
+            Output only. The resource name of the
+            PipelineJob.
+        display_name (str):
+            The display name of the Pipeline.
+            The name can be up to 128 characters long and
+            can consist of any UTF-8 characters.
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Pipeline creation time.
+        start_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Pipeline start time.
+        end_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Pipeline end time.
+        update_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this PipelineJob
+            was most recently updated.
+        pipeline_spec (google.protobuf.struct_pb2.Struct):
+            Required. The spec of the pipeline.
+        state (google.cloud.aiplatform_v1beta1.types.PipelineState):
+            Output only. The detailed state of the job.
+        job_detail (google.cloud.aiplatform_v1beta1.types.PipelineJobDetail):
+            Output only. The details of the pipeline run.
+            Not available in the list view.
+        error (google.rpc.status_pb2.Status):
+            Output only. The error that occurred during
+            pipeline execution. Only populated when the
+            pipeline's state is FAILED or CANCELLED.
+        labels (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineJob.LabelsEntry]):
+            The labels with user-defined metadata to
+            organize PipelineJob.
+            Label keys and values can be no longer than 64
+            characters (Unicode codepoints), can only
+            contain lowercase letters, numeric characters,
+            underscores and dashes. International characters
+            are allowed.
+            See https://goo.gl/xmQnxf for more information
+            and examples of labels.
+        runtime_config (google.cloud.aiplatform_v1beta1.types.PipelineJob.RuntimeConfig):
+            Runtime config of the pipeline.
+        encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec):
+            Customer-managed encryption key spec for a
+            pipelineJob. If set, this PipelineJob and all of
+            its sub-resources will be secured by this key.
+        service_account (str):
+            The service account that the pipeline workload runs as. If
+            not specified, the Compute Engine default service account in
+            the project will be used. See
+            https://cloud.google.com/compute/docs/access/service-accounts#default_service_account
+
+            Users starting the pipeline must have the
+            ``iam.serviceAccounts.actAs`` permission on this service
+            account.
+        network (str):
+            The full name of the Compute Engine
+            `network <https://cloud.google.com/compute/docs/networks-and-firewalls#networks>`__
+            to which the Pipeline Job's workload should be peered. For
+            example, ``projects/12345/global/networks/myVPC``. The
+            `format <https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert>`__
+            is of the form
+            ``projects/{project}/global/networks/{network}``. Where
+            {project} is a project number, as in ``12345``, and
+            {network} is a network name.
+
+            Private services access must already be configured for the
+            network. The Pipeline job will apply the network
+            configuration to the GCP resources being launched, if any,
+            such as a Vertex AI Training or Dataflow job. If left
+            unspecified, the workload is not peered with any network.
+    """
+
+    class RuntimeConfig(proto.Message):
+        r"""The runtime config of a PipelineJob.
+        Attributes:
+            parameters (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineJob.RuntimeConfig.ParametersEntry]):
+                The runtime parameters of the PipelineJob. The parameters
+                will be passed into
+                [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec]
+                to replace the placeholders at runtime.
+            gcs_output_directory (str):
+                Required. A path in a Cloud Storage bucket, which will be
+                treated as the root output directory of the pipeline. It is
+                used by the system to generate the paths of output
+                artifacts. The artifact paths are generated with a sub-path
+                pattern ``{job_id}/{task_id}/{output_key}`` under the
+                specified output directory. The service account specified in
+                this pipeline must have the ``storage.objects.get`` and
+                ``storage.objects.create`` permissions for this bucket.
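+
+                A hand-written illustrative sketch (not part of the
+                generated documentation): a minimal runtime config,
+                assuming an existing Cloud Storage bucket, might look
+                like::
+
+                    config = PipelineJob.RuntimeConfig(
+                        gcs_output_directory='gs://my-bucket/pipeline-root',
+                        parameters={
+                            'learning_rate': gca_value.Value(double_value=0.01),
+                        },
+                    )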
+ """ + + parameters = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=1, + message=gca_value.Value, + ) + gcs_output_directory = proto.Field( + proto.STRING, + number=2, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + create_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + start_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + pipeline_spec = proto.Field( + proto.MESSAGE, + number=7, + message=struct_pb2.Struct, + ) + state = proto.Field( + proto.ENUM, + number=8, + enum=pipeline_state.PipelineState, + ) + job_detail = proto.Field( + proto.MESSAGE, + number=9, + message='PipelineJobDetail', + ) + error = proto.Field( + proto.MESSAGE, + number=10, + message=status_pb2.Status, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=11, + ) + runtime_config = proto.Field( + proto.MESSAGE, + number=12, + message=RuntimeConfig, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=16, + message=gca_encryption_spec.EncryptionSpec, + ) + service_account = proto.Field( + proto.STRING, + number=17, + ) + network = proto.Field( + proto.STRING, + number=18, + ) + + +class PipelineJobDetail(proto.Message): + r"""The runtime detail of PipelineJob. + Attributes: + pipeline_context (google.cloud.aiplatform_v1beta1.types.Context): + Output only. The context of the pipeline. + pipeline_run_context (google.cloud.aiplatform_v1beta1.types.Context): + Output only. The context of the current + pipeline run. + task_details (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail]): + Output only. The runtime details of the tasks + under the pipeline. + """ + + pipeline_context = proto.Field( + proto.MESSAGE, + number=1, + message=context.Context, + ) + pipeline_run_context = proto.Field( + proto.MESSAGE, + number=2, + message=context.Context, + ) + task_details = proto.RepeatedField( + proto.MESSAGE, + number=3, + message='PipelineTaskDetail', + ) + + +class PipelineTaskDetail(proto.Message): + r"""The runtime detail of a task execution. + Attributes: + task_id (int): + Output only. The system generated ID of the + task. + parent_task_id (int): + Output only. The id of the parent task if the + task is within a component scope. Empty if the + task is at the root level. + task_name (str): + Output only. The user specified name of the task that is + defined in [PipelineJob.spec][]. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Task create time. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Task start time. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Task end time. + executor_detail (google.cloud.aiplatform_v1beta1.types.PipelineTaskExecutorDetail): + Output only. The detailed execution info. + state (google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail.State): + Output only. State of the task. + execution (google.cloud.aiplatform_v1beta1.types.Execution): + Output only. The execution metadata of the + task. + error (google.rpc.status_pb2.Status): + Output only. The error that occurred during + task execution. Only populated when the task's + state is FAILED or CANCELLED. 
+ inputs (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail.InputsEntry]): + Output only. The runtime input artifacts of + the task. + outputs (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail.OutputsEntry]): + Output only. The runtime output artifacts of + the task. + """ + class State(proto.Enum): + r"""Specifies state of TaskExecution""" + STATE_UNSPECIFIED = 0 + PENDING = 1 + RUNNING = 2 + SUCCEEDED = 3 + CANCEL_PENDING = 4 + CANCELLING = 5 + CANCELLED = 6 + FAILED = 7 + SKIPPED = 8 + NOT_TRIGGERED = 9 + + class ArtifactList(proto.Message): + r"""A list of artifact metadata. + Attributes: + artifacts (Sequence[google.cloud.aiplatform_v1beta1.types.Artifact]): + Output only. A list of artifact metadata. + """ + + artifacts = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=artifact.Artifact, + ) + + task_id = proto.Field( + proto.INT64, + number=1, + ) + parent_task_id = proto.Field( + proto.INT64, + number=12, + ) + task_name = proto.Field( + proto.STRING, + number=2, + ) + create_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + start_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + executor_detail = proto.Field( + proto.MESSAGE, + number=6, + message='PipelineTaskExecutorDetail', + ) + state = proto.Field( + proto.ENUM, + number=7, + enum=State, + ) + execution = proto.Field( + proto.MESSAGE, + number=8, + message=gca_execution.Execution, + ) + error = proto.Field( + proto.MESSAGE, + number=9, + message=status_pb2.Status, + ) + inputs = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=10, + message=ArtifactList, + ) + outputs = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=11, + message=ArtifactList, + ) + + +class PipelineTaskExecutorDetail(proto.Message): + r"""The runtime detail of a pipeline executor. + Attributes: + container_detail (google.cloud.aiplatform_v1beta1.types.PipelineTaskExecutorDetail.ContainerDetail): + Output only. The detailed info for a + container executor. + custom_job_detail (google.cloud.aiplatform_v1beta1.types.PipelineTaskExecutorDetail.CustomJobDetail): + Output only. The detailed info for a custom + job executor. + """ + + class ContainerDetail(proto.Message): + r"""The detail of a container execution. It contains the job + names of the lifecycle of a container execution. + + Attributes: + main_job (str): + Output only. The name of the + [CustomJob][google.cloud.aiplatform.v1beta1.CustomJob] for + the main container execution. + pre_caching_check_job (str): + Output only. The name of the + [CustomJob][google.cloud.aiplatform.v1beta1.CustomJob] for + the pre-caching-check container execution. This job will be + available if the + [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec] + specifies the ``pre_caching_check`` hook in the lifecycle + events. + """ + + main_job = proto.Field( + proto.STRING, + number=1, + ) + pre_caching_check_job = proto.Field( + proto.STRING, + number=2, + ) + + class CustomJobDetail(proto.Message): + r"""The detailed info for a custom job executor. + Attributes: + job (str): + Output only. The name of the + [CustomJob][google.cloud.aiplatform.v1beta1.CustomJob]. 
+ """ + + job = proto.Field( + proto.STRING, + number=1, + ) + + container_detail = proto.Field( + proto.MESSAGE, + number=1, + oneof='details', + message=ContainerDetail, + ) + custom_job_detail = proto.Field( + proto.MESSAGE, + number=2, + oneof='details', + message=CustomJobDetail, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_service.py new file mode 100644 index 0000000000..2043ad1221 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_service.py @@ -0,0 +1,367 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job +from google.cloud.aiplatform_v1beta1.types import training_pipeline as gca_training_pipeline +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'CreateTrainingPipelineRequest', + 'GetTrainingPipelineRequest', + 'ListTrainingPipelinesRequest', + 'ListTrainingPipelinesResponse', + 'DeleteTrainingPipelineRequest', + 'CancelTrainingPipelineRequest', + 'CreatePipelineJobRequest', + 'GetPipelineJobRequest', + 'ListPipelineJobsRequest', + 'ListPipelineJobsResponse', + 'DeletePipelineJobRequest', + 'CancelPipelineJobRequest', + }, +) + + +class CreateTrainingPipelineRequest(proto.Message): + r"""Request message for + [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + TrainingPipeline in. Format: + ``projects/{project}/locations/{location}`` + training_pipeline (google.cloud.aiplatform_v1beta1.types.TrainingPipeline): + Required. The TrainingPipeline to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + training_pipeline = proto.Field( + proto.MESSAGE, + number=2, + message=gca_training_pipeline.TrainingPipeline, + ) + + +class GetTrainingPipelineRequest(proto.Message): + r"""Request message for + [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline]. + + Attributes: + name (str): + Required. The name of the TrainingPipeline resource. Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListTrainingPipelinesRequest(proto.Message): + r"""Request message for + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + TrainingPipelines from. 
Format: + ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. Supported fields: + + - ``display_name`` supports = and !=. + + - ``state`` supports = and !=. + + Some examples of using the filter are: + + - ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"`` + + - ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"`` + + - ``NOT display_name="my_pipeline"`` + + - ``state="PIPELINE_STATE_FAILED"`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListTrainingPipelinesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListTrainingPipelinesResponse.next_page_token] + of the previous + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + + +class ListTrainingPipelinesResponse(proto.Message): + r"""Response message for + [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines] + + Attributes: + training_pipelines (Sequence[google.cloud.aiplatform_v1beta1.types.TrainingPipeline]): + List of TrainingPipelines in the requested + page. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListTrainingPipelinesRequest.page_token][google.cloud.aiplatform.v1beta1.ListTrainingPipelinesRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + training_pipelines = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_training_pipeline.TrainingPipeline, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteTrainingPipelineRequest(proto.Message): + r"""Request message for + [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline]. + + Attributes: + name (str): + Required. The name of the TrainingPipeline resource to be + deleted. Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CancelTrainingPipelineRequest(proto.Message): + r"""Request message for + [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline]. + + Attributes: + name (str): + Required. The name of the TrainingPipeline to cancel. + Format: + ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CreatePipelineJobRequest(proto.Message): + r"""Request message for + [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CreatePipelineJob]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + PipelineJob in. Format: + ``projects/{project}/locations/{location}`` + pipeline_job (google.cloud.aiplatform_v1beta1.types.PipelineJob): + Required. The PipelineJob to create. 
+ pipeline_job_id (str): + The ID to use for the PipelineJob, which will become the + final component of the PipelineJob name. If not provided, an + ID will be automatically generated. + + This value should be less than 128 characters, and valid + characters are /[a-z][0-9]-/. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + pipeline_job = proto.Field( + proto.MESSAGE, + number=2, + message=gca_pipeline_job.PipelineJob, + ) + pipeline_job_id = proto.Field( + proto.STRING, + number=3, + ) + + +class GetPipelineJobRequest(proto.Message): + r"""Request message for + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob]. + + Attributes: + name (str): + Required. The name of the PipelineJob resource. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListPipelineJobsRequest(proto.Message): + r"""Request message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + PipelineJobs from. Format: + ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. Supported fields: + + - ``display_name`` supports ``=`` and ``!=``. + - ``state`` supports ``=`` and ``!=``. + + The following examples demonstrate how to filter the list of + PipelineJobs: + + - ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"`` + - ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"`` + - ``NOT display_name="my_pipeline"`` + - ``state="PIPELINE_STATE_FAILED"`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListPipelineJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListPipelineJobsResponse.next_page_token] + of the previous + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs] + call. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + + +class ListPipelineJobsResponse(proto.Message): + r"""Response message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs] + + Attributes: + pipeline_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineJob]): + List of PipelineJobs in the requested page. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListPipelineJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListPipelineJobsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + pipeline_jobs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_pipeline_job.PipelineJob, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class DeletePipelineJobRequest(proto.Message): + r"""Request message for + [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.DeletePipelineJob]. + + Attributes: + name (str): + Required. The name of the PipelineJob resource to be + deleted. 
Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CancelPipelineJobRequest(proto.Message): + r"""Request message for + [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CancelPipelineJob]. + + Attributes: + name (str): + Required. The name of the PipelineJob to cancel. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_state.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_state.py new file mode 100644 index 0000000000..83459cab69 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_state.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'PipelineState', + }, +) + + +class PipelineState(proto.Enum): + r"""Describes the state of a pipeline.""" + PIPELINE_STATE_UNSPECIFIED = 0 + PIPELINE_STATE_QUEUED = 1 + PIPELINE_STATE_PENDING = 2 + PIPELINE_STATE_RUNNING = 3 + PIPELINE_STATE_SUCCEEDED = 4 + PIPELINE_STATE_FAILED = 5 + PIPELINE_STATE_CANCELLING = 6 + PIPELINE_STATE_CANCELLED = 7 + PIPELINE_STATE_PAUSED = 8 + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/prediction_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/prediction_service.py new file mode 100644 index 0000000000..b38a2c1f34 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/prediction_service.py @@ -0,0 +1,214 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import explanation +from google.protobuf import struct_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'PredictRequest', + 'PredictResponse', + 'ExplainRequest', + 'ExplainResponse', + }, +) + + +class PredictRequest(proto.Message): + r"""Request message for + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. + + Attributes: + endpoint (str): + Required. The name of the Endpoint requested to serve the + prediction. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + instances (Sequence[google.protobuf.struct_pb2.Value]): + Required. The instances that are the input to the prediction + call. A DeployedModel may have an upper limit on the number + of instances it supports per request, and when it is + exceeded the prediction call errors in case of AutoML + Models, or, in case of customer created Models, the + behaviour is as documented by that Model. The schema of any + single instance may be specified via Endpoint's + DeployedModels' + [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. + parameters (google.protobuf.struct_pb2.Value): + The parameters that govern the prediction. The schema of the + parameters may be specified via Endpoint's DeployedModels' + [Model's + ][google.cloud.aiplatform.v1beta1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. + """ + + endpoint = proto.Field( + proto.STRING, + number=1, + ) + instances = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=struct_pb2.Value, + ) + parameters = proto.Field( + proto.MESSAGE, + number=3, + message=struct_pb2.Value, + ) + + +class PredictResponse(proto.Message): + r"""Response message for + [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. + + Attributes: + predictions (Sequence[google.protobuf.struct_pb2.Value]): + The predictions that are the output of the predictions call. + The schema of any single prediction may be specified via + Endpoint's DeployedModels' [Model's + ][google.cloud.aiplatform.v1beta1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri]. + deployed_model_id (str): + ID of the Endpoint's DeployedModel that + served this prediction. + """ + + predictions = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=struct_pb2.Value, + ) + deployed_model_id = proto.Field( + proto.STRING, + number=2, + ) + + +class ExplainRequest(proto.Message): + r"""Request message for + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. + + Attributes: + endpoint (str): + Required. The name of the Endpoint requested to serve the + explanation. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}`` + instances (Sequence[google.protobuf.struct_pb2.Value]): + Required. The instances that are the input to the + explanation call. 
A DeployedModel may have an upper limit on + the number of instances it supports per request, and when it + is exceeded the explanation call errors in case of AutoML + Models, or, in case of customer created Models, the + behaviour is as documented by that Model. The schema of any + single instance may be specified via Endpoint's + DeployedModels' + [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. + parameters (google.protobuf.struct_pb2.Value): + The parameters that govern the prediction. The schema of the + parameters may be specified via Endpoint's DeployedModels' + [Model's + ][google.cloud.aiplatform.v1beta1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. + explanation_spec_override (google.cloud.aiplatform_v1beta1.types.ExplanationSpecOverride): + If specified, overrides the + [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] + of the DeployedModel. Can be used for explaining prediction + results with different configurations, such as: + + - Explaining top-5 predictions results as opposed to top-1; + - Increasing path count or step count of the attribution + methods to reduce approximate errors; + - Using different baselines for explaining the prediction + results. + deployed_model_id (str): + If specified, this ExplainRequest will be served by the + chosen DeployedModel, overriding + [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split]. + """ + + endpoint = proto.Field( + proto.STRING, + number=1, + ) + instances = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=struct_pb2.Value, + ) + parameters = proto.Field( + proto.MESSAGE, + number=4, + message=struct_pb2.Value, + ) + explanation_spec_override = proto.Field( + proto.MESSAGE, + number=5, + message=explanation.ExplanationSpecOverride, + ) + deployed_model_id = proto.Field( + proto.STRING, + number=3, + ) + + +class ExplainResponse(proto.Message): + r"""Response message for + [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. + + Attributes: + explanations (Sequence[google.cloud.aiplatform_v1beta1.types.Explanation]): + The explanations of the Model's + [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions]. + + It has the same number of elements as + [instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances] + to be explained. + deployed_model_id (str): + ID of the Endpoint's DeployedModel that + served this explanation. + predictions (Sequence[google.protobuf.struct_pb2.Value]): + The predictions that are the output of the predictions call. + Same as + [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions]. 
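+
+    Example:
+        A minimal sketch of requesting explanations (not part of the
+        generated reference; the endpoint name and the instance payload
+        below are placeholders):
+
+        .. code-block:: python
+
+            from google.cloud import aiplatform_v1beta1
+            from google.protobuf import json_format, struct_pb2
+
+            client = aiplatform_v1beta1.PredictionServiceClient()
+
+            # Encode one instance as a protobuf Value; the feature name
+            # "feature_a" is made up for illustration.
+            instance = json_format.ParseDict(
+                {"feature_a": 1.0}, struct_pb2.Value()
+            )
+
+            response = client.explain(
+                endpoint="projects/my-project/locations/us-central1/endpoints/123",
+                instances=[instance],
+            )
+            for explanation in response.explanations:
+                print(explanation.attributions)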
+    """
+
+    explanations = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message=explanation.Explanation,
+    )
+    deployed_model_id = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    predictions = proto.RepeatedField(
+        proto.MESSAGE,
+        number=3,
+        message=struct_pb2.Value,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool.py
new file mode 100644
index 0000000000..a985c80f43
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1',
+    manifest={
+        'SpecialistPool',
+    },
+)
+
+
+class SpecialistPool(proto.Message):
+    r"""SpecialistPool represents customers' own workforce to work on
+    their data labeling jobs. It includes a group of specialist
+    managers who are responsible for managing the labelers in this
+    pool as well as customers' data labeling jobs associated with
+    this pool.
+    Customers create a specialist pool as well as start data
+    labeling jobs on Cloud; managers and labelers work with the jobs
+    using the CrowdCompute console.
+
+    Attributes:
+        name (str):
+            Required. The resource name of the
+            SpecialistPool.
+        display_name (str):
+            Required. The user-defined name of the
+            SpecialistPool. The name can be up to 128
+            characters long and can consist of any UTF-8
+            characters.
+            This field should be unique at the project
+            level.
+        specialist_managers_count (int):
+            Output only. The number of Specialists in
+            this SpecialistPool.
+        specialist_manager_emails (Sequence[str]):
+            The email addresses of the specialists in the
+            SpecialistPool.
+        pending_data_labeling_jobs (Sequence[str]):
+            Output only. The resource name of the pending
+            data labeling jobs.
+    """
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    display_name = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    specialist_managers_count = proto.Field(
+        proto.INT32,
+        number=3,
+    )
+    specialist_manager_emails = proto.RepeatedField(
+        proto.STRING,
+        number=4,
+    )
+    pending_data_labeling_jobs = proto.RepeatedField(
+        proto.STRING,
+        number=5,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool_service.py
new file mode 100644
index 0000000000..c6ebb83779
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool_service.py
@@ -0,0 +1,237 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import operation +from google.cloud.aiplatform_v1beta1.types import specialist_pool as gca_specialist_pool +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'CreateSpecialistPoolRequest', + 'CreateSpecialistPoolOperationMetadata', + 'GetSpecialistPoolRequest', + 'ListSpecialistPoolsRequest', + 'ListSpecialistPoolsResponse', + 'DeleteSpecialistPoolRequest', + 'UpdateSpecialistPoolRequest', + 'UpdateSpecialistPoolOperationMetadata', + }, +) + + +class CreateSpecialistPoolRequest(proto.Message): + r"""Request message for + [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool]. + + Attributes: + parent (str): + Required. The parent Project name for the new + SpecialistPool. The form is + ``projects/{project}/locations/{location}``. + specialist_pool (google.cloud.aiplatform_v1beta1.types.SpecialistPool): + Required. The SpecialistPool to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + specialist_pool = proto.Field( + proto.MESSAGE, + number=2, + message=gca_specialist_pool.SpecialistPool, + ) + + +class CreateSpecialistPoolOperationMetadata(proto.Message): + r"""Runtime operation information for + [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class GetSpecialistPoolRequest(proto.Message): + r"""Request message for + [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool]. + + Attributes: + name (str): + Required. The name of the SpecialistPool resource. The form + is + ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListSpecialistPoolsRequest(proto.Message): + r"""Request message for + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. + + Attributes: + parent (str): + Required. The name of the SpecialistPool's parent resource. + Format: ``projects/{project}/locations/{location}`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained by + [ListSpecialistPoolsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListSpecialistPoolsResponse.next_page_token] + of the previous + [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools] + call. Return first page if empty. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. 
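+
+    Example:
+        A minimal listing sketch (not part of the generated reference;
+        the project and location below are placeholders):
+
+        .. code-block:: python
+
+            from google.cloud import aiplatform_v1beta1
+
+            client = aiplatform_v1beta1.SpecialistPoolServiceClient()
+            # The returned pager transparently fetches subsequent pages
+            # using next_page_token from each response.
+            for pool in client.list_specialist_pools(
+                parent="projects/my-project/locations/us-central1",
+            ):
+                print(pool.display_name, pool.specialist_managers_count)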
+    """
+
+    parent = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    page_size = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+    page_token = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    read_mask = proto.Field(
+        proto.MESSAGE,
+        number=4,
+        message=field_mask_pb2.FieldMask,
+    )
+
+
+class ListSpecialistPoolsResponse(proto.Message):
+    r"""Response message for
+    [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools].
+
+    Attributes:
+        specialist_pools (Sequence[google.cloud.aiplatform_v1beta1.types.SpecialistPool]):
+            A list of SpecialistPools that matches the
+            specified filter in the request.
+        next_page_token (str):
+            The standard List next-page token.
+    """
+
+    @property
+    def raw_page(self):
+        return self
+
+    specialist_pools = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message=gca_specialist_pool.SpecialistPool,
+    )
+    next_page_token = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+class DeleteSpecialistPoolRequest(proto.Message):
+    r"""Request message for
+    [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool].
+
+    Attributes:
+        name (str):
+            Required. The resource name of the SpecialistPool to delete.
+            Format:
+            ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``
+        force (bool):
+            If set to true, any specialist managers in
+            this SpecialistPool will also be deleted.
+            (Otherwise, the request will only work if the
+            SpecialistPool has no specialist managers.)
+    """
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    force = proto.Field(
+        proto.BOOL,
+        number=2,
+    )
+
+
+class UpdateSpecialistPoolRequest(proto.Message):
+    r"""Request message for
+    [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool].
+
+    Attributes:
+        specialist_pool (google.cloud.aiplatform_v1beta1.types.SpecialistPool):
+            Required. The SpecialistPool which replaces
+            the resource on the server.
+        update_mask (google.protobuf.field_mask_pb2.FieldMask):
+            Required. The update mask applies to the
+            resource.
+    """
+
+    specialist_pool = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=gca_specialist_pool.SpecialistPool,
+    )
+    update_mask = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message=field_mask_pb2.FieldMask,
+    )
+
+
+class UpdateSpecialistPoolOperationMetadata(proto.Message):
+    r"""Runtime operation metadata for
+    [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool].
+
+    Attributes:
+        specialist_pool (str):
+            Output only. The name of the SpecialistPool to which the
+            specialists are being added. Format:
+            ``projects/{project_id}/locations/{location_id}/specialistPools/{specialist_pool}``
+        generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata):
+            The operation generic information.
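+
+    Example:
+        A sketch of how this metadata surfaces on the long-running
+        operation returned by an update call (not part of the generated
+        reference; the pool name is a placeholder):
+
+        .. code-block:: python
+
+            from google.cloud import aiplatform_v1beta1
+            from google.protobuf import field_mask_pb2
+
+            client = aiplatform_v1beta1.SpecialistPoolServiceClient()
+            operation = client.update_specialist_pool(
+                specialist_pool=aiplatform_v1beta1.SpecialistPool(
+                    name="projects/my-project/locations/us-central1/specialistPools/456",
+                    display_name="renamed-pool",
+                ),
+                update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
+            )
+            # While the operation runs, its metadata is an
+            # UpdateSpecialistPoolOperationMetadata message.
+            print(operation.metadata.specialist_pool)
+            pool = operation.result()  # blocks until the update finishes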
+ """ + + specialist_pool = proto.Field( + proto.STRING, + number=1, + ) + generic_metadata = proto.Field( + proto.MESSAGE, + number=2, + message=operation.GenericOperationMetadata, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/study.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/study.py new file mode 100644 index 0000000000..4e2a1161ba --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/study.py @@ -0,0 +1,748 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'Study', + 'Trial', + 'StudySpec', + 'Measurement', + }, +) + + +class Study(proto.Message): + r"""A message representing a Study. + Attributes: + name (str): + Output only. The name of a study. The study's globally + unique identifier. Format: + ``projects/{project}/locations/{location}/studies/{study}`` + display_name (str): + Required. Describes the Study, default value + is empty string. + study_spec (google.cloud.aiplatform_v1beta1.types.StudySpec): + Required. Configuration of the Study. + state (google.cloud.aiplatform_v1beta1.types.Study.State): + Output only. The detailed state of a Study. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time at which the study was + created. + inactive_reason (str): + Output only. A human readable reason why the + Study is inactive. This should be empty if a + study is ACTIVE or COMPLETED. + """ + class State(proto.Enum): + r"""Describes the Study state.""" + STATE_UNSPECIFIED = 0 + ACTIVE = 1 + INACTIVE = 2 + COMPLETED = 3 + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + study_spec = proto.Field( + proto.MESSAGE, + number=3, + message='StudySpec', + ) + state = proto.Field( + proto.ENUM, + number=4, + enum=State, + ) + create_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + inactive_reason = proto.Field( + proto.STRING, + number=6, + ) + + +class Trial(proto.Message): + r"""A message representing a Trial. A Trial contains a unique set + of Parameters that has been or will be evaluated, along with the + objective metrics got by running the Trial. + + Attributes: + name (str): + Output only. Resource name of the Trial + assigned by the service. + id (str): + Output only. The identifier of the Trial + assigned by the service. + state (google.cloud.aiplatform_v1beta1.types.Trial.State): + Output only. The detailed state of the Trial. + parameters (Sequence[google.cloud.aiplatform_v1beta1.types.Trial.Parameter]): + Output only. The parameters of the Trial. 
+ final_measurement (google.cloud.aiplatform_v1beta1.types.Measurement): + Output only. The final measurement containing + the objective value. + measurements (Sequence[google.cloud.aiplatform_v1beta1.types.Measurement]): + Output only. A list of measurements that are strictly + lexicographically ordered by their induced tuples (steps, + elapsed_duration). These are used for early stopping + computations. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the Trial was started. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the Trial's status changed to + ``SUCCEEDED`` or ``INFEASIBLE``. + client_id (str): + Output only. The identifier of the client that originally + requested this Trial. Each client is identified by a unique + client_id. When a client asks for a suggestion, Vizier will + assign it a Trial. The client should evaluate the Trial, + complete it, and report back to Vizier. If suggestion is + asked again by same client_id before the Trial is completed, + the same Trial will be returned. Multiple clients with + different client_ids can ask for suggestions simultaneously, + each of them will get their own Trial. + infeasible_reason (str): + Output only. A human readable string describing why the + Trial is infeasible. This is set only if Trial state is + ``INFEASIBLE``. + custom_job (str): + Output only. The CustomJob name linked to the + Trial. It's set for a HyperparameterTuningJob's + Trial. + """ + class State(proto.Enum): + r"""Describes a Trial state.""" + STATE_UNSPECIFIED = 0 + REQUESTED = 1 + ACTIVE = 2 + STOPPING = 3 + SUCCEEDED = 4 + INFEASIBLE = 5 + + class Parameter(proto.Message): + r"""A message representing a parameter to be tuned. + Attributes: + parameter_id (str): + Output only. The ID of the parameter. The parameter should + be defined in [StudySpec's + Parameters][google.cloud.aiplatform.v1beta1.StudySpec.parameters]. + value (google.protobuf.struct_pb2.Value): + Output only. The value of the parameter. ``number_value`` + will be set if a parameter defined in StudySpec is in type + 'INTEGER', 'DOUBLE' or 'DISCRETE'. ``string_value`` will be + set if a parameter defined in StudySpec is in type + 'CATEGORICAL'. + """ + + parameter_id = proto.Field( + proto.STRING, + number=1, + ) + value = proto.Field( + proto.MESSAGE, + number=2, + message=struct_pb2.Value, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + id = proto.Field( + proto.STRING, + number=2, + ) + state = proto.Field( + proto.ENUM, + number=3, + enum=State, + ) + parameters = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=Parameter, + ) + final_measurement = proto.Field( + proto.MESSAGE, + number=5, + message='Measurement', + ) + measurements = proto.RepeatedField( + proto.MESSAGE, + number=6, + message='Measurement', + ) + start_time = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=8, + message=timestamp_pb2.Timestamp, + ) + client_id = proto.Field( + proto.STRING, + number=9, + ) + infeasible_reason = proto.Field( + proto.STRING, + number=10, + ) + custom_job = proto.Field( + proto.STRING, + number=11, + ) + + +class StudySpec(proto.Message): + r"""Represents specification of a Study. + Attributes: + decay_curve_stopping_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.DecayCurveAutomatedStoppingSpec): + The automated early stopping spec using decay + curve rule. 
+        median_automated_stopping_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.MedianAutomatedStoppingSpec):
+            The automated early stopping spec using
+            median rule.
+        convex_stop_config (google.cloud.aiplatform_v1beta1.types.StudySpec.ConvexStopConfig):
+            The automated early stopping using convex
+            stopping rule.
+        metrics (Sequence[google.cloud.aiplatform_v1beta1.types.StudySpec.MetricSpec]):
+            Required. Metric specs for the Study.
+        parameters (Sequence[google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec]):
+            Required. The set of parameters to tune.
+        algorithm (google.cloud.aiplatform_v1beta1.types.StudySpec.Algorithm):
+            The search algorithm specified for the Study.
+        observation_noise (google.cloud.aiplatform_v1beta1.types.StudySpec.ObservationNoise):
+            The observation noise level of the study.
+            Currently only supported by the Vizier service.
+            Not supported by HyperparameterTuningJob or
+            TrainingPipeline.
+        measurement_selection_type (google.cloud.aiplatform_v1beta1.types.StudySpec.MeasurementSelectionType):
+            Describes which measurement selection type
+            will be used.
+    """
+    class Algorithm(proto.Enum):
+        r"""The available search algorithms for the Study."""
+        ALGORITHM_UNSPECIFIED = 0
+        GRID_SEARCH = 2
+        RANDOM_SEARCH = 3
+
+    class ObservationNoise(proto.Enum):
+        r"""Describes the noise level of the repeated observations.
+        "Noisy" means that the repeated observations with the same Trial
+        parameters may lead to different metric evaluations.
+        """
+        OBSERVATION_NOISE_UNSPECIFIED = 0
+        LOW = 1
+        HIGH = 2
+
+    class MeasurementSelectionType(proto.Enum):
+        r"""This indicates which measurement to use if/when the service
+        automatically selects the final measurement from previously reported
+        intermediate measurements. Choose this based on two considerations:
+        A) Do you expect your measurements to monotonically improve? If so,
+        choose LAST_MEASUREMENT. On the other hand, if you're in a situation
+        where your system can "over-train" and you expect the performance to
+        get better for a while but then start declining, choose
+        BEST_MEASUREMENT. B) Are your measurements significantly noisy
+        and/or irreproducible? If so, BEST_MEASUREMENT will tend to be
+        over-optimistic, and it may be better to choose LAST_MEASUREMENT. If
+        both or neither of (A) and (B) apply, it doesn't matter which
+        selection type is chosen.
+        """
+        MEASUREMENT_SELECTION_TYPE_UNSPECIFIED = 0
+        LAST_MEASUREMENT = 1
+        BEST_MEASUREMENT = 2
+
+    class MetricSpec(proto.Message):
+        r"""Represents a metric to optimize.
+        Attributes:
+            metric_id (str):
+                Required. The ID of the metric. Must not
+                contain whitespaces and must be unique amongst
+                all MetricSpecs.
+            goal (google.cloud.aiplatform_v1beta1.types.StudySpec.MetricSpec.GoalType):
+                Required. The optimization goal of the
+                metric.
+        """
+        class GoalType(proto.Enum):
+            r"""The available types of optimization goals."""
+            GOAL_TYPE_UNSPECIFIED = 0
+            MAXIMIZE = 1
+            MINIMIZE = 2
+
+        metric_id = proto.Field(
+            proto.STRING,
+            number=1,
+        )
+        goal = proto.Field(
+            proto.ENUM,
+            number=2,
+            enum='StudySpec.MetricSpec.GoalType',
+        )
+
+    class ParameterSpec(proto.Message):
+        r"""Represents a single parameter to optimize.
+        Attributes:
+            double_value_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.DoubleValueSpec):
+                The value spec for a 'DOUBLE' parameter.
+            integer_value_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.IntegerValueSpec):
+                The value spec for an 'INTEGER' parameter.
+ categorical_value_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.CategoricalValueSpec): + The value spec for a 'CATEGORICAL' parameter. + discrete_value_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.DiscreteValueSpec): + The value spec for a 'DISCRETE' parameter. + parameter_id (str): + Required. The ID of the parameter. Must not + contain whitespaces and must be unique amongst + all ParameterSpecs. + scale_type (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.ScaleType): + How the parameter should be scaled. Leave unset for + ``CATEGORICAL`` parameters. + conditional_parameter_specs (Sequence[google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.ConditionalParameterSpec]): + A conditional parameter node is active if the parameter's + value matches the conditional node's parent_value_condition. + + If two items in conditional_parameter_specs have the same + name, they must have disjoint parent_value_condition. + """ + class ScaleType(proto.Enum): + r"""The type of scaling that should be applied to this parameter.""" + SCALE_TYPE_UNSPECIFIED = 0 + UNIT_LINEAR_SCALE = 1 + UNIT_LOG_SCALE = 2 + UNIT_REVERSE_LOG_SCALE = 3 + + class DoubleValueSpec(proto.Message): + r"""Value specification for a parameter in ``DOUBLE`` type. + Attributes: + min_value (float): + Required. Inclusive minimum value of the + parameter. + max_value (float): + Required. Inclusive maximum value of the + parameter. + """ + + min_value = proto.Field( + proto.DOUBLE, + number=1, + ) + max_value = proto.Field( + proto.DOUBLE, + number=2, + ) + + class IntegerValueSpec(proto.Message): + r"""Value specification for a parameter in ``INTEGER`` type. + Attributes: + min_value (int): + Required. Inclusive minimum value of the + parameter. + max_value (int): + Required. Inclusive maximum value of the + parameter. + """ + + min_value = proto.Field( + proto.INT64, + number=1, + ) + max_value = proto.Field( + proto.INT64, + number=2, + ) + + class CategoricalValueSpec(proto.Message): + r"""Value specification for a parameter in ``CATEGORICAL`` type. + Attributes: + values (Sequence[str]): + Required. The list of possible categories. + """ + + values = proto.RepeatedField( + proto.STRING, + number=1, + ) + + class DiscreteValueSpec(proto.Message): + r"""Value specification for a parameter in ``DISCRETE`` type. + Attributes: + values (Sequence[float]): + Required. A list of possible values. + The list should be in increasing order and at + least 1e-10 apart. For instance, this parameter + might have possible settings of 1.5, 2.5, and + 4.0. This list should not contain more than + 1,000 values. + """ + + values = proto.RepeatedField( + proto.DOUBLE, + number=1, + ) + + class ConditionalParameterSpec(proto.Message): + r"""Represents a parameter spec with condition from its parent + parameter. + + Attributes: + parent_discrete_values (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.ConditionalParameterSpec.DiscreteValueCondition): + The spec for matching values from a parent parameter of + ``DISCRETE`` type. + parent_int_values (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.ConditionalParameterSpec.IntValueCondition): + The spec for matching values from a parent parameter of + ``INTEGER`` type. + parent_categorical_values (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.ConditionalParameterSpec.CategoricalValueCondition): + The spec for matching values from a parent parameter of + ``CATEGORICAL`` type. 
+ parameter_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec): + Required. The spec for a conditional + parameter. + """ + + class DiscreteValueCondition(proto.Message): + r"""Represents the spec to match discrete values from parent + parameter. + + Attributes: + values (Sequence[float]): + Required. Matches values of the parent parameter of + 'DISCRETE' type. All values must exist in + ``discrete_value_spec`` of parent parameter. + + The Epsilon of the value matching is 1e-10. + """ + + values = proto.RepeatedField( + proto.DOUBLE, + number=1, + ) + + class IntValueCondition(proto.Message): + r"""Represents the spec to match integer values from parent + parameter. + + Attributes: + values (Sequence[int]): + Required. Matches values of the parent parameter of + 'INTEGER' type. All values must lie in + ``integer_value_spec`` of parent parameter. + """ + + values = proto.RepeatedField( + proto.INT64, + number=1, + ) + + class CategoricalValueCondition(proto.Message): + r"""Represents the spec to match categorical values from parent + parameter. + + Attributes: + values (Sequence[str]): + Required. Matches values of the parent parameter of + 'CATEGORICAL' type. All values must exist in + ``categorical_value_spec`` of parent parameter. + """ + + values = proto.RepeatedField( + proto.STRING, + number=1, + ) + + parent_discrete_values = proto.Field( + proto.MESSAGE, + number=2, + oneof='parent_value_condition', + message='StudySpec.ParameterSpec.ConditionalParameterSpec.DiscreteValueCondition', + ) + parent_int_values = proto.Field( + proto.MESSAGE, + number=3, + oneof='parent_value_condition', + message='StudySpec.ParameterSpec.ConditionalParameterSpec.IntValueCondition', + ) + parent_categorical_values = proto.Field( + proto.MESSAGE, + number=4, + oneof='parent_value_condition', + message='StudySpec.ParameterSpec.ConditionalParameterSpec.CategoricalValueCondition', + ) + parameter_spec = proto.Field( + proto.MESSAGE, + number=1, + message='StudySpec.ParameterSpec', + ) + + double_value_spec = proto.Field( + proto.MESSAGE, + number=2, + oneof='parameter_value_spec', + message='StudySpec.ParameterSpec.DoubleValueSpec', + ) + integer_value_spec = proto.Field( + proto.MESSAGE, + number=3, + oneof='parameter_value_spec', + message='StudySpec.ParameterSpec.IntegerValueSpec', + ) + categorical_value_spec = proto.Field( + proto.MESSAGE, + number=4, + oneof='parameter_value_spec', + message='StudySpec.ParameterSpec.CategoricalValueSpec', + ) + discrete_value_spec = proto.Field( + proto.MESSAGE, + number=5, + oneof='parameter_value_spec', + message='StudySpec.ParameterSpec.DiscreteValueSpec', + ) + parameter_id = proto.Field( + proto.STRING, + number=1, + ) + scale_type = proto.Field( + proto.ENUM, + number=6, + enum='StudySpec.ParameterSpec.ScaleType', + ) + conditional_parameter_specs = proto.RepeatedField( + proto.MESSAGE, + number=10, + message='StudySpec.ParameterSpec.ConditionalParameterSpec', + ) + + class DecayCurveAutomatedStoppingSpec(proto.Message): + r"""The decay curve automated stopping rule builds a Gaussian + Process Regressor to predict the final objective value of a + Trial based on the already completed Trials and the intermediate + measurements of the current Trial. Early stopping is requested + for the current Trial if there is very low probability to exceed + the optimal value found so far. 
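+
+        For example, a decay-curve rule could be attached to a StudySpec
+        like this (a sketch, not generated reference material; the metric
+        shown is a placeholder):
+
+        .. code-block:: python
+
+            from google.cloud import aiplatform_v1beta1
+
+            spec = aiplatform_v1beta1.StudySpec(
+                metrics=[
+                    aiplatform_v1beta1.StudySpec.MetricSpec(
+                        metric_id="accuracy",
+                        goal=aiplatform_v1beta1.StudySpec.MetricSpec.GoalType.MAXIMIZE,
+                    )
+                ],
+                decay_curve_stopping_spec=(
+                    aiplatform_v1beta1.StudySpec.DecayCurveAutomatedStoppingSpec(
+                        use_elapsed_duration=False,  # use step_count as the x-axis
+                    )
+                ),
+            )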
+
+        Attributes:
+            use_elapsed_duration (bool):
+                True if
+                [Measurement.elapsed_duration][google.cloud.aiplatform.v1beta1.Measurement.elapsed_duration]
+                is used as the x-axis of each Trial's Decay Curve. Otherwise,
+                [Measurement.step_count][google.cloud.aiplatform.v1beta1.Measurement.step_count]
+                will be used as the x-axis.
+        """
+
+        use_elapsed_duration = proto.Field(
+            proto.BOOL,
+            number=1,
+        )
+
+    class MedianAutomatedStoppingSpec(proto.Message):
+        r"""The median automated stopping rule stops a pending Trial if the
+        Trial's best objective_value is strictly below the median
+        'performance' of all completed Trials reported up to the Trial's
+        last measurement. Currently, 'performance' refers to the running
+        average of the objective values reported by the Trial in each
+        measurement.
+
+        Attributes:
+            use_elapsed_duration (bool):
+                True if the median automated stopping rule applies on
+                [Measurement.elapsed_duration][google.cloud.aiplatform.v1beta1.Measurement.elapsed_duration].
+                It means that the elapsed_duration field of the latest
+                measurement of the current Trial is used to compute the
+                median objective value for each completed Trial.
+        """
+
+        use_elapsed_duration = proto.Field(
+            proto.BOOL,
+            number=1,
+        )
+
+    class ConvexStopConfig(proto.Message):
+        r"""Configuration for ConvexStopPolicy.
+        Attributes:
+            max_num_steps (int):
+                Steps used in predicting the final objective for early
+                stopped trials. In general, it's set to be the same as the
+                defined steps in training / tuning. When use_steps is false,
+                this field is set to the maximum elapsed seconds.
+            min_num_steps (int):
+                Minimum number of steps for a trial to complete. Trials
+                which do not have a measurement with num_steps >
+                min_num_steps won't be considered for early stopping. It's
+                ok to set it to 0, and a trial can be early stopped at any
+                stage. By default, min_num_steps is set to be one-tenth of
+                the max_num_steps. When use_steps is false, this field is
+                set to the minimum elapsed seconds.
+            autoregressive_order (int):
+                The number of Trial measurements used in
+                autoregressive model for value prediction. A
+                trial won't be considered for early stopping if
+                it has fewer measurement points.
+            learning_rate_parameter_name (str):
+                The hyper-parameter name used in the tuning job that stands
+                for learning rate. Leave it blank if learning rate is not a
+                parameter in tuning. The learning_rate is used to estimate
+                the objective value of the ongoing trial.
+            use_seconds (bool):
+                This bool determines whether or not the rule is applied
+                based on elapsed_secs or steps. If use_seconds==false, the
+                early stopping decision is made based on the predicted
+                objective values at the target steps. If
+                use_seconds==true, elapsed_secs is used instead of steps.
+                Also, in this case, the parameters max_num_steps and
+                min_num_steps are overloaded to contain max_elapsed_seconds
+                and min_elapsed_seconds.
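+
+        For instance (a sketch with made-up step budgets, not generated
+        reference material):
+
+        .. code-block:: python
+
+            from google.cloud import aiplatform_v1beta1
+
+            convex_stop = aiplatform_v1beta1.StudySpec.ConvexStopConfig(
+                max_num_steps=1000,
+                min_num_steps=100,   # trials under 100 steps are never stopped early
+                autoregressive_order=5,
+                use_seconds=False,   # decide based on steps, not elapsed seconds
+            )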
+ """ + + max_num_steps = proto.Field( + proto.INT64, + number=1, + ) + min_num_steps = proto.Field( + proto.INT64, + number=2, + ) + autoregressive_order = proto.Field( + proto.INT64, + number=3, + ) + learning_rate_parameter_name = proto.Field( + proto.STRING, + number=4, + ) + use_seconds = proto.Field( + proto.BOOL, + number=5, + ) + + decay_curve_stopping_spec = proto.Field( + proto.MESSAGE, + number=4, + oneof='automated_stopping_spec', + message=DecayCurveAutomatedStoppingSpec, + ) + median_automated_stopping_spec = proto.Field( + proto.MESSAGE, + number=5, + oneof='automated_stopping_spec', + message=MedianAutomatedStoppingSpec, + ) + convex_stop_config = proto.Field( + proto.MESSAGE, + number=8, + oneof='automated_stopping_spec', + message=ConvexStopConfig, + ) + metrics = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=MetricSpec, + ) + parameters = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=ParameterSpec, + ) + algorithm = proto.Field( + proto.ENUM, + number=3, + enum=Algorithm, + ) + observation_noise = proto.Field( + proto.ENUM, + number=6, + enum=ObservationNoise, + ) + measurement_selection_type = proto.Field( + proto.ENUM, + number=7, + enum=MeasurementSelectionType, + ) + + +class Measurement(proto.Message): + r"""A message representing a Measurement of a Trial. A + Measurement contains the Metrics got by executing a Trial using + suggested hyperparameter values. + + Attributes: + elapsed_duration (google.protobuf.duration_pb2.Duration): + Output only. Time that the Trial has been + running at the point of this Measurement. + step_count (int): + Output only. The number of steps the machine + learning model has been trained for. Must be + non-negative. + metrics (Sequence[google.cloud.aiplatform_v1beta1.types.Measurement.Metric]): + Output only. A list of metrics got by + evaluating the objective functions using + suggested Parameter values. + """ + + class Metric(proto.Message): + r"""A message representing a metric in the measurement. + Attributes: + metric_id (str): + Output only. The ID of the Metric. The Metric should be + defined in [StudySpec's + Metrics][google.cloud.aiplatform.v1beta1.StudySpec.metrics]. + value (float): + Output only. The value for this metric. + """ + + metric_id = proto.Field( + proto.STRING, + number=1, + ) + value = proto.Field( + proto.DOUBLE, + number=2, + ) + + elapsed_duration = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + step_count = proto.Field( + proto.INT64, + number=2, + ) + metrics = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=Metric, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard.py new file mode 100644 index 0000000000..a984eb652d --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard.py @@ -0,0 +1,131 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec
+from google.protobuf import timestamp_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1',
+    manifest={
+        'Tensorboard',
+    },
+)
+
+
+class Tensorboard(proto.Message):
+    r"""Tensorboard is a physical database that stores users'
+    training metrics. A default Tensorboard is provided in each
+    region of a GCP project. If needed, users can also create extra
+    Tensorboards in their projects.
+
+    Attributes:
+        name (str):
+            Output only. Name of the Tensorboard. Format:
+            ``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
+        display_name (str):
+            Required. User provided name of this
+            Tensorboard.
+        description (str):
+            Description of this Tensorboard.
+        encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec):
+            Customer-managed encryption key spec for a
+            Tensorboard. If set, this Tensorboard and all
+            sub-resources of this Tensorboard will be
+            secured by this key.
+        blob_storage_path_prefix (str):
+            Output only. Consumer project Cloud Storage
+            path prefix used to store blob data, which can
+            either be a bucket or directory. Does not end
+            with a '/'.
+        run_count (int):
+            Output only. The number of Runs stored in
+            this Tensorboard.
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this Tensorboard
+            was created.
+        update_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only. Timestamp when this Tensorboard
+            was last updated.
+        labels (Sequence[google.cloud.aiplatform_v1beta1.types.Tensorboard.LabelsEntry]):
+            The labels with user-defined metadata to
+            organize your Tensorboards.
+            Label keys and values can be no longer than 64
+            characters (Unicode codepoints), can only
+            contain lowercase letters, numeric characters,
+            underscores and dashes. International characters
+            are allowed. No more than 64 user labels can be
+            associated with one Tensorboard (System labels
+            are excluded).
+
+            See https://goo.gl/xmQnxf for more information
+            and examples of labels. System reserved label
+            keys are prefixed with
+            "aiplatform.googleapis.com/" and are immutable.
+        etag (str):
+            Used to perform consistent read-modify-write
+            updates. If not set, a blind "overwrite" update
+            happens.
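+
+    Example:
+        A minimal creation sketch (not part of the generated reference;
+        the project and location below are placeholders):
+
+        .. code-block:: python
+
+            from google.cloud import aiplatform_v1beta1
+
+            client = aiplatform_v1beta1.TensorboardServiceClient()
+            operation = client.create_tensorboard(
+                parent="projects/my-project/locations/us-central1",
+                tensorboard=aiplatform_v1beta1.Tensorboard(
+                    display_name="my-experiments",
+                ),
+            )
+            tensorboard = operation.result()  # waits for the LRO to finish
+            print(tensorboard.name)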
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + description = proto.Field( + proto.STRING, + number=3, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=11, + message=gca_encryption_spec.EncryptionSpec, + ) + blob_storage_path_prefix = proto.Field( + proto.STRING, + number=10, + ) + run_count = proto.Field( + proto.INT32, + number=5, + ) + create_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=8, + ) + etag = proto.Field( + proto.STRING, + number=9, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_data.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_data.py new file mode 100644 index 0000000000..c9336e93b3 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_data.py @@ -0,0 +1,187 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'TimeSeriesData', + 'TimeSeriesDataPoint', + 'Scalar', + 'TensorboardTensor', + 'TensorboardBlobSequence', + 'TensorboardBlob', + }, +) + + +class TimeSeriesData(proto.Message): + r"""All the data stored in a TensorboardTimeSeries. + Attributes: + tensorboard_time_series_id (str): + Required. The ID of the + TensorboardTimeSeries, which will become the + final component of the TensorboardTimeSeries' + resource name + value_type (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries.ValueType): + Required. Immutable. The value type of this + time series. All the values in this time series + data must match this value type. + values (Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesDataPoint]): + Required. Data points in this time series. + """ + + tensorboard_time_series_id = proto.Field( + proto.STRING, + number=1, + ) + value_type = proto.Field( + proto.ENUM, + number=2, + enum=tensorboard_time_series.TensorboardTimeSeries.ValueType, + ) + values = proto.RepeatedField( + proto.MESSAGE, + number=3, + message='TimeSeriesDataPoint', + ) + + +class TimeSeriesDataPoint(proto.Message): + r"""A TensorboardTimeSeries data point. + Attributes: + scalar (google.cloud.aiplatform_v1beta1.types.Scalar): + A scalar value. + tensor (google.cloud.aiplatform_v1beta1.types.TensorboardTensor): + A tensor value. + blobs (google.cloud.aiplatform_v1beta1.types.TensorboardBlobSequence): + A blob sequence value. 
+        wall_time (google.protobuf.timestamp_pb2.Timestamp):
+            Wall clock timestamp when this data point is
+            generated by the end user.
+        step (int):
+            Step index of this data point within the run.
+    """
+
+    scalar = proto.Field(
+        proto.MESSAGE,
+        number=3,
+        oneof='value',
+        message='Scalar',
+    )
+    tensor = proto.Field(
+        proto.MESSAGE,
+        number=4,
+        oneof='value',
+        message='TensorboardTensor',
+    )
+    blobs = proto.Field(
+        proto.MESSAGE,
+        number=5,
+        oneof='value',
+        message='TensorboardBlobSequence',
+    )
+    wall_time = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=timestamp_pb2.Timestamp,
+    )
+    step = proto.Field(
+        proto.INT64,
+        number=2,
+    )
+
+
+class Scalar(proto.Message):
+    r"""One point viewable on a scalar metric plot.
+    Attributes:
+        value (float):
+            Value of the point at this step / timestamp.
+    """
+
+    value = proto.Field(
+        proto.DOUBLE,
+        number=1,
+    )
+
+
+class TensorboardTensor(proto.Message):
+    r"""One point viewable on a tensor metric plot.
+    Attributes:
+        value (bytes):
+            Required. Serialized form of
+            https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/tensor.proto
+        version_number (int):
+            Optional. Version number of TensorProto used to serialize
+            [value][google.cloud.aiplatform.v1beta1.TensorboardTensor.value].
+    """
+
+    value = proto.Field(
+        proto.BYTES,
+        number=1,
+    )
+    version_number = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+
+
+class TensorboardBlobSequence(proto.Message):
+    r"""One point viewable on a blob metric plot, but mostly just a wrapper
+    message to work around the fact that repeated fields can't be used
+    directly within ``oneof`` fields.
+
+    Attributes:
+        values (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardBlob]):
+            List of blobs contained within the sequence.
+    """
+
+    values = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message='TensorboardBlob',
+    )
+
+
+class TensorboardBlob(proto.Message):
+    r"""One blob (e.g., image, graph) viewable on a blob metric plot.
+    Attributes:
+        id (str):
+            Output only. A URI-safe key uniquely
+            identifying a blob. Can be used to locate the
+            blob stored in the Cloud Storage bucket of the
+            consumer project.
+        data (bytes):
+            Optional. The bytes of the blob are not
+            present unless returned by the
+            ReadTensorboardBlobData endpoint.
+    """
+
+    id = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    data = proto.Field(
+        proto.BYTES,
+        number=2,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_experiment.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_experiment.py
new file mode 100644
index 0000000000..498bb15565
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_experiment.py
@@ -0,0 +1,115 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'TensorboardExperiment', + }, +) + + +class TensorboardExperiment(proto.Message): + r"""A TensorboardExperiment is a group of TensorboardRuns, that + are typically the results of a training job run, in a + Tensorboard. + + Attributes: + name (str): + Output only. Name of the TensorboardExperiment. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + display_name (str): + User provided name of this + TensorboardExperiment. + description (str): + Description of this TensorboardExperiment. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + TensorboardExperiment was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + TensorboardExperiment was last updated. + labels (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardExperiment.LabelsEntry]): + The labels with user-defined metadata to organize your + Datasets. + + Label keys and values can be no longer than 64 characters + (Unicode codepoints), can only contain lowercase letters, + numeric characters, underscores and dashes. International + characters are allowed. No more than 64 user labels can be + associated with one Dataset (System labels are excluded). + + See https://goo.gl/xmQnxf for more information and examples + of labels. System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. Following + system labels exist for each Dataset: + + - "aiplatform.googleapis.com/dataset_metadata_schema": + + - output only, its value is the + [metadata_schema's][metadata_schema_uri] title. + etag (str): + Used to perform consistent read-modify-write + updates. If not set, a blind "overwrite" update + happens. + source (str): + Immutable. Source of the + TensorboardExperiment. Example: a custom + training job. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + description = proto.Field( + proto.STRING, + number=3, + ) + create_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=6, + ) + etag = proto.Field( + proto.STRING, + number=7, + ) + source = proto.Field( + proto.STRING, + number=8, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_run.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_run.py new file mode 100644 index 0000000000..566908bba3 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_run.py @@ -0,0 +1,92 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'TensorboardRun', + }, +) + + +class TensorboardRun(proto.Message): + r"""TensorboardRun maps to a specific execution of a training job + with a given set of hyperparameter values, model definition, + dataset, etc + + Attributes: + name (str): + Output only. Name of the TensorboardRun. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + display_name (str): + Required. User provided name of this + TensorboardRun. This value must be unique among + all TensorboardRuns belonging to the same parent + TensorboardExperiment. + description (str): + Description of this TensorboardRun. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + TensorboardRun was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + TensorboardRun was last updated. + labels (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardRun.LabelsEntry]): + + etag (str): + Used to perform a consistent read-modify- + rite updates. If not set, a blind "overwrite" + update happens. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + description = proto.Field( + proto.STRING, + number=3, + ) + create_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=8, + ) + etag = proto.Field( + proto.STRING, + number=9, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py new file mode 100644 index 0000000000..5f9eb0a856 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py @@ -0,0 +1,1048 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import operation +from google.cloud.aiplatform_v1beta1.types import tensorboard as gca_tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard_data +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment as gca_tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series as gca_tensorboard_time_series +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'CreateTensorboardRequest', + 'GetTensorboardRequest', + 'ListTensorboardsRequest', + 'ListTensorboardsResponse', + 'UpdateTensorboardRequest', + 'DeleteTensorboardRequest', + 'CreateTensorboardExperimentRequest', + 'GetTensorboardExperimentRequest', + 'ListTensorboardExperimentsRequest', + 'ListTensorboardExperimentsResponse', + 'UpdateTensorboardExperimentRequest', + 'DeleteTensorboardExperimentRequest', + 'CreateTensorboardRunRequest', + 'GetTensorboardRunRequest', + 'ReadTensorboardBlobDataRequest', + 'ReadTensorboardBlobDataResponse', + 'ListTensorboardRunsRequest', + 'ListTensorboardRunsResponse', + 'UpdateTensorboardRunRequest', + 'DeleteTensorboardRunRequest', + 'CreateTensorboardTimeSeriesRequest', + 'GetTensorboardTimeSeriesRequest', + 'ListTensorboardTimeSeriesRequest', + 'ListTensorboardTimeSeriesResponse', + 'UpdateTensorboardTimeSeriesRequest', + 'DeleteTensorboardTimeSeriesRequest', + 'ReadTensorboardTimeSeriesDataRequest', + 'ReadTensorboardTimeSeriesDataResponse', + 'WriteTensorboardRunDataRequest', + 'WriteTensorboardRunDataResponse', + 'ExportTensorboardTimeSeriesDataRequest', + 'ExportTensorboardTimeSeriesDataResponse', + 'CreateTensorboardOperationMetadata', + 'UpdateTensorboardOperationMetadata', + }, +) + + +class CreateTensorboardRequest(proto.Message): + r"""Request message for + [TensorboardService.CreateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboard]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + Tensorboard in. Format: + ``projects/{project}/locations/{location}`` + tensorboard (google.cloud.aiplatform_v1beta1.types.Tensorboard): + Required. The Tensorboard to create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + tensorboard = proto.Field( + proto.MESSAGE, + number=2, + message=gca_tensorboard.Tensorboard, + ) + + +class GetTensorboardRequest(proto.Message): + r"""Request message for + [TensorboardService.GetTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboard]. + + Attributes: + name (str): + Required. The name of the Tensorboard resource. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListTensorboardsRequest(proto.Message): + r"""Request message for + [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. + + Attributes: + parent (str): + Required. The resource name of the Location + to list Tensorboards. Format: + 'projects/{project}/locations/{location}' + filter (str): + Lists the Tensorboards that match the filter + expression. + page_size (int): + The maximum number of Tensorboards to return. + The service may return fewer than this value. 
If
+ unspecified, at most 100 Tensorboards will be
+ returned. The maximum value is 100; values above
+ 100 will be coerced to 100.
+ page_token (str):
+ A page token, received from a previous
+ [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]
+ call. Provide this to retrieve the subsequent page.
+
+ When paginating, all other parameters provided to
+ [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]
+ must match the call that provided the page token.
+ order_by (str):
+ Field to use to sort the list.
+ read_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Mask specifying which fields to read.
+ """
+
+ parent = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ filter = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ page_size = proto.Field(
+ proto.INT32,
+ number=3,
+ )
+ page_token = proto.Field(
+ proto.STRING,
+ number=4,
+ )
+ order_by = proto.Field(
+ proto.STRING,
+ number=5,
+ )
+ read_mask = proto.Field(
+ proto.MESSAGE,
+ number=6,
+ message=field_mask_pb2.FieldMask,
+ )
+
+
+class ListTensorboardsResponse(proto.Message):
+ r"""Response message for
+ [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards].
+
+ Attributes:
+ tensorboards (Sequence[google.cloud.aiplatform_v1beta1.types.Tensorboard]):
+ The Tensorboards matching the request.
+ next_page_token (str):
+ A token, which can be sent as
+ [ListTensorboardsRequest.page_token][google.cloud.aiplatform.v1beta1.ListTensorboardsRequest.page_token]
+ to retrieve the next page. If this field is omitted, there
+ are no subsequent pages.
+ """
+
+ @property
+ def raw_page(self):
+ return self
+
+ tensorboards = proto.RepeatedField(
+ proto.MESSAGE,
+ number=1,
+ message=gca_tensorboard.Tensorboard,
+ )
+ next_page_token = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+
+
+class UpdateTensorboardRequest(proto.Message):
+ r"""Request message for
+ [TensorboardService.UpdateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboard].
+
+ Attributes:
+ update_mask (google.protobuf.field_mask_pb2.FieldMask):
+ Required. Field mask is used to specify the fields to be
+ overwritten in the Tensorboard resource by the update. The
+ fields specified in the update_mask are relative to the
+ resource, not the full request. A field will be overwritten
+ if it is in the mask. If the user does not provide a mask
+ then all fields will be overwritten if new values are
+ specified.
+ tensorboard (google.cloud.aiplatform_v1beta1.types.Tensorboard):
+ Required. The Tensorboard's ``name`` field is used to
+ identify the Tensorboard to be updated. Format:
+ ``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
+ """
+
+ update_mask = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ message=field_mask_pb2.FieldMask,
+ )
+ tensorboard = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=gca_tensorboard.Tensorboard,
+ )
+
+
+class DeleteTensorboardRequest(proto.Message):
+ r"""Request message for
+ [TensorboardService.DeleteTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboard].
+
+ Attributes:
+ name (str):
+ Required. The name of the Tensorboard to be deleted.
Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateTensorboardExperimentRequest(proto.Message): + r"""Request message for + [TensorboardService.CreateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardExperiment]. + + Attributes: + parent (str): + Required. The resource name of the Tensorboard to create the + TensorboardExperiment in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + tensorboard_experiment (google.cloud.aiplatform_v1beta1.types.TensorboardExperiment): + The TensorboardExperiment to create. + tensorboard_experiment_id (str): + Required. The ID to use for the Tensorboard experiment, + which will become the final component of the Tensorboard + experiment's resource name. + + This value should be 1-128 characters, and valid characters + are /[a-z][0-9]-/. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + tensorboard_experiment = proto.Field( + proto.MESSAGE, + number=2, + message=gca_tensorboard_experiment.TensorboardExperiment, + ) + tensorboard_experiment_id = proto.Field( + proto.STRING, + number=3, + ) + + +class GetTensorboardExperimentRequest(proto.Message): + r"""Request message for + [TensorboardService.GetTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardExperiment]. + + Attributes: + name (str): + Required. The name of the TensorboardExperiment resource. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListTensorboardExperimentsRequest(proto.Message): + r"""Request message for + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]. + + Attributes: + parent (str): + Required. The resource name of the + Tensorboard to list TensorboardExperiments. + Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}' + filter (str): + Lists the TensorboardExperiments that match + the filter expression. + page_size (int): + The maximum number of TensorboardExperiments + to return. The service may return fewer than + this value. If unspecified, at most 50 + TensorboardExperiments will be returned. The + maximum value is 1000; values above 1000 will be + coerced to 1000. + page_token (str): + A page token, received from a previous + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments] + call. Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments] + must match the call that provided the page token. + order_by (str): + Field to use to sort the list. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. 
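The page_size and page_token fields documented above follow the usual List-method pagination contract. A minimal sketch, assuming the generated TensorboardServiceClient from this package and a hypothetical parent path:

    from google.cloud import aiplatform_v1beta1

    # The returned pager resolves page_token continuation automatically;
    # page_size only caps how many results each underlying RPC returns.
    client = aiplatform_v1beta1.TensorboardServiceClient()
    pager = client.list_tensorboard_experiments(
        request=aiplatform_v1beta1.ListTensorboardExperimentsRequest(
            parent="projects/my-project/locations/us-central1/tensorboards/123",  # hypothetical
            page_size=50,
        )
    )
    for experiment in pager:
        print(experiment.display_name)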
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + order_by = proto.Field( + proto.STRING, + number=5, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=6, + message=field_mask_pb2.FieldMask, + ) + + +class ListTensorboardExperimentsResponse(proto.Message): + r"""Response message for + [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]. + + Attributes: + tensorboard_experiments (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardExperiment]): + The TensorboardExperiments mathching the + request. + next_page_token (str): + A token, which can be sent as + [ListTensorboardExperimentsRequest.page_token][google.cloud.aiplatform.v1beta1.ListTensorboardExperimentsRequest.page_token] + to retrieve the next page. If this field is omitted, there + are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + tensorboard_experiments = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_tensorboard_experiment.TensorboardExperiment, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateTensorboardExperimentRequest(proto.Message): + r"""Request message for + [TensorboardService.UpdateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardExperiment]. + + Attributes: + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the TensorboardExperiment resource by the + update. The fields specified in the update_mask are relative + to the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then all fields will be overwritten if new + values are specified. + tensorboard_experiment (google.cloud.aiplatform_v1beta1.types.TensorboardExperiment): + Required. The TensorboardExperiment's ``name`` field is used + to identify the TensorboardExperiment to be updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + """ + + update_mask = proto.Field( + proto.MESSAGE, + number=1, + message=field_mask_pb2.FieldMask, + ) + tensorboard_experiment = proto.Field( + proto.MESSAGE, + number=2, + message=gca_tensorboard_experiment.TensorboardExperiment, + ) + + +class DeleteTensorboardExperimentRequest(proto.Message): + r"""Request message for + [TensorboardService.DeleteTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardExperiment]. + + Attributes: + name (str): + Required. The name of the TensorboardExperiment to be + deleted. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateTensorboardRunRequest(proto.Message): + r"""Request message for + [TensorboardService.CreateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun]. + + Attributes: + parent (str): + Required. The resource name of the Tensorboard to create the + TensorboardRun in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` + tensorboard_run (google.cloud.aiplatform_v1beta1.types.TensorboardRun): + Required. 
The TensorboardRun to create. + tensorboard_run_id (str): + Required. The ID to use for the Tensorboard run, which will + become the final component of the Tensorboard run's resource + name. + + This value should be 1-128 characters, and valid characters + are /[a-z][0-9]-/. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + tensorboard_run = proto.Field( + proto.MESSAGE, + number=2, + message=gca_tensorboard_run.TensorboardRun, + ) + tensorboard_run_id = proto.Field( + proto.STRING, + number=3, + ) + + +class GetTensorboardRunRequest(proto.Message): + r"""Request message for + [TensorboardService.GetTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardRun]. + + Attributes: + name (str): + Required. The name of the TensorboardRun resource. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ReadTensorboardBlobDataRequest(proto.Message): + r"""Request message for + [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData]. + + Attributes: + time_series (str): + Required. The resource name of the TensorboardTimeSeries to + list Blobs. Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}' + blob_ids (Sequence[str]): + IDs of the blobs to read. + """ + + time_series = proto.Field( + proto.STRING, + number=1, + ) + blob_ids = proto.RepeatedField( + proto.STRING, + number=2, + ) + + +class ReadTensorboardBlobDataResponse(proto.Message): + r"""Response message for + [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData]. + + Attributes: + blobs (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardBlob]): + Blob messages containing blob bytes. + """ + + blobs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=tensorboard_data.TensorboardBlob, + ) + + +class ListTensorboardRunsRequest(proto.Message): + r"""Request message for + [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]. + + Attributes: + parent (str): + Required. The resource name of the + Tensorboard to list TensorboardRuns. Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}' + filter (str): + Lists the TensorboardRuns that match the + filter expression. + page_size (int): + The maximum number of TensorboardRuns to + return. The service may return fewer than this + value. If unspecified, at most 50 + TensorboardRuns will be returned. The maximum + value is 1000; values above 1000 will be coerced + to 1000. + page_token (str): + A page token, received from a previous + [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns] + call. Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns] + must match the call that provided the page token. + order_by (str): + Field to use to sort the list. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. 
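The Update*Request messages in this file all pair the resource with a FieldMask: only paths named in update_mask are overwritten. A minimal sketch using the UpdateTensorboardRunRequest defined just below (the resource name and display name are hypothetical):

    from google.cloud import aiplatform_v1beta1
    from google.protobuf import field_mask_pb2

    # Only fields listed in update_mask are overwritten on the server;
    # all other fields of the stored TensorboardRun are left untouched.
    request = aiplatform_v1beta1.UpdateTensorboardRunRequest(
        tensorboard_run=aiplatform_v1beta1.TensorboardRun(
            name=(
                "projects/my-project/locations/us-central1/tensorboards/123"
                "/experiments/my-exp/runs/my-run"
            ),
            display_name="renamed-run",
        ),
        update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
    )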
+ """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + order_by = proto.Field( + proto.STRING, + number=5, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=6, + message=field_mask_pb2.FieldMask, + ) + + +class ListTensorboardRunsResponse(proto.Message): + r"""Response message for + [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]. + + Attributes: + tensorboard_runs (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardRun]): + The TensorboardRuns mathching the request. + next_page_token (str): + A token, which can be sent as + [ListTensorboardRunsRequest.page_token][google.cloud.aiplatform.v1beta1.ListTensorboardRunsRequest.page_token] + to retrieve the next page. If this field is omitted, there + are no subsequent pages. + """ + + @property + def raw_page(self): + return self + + tensorboard_runs = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_tensorboard_run.TensorboardRun, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateTensorboardRunRequest(proto.Message): + r"""Request message for + [TensorboardService.UpdateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardRun]. + + Attributes: + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the TensorboardRun resource by the update. + The fields specified in the update_mask are relative to the + resource, not the full request. A field will be overwritten + if it is in the mask. If the user does not provide a mask + then all fields will be overwritten if new values are + specified. + tensorboard_run (google.cloud.aiplatform_v1beta1.types.TensorboardRun): + Required. The TensorboardRun's ``name`` field is used to + identify the TensorboardRun to be updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + """ + + update_mask = proto.Field( + proto.MESSAGE, + number=1, + message=field_mask_pb2.FieldMask, + ) + tensorboard_run = proto.Field( + proto.MESSAGE, + number=2, + message=gca_tensorboard_run.TensorboardRun, + ) + + +class DeleteTensorboardRunRequest(proto.Message): + r"""Request message for + [TensorboardService.DeleteTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardRun]. + + Attributes: + name (str): + Required. The name of the TensorboardRun to be deleted. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateTensorboardTimeSeriesRequest(proto.Message): + r"""Request message for + [TensorboardService.CreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardTimeSeries]. + + Attributes: + parent (str): + Required. The resource name of the TensorboardRun to create + the TensorboardTimeSeries in. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + tensorboard_time_series_id (str): + Optional. The user specified unique ID to use for the + TensorboardTimeSeries, which will become the final component + of the TensorboardTimeSeries's resource name. 
Ref: + go/ucaip-user-specified-id + + This value should match "[a-z0-9][a-z0-9-]{0, 127}". + tensorboard_time_series (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries): + Required. The TensorboardTimeSeries to + create. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + tensorboard_time_series_id = proto.Field( + proto.STRING, + number=3, + ) + tensorboard_time_series = proto.Field( + proto.MESSAGE, + number=2, + message=gca_tensorboard_time_series.TensorboardTimeSeries, + ) + + +class GetTensorboardTimeSeriesRequest(proto.Message): + r"""Request message for + [TensorboardService.GetTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardTimeSeries]. + + Attributes: + name (str): + Required. The name of the TensorboardTimeSeries resource. + Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListTensorboardTimeSeriesRequest(proto.Message): + r"""Request message for + [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries]. + + Attributes: + parent (str): + Required. The resource name of the + TensorboardRun to list TensorboardTimeSeries. + Format: + 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}' + filter (str): + Lists the TensorboardTimeSeries that match + the filter expression. + page_size (int): + The maximum number of TensorboardTimeSeries + to return. The service may return fewer than + this value. If unspecified, at most 50 + TensorboardTimeSeries will be returned. The + maximum value is 1000; values above 1000 will be + coerced to 1000. + page_token (str): + A page token, received from a previous + [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries] + call. Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries] + must match the call that provided the page token. + order_by (str): + Field to use to sort the list. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Mask specifying which fields to read. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + order_by = proto.Field( + proto.STRING, + number=5, + ) + read_mask = proto.Field( + proto.MESSAGE, + number=6, + message=field_mask_pb2.FieldMask, + ) + + +class ListTensorboardTimeSeriesResponse(proto.Message): + r"""Response message for + [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries]. + + Attributes: + tensorboard_time_series (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries]): + The TensorboardTimeSeries mathching the + request. + next_page_token (str): + A token, which can be sent as + [ListTensorboardTimeSeriesRequest.page_token][google.cloud.aiplatform.v1beta1.ListTensorboardTimeSeriesRequest.page_token] + to retrieve the next page. If this field is omitted, there + are no subsequent pages. 
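The user-specified resource IDs above are validated server-side against the documented pattern; a cheap client-side pre-check avoids a round trip. A sketch in plain Python (the docstring's "{0, 127}" quantifier is written here without the space, since re would otherwise treat it as literal text):

    import re

    # Documented pattern for user-specified Tensorboard resource IDs:
    # one lowercase letter or digit, then up to 127 of [a-z0-9-].
    _ID_PATTERN = re.compile(r"[a-z0-9][a-z0-9-]{0,127}")

    def is_valid_resource_id(resource_id: str) -> bool:
        """Client-side pre-check before sending a Create*Request."""
        return _ID_PATTERN.fullmatch(resource_id) is not None

    assert is_valid_resource_id("my-time-series-01")
    assert not is_valid_resource_id("-leading-dash")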
+ """ + + @property + def raw_page(self): + return self + + tensorboard_time_series = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_tensorboard_time_series.TensorboardTimeSeries, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateTensorboardTimeSeriesRequest(proto.Message): + r"""Request message for + [TensorboardService.UpdateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardTimeSeries]. + + Attributes: + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Field mask is used to specify the fields to be + overwritten in the TensorboardTimeSeries resource by the + update. The fields specified in the update_mask are relative + to the resource, not the full request. A field will be + overwritten if it is in the mask. If the user does not + provide a mask then all fields will be overwritten if new + values are specified. + tensorboard_time_series (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries): + Required. The TensorboardTimeSeries' ``name`` field is used + to identify the TensorboardTimeSeries to be updated. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + """ + + update_mask = proto.Field( + proto.MESSAGE, + number=1, + message=field_mask_pb2.FieldMask, + ) + tensorboard_time_series = proto.Field( + proto.MESSAGE, + number=2, + message=gca_tensorboard_time_series.TensorboardTimeSeries, + ) + + +class DeleteTensorboardTimeSeriesRequest(proto.Message): + r"""Request message for + [TensorboardService.DeleteTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardTimeSeries]. + + Attributes: + name (str): + Required. The name of the TensorboardTimeSeries to be + deleted. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class ReadTensorboardTimeSeriesDataRequest(proto.Message): + r"""Request message for + [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. + + Attributes: + tensorboard_time_series (str): + Required. The resource name of the TensorboardTimeSeries to + read data from. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + max_data_points (int): + The maximum number of TensorboardTimeSeries' + data to return. + This value should be a positive integer. + This value can be set to -1 to return all data. + filter (str): + Reads the TensorboardTimeSeries' data that + match the filter expression. + """ + + tensorboard_time_series = proto.Field( + proto.STRING, + number=1, + ) + max_data_points = proto.Field( + proto.INT32, + number=2, + ) + filter = proto.Field( + proto.STRING, + number=3, + ) + + +class ReadTensorboardTimeSeriesDataResponse(proto.Message): + r"""Response message for + [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. + + Attributes: + time_series_data (google.cloud.aiplatform_v1beta1.types.TimeSeriesData): + The returned time series data. 
+ """ + + time_series_data = proto.Field( + proto.MESSAGE, + number=1, + message=tensorboard_data.TimeSeriesData, + ) + + +class WriteTensorboardRunDataRequest(proto.Message): + r"""Request message for + [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData]. + + Attributes: + tensorboard_run (str): + Required. The resource name of the TensorboardRun to write + data to. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` + time_series_data (Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesData]): + Required. The TensorboardTimeSeries data to + write. Values with in a time series are indexed + by their step value. Repeated writes to the same + step will overwrite the existing value for that + step. + The upper limit of data points per write request + is 5000. + """ + + tensorboard_run = proto.Field( + proto.STRING, + number=1, + ) + time_series_data = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=tensorboard_data.TimeSeriesData, + ) + + +class WriteTensorboardRunDataResponse(proto.Message): + r"""Response message for + [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData]. + """ + + +class ExportTensorboardTimeSeriesDataRequest(proto.Message): + r"""Request message for + [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. + + Attributes: + tensorboard_time_series (str): + Required. The resource name of the TensorboardTimeSeries to + export data from. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` + filter (str): + Exports the TensorboardTimeSeries' data that + match the filter expression. + page_size (int): + The maximum number of data points to return per page. The + default page_size will be 1000. Values must be between 1 and + 10000. Values above 10000 will be coerced to 10000. + page_token (str): + A page token, received from a previous + [TensorboardService.ExportTensorboardTimeSeries][] call. + Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + [TensorboardService.ExportTensorboardTimeSeries][] must + match the call that provided the page token. + order_by (str): + Field to use to sort the + TensorboardTimeSeries' data. By default, + TensorboardTimeSeries' data will be returned in + a pseudo random order. + """ + + tensorboard_time_series = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + order_by = proto.Field( + proto.STRING, + number=5, + ) + + +class ExportTensorboardTimeSeriesDataResponse(proto.Message): + r"""Response message for + [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. + + Attributes: + time_series_data_points (Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesDataPoint]): + The returned time series data points. + next_page_token (str): + A token, which can be sent as + [ExportTensorboardTimeSeriesRequest.page_token][] to + retrieve the next page. If this field is omitted, there are + no subsequent pages. 
+ """ + + @property + def raw_page(self): + return self + + time_series_data_points = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=tensorboard_data.TimeSeriesDataPoint, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class CreateTensorboardOperationMetadata(proto.Message): + r"""Details of operations that perform create Tensorboard. + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for Tensorboard. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class UpdateTensorboardOperationMetadata(proto.Message): + r"""Details of operations that perform update Tensorboard. + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for Tensorboard. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_time_series.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_time_series.py new file mode 100644 index 0000000000..298c631fb4 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_time_series.py @@ -0,0 +1,152 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'TensorboardTimeSeries', + }, +) + + +class TensorboardTimeSeries(proto.Message): + r"""TensorboardTimeSeries maps to times series produced in + training runs + + Attributes: + name (str): + Output only. Name of the + TensorboardTimeSeries. + display_name (str): + Required. User provided name of this + TensorboardTimeSeries. This value should be + unique among all TensorboardTimeSeries resources + belonging to the same TensorboardRun resource + (parent resource). + description (str): + Description of this TensorboardTimeSeries. + value_type (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries.ValueType): + Required. Immutable. Type of + TensorboardTimeSeries value. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + TensorboardTimeSeries was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + TensorboardTimeSeries was last updated. + etag (str): + Used to perform a consistent read-modify- + rite updates. If not set, a blind "overwrite" + update happens. + plugin_name (str): + Immutable. Name of the plugin this time + series pertain to. Such as Scalar, Tensor, Blob + plugin_data (bytes): + Data of the current plugin, with the size + limited to 65KB. 
+ metadata (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries.Metadata): + Output only. Scalar, Tensor, or Blob metadata + for this TensorboardTimeSeries. + """ + class ValueType(proto.Enum): + r"""An enum representing the value type of a + TensorboardTimeSeries. + """ + VALUE_TYPE_UNSPECIFIED = 0 + SCALAR = 1 + TENSOR = 2 + BLOB_SEQUENCE = 3 + + class Metadata(proto.Message): + r"""Describes metadata for a TensorboardTimeSeries. + Attributes: + max_step (int): + Output only. Max step index of all data + points within a TensorboardTimeSeries. + max_wall_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Max wall clock timestamp of all + data points within a TensorboardTimeSeries. + max_blob_sequence_length (int): + Output only. The largest blob sequence length (number of + blobs) of all data points in this time series, if its + ValueType is BLOB_SEQUENCE. + """ + + max_step = proto.Field( + proto.INT64, + number=1, + ) + max_wall_time = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + max_blob_sequence_length = proto.Field( + proto.INT64, + number=3, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + description = proto.Field( + proto.STRING, + number=3, + ) + value_type = proto.Field( + proto.ENUM, + number=4, + enum=ValueType, + ) + create_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + etag = proto.Field( + proto.STRING, + number=7, + ) + plugin_name = proto.Field( + proto.STRING, + number=8, + ) + plugin_data = proto.Field( + proto.BYTES, + number=9, + ) + metadata = proto.Field( + proto.MESSAGE, + number=10, + message=Metadata, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/training_pipeline.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/training_pipeline.py new file mode 100644 index 0000000000..7f53b63004 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/training_pipeline.py @@ -0,0 +1,548 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import proto # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec
+from google.cloud.aiplatform_v1beta1.types import io
+from google.cloud.aiplatform_v1beta1.types import model
+from google.cloud.aiplatform_v1beta1.types import pipeline_state
+from google.protobuf import struct_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
+from google.rpc import status_pb2 # type: ignore
+
+
+__protobuf__ = proto.module(
+ package='google.cloud.aiplatform.v1beta1',
+ manifest={
+ 'TrainingPipeline',
+ 'InputDataConfig',
+ 'FractionSplit',
+ 'FilterSplit',
+ 'PredefinedSplit',
+ 'TimestampSplit',
+ },
+)
+
+
+class TrainingPipeline(proto.Message):
+ r"""The TrainingPipeline orchestrates tasks associated with training a
+ Model. It always executes the training task, and optionally may also
+ export data from Vertex AI's Dataset which becomes the training
+ input,
+ [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel]
+ the Model to Vertex AI, and evaluate the Model.
+
+ Attributes:
+ name (str):
+ Output only. Resource name of the
+ TrainingPipeline.
+ display_name (str):
+ Required. The user-defined name of this
+ TrainingPipeline.
+ input_data_config (google.cloud.aiplatform_v1beta1.types.InputDataConfig):
+ Specifies Vertex AI owned input data that may be used for
+ training the Model. The TrainingPipeline's
+ [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]
+ should make clear whether this config is used and if there
+ are any special requirements on how it should be filled. If
+ nothing about this config is mentioned in the
+ [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition],
+ then it should be assumed that the TrainingPipeline does not
+ depend on this configuration.
+ training_task_definition (str):
+ Required. A Google Cloud Storage path to the
+ YAML file that defines the training task which
+ is responsible for producing the model artifact,
+ and may also include additional auxiliary work.
+ The definition files that can be used here are
+ found in
+ gs://google-cloud-aiplatform/schema/trainingjob/definition/.
+ Note: The URI given on output will be immutable
+ and probably different, including the URI
+ scheme, than the one given on input. The output
+ URI will point to a location where the user only
+ has read access.
+ training_task_inputs (google.protobuf.struct_pb2.Value):
+ Required. The training task's parameter(s), as specified in
+ the
+ [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]'s
+ ``inputs``.
+ training_task_metadata (google.protobuf.struct_pb2.Value):
+ Output only. The metadata information as specified in the
+ [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]'s
+ ``metadata``. This metadata is an auxiliary runtime and
+ final information about the training task. While the
+ pipeline is running this information is populated only at a
+ best effort basis. Only present if the pipeline's
+ [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]
+ contains a ``metadata`` object.
+ model_to_upload (google.cloud.aiplatform_v1beta1.types.Model):
+ Describes the Model that may be uploaded (via
+ [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel])
+ by this TrainingPipeline. The TrainingPipeline's
+ [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]
+ should make clear whether this Model description should be
+ populated, and if there are any special requirements
+ regarding how it should be filled. If nothing is mentioned
+ in the
+ [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition],
+ then it should be assumed that this field should not be
+ filled and the training task either uploads the Model
+ without needing this information, or the training task
+ does not support uploading a Model as part of the pipeline.
+ When the Pipeline's state becomes
+ ``PIPELINE_STATE_SUCCEEDED`` and the trained Model has been
+ uploaded into Vertex AI, then the model_to_upload's resource
+ [name][google.cloud.aiplatform.v1beta1.Model.name] is
+ populated. The Model is always uploaded into the Project and
+ Location in which this pipeline runs.
+ state (google.cloud.aiplatform_v1beta1.types.PipelineState):
+ Output only. The detailed state of the
+ pipeline.
+ error (google.rpc.status_pb2.Status):
+ Output only. Only populated when the pipeline's state is
+ ``PIPELINE_STATE_FAILED`` or ``PIPELINE_STATE_CANCELLED``.
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Time when the TrainingPipeline
+ was created.
+ start_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Time when the TrainingPipeline for the first
+ time entered the ``PIPELINE_STATE_RUNNING`` state.
+ end_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Time when the TrainingPipeline entered any of
+ the following states: ``PIPELINE_STATE_SUCCEEDED``,
+ ``PIPELINE_STATE_FAILED``, ``PIPELINE_STATE_CANCELLED``.
+ update_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. Time when the TrainingPipeline
+ was most recently updated.
+ labels (Sequence[google.cloud.aiplatform_v1beta1.types.TrainingPipeline.LabelsEntry]):
+ The labels with user-defined metadata to
+ organize TrainingPipelines.
+ Label keys and values can be no longer than 64
+ characters (Unicode codepoints), can only
+ contain lowercase letters, numeric characters,
+ underscores and dashes. International characters
+ are allowed.
+ See https://goo.gl/xmQnxf for more information
+ and examples of labels.
+ encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec):
+ Customer-managed encryption key spec for a TrainingPipeline.
+ If set, this TrainingPipeline will be secured by this key.
+
+ Note: the Model trained by this TrainingPipeline is also
+ secured by this key if
+ [model_to_upload][google.cloud.aiplatform.v1beta1.TrainingPipeline.encryption_spec]
+ is not set separately.
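Putting the fields above together, a minimal sketch of constructing this message with the split types defined further down in this file (the display name, dataset ID, and task-definition URI are hypothetical; per the FractionSplit docs below, the fractions must sum to at most 1):

    from google.cloud import aiplatform_v1beta1
    from google.protobuf import struct_pb2

    # A hypothetical pipeline; real training_task_definition schemas live
    # under gs://google-cloud-aiplatform/schema/trainingjob/definition/.
    pipeline = aiplatform_v1beta1.TrainingPipeline(
        display_name="my-training-pipeline",
        training_task_definition=(
            "gs://google-cloud-aiplatform/schema/trainingjob/definition/"
            "automl_tabular_1.0.0.yaml"
        ),
        training_task_inputs=struct_pb2.Value(
            struct_value=struct_pb2.Struct()  # task-specific inputs go here
        ),
        input_data_config=aiplatform_v1beta1.InputDataConfig(
            dataset_id="1234567890",  # hypothetical Dataset ID
            fraction_split=aiplatform_v1beta1.FractionSplit(
                training_fraction=0.8,
                validation_fraction=0.1,
                test_fraction=0.1,
            ),
        ),
    )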
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + input_data_config = proto.Field( + proto.MESSAGE, + number=3, + message='InputDataConfig', + ) + training_task_definition = proto.Field( + proto.STRING, + number=4, + ) + training_task_inputs = proto.Field( + proto.MESSAGE, + number=5, + message=struct_pb2.Value, + ) + training_task_metadata = proto.Field( + proto.MESSAGE, + number=6, + message=struct_pb2.Value, + ) + model_to_upload = proto.Field( + proto.MESSAGE, + number=7, + message=model.Model, + ) + state = proto.Field( + proto.ENUM, + number=9, + enum=pipeline_state.PipelineState, + ) + error = proto.Field( + proto.MESSAGE, + number=10, + message=status_pb2.Status, + ) + create_time = proto.Field( + proto.MESSAGE, + number=11, + message=timestamp_pb2.Timestamp, + ) + start_time = proto.Field( + proto.MESSAGE, + number=12, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=13, + message=timestamp_pb2.Timestamp, + ) + update_time = proto.Field( + proto.MESSAGE, + number=14, + message=timestamp_pb2.Timestamp, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=15, + ) + encryption_spec = proto.Field( + proto.MESSAGE, + number=18, + message=gca_encryption_spec.EncryptionSpec, + ) + + +class InputDataConfig(proto.Message): + r"""Specifies Vertex AI owned input data to be used for training, + and possibly evaluating, the Model. + + Attributes: + fraction_split (google.cloud.aiplatform_v1beta1.types.FractionSplit): + Split based on fractions defining the size of + each set. + filter_split (google.cloud.aiplatform_v1beta1.types.FilterSplit): + Split based on the provided filters for each + set. + predefined_split (google.cloud.aiplatform_v1beta1.types.PredefinedSplit): + Supported only for tabular Datasets. + Split based on a predefined key. + timestamp_split (google.cloud.aiplatform_v1beta1.types.TimestampSplit): + Supported only for tabular Datasets. + Split based on the timestamp of the input data + pieces. + gcs_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination): + The Cloud Storage location where the training data is to be + written to. In the given directory a new directory is + created with name: + ``dataset---`` + where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 + format. All training input data is written into that + directory. + + The Vertex AI environment variables representing Cloud + Storage data URIs are represented in the Cloud Storage + wildcard format to support sharded data. e.g.: + "gs://.../training-*.jsonl" + + - AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for + tabular data + + - AIP_TRAINING_DATA_URI = + "gcs_destination/dataset---/training-*.${AIP_DATA_FORMAT}" + + - AIP_VALIDATION_DATA_URI = + "gcs_destination/dataset---/validation-*.${AIP_DATA_FORMAT}" + + - AIP_TEST_DATA_URI = + "gcs_destination/dataset---/test-*.${AIP_DATA_FORMAT}". + bigquery_destination (google.cloud.aiplatform_v1beta1.types.BigQueryDestination): + Only applicable to custom training with tabular Dataset with + BigQuery source. + + The BigQuery project location where the training data is to + be written to. In the given project a new dataset is created + with name + ``dataset___`` + where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All + training input data is written into that dataset. In the + dataset three tables are created, ``training``, + ``validation`` and ``test``. + + - AIP_DATA_FORMAT = "bigquery". 
+
+ - AIP_TRAINING_DATA_URI =
+ "bigquery_destination.dataset\_\ **\ .training"
+
+ - AIP_VALIDATION_DATA_URI =
+ "bigquery_destination.dataset\_\ **\ .validation"
+
+ - AIP_TEST_DATA_URI =
+ "bigquery_destination.dataset\_\ **\ .test".
+ dataset_id (str):
+ Required. The ID of the Dataset in the same Project and
+ Location whose data will be used to train the Model. The
+ Dataset must use a schema compatible with the Model being
+ trained, and what is compatible should be described in the
+ used TrainingPipeline's [training_task_definition]
+ [google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition].
+ For tabular Datasets, all their data is exported to
+ training, to pick and choose from.
+ annotations_filter (str):
+ Applicable only to Datasets that have DataItems and
+ Annotations.
+
+ A filter on Annotations of the Dataset. Only Annotations
+ that both match this filter and belong to DataItems not
+ ignored by the split method are used in the training,
+ validation, or test role, respectively, depending on the
+ role of the DataItem they are on (for auto-assigned
+ DataItems that role is decided by Vertex AI). A filter with
+ same syntax as the one used in
+ [ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]
+ may be used, but note here it filters across all Annotations
+ of the Dataset, and not just within a single DataItem.
+ annotation_schema_uri (str):
+ Applicable only to custom training with Datasets that have
+ DataItems and Annotations.
+
+ Cloud Storage URI that points to a YAML file describing the
+ annotation schema. The schema is defined as an OpenAPI 3.0.2
+ `Schema
+ Object `__.
+ The schema files that can be used here are found in
+ gs://google-cloud-aiplatform/schema/dataset/annotation/,
+ note that the chosen schema must be consistent with
+ [metadata][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri]
+ of the Dataset specified by
+ [dataset_id][google.cloud.aiplatform.v1beta1.InputDataConfig.dataset_id].
+
+ Only Annotations that both match this schema and belong to
+ DataItems not ignored by the split method are used in the
+ training, validation, or test role, respectively, depending
+ on the role of the DataItem they are on.
+
+ When used in conjunction with
+ [annotations_filter][google.cloud.aiplatform.v1beta1.InputDataConfig.annotations_filter],
+ the Annotations used for training are filtered by both
+ [annotations_filter][google.cloud.aiplatform.v1beta1.InputDataConfig.annotations_filter]
+ and
+ [annotation_schema_uri][google.cloud.aiplatform.v1beta1.InputDataConfig.annotation_schema_uri].
+ """ + + fraction_split = proto.Field( + proto.MESSAGE, + number=2, + oneof='split', + message='FractionSplit', + ) + filter_split = proto.Field( + proto.MESSAGE, + number=3, + oneof='split', + message='FilterSplit', + ) + predefined_split = proto.Field( + proto.MESSAGE, + number=4, + oneof='split', + message='PredefinedSplit', + ) + timestamp_split = proto.Field( + proto.MESSAGE, + number=5, + oneof='split', + message='TimestampSplit', + ) + gcs_destination = proto.Field( + proto.MESSAGE, + number=8, + oneof='destination', + message=io.GcsDestination, + ) + bigquery_destination = proto.Field( + proto.MESSAGE, + number=10, + oneof='destination', + message=io.BigQueryDestination, + ) + dataset_id = proto.Field( + proto.STRING, + number=1, + ) + annotations_filter = proto.Field( + proto.STRING, + number=6, + ) + annotation_schema_uri = proto.Field( + proto.STRING, + number=9, + ) + + +class FractionSplit(proto.Message): + r"""Assigns the input data to training, validation, and test sets as per + the given fractions. Any of ``training_fraction``, + ``validation_fraction`` and ``test_fraction`` may optionally be + provided, they must sum to up to 1. If the provided ones sum to less + than 1, the remainder is assigned to sets as decided by Vertex AI. + If none of the fractions are set, by default roughly 80% of data is + used for training, 10% for validation, and 10% for test. + + Attributes: + training_fraction (float): + The fraction of the input data that is to be + used to train the Model. + validation_fraction (float): + The fraction of the input data that is to be + used to validate the Model. + test_fraction (float): + The fraction of the input data that is to be + used to evaluate the Model. + """ + + training_fraction = proto.Field( + proto.DOUBLE, + number=1, + ) + validation_fraction = proto.Field( + proto.DOUBLE, + number=2, + ) + test_fraction = proto.Field( + proto.DOUBLE, + number=3, + ) + + +class FilterSplit(proto.Message): + r"""Assigns input data to training, validation, and test sets + based on the given filters, data pieces not matched by any + filter are ignored. Currently only supported for Datasets + containing DataItems. + If any of the filters in this message are to match nothing, then + they can be set as '-' (the minus sign). + + Supported only for unstructured Datasets. + + Attributes: + training_filter (str): + Required. A filter on DataItems of the Dataset. DataItems + that match this filter are used to train the Model. A filter + with same syntax as the one used in + [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems] + may be used. If a single DataItem is matched by more than + one of the FilterSplit filters, then it is assigned to the + first set that applies to it in the training, validation, + test order. + validation_filter (str): + Required. A filter on DataItems of the Dataset. DataItems + that match this filter are used to validate the Model. A + filter with same syntax as the one used in + [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems] + may be used. If a single DataItem is matched by more than + one of the FilterSplit filters, then it is assigned to the + first set that applies to it in the training, validation, + test order. + test_filter (str): + Required. A filter on DataItems of the Dataset. DataItems + that match this filter are used to test the Model. 
A filter + with same syntax as the one used in + [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems] + may be used. If a single DataItem is matched by more than + one of the FilterSplit filters, then it is assigned to the + first set that applies to it in the training, validation, + test order. + """ + + training_filter = proto.Field( + proto.STRING, + number=1, + ) + validation_filter = proto.Field( + proto.STRING, + number=2, + ) + test_filter = proto.Field( + proto.STRING, + number=3, + ) + + +class PredefinedSplit(proto.Message): + r"""Assigns input data to training, validation, and test sets + based on the value of a provided key. + + Supported only for tabular Datasets. + + Attributes: + key (str): + Required. The key is a name of one of the Dataset's data + columns. The value of the key (either the label's value or + value in the column) must be one of {``training``, + ``validation``, ``test``}, and it defines to which set the + given piece of data is assigned. If for a piece of data the + key is not present or has an invalid value, that piece is + ignored by the pipeline. + """ + + key = proto.Field( + proto.STRING, + number=1, + ) + + +class TimestampSplit(proto.Message): + r"""Assigns input data to training, validation, and test sets + based on a provided timestamps. The youngest data pieces are + assigned to training set, next to validation set, and the oldest + to the test set. + Supported only for tabular Datasets. + + Attributes: + training_fraction (float): + The fraction of the input data that is to be + used to train the Model. + validation_fraction (float): + The fraction of the input data that is to be + used to validate the Model. + test_fraction (float): + The fraction of the input data that is to be + used to evaluate the Model. + key (str): + Required. The key is a name of one of the Dataset's data + columns. The values of the key (the values in the column) + must be in RFC 3339 ``date-time`` format, where + ``time-offset`` = ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z). If + for a piece of data the key is not present or has an invalid + value, that piece is ignored by the pipeline. + """ + + training_fraction = proto.Field( + proto.DOUBLE, + number=1, + ) + validation_fraction = proto.Field( + proto.DOUBLE, + number=2, + ) + test_fraction = proto.Field( + proto.DOUBLE, + number=3, + ) + key = proto.Field( + proto.STRING, + number=4, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/types.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/types.py new file mode 100644 index 0000000000..45df0b2e21 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/types.py @@ -0,0 +1,82 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1',
+    manifest={
+        'BoolArray',
+        'DoubleArray',
+        'Int64Array',
+        'StringArray',
+    },
+)
+
+
+class BoolArray(proto.Message):
+    r"""A list of boolean values.
+
+    Attributes:
+        values (Sequence[bool]):
+            A list of bool values.
+    """
+
+    values = proto.RepeatedField(
+        proto.BOOL,
+        number=1,
+    )
+
+
+class DoubleArray(proto.Message):
+    r"""A list of double values.
+
+    Attributes:
+        values (Sequence[float]):
+            A list of double values.
+    """
+
+    values = proto.RepeatedField(
+        proto.DOUBLE,
+        number=1,
+    )
+
+
+class Int64Array(proto.Message):
+    r"""A list of int64 values.
+
+    Attributes:
+        values (Sequence[int]):
+            A list of int64 values.
+    """
+
+    values = proto.RepeatedField(
+        proto.INT64,
+        number=1,
+    )
+
+
+class StringArray(proto.Message):
+    r"""A list of string values.
+
+    Attributes:
+        values (Sequence[str]):
+            A list of string values.
+    """
+
+    values = proto.RepeatedField(
+        proto.STRING,
+        number=1,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/user_action_reference.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/user_action_reference.py
new file mode 100644
index 0000000000..804f7e883d
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/user_action_reference.py
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1',
+    manifest={
+        'UserActionReference',
+    },
+)
+
+
+class UserActionReference(proto.Message):
+    r"""References an API call. It contains more information about a
+    long-running operation and the Jobs that are triggered by the
+    API call.
+
+    Attributes:
+        operation (str):
+            For API calls that return a long running
+            operation. Resource name of the long running
+            operation. Format:
+            'projects/{project}/locations/{location}/operations/{operation}'
+        data_labeling_job (str):
+            For API calls that start a LabelingJob. Resource name of the
+            LabelingJob. Format:
+            'projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}'
+        method (str):
+            The method name of the API RPC call. For
+            example,
+            "/google.cloud.aiplatform.{apiVersion}.DatasetService.CreateDataset".
+ """ + + operation = proto.Field( + proto.STRING, + number=1, + oneof='reference', + ) + data_labeling_job = proto.Field( + proto.STRING, + number=2, + oneof='reference', + ) + method = proto.Field( + proto.STRING, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/value.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/value.py new file mode 100644 index 0000000000..789f7c0840 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/value.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1', + manifest={ + 'Value', + }, +) + + +class Value(proto.Message): + r"""Value is the value of the field. + Attributes: + int_value (int): + An integer value. + double_value (float): + A double value. + string_value (str): + A string value. + """ + + int_value = proto.Field( + proto.INT64, + number=1, + oneof='value', + ) + double_value = proto.Field( + proto.DOUBLE, + number=2, + oneof='value', + ) + string_value = proto.Field( + proto.STRING, + number=3, + oneof='value', + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/vizier_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/vizier_service.py new file mode 100644 index 0000000000..883b908d5e --- /dev/null +++ b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/vizier_service.py @@ -0,0 +1,588 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
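As a rough usage sketch (an editor's illustration; the client class and resource names are assumptions based on the generated v1beta1 surface, not part of this change), the request messages defined in the module below would typically be driven through ``VizierServiceClient``::

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.VizierServiceClient()

    # SuggestTrials returns a long-running operation whose result is a
    # SuggestTrialsResponse containing the suggested Trials.
    operation = client.suggest_trials(
        request=aiplatform_v1beta1.SuggestTrialsRequest(
            parent="projects/my-project/locations/us-central1/studies/my-study",
            suggestion_count=1,
            client_id="worker-0",
        )
    )
    for trial in operation.result().trials:
        print(trial.name)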
+#
+import proto  # type: ignore
+
+from google.cloud.aiplatform_v1beta1.types import operation
+from google.cloud.aiplatform_v1beta1.types import study as gca_study
+from google.protobuf import timestamp_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1',
+    manifest={
+        'GetStudyRequest',
+        'CreateStudyRequest',
+        'ListStudiesRequest',
+        'ListStudiesResponse',
+        'DeleteStudyRequest',
+        'LookupStudyRequest',
+        'SuggestTrialsRequest',
+        'SuggestTrialsResponse',
+        'SuggestTrialsMetadata',
+        'CreateTrialRequest',
+        'GetTrialRequest',
+        'ListTrialsRequest',
+        'ListTrialsResponse',
+        'AddTrialMeasurementRequest',
+        'CompleteTrialRequest',
+        'DeleteTrialRequest',
+        'CheckTrialEarlyStoppingStateRequest',
+        'CheckTrialEarlyStoppingStateResponse',
+        'CheckTrialEarlyStoppingStateMetatdata',
+        'StopTrialRequest',
+        'ListOptimalTrialsRequest',
+        'ListOptimalTrialsResponse',
+    },
+)
+
+
+class GetStudyRequest(proto.Message):
+    r"""Request message for
+    [VizierService.GetStudy][google.cloud.aiplatform.v1beta1.VizierService.GetStudy].
+
+    Attributes:
+        name (str):
+            Required. The name of the Study resource. Format:
+            ``projects/{project}/locations/{location}/studies/{study}``
+    """
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class CreateStudyRequest(proto.Message):
+    r"""Request message for
+    [VizierService.CreateStudy][google.cloud.aiplatform.v1beta1.VizierService.CreateStudy].
+
+    Attributes:
+        parent (str):
+            Required. The resource name of the Location to create the
+            Study in. Format:
+            ``projects/{project}/locations/{location}``
+        study (google.cloud.aiplatform_v1beta1.types.Study):
+            Required. The Study configuration used to
+            create the Study.
+    """
+
+    parent = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    study = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message=gca_study.Study,
+    )
+
+
+class ListStudiesRequest(proto.Message):
+    r"""Request message for
+    [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies].
+
+    Attributes:
+        parent (str):
+            Required. The resource name of the Location to list the
+            Studies from. Format:
+            ``projects/{project}/locations/{location}``
+        page_token (str):
+            Optional. A page token to request the next
+            page of results. If unspecified, there are no
+            subsequent pages.
+        page_size (int):
+            Optional. The maximum number of studies to
+            return per "page" of results. If unspecified,
+            the service will pick an appropriate default.
+    """
+
+    parent = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    page_token = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    page_size = proto.Field(
+        proto.INT32,
+        number=3,
+    )
+
+
+class ListStudiesResponse(proto.Message):
+    r"""Response message for
+    [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies].
+
+    Attributes:
+        studies (Sequence[google.cloud.aiplatform_v1beta1.types.Study]):
+            The studies associated with the project.
+        next_page_token (str):
+            Pass this token as the ``page_token`` field of the request
+            for a subsequent call. If this field is omitted, there are
+            no subsequent pages.
+    """
+
+    @property
+    def raw_page(self):
+        return self
+
+    studies = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message=gca_study.Study,
+    )
+    next_page_token = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+class DeleteStudyRequest(proto.Message):
+    r"""Request message for
+    [VizierService.DeleteStudy][google.cloud.aiplatform.v1beta1.VizierService.DeleteStudy].
+
+    Attributes:
+        name (str):
+            Required.
The name of the Study resource to be deleted. + Format: + ``projects/{project}/locations/{location}/studies/{study}`` + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class LookupStudyRequest(proto.Message): + r"""Request message for + [VizierService.LookupStudy][google.cloud.aiplatform.v1beta1.VizierService.LookupStudy]. + + Attributes: + parent (str): + Required. The resource name of the Location to get the Study + from. Format: ``projects/{project}/locations/{location}`` + display_name (str): + Required. The user-defined display name of + the Study + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + + +class SuggestTrialsRequest(proto.Message): + r"""Request message for + [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials]. + + Attributes: + parent (str): + Required. The project and location that the Study belongs + to. Format: + ``projects/{project}/locations/{location}/studies/{study}`` + suggestion_count (int): + Required. The number of suggestions + requested. + client_id (str): + Required. The identifier of the client that is requesting + the suggestion. + + If multiple SuggestTrialsRequests have the same + ``client_id``, the service will return the identical + suggested Trial if the Trial is pending, and provide a new + Trial if the last suggested Trial was completed. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + suggestion_count = proto.Field( + proto.INT32, + number=2, + ) + client_id = proto.Field( + proto.STRING, + number=3, + ) + + +class SuggestTrialsResponse(proto.Message): + r"""Response message for + [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials]. + + Attributes: + trials (Sequence[google.cloud.aiplatform_v1beta1.types.Trial]): + A list of Trials. + study_state (google.cloud.aiplatform_v1beta1.types.Study.State): + The state of the Study. + start_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the operation was started. + end_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which operation processing + completed. + """ + + trials = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_study.Trial, + ) + study_state = proto.Field( + proto.ENUM, + number=2, + enum=gca_study.Study.State, + ) + start_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + + +class SuggestTrialsMetadata(proto.Message): + r"""Details of operations that perform Trials suggestion. + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for suggesting Trials. + client_id (str): + The identifier of the client that is requesting the + suggestion. + + If multiple SuggestTrialsRequests have the same + ``client_id``, the service will return the identical + suggested Trial if the Trial is pending, and provide a new + Trial if the last suggested Trial was completed. + """ + + generic_metadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + client_id = proto.Field( + proto.STRING, + number=2, + ) + + +class CreateTrialRequest(proto.Message): + r"""Request message for + [VizierService.CreateTrial][google.cloud.aiplatform.v1beta1.VizierService.CreateTrial]. + + Attributes: + parent (str): + Required. 
The resource name of the Study to create the Trial
+            in. Format:
+            ``projects/{project}/locations/{location}/studies/{study}``
+        trial (google.cloud.aiplatform_v1beta1.types.Trial):
+            Required. The Trial to create.
+    """
+
+    parent = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    trial = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message=gca_study.Trial,
+    )
+
+
+class GetTrialRequest(proto.Message):
+    r"""Request message for
+    [VizierService.GetTrial][google.cloud.aiplatform.v1beta1.VizierService.GetTrial].
+
+    Attributes:
+        name (str):
+            Required. The name of the Trial resource. Format:
+            ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}``
+    """
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class ListTrialsRequest(proto.Message):
+    r"""Request message for
+    [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials].
+
+    Attributes:
+        parent (str):
+            Required. The resource name of the Study to list the Trials
+            from. Format:
+            ``projects/{project}/locations/{location}/studies/{study}``
+        page_token (str):
+            Optional. A page token to request the next
+            page of results. If unspecified, there are no
+            subsequent pages.
+        page_size (int):
+            Optional. The number of Trials to retrieve
+            per "page" of results. If unspecified, the
+            service will pick an appropriate default.
+    """
+
+    parent = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    page_token = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    page_size = proto.Field(
+        proto.INT32,
+        number=3,
+    )
+
+
+class ListTrialsResponse(proto.Message):
+    r"""Response message for
+    [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials].
+
+    Attributes:
+        trials (Sequence[google.cloud.aiplatform_v1beta1.types.Trial]):
+            The Trials associated with the Study.
+        next_page_token (str):
+            Pass this token as the ``page_token`` field of the request
+            for a subsequent call. If this field is omitted, there are
+            no subsequent pages.
+    """
+
+    @property
+    def raw_page(self):
+        return self
+
+    trials = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message=gca_study.Trial,
+    )
+    next_page_token = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+class AddTrialMeasurementRequest(proto.Message):
+    r"""Request message for
+    [VizierService.AddTrialMeasurement][google.cloud.aiplatform.v1beta1.VizierService.AddTrialMeasurement].
+
+    Attributes:
+        trial_name (str):
+            Required. The name of the Trial to add the measurement to.
+            Format:
+            ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}``
+        measurement (google.cloud.aiplatform_v1beta1.types.Measurement):
+            Required. The measurement to be added to a
+            Trial.
+    """
+
+    trial_name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    measurement = proto.Field(
+        proto.MESSAGE,
+        number=3,
+        message=gca_study.Measurement,
+    )
+
+
+class CompleteTrialRequest(proto.Message):
+    r"""Request message for
+    [VizierService.CompleteTrial][google.cloud.aiplatform.v1beta1.VizierService.CompleteTrial].
+
+    Attributes:
+        name (str):
+            Required. The Trial's name. Format:
+            ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}``
+        final_measurement (google.cloud.aiplatform_v1beta1.types.Measurement):
+            Optional. If provided, it will be used as the completed
+            Trial's final_measurement; otherwise, the service will
+            auto-select a previously reported measurement as the
+            final measurement.
+        trial_infeasible (bool):
+            Optional.
True if the Trial cannot be run with the given
+            Parameter, and final_measurement will be ignored.
+        infeasible_reason (str):
+            Optional. A human-readable reason why the trial was
+            infeasible. This should only be provided if
+            ``trial_infeasible`` is true.
+    """
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    final_measurement = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message=gca_study.Measurement,
+    )
+    trial_infeasible = proto.Field(
+        proto.BOOL,
+        number=3,
+    )
+    infeasible_reason = proto.Field(
+        proto.STRING,
+        number=4,
+    )
+
+
+class DeleteTrialRequest(proto.Message):
+    r"""Request message for
+    [VizierService.DeleteTrial][google.cloud.aiplatform.v1beta1.VizierService.DeleteTrial].
+
+    Attributes:
+        name (str):
+            Required. The Trial's name. Format:
+            ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}``
+    """
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class CheckTrialEarlyStoppingStateRequest(proto.Message):
+    r"""Request message for
+    [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState].
+
+    Attributes:
+        trial_name (str):
+            Required. The Trial's name. Format:
+            ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}``
+    """
+
+    trial_name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class CheckTrialEarlyStoppingStateResponse(proto.Message):
+    r"""Response message for
+    [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState].
+
+    Attributes:
+        should_stop (bool):
+            True if the Trial should stop.
+    """
+
+    should_stop = proto.Field(
+        proto.BOOL,
+        number=1,
+    )
+
+
+class CheckTrialEarlyStoppingStateMetatdata(proto.Message):
+    r"""This message will be placed in the metadata field of a
+    google.longrunning.Operation associated with a
+    CheckTrialEarlyStoppingState request.
+
+    Attributes:
+        generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata):
+            Operation metadata for checking a Trial's
+            early stopping state.
+        study (str):
+            The name of the Study that the Trial belongs
+            to.
+        trial (str):
+            The Trial name.
+    """
+
+    generic_metadata = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=operation.GenericOperationMetadata,
+    )
+    study = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    trial = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+
+
+class StopTrialRequest(proto.Message):
+    r"""Request message for
+    [VizierService.StopTrial][google.cloud.aiplatform.v1beta1.VizierService.StopTrial].
+
+    Attributes:
+        name (str):
+            Required. The Trial's name. Format:
+            ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}``
+    """
+
+    name = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class ListOptimalTrialsRequest(proto.Message):
+    r"""Request message for
+    [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials].
+
+    Attributes:
+        parent (str):
+            Required. The name of the Study that the
+            optimal Trial belongs to.
+    """
+
+    parent = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class ListOptimalTrialsResponse(proto.Message):
+    r"""Response message for
+    [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials].
+
+    Attributes:
+        optimal_trials (Sequence[google.cloud.aiplatform_v1beta1.types.Trial]):
+            The pareto-optimal Trials for a multi-objective Study, or
+            the optimal Trial for a single-objective Study. For the
+            definition of pareto-optimal, see
+            https://en.wikipedia.org/wiki/Pareto_efficiency
+    """
+
+    optimal_trials = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message=gca_study.Trial,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/mypy.ini b/owl-bot-staging/v1beta1/mypy.ini
new file mode 100644
index 0000000000..4505b48543
--- /dev/null
+++ b/owl-bot-staging/v1beta1/mypy.ini
@@ -0,0 +1,3 @@
+[mypy]
+python_version = 3.6
+namespace_packages = True
diff --git a/owl-bot-staging/v1beta1/noxfile.py b/owl-bot-staging/v1beta1/noxfile.py
new file mode 100644
index 0000000000..ff8279ad3e
--- /dev/null
+++ b/owl-bot-staging/v1beta1/noxfile.py
@@ -0,0 +1,132 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+import pathlib
+import shutil
+import subprocess
+import sys
+
+
+import nox  # type: ignore
+
+CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
+
+LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt"
+PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8")
+
+
+# Default sessions; note the trailing comma after "check_lower_bounds" is
+# required, otherwise Python concatenates it with "docs" into one string.
+nox.options.sessions = [
+    "unit",
+    "cover",
+    "mypy",
+    "check_lower_bounds",
+    # exclude update_lower_bounds from default
+    "docs",
+]
+
+@nox.session(python=['3.6', '3.7', '3.8', '3.9'])
+def unit(session):
+    """Run the unit test suite."""
+
+    session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio')
+    session.install('-e', '.')
+
+    session.run(
+        'py.test',
+        '--quiet',
+        '--cov=google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/',
+        '--cov-config=.coveragerc',
+        '--cov-report=term',
+        '--cov-report=html',
+        os.path.join('tests', 'unit', ''.join(session.posargs))
+    )
+
+
+@nox.session(python='3.7')
+def cover(session):
+    """Run the final coverage report.

+    This outputs the coverage report aggregating coverage from the unit
+    test runs (not system test runs), and then erases coverage data.
+    """
+    session.install("coverage", "pytest-cov")
+    session.run("coverage", "report", "--show-missing", "--fail-under=100")
+
+    session.run("coverage", "erase")
+
+
+@nox.session(python=['3.6', '3.7'])
+def mypy(session):
+    """Run the type checker."""
+    session.install('mypy', 'types-pkg_resources')
+    session.install('.')
+    session.run(
+        'mypy',
+        '--explicit-package-bases',
+        'google',
+    )
+
+
+@nox.session
+def update_lower_bounds(session):
+    """Update lower bounds in constraints.txt to match setup.py"""
+    session.install('google-cloud-testutils')
+    session.install('.')
+
+    session.run(
+        'lower-bound-checker',
+        'update',
+        '--package-name',
+        PACKAGE_NAME,
+        '--constraints-file',
+        str(LOWER_BOUND_CONSTRAINTS_FILE),
+    )
+
+
+@nox.session
+def check_lower_bounds(session):
+    """Check lower bounds in setup.py are reflected in constraints file"""
+    session.install('google-cloud-testutils')
+    session.install('.')
+
+    session.run(
+        'lower-bound-checker',
+        'check',
+        '--package-name',
+        PACKAGE_NAME,
+        '--constraints-file',
+        str(LOWER_BOUND_CONSTRAINTS_FILE),
+    )
+
+@nox.session(python='3.6')
+def docs(session):
+    """Build the docs for this library."""
+
+    session.install("-e", ".")
+    session.install("sphinx<3.0.0", "alabaster", "recommonmark")
+
+    shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
+    session.run(
+        "sphinx-build",
+        "-W",  # warnings as errors
+        "-T",  # show full traceback on exception
+        "-N",  # no colors
+        "-b",
+        "html",
+        "-d",
+        os.path.join("docs", "_build", "doctrees", ""),
+        os.path.join("docs", ""),
+        os.path.join("docs", "_build", "html", ""),
+    )
diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/.coveragerc b/owl-bot-staging/v1beta1/schema/predict/instance/.coveragerc
new file mode 100644
index 0000000000..39419c276e
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/instance/.coveragerc
@@ -0,0 +1,17 @@
+[run]
+branch = True
+
+[report]
+show_missing = True
+omit =
+    google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py
+exclude_lines =
+    # Re-enable the standard pragma
+    pragma: NO COVER
+    # Ignore debug-only repr
+    def __repr__
+    # Ignore pkg_resources exceptions.
+    # This is added at the module level as a safeguard for if someone
+    # generates the code and tries to run it without pip installing. This
+    # makes it virtually impossible to test properly.
+    except pkg_resources.DistributionNotFound
diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/MANIFEST.in b/owl-bot-staging/v1beta1/schema/predict/instance/MANIFEST.in
new file mode 100644
index 0000000000..8b11497385
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/instance/MANIFEST.in
@@ -0,0 +1,2 @@
+recursive-include google/cloud/aiplatform/v1beta1/schema/predict/instance *.py
+recursive-include google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1 *.py
diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/README.rst b/owl-bot-staging/v1beta1/schema/predict/instance/README.rst
new file mode 100644
index 0000000000..a45a5dce1c
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/instance/README.rst
@@ -0,0 +1,49 @@
+Python Client for Google Cloud Aiplatform V1beta1 Schema Predict Instance API
+=============================================================================
+
+Quick Start
+-----------
+
+In order to use this library, you first need to go through the following steps:
+
+1. `Select or create a Cloud Platform project.`_
+2. `Enable billing for your project.`_
+3. Enable the Google Cloud Aiplatform V1beta1 Schema Predict Instance API.
+4. `Setup Authentication.`_
+
+.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project
+.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project
+.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html
+
+Installation
+~~~~~~~~~~~~
+
+Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to
+create isolated Python environments. The basic problem it addresses is one of
+dependencies and versions, and indirectly permissions.
+
+With `virtualenv`_, it's possible to install this library without needing system
+install permissions, and without clashing with the installed system
+dependencies.
+
+.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/
+
+
+Mac/Linux
+^^^^^^^^^
+
+.. code-block:: console
+
+    python3 -m venv <your-env>
+    source <your-env>/bin/activate
+    <your-env>/bin/pip install /path/to/library
+
+
+Windows
+^^^^^^^
+
+.. code-block:: console
+
+    python3 -m venv <your-env>
+    <your-env>\Scripts\activate
+    <your-env>\Scripts\pip.exe install \path\to\library
diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/docs/conf.py b/owl-bot-staging/v1beta1/schema/predict/instance/docs/conf.py
new file mode 100644
index 0000000000..54743ec93b
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/instance/docs/conf.py
@@ -0,0 +1,376 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+# google-cloud-aiplatform-v1beta1-schema-predict-instance documentation build configuration file
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+import shlex
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath(".."))
+
+__version__ = "0.1.0"
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+needs_sphinx = "1.6.3"
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    "sphinx.ext.autodoc",
+    "sphinx.ext.autosummary",
+    "sphinx.ext.intersphinx",
+    "sphinx.ext.coverage",
+    "sphinx.ext.napoleon",
+    "sphinx.ext.todo",
+    "sphinx.ext.viewcode",
+]
+
+# autodoc/autosummary flags
+autoclass_content = "both"
+autodoc_default_flags = ["members"]
+autosummary_generate = True
+
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ["_templates"]
+
+# Allow markdown includes (so releases.md can include CHANGELOG.md)
+# http://www.sphinx-doc.org/en/master/markdown.html
+source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffixes as a list of strings:
+source_suffix = [".rst", ".md"]
+
+# The encoding of source files.
+# source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = "index"
+
+# General information about the project.
+project = u"google-cloud-aiplatform-v1beta1-schema-predict-instance"
+copyright = u"2020, Google, LLC"
+author = u"Google APIs"  # TODO: autogenerate this bit
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The full version, including alpha/beta/rc tags.
+release = __version__
+# The short X.Y version.
+version = ".".join(release.split(".")[0:2])
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+# today = ''
+# Else, today_fmt is used as the format for a strftime call.
+# today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ["_build"]
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+# default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+# add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+# add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+# show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = "sphinx"
+
+# A list of ignored prefixes for module index sorting.
+# modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+# keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = True
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = "alabaster"
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+html_theme_options = {
+    "description": "Google Cloud Aiplatform V1beta1 Schema Predict Client Libraries for Python",
+    "github_user": "googleapis",
+    "github_repo": "google-cloud-python",
+    "github_banner": True,
+    "font_family": "'Roboto', Georgia, sans",
+    "head_font_family": "'Roboto', Georgia, serif",
+    "code_font_family": "'Roboto Mono', 'Consolas', monospace",
+}
+
+# Add any paths that contain custom themes here, relative to this directory.
+# html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+# html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+# html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+# html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+# html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ["_static"]
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+# html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+# html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+# html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+# html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+# html_additional_pages = {}
+
+# If false, no module index is generated.
+# html_domain_indices = True
+
+# If false, no index is generated.
+# html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+# html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+# html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+# html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+# html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+# html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+# html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
+# html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# Now only 'ja' uses this config value
+# html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+# html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = "google-cloud-aiplatform-v1beta1-schema-predict-instance-doc"
+
+# -- Options for warnings ------------------------------------------------------
+
+
+suppress_warnings = [
+    # Temporarily suppress this to avoid "more than one target found for
+    # cross-reference" warning, which are intractable for us to avoid while in
+    # a mono-repo.
+ # See https://github.com/sphinx-doc/sphinx/blob + # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 + "ref.python" +] + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + # 'preamble': '', + # Latex figure (float) alignment + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ( + master_doc, + "google-cloud-aiplatform-v1beta1-schema-predict-instance.tex", + u"google-cloud-aiplatform-v1beta1-schema-predict-instance Documentation", + author, + "manual", + ) +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ( + master_doc, + "google-cloud-aiplatform-v1beta1-schema-predict-instance", + u"Google Cloud Aiplatform V1beta1 Schema Predict Instance Documentation", + [author], + 1, + ) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + master_doc, + "google-cloud-aiplatform-v1beta1-schema-predict-instance", + u"google-cloud-aiplatform-v1beta1-schema-predict-instance Documentation", + author, + "google-cloud-aiplatform-v1beta1-schema-predict-instance", + "GAPIC library for Google Cloud Aiplatform V1beta1 Schema Predict Instance API", + "APIs", + ) +] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# texinfo_no_detailmenu = False + + +# Example configuration for intersphinx: refer to the Python standard library. 
+intersphinx_mapping = { + "python": ("http://python.readthedocs.org/en/latest/", None), + "gax": ("https://gax-python.readthedocs.org/en/latest/", None), + "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), + "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), + "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), + "grpc": ("https://grpc.io/grpc/python/", None), + "requests": ("http://requests.kennethreitz.org/en/stable/", None), + "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), + "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), +} + + +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = True +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = True diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/docs/index.rst b/owl-bot-staging/v1beta1/schema/predict/instance/docs/index.rst new file mode 100644 index 0000000000..8702e5d3aa --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/instance/docs/index.rst @@ -0,0 +1,7 @@ +API Reference +------------- +.. toctree:: + :maxdepth: 2 + + instance_v1beta1/services + instance_v1beta1/types diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/docs/instance_v1beta1/services.rst b/owl-bot-staging/v1beta1/schema/predict/instance/docs/instance_v1beta1/services.rst new file mode 100644 index 0000000000..941dbcca59 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/instance/docs/instance_v1beta1/services.rst @@ -0,0 +1,4 @@ +Services for Google Cloud Aiplatform V1beta1 Schema Predict Instance v1beta1 API +================================================================================ +.. toctree:: + :maxdepth: 2 diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/docs/instance_v1beta1/types.rst b/owl-bot-staging/v1beta1/schema/predict/instance/docs/instance_v1beta1/types.rst new file mode 100644 index 0000000000..7caa088065 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/instance/docs/instance_v1beta1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Aiplatform V1beta1 Schema Predict Instance v1beta1 API +============================================================================= + +.. automodule:: google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py new file mode 100644 index 0000000000..5f9e065de0 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_classification import ImageClassificationPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_object_detection import ImageObjectDetectionPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_segmentation import ImageSegmentationPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.text_classification import TextClassificationPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.text_extraction import TextExtractionPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.text_sentiment import TextSentimentPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.video_action_recognition import VideoActionRecognitionPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.video_classification import VideoClassificationPredictionInstance +from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.video_object_tracking import VideoObjectTrackingPredictionInstance + +__all__ = ('ImageClassificationPredictionInstance', + 'ImageObjectDetectionPredictionInstance', + 'ImageSegmentationPredictionInstance', + 'TextClassificationPredictionInstance', + 'TextExtractionPredictionInstance', + 'TextSentimentPredictionInstance', + 'VideoActionRecognitionPredictionInstance', + 'VideoClassificationPredictionInstance', + 'VideoObjectTrackingPredictionInstance', +) diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed new file mode 100644 index 0000000000..46ccbaf568 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-instance package uses inline types. diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py new file mode 100644 index 0000000000..41ab5407a7 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from .types.image_classification import ImageClassificationPredictionInstance +from .types.image_object_detection import ImageObjectDetectionPredictionInstance +from .types.image_segmentation import ImageSegmentationPredictionInstance +from .types.text_classification import TextClassificationPredictionInstance +from .types.text_extraction import TextExtractionPredictionInstance +from .types.text_sentiment import TextSentimentPredictionInstance +from .types.video_action_recognition import VideoActionRecognitionPredictionInstance +from .types.video_classification import VideoClassificationPredictionInstance +from .types.video_object_tracking import VideoObjectTrackingPredictionInstance + +__all__ = ( +'ImageClassificationPredictionInstance', +'ImageObjectDetectionPredictionInstance', +'ImageSegmentationPredictionInstance', +'TextClassificationPredictionInstance', +'TextExtractionPredictionInstance', +'TextSentimentPredictionInstance', +'VideoActionRecognitionPredictionInstance', +'VideoClassificationPredictionInstance', +'VideoObjectTrackingPredictionInstance', +) diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_metadata.json new file mode 100644 index 0000000000..38379e8208 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_metadata.json @@ -0,0 +1,7 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1", + "protoPackage": "google.cloud.aiplatform.v1beta1.schema.predict.instance", + "schema": "1.0" +} diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed new file mode 100644 index 0000000000..46ccbaf568 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-instance package uses inline types. 
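A small sketch (an editor's illustration, not part of this change) of constructing one of the prediction-instance messages exported above and converting it to a plain Python dict, the usual intermediate form when building a JSON request payload for ``PredictionService``; the Cloud Storage URI is hypothetical::

    import proto
    from google.cloud.aiplatform.v1beta1.schema.predict import instance_v1beta1

    inst = instance_v1beta1.ImageClassificationPredictionInstance(
        content="gs://my-bucket/cat.jpeg",  # hypothetical Cloud Storage URI
        mime_type="image/jpeg",
    )

    # proto-plus messages convert to plain dicts for JSON serialization.
    payload = proto.Message.to_dict(inst)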
diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/services/__init__.py new file mode 100644 index 0000000000..4de65971c2 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py new file mode 100644 index 0000000000..80a5332604 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
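One detail worth noting (editor's aside): the version-less ``instance`` package shown earlier and the versioned ``instance_v1beta1`` package re-export the same underlying message classes, so, assuming both are importable from this staging layout, the two import paths are interchangeable::

    from google.cloud.aiplatform.v1beta1.schema.predict import instance
    from google.cloud.aiplatform.v1beta1.schema.predict import instance_v1beta1

    # Both names resolve to the same class object.
    assert (
        instance.TextSentimentPredictionInstance
        is instance_v1beta1.TextSentimentPredictionInstance
    )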
+#
+from .image_classification import (
+    ImageClassificationPredictionInstance,
+)
+from .image_object_detection import (
+    ImageObjectDetectionPredictionInstance,
+)
+from .image_segmentation import (
+    ImageSegmentationPredictionInstance,
+)
+from .text_classification import (
+    TextClassificationPredictionInstance,
+)
+from .text_extraction import (
+    TextExtractionPredictionInstance,
+)
+from .text_sentiment import (
+    TextSentimentPredictionInstance,
+)
+from .video_action_recognition import (
+    VideoActionRecognitionPredictionInstance,
+)
+from .video_classification import (
+    VideoClassificationPredictionInstance,
+)
+from .video_object_tracking import (
+    VideoObjectTrackingPredictionInstance,
+)
+
+__all__ = (
+    'ImageClassificationPredictionInstance',
+    'ImageObjectDetectionPredictionInstance',
+    'ImageSegmentationPredictionInstance',
+    'TextClassificationPredictionInstance',
+    'TextExtractionPredictionInstance',
+    'TextSentimentPredictionInstance',
+    'VideoActionRecognitionPredictionInstance',
+    'VideoClassificationPredictionInstance',
+    'VideoObjectTrackingPredictionInstance',
+)
diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py
new file mode 100644
index 0000000000..aac9e2bc91
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1.schema.predict.instance',
+    manifest={
+        'ImageClassificationPredictionInstance',
+    },
+)
+
+
+class ImageClassificationPredictionInstance(proto.Message):
+    r"""Prediction input format for Image Classification.
+
+    Attributes:
+        content (str):
+            The image bytes or Cloud Storage URI to make
+            the prediction on.
+        mime_type (str):
+            The MIME type of the image content. Only the
+            MIME types listed below are supported:
+
+            - image/jpeg
+            - image/gif
+            - image/png
+            - image/webp
+            - image/bmp
+            - image/tiff
+            - image/vnd.microsoft.icon
+    """
+
+    content = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    mime_type = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py
new file mode 100644
index 0000000000..80a5d797a1
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1.schema.predict.instance',
+    manifest={
+        'ImageObjectDetectionPredictionInstance',
+    },
+)
+
+
+class ImageObjectDetectionPredictionInstance(proto.Message):
+    r"""Prediction input format for Image Object Detection.
+
+    Attributes:
+        content (str):
+            The image bytes or Cloud Storage URI to make
+            the prediction on.
+        mime_type (str):
+            The MIME type of the image content. Only the
+            MIME types listed below are supported:
+
+            - image/jpeg
+            - image/gif
+            - image/png
+            - image/webp
+            - image/bmp
+            - image/tiff
+            - image/vnd.microsoft.icon
+    """
+
+    content = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    mime_type = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py
new file mode 100644
index 0000000000..e1b5cfc21f
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
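Since ``content`` accepts raw image bytes as well as a Cloud Storage URI, a common pattern (editor's sketch; the file name is hypothetical, and base64 encoding is the usual convention for binary payloads sent as JSON) is::

    import base64
    from google.cloud.aiplatform.v1beta1.schema.predict import instance_v1beta1

    with open("dog.png", "rb") as f:  # hypothetical local file
        encoded = base64.b64encode(f.read()).decode("utf-8")

    inst = instance_v1beta1.ImageObjectDetectionPredictionInstance(
        content=encoded,
        mime_type="image/png",
    )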
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'ImageSegmentationPredictionInstance', + }, +) + + +class ImageSegmentationPredictionInstance(proto.Message): + r"""Prediction input format for Image Segmentation. + Attributes: + content (str): + The image bytes to make the predictions on. + mime_type (str): + The MIME type of the content of the image. + Only the images in below listed MIME types are + supported. - image/jpeg + - image/png + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py new file mode 100644 index 0000000000..0c1ea43a72 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'TextClassificationPredictionInstance', + }, +) + + +class TextClassificationPredictionInstance(proto.Message): + r"""Prediction input format for Text Classification. + Attributes: + content (str): + The text snippet to make the predictions on. + mime_type (str): + The MIME type of the text snippet. The + supported MIME types are listed below. + - text/plain + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py new file mode 100644 index 0000000000..8f47b27080 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1.schema.predict.instance',
+    manifest={
+        'TextExtractionPredictionInstance',
+    },
+)
+
+
+class TextExtractionPredictionInstance(proto.Message):
+    r"""Prediction input format for Text Extraction.
+    Attributes:
+        content (str):
+            The text snippet to make the predictions on.
+        mime_type (str):
+            The MIME type of the text snippet. The
+            supported MIME types are listed below.
+            - text/plain
+        key (str):
+            This field is only used for batch prediction.
+            If a key is provided, the batch prediction
+            result will be mapped to this key. If omitted,
+            then the batch prediction result will contain
+            the entire input instance. Vertex AI will not
+            check if keys in the request are duplicates, so
+            it is up to the caller to ensure the keys are
+            unique.
+    """
+
+    content = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    mime_type = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    key = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py
new file mode 100644
index 0000000000..ab416779b6
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1.schema.predict.instance',
+    manifest={
+        'TextSentimentPredictionInstance',
+    },
+)
+
+
+class TextSentimentPredictionInstance(proto.Message):
+    r"""Prediction input format for Text Sentiment.
+    Attributes:
+        content (str):
+            The text snippet to make the predictions on.
+        mime_type (str):
+            The MIME type of the text snippet. The
+            supported MIME types are listed below.
+ - text/plain + """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py new file mode 100644 index 0000000000..c7a76efda2 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'VideoActionRecognitionPredictionInstance', + }, +) + + +class VideoActionRecognitionPredictionInstance(proto.Message): + r"""Prediction input format for Video Action Recognition. + Attributes: + content (str): + The Google Cloud Storage location of the + video on which to perform the prediction. + mime_type (str): + The MIME type of the content of the video. + Only the following are supported: video/mp4 + video/avi video/quicktime + time_segment_start (str): + The beginning, inclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision. + time_segment_end (str): + The end, exclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision, and "inf" or "Infinity" + is allowed, which means the end of the video. 
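+            For instance (illustrative values, not
+            defaults): a time_segment_start of "4.5s" with
+            a time_segment_end of "10s" selects a clip
+            starting 4.5 seconds into the video and ending
+            just before the 10 second mark.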
+ """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.STRING, + number=3, + ) + time_segment_end = proto.Field( + proto.STRING, + number=4, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py new file mode 100644 index 0000000000..56d662ef88 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'VideoClassificationPredictionInstance', + }, +) + + +class VideoClassificationPredictionInstance(proto.Message): + r"""Prediction input format for Video Classification. + Attributes: + content (str): + The Google Cloud Storage location of the + video on which to perform the prediction. + mime_type (str): + The MIME type of the content of the video. + Only the following are supported: video/mp4 + video/avi video/quicktime + time_segment_start (str): + The beginning, inclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision. + time_segment_end (str): + The end, exclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision, and "inf" or "Infinity" + is allowed, which means the end of the video. 
+ """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.STRING, + number=3, + ) + time_segment_end = proto.Field( + proto.STRING, + number=4, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py new file mode 100644 index 0000000000..7344d419a8 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.instance', + manifest={ + 'VideoObjectTrackingPredictionInstance', + }, +) + + +class VideoObjectTrackingPredictionInstance(proto.Message): + r"""Prediction input format for Video Object Tracking. + Attributes: + content (str): + The Google Cloud Storage location of the + video on which to perform the prediction. + mime_type (str): + The MIME type of the content of the video. + Only the following are supported: video/mp4 + video/avi video/quicktime + time_segment_start (str): + The beginning, inclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision. + time_segment_end (str): + The end, exclusive, of the video's time + segment on which to perform the prediction. + Expressed as a number of seconds as measured + from the start of the video, with "s" appended + at the end. Fractions are allowed, up to a + microsecond precision, and "inf" or "Infinity" + is allowed, which means the end of the video. 
+ """ + + content = proto.Field( + proto.STRING, + number=1, + ) + mime_type = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.STRING, + number=3, + ) + time_segment_end = proto.Field( + proto.STRING, + number=4, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/mypy.ini b/owl-bot-staging/v1beta1/schema/predict/instance/mypy.ini new file mode 100644 index 0000000000..4505b48543 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/instance/mypy.ini @@ -0,0 +1,3 @@ +[mypy] +python_version = 3.6 +namespace_packages = True diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/noxfile.py b/owl-bot-staging/v1beta1/schema/predict/instance/noxfile.py new file mode 100644 index 0000000000..c6550a646b --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/instance/noxfile.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import pathlib +import shutil +import subprocess +import sys + + +import nox # type: ignore + +CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() + +LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" +PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") + + +nox.sessions = [ + "unit", + "cover", + "mypy", + "check_lower_bounds" + # exclude update_lower_bounds from default + "docs", +] + +@nox.session(python=['3.6', '3.7', '3.8', '3.9']) +def unit(session): + """Run the unit test suite.""" + + session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio') + session.install('-e', '.') + + session.run( + 'py.test', + '--quiet', + '--cov=google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/', + '--cov-config=.coveragerc', + '--cov-report=term', + '--cov-report=html', + os.path.join('tests', 'unit', ''.join(session.posargs)) + ) + + +@nox.session(python='3.7') +def cover(session): + """Run the final coverage report. + This outputs the coverage report aggregating coverage from the unit + test runs (not system test runs), and then erases coverage data. 
+ """ + session.install("coverage", "pytest-cov") + session.run("coverage", "report", "--show-missing", "--fail-under=100") + + session.run("coverage", "erase") + + +@nox.session(python=['3.6', '3.7']) +def mypy(session): + """Run the type checker.""" + session.install('mypy', 'types-pkg_resources') + session.install('.') + session.run( + 'mypy', + '--explicit-package-bases', + 'google', + ) + + +@nox.session +def update_lower_bounds(session): + """Update lower bounds in constraints.txt to match setup.py""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'update', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + + +@nox.session +def check_lower_bounds(session): + """Check lower bounds in setup.py are reflected in constraints file""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'check', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + +@nox.session(python='3.6') +def docs(session): + """Build the docs for this library.""" + + session.install("-e", ".") + session.install("sphinx<3.0.0", "alabaster", "recommonmark") + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-W", # warnings as errors + "-T", # show full traceback on exception + "-N", # no colors + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/scripts/fixup_instance_v1beta1_keywords.py b/owl-bot-staging/v1beta1/schema/predict/instance/scripts/fixup_instance_v1beta1_keywords.py new file mode 100644 index 0000000000..d9e8bd0b17 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/instance/scripts/fixup_instance_v1beta1_keywords.py @@ -0,0 +1,175 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class instanceCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. 
+            return updated
+
+        # If the existing code is valid, keyword args come after positional args.
+        # Therefore, all positional args must map to the first parameters.
+        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
+        if any(k.keyword.value == "request" for k in kwargs):
+            # We've already fixed this file, don't fix it again.
+            return updated
+
+        kwargs, ctrl_kwargs = partition(
+            lambda a: a.keyword.value not in self.CTRL_PARAMS,
+            kwargs
+        )
+
+        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
+        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
+                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
+
+        request_arg = cst.Arg(
+            value=cst.Dict([
+                cst.DictElement(
+                    cst.SimpleString("'{}'".format(name)),
+                    cst.Element(value=arg.value)
+                )
+                # Note: the args + kwargs looks silly, but keep in mind that
+                # the control parameters had to be stripped out, and that
+                # those could have been passed positionally or by keyword.
+                for name, arg in zip(kword_params, args + kwargs)]),
+            keyword=cst.Name("request")
+        )
+
+        return updated.with_changes(
+            args=[request_arg] + ctrl_kwargs
+        )
+
+
+def fix_files(
+    in_dir: pathlib.Path,
+    out_dir: pathlib.Path,
+    *,
+    transformer=instanceCallTransformer(),
+):
+    """Duplicate the input dir to the output dir, fixing file method calls.
+
+    Preconditions:
+    * in_dir is a real directory
+    * out_dir is a real, empty directory
+    """
+    pyfile_gen = (
+        pathlib.Path(os.path.join(root, f))
+        for root, _, files in os.walk(in_dir)
+        for f in files if os.path.splitext(f)[1] == ".py"
+    )
+
+    for fpath in pyfile_gen:
+        with open(fpath, 'r') as f:
+            src = f.read()
+
+        # Parse the code and insert method call fixes.
+        tree = cst.parse_module(src)
+        updated = tree.visit(transformer)
+
+        # Create the path and directory structure for the new file.
+        updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
+        updated_path.parent.mkdir(parents=True, exist_ok=True)
+
+        # Generate the updated source file at the corresponding path.
+        with open(updated_path, 'w') as f:
+            f.write(updated.code)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description="""Fix up source that uses the instance client library.
+
+The existing sources are NOT overwritten but are copied to output_dir with changes made.
+
+Note: This tool operates at a best-effort level at converting positional
+      parameters in client method calls to keyword based parameters.
+      Cases where it WILL FAIL include
+      A) * or ** expansion in a method call.
+      B) Calls via function or method alias (includes free function calls)
+      C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
+
+      These all constitute false negatives. The tool will also produce false
+      positives when an API method shares a name with another method.
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/setup.py b/owl-bot-staging/v1beta1/schema/predict/instance/setup.py new file mode 100644 index 0000000000..203e4d1710 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/instance/setup.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import io +import os +import setuptools # type: ignore + +version = '0.1.0' + +package_root = os.path.abspath(os.path.dirname(__file__)) + +readme_filename = os.path.join(package_root, 'README.rst') +with io.open(readme_filename, encoding='utf-8') as readme_file: + readme = readme_file.read() + +setuptools.setup( + name='google-cloud-aiplatform-v1beta1-schema-predict-instance', + version=version, + long_description=readme, + packages=setuptools.PEP420PackageFinder.find(), + namespace_packages=('google', 'google.cloud', 'google.cloud.aiplatform', 'google.cloud.aiplatform.v1beta1', 'google.cloud.aiplatform.v1beta1.schema', 'google.cloud.aiplatform.v1beta1.schema.predict'), + platforms='Posix; MacOS X; Windows', + include_package_data=True, + install_requires=( + 'google-api-core[grpc] >= 1.27.0, < 2.0.0dev', + 'libcst >= 0.2.5', + 'proto-plus >= 1.15.0', + 'packaging >= 14.3', ), + python_requires='>=3.6', + classifiers=[ + 'Development Status :: 3 - Alpha', + 'Intended Audience :: Developers', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Topic :: Internet', + 'Topic :: Software Development :: Libraries :: Python Modules', + ], + zip_safe=False, +) diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/tests/__init__.py b/owl-bot-staging/v1beta1/schema/predict/instance/tests/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/instance/tests/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/tests/unit/__init__.py b/owl-bot-staging/v1beta1/schema/predict/instance/tests/unit/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/instance/tests/unit/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
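Because the setup.py above declares google, google.cloud, and the intermediate packages as namespace packages, this distribution composes with other google-cloud-* packages rather than overwriting them. A quick smoke test after installation might look like this (a hypothetical console session, run from the package root):

.. code-block:: console

    pip install .
    python -c "from google.cloud.aiplatform.v1beta1.schema.predict import instance_v1beta1; print(instance_v1beta1.__all__)"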
+# diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/tests/unit/gapic/__init__.py b/owl-bot-staging/v1beta1/schema/predict/instance/tests/unit/gapic/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/instance/tests/unit/gapic/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/tests/unit/gapic/instance_v1beta1/__init__.py b/owl-bot-staging/v1beta1/schema/predict/instance/tests/unit/gapic/instance_v1beta1/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/instance/tests/unit/gapic/instance_v1beta1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/schema/predict/params/.coveragerc b/owl-bot-staging/v1beta1/schema/predict/params/.coveragerc new file mode 100644 index 0000000000..c065601f87 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/params/.coveragerc @@ -0,0 +1,17 @@ +[run] +branch = True + +[report] +show_missing = True +omit = + google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py +exclude_lines = + # Re-enable the standard pragma + pragma: NO COVER + # Ignore debug-only repr + def __repr__ + # Ignore pkg_resources exceptions. + # This is added at the module level as a safeguard for if someone + # generates the code and tries to run it without pip installing. This + # makes it virtually impossible to test properly. 
+    except pkg_resources.DistributionNotFound
diff --git a/owl-bot-staging/v1beta1/schema/predict/params/MANIFEST.in b/owl-bot-staging/v1beta1/schema/predict/params/MANIFEST.in
new file mode 100644
index 0000000000..d5e40e84cb
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/params/MANIFEST.in
@@ -0,0 +1,2 @@
+recursive-include google/cloud/aiplatform/v1beta1/schema/predict/params *.py
+recursive-include google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1 *.py
diff --git a/owl-bot-staging/v1beta1/schema/predict/params/README.rst b/owl-bot-staging/v1beta1/schema/predict/params/README.rst
new file mode 100644
index 0000000000..7f0e1dfe6c
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/params/README.rst
@@ -0,0 +1,49 @@
+Python Client for Google Cloud Aiplatform V1beta1 Schema Predict Params API
+============================================================================
+
+Quick Start
+-----------
+
+In order to use this library, you first need to go through the following steps:
+
+1. `Select or create a Cloud Platform project.`_
+2. `Enable billing for your project.`_
+3. Enable the Google Cloud Aiplatform V1beta1 Schema Predict Params API.
+4. `Setup Authentication.`_
+
+.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project
+.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project
+.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html
+
+Installation
+~~~~~~~~~~~~
+
+Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to
+create isolated Python environments. The basic problem it addresses is one of
+dependencies and versions, and indirectly permissions.
+
+With `virtualenv`_, it's possible to install this library without needing system
+install permissions, and without clashing with the installed system
+dependencies.
+
+.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/
+
+
+Mac/Linux
+^^^^^^^^^
+
+.. code-block:: console
+
+    python3 -m venv <your-env>
+    source <your-env>/bin/activate
+    <your-env>/bin/pip install /path/to/library
+
+
+Windows
+^^^^^^^
+
+.. code-block:: console
+
+    python3 -m venv <your-env>
+    <your-env>\Scripts\activate
+    <your-env>\Scripts\pip.exe install \path\to\library
diff --git a/owl-bot-staging/v1beta1/schema/predict/params/docs/conf.py b/owl-bot-staging/v1beta1/schema/predict/params/docs/conf.py
new file mode 100644
index 0000000000..2c264e47b8
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/params/docs/conf.py
@@ -0,0 +1,376 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+# google-cloud-aiplatform-v1beta1-schema-predict-params documentation build configuration file
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
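This conf.py is what the docs nox session shown earlier drives; outside of nox, the equivalent manual build is roughly the following (a sketch reproducing the session's flags, assuming the same working directory and dependencies):

.. code-block:: console

    pip install -e . "sphinx<3.0.0" alabaster recommonmark
    sphinx-build -W -T -N -b html -d docs/_build/doctrees/ docs/ docs/_build/html/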
+
+import sys
+import os
+import shlex
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath(".."))
+
+__version__ = "0.1.0"
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+needs_sphinx = "1.6.3"
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    "sphinx.ext.autodoc",
+    "sphinx.ext.autosummary",
+    "sphinx.ext.intersphinx",
+    "sphinx.ext.coverage",
+    "sphinx.ext.napoleon",
+    "sphinx.ext.todo",
+    "sphinx.ext.viewcode",
+]
+
+# autodoc/autosummary flags
+autoclass_content = "both"
+autodoc_default_flags = ["members"]
+autosummary_generate = True
+
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ["_templates"]
+
+# Allow markdown includes (so releases.md can include CHANGELOG.md)
+# http://www.sphinx-doc.org/en/master/markdown.html
+source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffixes as a list of strings:
+source_suffix = [".rst", ".md"]
+
+# The encoding of source files.
+# source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = "index"
+
+# General information about the project.
+project = u"google-cloud-aiplatform-v1beta1-schema-predict-params"
+copyright = u"2020, Google, LLC"
+author = u"Google APIs"  # TODO: autogenerate this bit
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The full version, including alpha/beta/rc tags.
+release = __version__
+# The short X.Y version.
+version = ".".join(release.split(".")[0:2])
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+# today = ''
+# Else, today_fmt is used as the format for a strftime call.
+# today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ["_build"]
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+# default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+# add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+# add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+# show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = "sphinx"
+
+# A list of ignored prefixes for module index sorting.
+# modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+# keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = "alabaster" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +html_theme_options = { + "description": "Google Cloud Aiplatform V1beta1 Schema Predict Client Libraries for Python", + "github_user": "googleapis", + "github_repo": "google-cloud-python", + "github_banner": True, + "font_family": "'Roboto', Georgia, sans", + "head_font_family": "'Roboto', Georgia, serif", + "code_font_family": "'Roboto Mono', 'Consolas', monospace", +} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +# html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. 
+# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' +# html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# Now only 'ja' uses this config value +# html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +# html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. +htmlhelp_basename = "google-cloud-aiplatform-v1beta1-schema-predict-params-doc" + +# -- Options for warnings ------------------------------------------------------ + + +suppress_warnings = [ + # Temporarily suppress this to avoid "more than one target found for + # cross-reference" warning, which are intractable for us to avoid while in + # a mono-repo. + # See https://github.com/sphinx-doc/sphinx/blob + # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 + "ref.python" +] + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + # 'preamble': '', + # Latex figure (float) alignment + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ( + master_doc, + "google-cloud-aiplatform-v1beta1-schema-predict-params.tex", + u"google-cloud-aiplatform-v1beta1-schema-predict-params Documentation", + author, + "manual", + ) +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ( + master_doc, + "google-cloud-aiplatform-v1beta1-schema-predict-params", + u"Google Cloud Aiplatform V1beta1 Schema Predict Params Documentation", + [author], + 1, + ) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + master_doc, + "google-cloud-aiplatform-v1beta1-schema-predict-params", + u"google-cloud-aiplatform-v1beta1-schema-predict-params Documentation", + author, + "google-cloud-aiplatform-v1beta1-schema-predict-params", + "GAPIC library for Google Cloud Aiplatform V1beta1 Schema Predict Params API", + "APIs", + ) +] + +# Documents to append as an appendix to all manuals. 
+# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# texinfo_no_detailmenu = False + + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = { + "python": ("http://python.readthedocs.org/en/latest/", None), + "gax": ("https://gax-python.readthedocs.org/en/latest/", None), + "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), + "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), + "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), + "grpc": ("https://grpc.io/grpc/python/", None), + "requests": ("http://requests.kennethreitz.org/en/stable/", None), + "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), + "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), +} + + +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = True +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = True diff --git a/owl-bot-staging/v1beta1/schema/predict/params/docs/index.rst b/owl-bot-staging/v1beta1/schema/predict/params/docs/index.rst new file mode 100644 index 0000000000..7f7e36e347 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/params/docs/index.rst @@ -0,0 +1,7 @@ +API Reference +------------- +.. toctree:: + :maxdepth: 2 + + params_v1beta1/services + params_v1beta1/types diff --git a/owl-bot-staging/v1beta1/schema/predict/params/docs/params_v1beta1/services.rst b/owl-bot-staging/v1beta1/schema/predict/params/docs/params_v1beta1/services.rst new file mode 100644 index 0000000000..b3b897a0f4 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/params/docs/params_v1beta1/services.rst @@ -0,0 +1,4 @@ +Services for Google Cloud Aiplatform V1beta1 Schema Predict Params v1beta1 API +============================================================================== +.. toctree:: + :maxdepth: 2 diff --git a/owl-bot-staging/v1beta1/schema/predict/params/docs/params_v1beta1/types.rst b/owl-bot-staging/v1beta1/schema/predict/params/docs/params_v1beta1/types.rst new file mode 100644 index 0000000000..722a1d8ba0 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/params/docs/params_v1beta1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Aiplatform V1beta1 Schema Predict Params v1beta1 API +=========================================================================== + +.. 
automodule:: google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py new file mode 100644 index 0000000000..464c39f26c --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_classification import ImageClassificationPredictionParams +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_object_detection import ImageObjectDetectionPredictionParams +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_segmentation import ImageSegmentationPredictionParams +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.video_action_recognition import VideoActionRecognitionPredictionParams +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.video_classification import VideoClassificationPredictionParams +from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.video_object_tracking import VideoObjectTrackingPredictionParams + +__all__ = ('ImageClassificationPredictionParams', + 'ImageObjectDetectionPredictionParams', + 'ImageSegmentationPredictionParams', + 'VideoActionRecognitionPredictionParams', + 'VideoClassificationPredictionParams', + 'VideoObjectTrackingPredictionParams', +) diff --git a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed new file mode 100644 index 0000000000..acdcd7bc60 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-params package uses inline types. diff --git a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py new file mode 100644 index 0000000000..91b718b437 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from .types.image_classification import ImageClassificationPredictionParams +from .types.image_object_detection import ImageObjectDetectionPredictionParams +from .types.image_segmentation import ImageSegmentationPredictionParams +from .types.video_action_recognition import VideoActionRecognitionPredictionParams +from .types.video_classification import VideoClassificationPredictionParams +from .types.video_object_tracking import VideoObjectTrackingPredictionParams + +__all__ = ( +'ImageClassificationPredictionParams', +'ImageObjectDetectionPredictionParams', +'ImageSegmentationPredictionParams', +'VideoActionRecognitionPredictionParams', +'VideoClassificationPredictionParams', +'VideoObjectTrackingPredictionParams', +) diff --git a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_metadata.json new file mode 100644 index 0000000000..6b925dd9dc --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_metadata.json @@ -0,0 +1,7 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1", + "protoPackage": "google.cloud.aiplatform.v1beta1.schema.predict.params", + "schema": "1.0" +} diff --git a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed new file mode 100644 index 0000000000..acdcd7bc60 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-params package uses inline types. diff --git a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/services/__init__.py new file mode 100644 index 0000000000..4de65971c2 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py new file mode 100644 index 0000000000..70a92bb59c --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .image_classification import ( + ImageClassificationPredictionParams, +) +from .image_object_detection import ( + ImageObjectDetectionPredictionParams, +) +from .image_segmentation import ( + ImageSegmentationPredictionParams, +) +from .video_action_recognition import ( + VideoActionRecognitionPredictionParams, +) +from .video_classification import ( + VideoClassificationPredictionParams, +) +from .video_object_tracking import ( + VideoObjectTrackingPredictionParams, +) + +__all__ = ( + 'ImageClassificationPredictionParams', + 'ImageObjectDetectionPredictionParams', + 'ImageSegmentationPredictionParams', + 'VideoActionRecognitionPredictionParams', + 'VideoClassificationPredictionParams', + 'VideoObjectTrackingPredictionParams', +) diff --git a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py new file mode 100644 index 0000000000..67c5453a93 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.params', + manifest={ + 'ImageClassificationPredictionParams', + }, +) + + +class ImageClassificationPredictionParams(proto.Message): + r"""Prediction model parameters for Image Classification. + Attributes: + confidence_threshold (float): + The Model only returns predictions with at + least this confidence score. 
Default value is 0.0.
+        max_predictions (int):
+            The Model returns up to this many of its top
+            predictions, ranked by confidence score, per
+            instance. If this number is very high, the
+            Model may return fewer predictions. Default
+            value is 10.
+    """
+
+    confidence_threshold = proto.Field(
+        proto.FLOAT,
+        number=1,
+    )
+    max_predictions = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py
new file mode 100644
index 0000000000..baed8905ee
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1.schema.predict.params',
+    manifest={
+        'ImageObjectDetectionPredictionParams',
+    },
+)
+
+
+class ImageObjectDetectionPredictionParams(proto.Message):
+    r"""Prediction model parameters for Image Object Detection.
+    Attributes:
+        confidence_threshold (float):
+            The Model only returns predictions with at
+            least this confidence score. Default value is
+            0.0.
+        max_predictions (int):
+            The Model returns up to this many of its top
+            predictions, ranked by confidence score, per
+            instance. Note that the number of returned
+            predictions is also limited by the metadata's
+            predictionsLimit. Default value is 10.
+    """
+
+    confidence_threshold = proto.Field(
+        proto.FLOAT,
+        number=1,
+    )
+    max_predictions = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py
new file mode 100644
index 0000000000..8a5e999504
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
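
The image params messages above are plain proto-plus messages, so a caller constructs them directly and passes them as the ``parameters`` of a predict request. A minimal sketch, assuming the package staged in this diff is installed; the import path follows the ``libraryPackage`` recorded in the gapic metadata above, and the threshold and limit values are arbitrary:

.. code-block:: python

    from google.cloud.aiplatform.v1beta1.schema.predict import params_v1beta1

    # Override the documented defaults (0.0 / 10): keep only
    # predictions scoring at least 0.5, at most 5 per instance.
    params = params_v1beta1.ImageClassificationPredictionParams(
        confidence_threshold=0.5,
        max_predictions=5,
    )

    # proto-plus messages expose class-level serialization helpers.
    print(params_v1beta1.ImageClassificationPredictionParams.to_json(params))
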
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1.schema.predict.params',
+    manifest={
+        'ImageSegmentationPredictionParams',
+    },
+)
+
+
+class ImageSegmentationPredictionParams(proto.Message):
+    r"""Prediction model parameters for Image Segmentation.
+    Attributes:
+        confidence_threshold (float):
+            When the model predicts the category of the
+            pixels in the image, it will only provide
+            predictions for pixels that it is at least this
+            confident about. All other pixels will be
+            classified as background. Default value is 0.5.
+    """
+
+    confidence_threshold = proto.Field(
+        proto.FLOAT,
+        number=1,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py
new file mode 100644
index 0000000000..37a8c2bc9c
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1.schema.predict.params',
+    manifest={
+        'VideoActionRecognitionPredictionParams',
+    },
+)
+
+
+class VideoActionRecognitionPredictionParams(proto.Message):
+    r"""Prediction model parameters for Video Action Recognition.
+    Attributes:
+        confidence_threshold (float):
+            The Model only returns predictions with at
+            least this confidence score. Default value is
+            0.0.
+        max_predictions (int):
+            The Model returns up to this many of its top
+            predictions, ranked by confidence score, per
+            frame of the video. If this number is very
+            high, the Model may return fewer predictions
+            per frame. Default value is 50.
+    """
+
+    confidence_threshold = proto.Field(
+        proto.FLOAT,
+        number=1,
+    )
+    max_predictions = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py
new file mode 100644
index 0000000000..ff7bbd1b86
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py
@@ -0,0 +1,94 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1.schema.predict.params',
+    manifest={
+        'VideoClassificationPredictionParams',
+    },
+)
+
+
+class VideoClassificationPredictionParams(proto.Message):
+    r"""Prediction model parameters for Video Classification.
+    Attributes:
+        confidence_threshold (float):
+            The Model only returns predictions with at
+            least this confidence score. Default value is
+            0.0.
+        max_predictions (int):
+            The Model returns up to this many of its top
+            predictions, ranked by confidence score, per
+            instance. If this number is very high, the
+            Model may return fewer predictions. Default
+            value is 10,000.
+        segment_classification (bool):
+            Set to true to request segment-level
+            classification. Vertex AI returns labels and
+            their confidence scores for the entire time
+            segment of the video that the user specified in
+            the input instance. Default value is true.
+        shot_classification (bool):
+            Set to true to request shot-level
+            classification. Vertex AI determines the
+            boundaries for each camera shot in the entire
+            time segment of the video that the user
+            specified in the input instance. Vertex AI then
+            returns labels and their confidence scores for
+            each detected shot, along with the start and
+            end time of the shot.
+            WARNING: Model evaluation is not done for this
+            classification type; its quality depends on the
+            training data, and there are no metrics
+            provided to describe that quality.
+            Default value is false.
+        one_sec_interval_classification (bool):
+            Set to true to request classification for a
+            video at one-second intervals. Vertex AI
+            returns labels and their confidence scores for
+            each second of the entire time segment of the
+            video that the user specified in the input
+            instance.
+            WARNING: Model evaluation is not done for this
+            classification type; its quality depends on the
+            training data, and there are no metrics
+            provided to describe that quality.
Default value is false.
+    """
+
+    confidence_threshold = proto.Field(
+        proto.FLOAT,
+        number=1,
+    )
+    max_predictions = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+    segment_classification = proto.Field(
+        proto.BOOL,
+        number=3,
+    )
+    shot_classification = proto.Field(
+        proto.BOOL,
+        number=4,
+    )
+    one_sec_interval_classification = proto.Field(
+        proto.BOOL,
+        number=5,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py
new file mode 100644
index 0000000000..4e0e97f8d6
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1.schema.predict.params',
+    manifest={
+        'VideoObjectTrackingPredictionParams',
+    },
+)
+
+
+class VideoObjectTrackingPredictionParams(proto.Message):
+    r"""Prediction model parameters for Video Object Tracking.
+    Attributes:
+        confidence_threshold (float):
+            The Model only returns predictions with at
+            least this confidence score. Default value is
+            0.0.
+        max_predictions (int):
+            The Model returns up to this many of its top
+            predictions, ranked by confidence score, per
+            frame of the video. If this number is very
+            high, the Model may return fewer predictions
+            per frame. Default value is 50.
+        min_bounding_box_size (float):
+            Only bounding boxes whose shortest edge is at
+            least this long, as a fraction of the video
+            frame size, are returned. Default value is 0.0.
+    """
+
+    confidence_threshold = proto.Field(
+        proto.FLOAT,
+        number=1,
+    )
+    max_predictions = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+    min_bounding_box_size = proto.Field(
+        proto.FLOAT,
+        number=3,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/schema/predict/params/mypy.ini b/owl-bot-staging/v1beta1/schema/predict/params/mypy.ini
new file mode 100644
index 0000000000..4505b48543
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/params/mypy.ini
@@ -0,0 +1,3 @@
+[mypy]
+python_version = 3.6
+namespace_packages = True
diff --git a/owl-bot-staging/v1beta1/schema/predict/params/noxfile.py b/owl-bot-staging/v1beta1/schema/predict/params/noxfile.py
new file mode 100644
index 0000000000..8af100d51e
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/params/noxfile.py
@@ -0,0 +1,132 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
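
The same construction pattern applies to the video params above; the three booleans on ``VideoClassificationPredictionParams`` select the segment-, shot-, and one-second-interval classification modes described in its docstring. A hedged sketch, with the import path as before and arbitrary values:

.. code-block:: python

    from google.cloud.aiplatform.v1beta1.schema.predict import params_v1beta1

    # Request shot-level classification only; per the docstring,
    # shot and one-second-interval results carry no evaluation metrics.
    video_params = params_v1beta1.VideoClassificationPredictionParams(
        confidence_threshold=0.3,
        max_predictions=100,
        segment_classification=False,
        shot_classification=True,
        one_sec_interval_classification=False,
    )
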
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+import pathlib
+import shutil
+import subprocess
+import sys
+
+
+import nox  # type: ignore
+
+CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
+
+LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt"
+PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8")
+
+
+nox.options.sessions = [
+    "unit",
+    "cover",
+    "mypy",
+    "check_lower_bounds",
+    # exclude update_lower_bounds from default
+    "docs",
+]
+
+@nox.session(python=['3.6', '3.7', '3.8', '3.9'])
+def unit(session):
+    """Run the unit test suite."""
+
+    session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio')
+    session.install('-e', '.')
+
+    session.run(
+        'py.test',
+        '--quiet',
+        '--cov=google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/',
+        '--cov-config=.coveragerc',
+        '--cov-report=term',
+        '--cov-report=html',
+        os.path.join('tests', 'unit', ''.join(session.posargs))
+    )
+
+
+@nox.session(python='3.7')
+def cover(session):
+    """Run the final coverage report.
+
+    This outputs the coverage report aggregating coverage from the unit
+    test runs (not system test runs), and then erases coverage data.
+    """
+    session.install("coverage", "pytest-cov")
+    session.run("coverage", "report", "--show-missing", "--fail-under=100")
+
+    session.run("coverage", "erase")
+
+
+@nox.session(python=['3.6', '3.7'])
+def mypy(session):
+    """Run the type checker."""
+    session.install('mypy', 'types-pkg_resources')
+    session.install('.')
+    session.run(
+        'mypy',
+        '--explicit-package-bases',
+        'google',
+    )
+
+
+@nox.session
def update_lower_bounds(session):
+    """Update lower bounds in constraints.txt to match setup.py."""
+    session.install('google-cloud-testutils')
+    session.install('.')
+
+    session.run(
+        'lower-bound-checker',
+        'update',
+        '--package-name',
+        PACKAGE_NAME,
+        '--constraints-file',
+        str(LOWER_BOUND_CONSTRAINTS_FILE),
+    )
+
+
+@nox.session
+def check_lower_bounds(session):
+    """Check that lower bounds in setup.py are reflected in the constraints file."""
+    session.install('google-cloud-testutils')
+    session.install('.')
+
+    session.run(
+        'lower-bound-checker',
+        'check',
+        '--package-name',
+        PACKAGE_NAME,
+        '--constraints-file',
+        str(LOWER_BOUND_CONSTRAINTS_FILE),
+    )
+
+@nox.session(python='3.6')
+def docs(session):
+    """Build the docs for this library."""
+
+    session.install("-e", ".")
+    session.install("sphinx<3.0.0", "alabaster", "recommonmark")
+
+    shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
+    session.run(
+        "sphinx-build",
+        "-W",  # warnings as errors
+        "-T",  # show full traceback on exception
+        "-N",  # no colors
+        "-b",
+        "html",
+        "-d",
+        os.path.join("docs", "_build", "doctrees", ""),
+        os.path.join("docs", ""),
+        os.path.join("docs", "_build", "html", ""),
+    )
diff --git a/owl-bot-staging/v1beta1/schema/predict/params/scripts/fixup_params_v1beta1_keywords.py b/owl-bot-staging/v1beta1/schema/predict/params/scripts/fixup_params_v1beta1_keywords.py
new file mode 100644
index 0000000000..17915abed8
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/params/scripts/fixup_params_v1beta1_keywords.py
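
The noxfile above defines the package's automation sessions: unit tests across four interpreters, a coverage gate, mypy, lower-bound checks, and a docs build. Invocation is the standard nox CLI, where parametrized sessions are addressed as ``name-python``:

.. code-block:: console

    nox                 # run the default session list
    nox -s unit-3.8     # unit tests on Python 3.8 only
    nox -s docs         # Sphinx docs build
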
@@ -0,0 +1,175 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class paramsCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=paramsCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. 
+ updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the params client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. +""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta1/schema/predict/params/setup.py b/owl-bot-staging/v1beta1/schema/predict/params/setup.py new file mode 100644 index 0000000000..573175f19b --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/params/setup.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
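
The fixup script above rewrites positional client-method calls into the request-keyword form. For this params package its ``METHOD_TO_PARAMS`` table is empty, so the transformer is effectively a no-op here; in packages with real RPCs the rewrite looks roughly like the following sketch, where ``create_widget`` and its arguments are hypothetical:

.. code-block:: python

    # Before: positional arguments, control params as keywords.
    client.create_widget(parent, widget, timeout=30)

    # After: a single request dict; retry/timeout/metadata stay keywords.
    client.create_widget(request={'parent': parent, 'widget': widget}, timeout=30)

It is run against an input tree and an existing, empty output directory, per its argparse options:

.. code-block:: console

    python scripts/fixup_params_v1beta1_keywords.py --input-directory src/ --output-directory fixed/
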
+# +import io +import os +import setuptools # type: ignore + +version = '0.1.0' + +package_root = os.path.abspath(os.path.dirname(__file__)) + +readme_filename = os.path.join(package_root, 'README.rst') +with io.open(readme_filename, encoding='utf-8') as readme_file: + readme = readme_file.read() + +setuptools.setup( + name='google-cloud-aiplatform-v1beta1-schema-predict-params', + version=version, + long_description=readme, + packages=setuptools.PEP420PackageFinder.find(), + namespace_packages=('google', 'google.cloud', 'google.cloud.aiplatform', 'google.cloud.aiplatform.v1beta1', 'google.cloud.aiplatform.v1beta1.schema', 'google.cloud.aiplatform.v1beta1.schema.predict'), + platforms='Posix; MacOS X; Windows', + include_package_data=True, + install_requires=( + 'google-api-core[grpc] >= 1.27.0, < 2.0.0dev', + 'libcst >= 0.2.5', + 'proto-plus >= 1.15.0', + 'packaging >= 14.3', ), + python_requires='>=3.6', + classifiers=[ + 'Development Status :: 3 - Alpha', + 'Intended Audience :: Developers', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Topic :: Internet', + 'Topic :: Software Development :: Libraries :: Python Modules', + ], + zip_safe=False, +) diff --git a/owl-bot-staging/v1beta1/schema/predict/params/tests/__init__.py b/owl-bot-staging/v1beta1/schema/predict/params/tests/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/params/tests/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/schema/predict/params/tests/unit/__init__.py b/owl-bot-staging/v1beta1/schema/predict/params/tests/unit/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/params/tests/unit/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/owl-bot-staging/v1beta1/schema/predict/params/tests/unit/gapic/__init__.py b/owl-bot-staging/v1beta1/schema/predict/params/tests/unit/gapic/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/params/tests/unit/gapic/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/schema/predict/params/tests/unit/gapic/params_v1beta1/__init__.py b/owl-bot-staging/v1beta1/schema/predict/params/tests/unit/gapic/params_v1beta1/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/params/tests/unit/gapic/params_v1beta1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/.coveragerc b/owl-bot-staging/v1beta1/schema/predict/prediction/.coveragerc new file mode 100644 index 0000000000..1e0399e149 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/prediction/.coveragerc @@ -0,0 +1,17 @@ +[run] +branch = True + +[report] +show_missing = True +omit = + google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py +exclude_lines = + # Re-enable the standard pragma + pragma: NO COVER + # Ignore debug-only repr + def __repr__ + # Ignore pkg_resources exceptions. + # This is added at the module level as a safeguard for if someone + # generates the code and tries to run it without pip installing. This + # makes it virtually impossible to test properly. 
+    except pkg_resources.DistributionNotFound
diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/MANIFEST.in b/owl-bot-staging/v1beta1/schema/predict/prediction/MANIFEST.in
new file mode 100644
index 0000000000..0a3bcaf660
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/prediction/MANIFEST.in
@@ -0,0 +1,2 @@
+recursive-include google/cloud/aiplatform/v1beta1/schema/predict/prediction *.py
+recursive-include google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1 *.py
diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/README.rst b/owl-bot-staging/v1beta1/schema/predict/prediction/README.rst
new file mode 100644
index 0000000000..6c2ca66e04
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/prediction/README.rst
@@ -0,0 +1,49 @@
+Python Client for Google Cloud Aiplatform V1beta1 Schema Predict Prediction API
+================================================================================
+
+Quick Start
+-----------
+
+In order to use this library, you first need to go through the following steps:
+
+1. `Select or create a Cloud Platform project.`_
+2. `Enable billing for your project.`_
+3. Enable the Google Cloud Aiplatform V1beta1 Schema Predict Prediction API.
+4. `Setup Authentication.`_
+
+.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project
+.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project
+.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html
+
+Installation
+~~~~~~~~~~~~
+
+Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to
+create isolated Python environments. The basic problem it addresses is one of
+dependencies and versions, and indirectly permissions.
+
+With `virtualenv`_, it's possible to install this library without needing system
+install permissions, and without clashing with the installed system
+dependencies.
+
+.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/
+
+
+Mac/Linux
+^^^^^^^^^
+
+.. code-block:: console
+
+    python3 -m venv <your-env>
+    source <your-env>/bin/activate
+    <your-env>/bin/pip install /path/to/library
+
+
+Windows
+^^^^^^^
+
+.. code-block:: console
+
+    python3 -m venv <your-env>
+    <your-env>\Scripts\activate
+    <your-env>\Scripts\pip.exe install \path\to\library
diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/docs/conf.py b/owl-bot-staging/v1beta1/schema/predict/prediction/docs/conf.py
new file mode 100644
index 0000000000..f0ddfdbe71
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/prediction/docs/conf.py
@@ -0,0 +1,376 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+# google-cloud-aiplatform-v1beta1-schema-predict-prediction documentation build configuration file
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.

+import sys
+import os
+import shlex
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath(".."))
+
+__version__ = "0.1.0"
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+needs_sphinx = "1.6.3"
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    "sphinx.ext.autodoc",
+    "sphinx.ext.autosummary",
+    "sphinx.ext.intersphinx",
+    "sphinx.ext.coverage",
+    "sphinx.ext.napoleon",
+    "sphinx.ext.todo",
+    "sphinx.ext.viewcode",
+]
+
+# autodoc/autosummary flags
+autoclass_content = "both"
+autodoc_default_flags = ["members"]
+autosummary_generate = True
+
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ["_templates"]
+
+# Allow markdown includes (so releases.md can include CHANGELOG.md)
+# http://www.sphinx-doc.org/en/master/markdown.html
+source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffixes as a list of strings:
+source_suffix = [".rst", ".md"]
+
+# The encoding of source files.
+# source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = "index"
+
+# General information about the project.
+project = u"google-cloud-aiplatform-v1beta1-schema-predict-prediction"
+copyright = u"2020, Google, LLC"
+author = u"Google APIs"  # TODO: autogenerate this bit
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The full version, including alpha/beta/rc tags.
+release = __version__
+# The short X.Y version.
+version = ".".join(release.split(".")[0:2])
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+# today = ''
+# Else, today_fmt is used as the format for a strftime call.
+# today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ["_build"]
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+# default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+# add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+# add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+# show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = "sphinx"
+
+# A list of ignored prefixes for module index sorting.
+# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = "alabaster" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +html_theme_options = { + "description": "Google Cloud Aiplatform V1beta1 Schema Predict Client Libraries for Python", + "github_user": "googleapis", + "github_repo": "google-cloud-python", + "github_banner": True, + "font_family": "'Roboto', Georgia, sans", + "head_font_family": "'Roboto', Georgia, serif", + "code_font_family": "'Roboto Mono', 'Consolas', monospace", +} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +# html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). 
+# html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' +# html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# Now only 'ja' uses this config value +# html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +# html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. +htmlhelp_basename = "google-cloud-aiplatform-v1beta1-schema-predict-prediction-doc" + +# -- Options for warnings ------------------------------------------------------ + + +suppress_warnings = [ + # Temporarily suppress this to avoid "more than one target found for + # cross-reference" warning, which are intractable for us to avoid while in + # a mono-repo. + # See https://github.com/sphinx-doc/sphinx/blob + # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 + "ref.python" +] + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + # 'preamble': '', + # Latex figure (float) alignment + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ( + master_doc, + "google-cloud-aiplatform-v1beta1-schema-predict-prediction.tex", + u"google-cloud-aiplatform-v1beta1-schema-predict-prediction Documentation", + author, + "manual", + ) +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ( + master_doc, + "google-cloud-aiplatform-v1beta1-schema-predict-prediction", + u"Google Cloud Aiplatform V1beta1 Schema Predict Prediction Documentation", + [author], + 1, + ) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. 
List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + master_doc, + "google-cloud-aiplatform-v1beta1-schema-predict-prediction", + u"google-cloud-aiplatform-v1beta1-schema-predict-prediction Documentation", + author, + "google-cloud-aiplatform-v1beta1-schema-predict-prediction", + "GAPIC library for Google Cloud Aiplatform V1beta1 Schema Predict Prediction API", + "APIs", + ) +] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# texinfo_no_detailmenu = False + + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = { + "python": ("http://python.readthedocs.org/en/latest/", None), + "gax": ("https://gax-python.readthedocs.org/en/latest/", None), + "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), + "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), + "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), + "grpc": ("https://grpc.io/grpc/python/", None), + "requests": ("http://requests.kennethreitz.org/en/stable/", None), + "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), + "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), +} + + +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = True +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = True diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/docs/index.rst b/owl-bot-staging/v1beta1/schema/predict/prediction/docs/index.rst new file mode 100644 index 0000000000..7fc86a6ed5 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/prediction/docs/index.rst @@ -0,0 +1,7 @@ +API Reference +------------- +.. toctree:: + :maxdepth: 2 + + prediction_v1beta1/services + prediction_v1beta1/types diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/docs/prediction_v1beta1/services.rst b/owl-bot-staging/v1beta1/schema/predict/prediction/docs/prediction_v1beta1/services.rst new file mode 100644 index 0000000000..6de5e17520 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/prediction/docs/prediction_v1beta1/services.rst @@ -0,0 +1,4 @@ +Services for Google Cloud Aiplatform V1beta1 Schema Predict Prediction v1beta1 API +================================================================================== +.. toctree:: + :maxdepth: 2 diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/docs/prediction_v1beta1/types.rst b/owl-bot-staging/v1beta1/schema/predict/prediction/docs/prediction_v1beta1/types.rst new file mode 100644 index 0000000000..b14182d6d7 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/prediction/docs/prediction_v1beta1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Aiplatform V1beta1 Schema Predict Prediction v1beta1 API +=============================================================================== + +.. 
automodule:: google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py new file mode 100644 index 0000000000..0b54451ca0 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.classification import ClassificationPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.image_object_detection import ImageObjectDetectionPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.image_segmentation import ImageSegmentationPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.tabular_classification import TabularClassificationPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.tabular_regression import TabularRegressionPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.text_extraction import TextExtractionPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.text_sentiment import TextSentimentPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.time_series_forecasting import TimeSeriesForecastingPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.video_action_recognition import VideoActionRecognitionPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.video_classification import VideoClassificationPredictionResult +from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.video_object_tracking import VideoObjectTrackingPredictionResult + +__all__ = ('ClassificationPredictionResult', + 'ImageObjectDetectionPredictionResult', + 'ImageSegmentationPredictionResult', + 'TabularClassificationPredictionResult', + 'TabularRegressionPredictionResult', + 'TextExtractionPredictionResult', + 'TextSentimentPredictionResult', + 'TimeSeriesForecastingPredictionResult', + 'VideoActionRecognitionPredictionResult', + 'VideoClassificationPredictionResult', + 'VideoObjectTrackingPredictionResult', +) diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed new file mode 100644 index 0000000000..8cf97d7107 --- /dev/null +++ 
b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-prediction package uses inline types. diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py new file mode 100644 index 0000000000..495759c24b --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from .types.classification import ClassificationPredictionResult +from .types.image_object_detection import ImageObjectDetectionPredictionResult +from .types.image_segmentation import ImageSegmentationPredictionResult +from .types.tabular_classification import TabularClassificationPredictionResult +from .types.tabular_regression import TabularRegressionPredictionResult +from .types.text_extraction import TextExtractionPredictionResult +from .types.text_sentiment import TextSentimentPredictionResult +from .types.time_series_forecasting import TimeSeriesForecastingPredictionResult +from .types.video_action_recognition import VideoActionRecognitionPredictionResult +from .types.video_classification import VideoClassificationPredictionResult +from .types.video_object_tracking import VideoObjectTrackingPredictionResult + +__all__ = ( +'ClassificationPredictionResult', +'ImageObjectDetectionPredictionResult', +'ImageSegmentationPredictionResult', +'TabularClassificationPredictionResult', +'TabularRegressionPredictionResult', +'TextExtractionPredictionResult', +'TextSentimentPredictionResult', +'TimeSeriesForecastingPredictionResult', +'VideoActionRecognitionPredictionResult', +'VideoClassificationPredictionResult', +'VideoObjectTrackingPredictionResult', +) diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_metadata.json new file mode 100644 index 0000000000..99d3dc6402 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_metadata.json @@ -0,0 +1,7 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1", + "protoPackage": "google.cloud.aiplatform.v1beta1.schema.predict.prediction", + "schema": "1.0" +} diff --git 
a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed new file mode 100644 index 0000000000..8cf97d7107 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-aiplatform-v1beta1-schema-predict-prediction package uses inline types. diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/services/__init__.py new file mode 100644 index 0000000000..4de65971c2 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py new file mode 100644 index 0000000000..f3b70f66dd --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+from .classification import (
+    ClassificationPredictionResult,
+)
+from .image_object_detection import (
+    ImageObjectDetectionPredictionResult,
+)
+from .image_segmentation import (
+    ImageSegmentationPredictionResult,
+)
+from .tabular_classification import (
+    TabularClassificationPredictionResult,
+)
+from .tabular_regression import (
+    TabularRegressionPredictionResult,
+)
+from .text_extraction import (
+    TextExtractionPredictionResult,
+)
+from .text_sentiment import (
+    TextSentimentPredictionResult,
+)
+from .time_series_forecasting import (
+    TimeSeriesForecastingPredictionResult,
+)
+from .video_action_recognition import (
+    VideoActionRecognitionPredictionResult,
+)
+from .video_classification import (
+    VideoClassificationPredictionResult,
+)
+from .video_object_tracking import (
+    VideoObjectTrackingPredictionResult,
+)
+
+__all__ = (
+    'ClassificationPredictionResult',
+    'ImageObjectDetectionPredictionResult',
+    'ImageSegmentationPredictionResult',
+    'TabularClassificationPredictionResult',
+    'TabularRegressionPredictionResult',
+    'TextExtractionPredictionResult',
+    'TextSentimentPredictionResult',
+    'TimeSeriesForecastingPredictionResult',
+    'VideoActionRecognitionPredictionResult',
+    'VideoClassificationPredictionResult',
+    'VideoObjectTrackingPredictionResult',
+)
diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py
new file mode 100644
index 0000000000..ecd16c7250
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1.schema.predict.prediction',
+    manifest={
+        'ClassificationPredictionResult',
+    },
+)
+
+
+class ClassificationPredictionResult(proto.Message):
+    r"""Prediction output format for Image and Text Classification.
+    Attributes:
+        ids (Sequence[int]):
+            The resource IDs of the AnnotationSpecs that
+            had been identified.
+        display_names (Sequence[str]):
+            The display names of the AnnotationSpecs that
+            had been identified; order matches the IDs.
+        confidences (Sequence[float]):
+            The Model's confidence in the correctness of
+            each predicted ID; a higher value means higher
+            confidence. Order matches the IDs.
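
Because ``ids``, ``display_names``, and ``confidences`` are order-aligned repeated fields, a consumer typically zips them back together. A small sketch with made-up values, assuming the prediction package staged in this diff is installed:

.. code-block:: python

    from google.cloud.aiplatform.v1beta1.schema.predict import prediction_v1beta1

    result = prediction_v1beta1.ClassificationPredictionResult(
        ids=[123, 456],
        display_names=['cat', 'dog'],
        confidences=[0.92, 0.05],
    )
    # The three repeated fields are parallel arrays; pair them up.
    for spec_id, name, score in zip(
            result.ids, result.display_names, result.confidences):
        print(f'{name} (AnnotationSpec {spec_id}): {score:.2f}')
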
+ """ + + ids = proto.RepeatedField( + proto.INT64, + number=1, + ) + display_names = proto.RepeatedField( + proto.STRING, + number=2, + ) + confidences = proto.RepeatedField( + proto.FLOAT, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py new file mode 100644 index 0000000000..d787871e99 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import struct_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'ImageObjectDetectionPredictionResult', + }, +) + + +class ImageObjectDetectionPredictionResult(proto.Message): + r"""Prediction output format for Image Object Detection. + Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified, ordered by the confidence + score descendingly. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. + confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the Ids. + bboxes (Sequence[google.protobuf.struct_pb2.ListValue]): + Bounding boxes, i.e. the rectangles over the image, that + pinpoint the found AnnotationSpecs. Given in order that + matches the IDs. Each bounding box is an array of 4 numbers + ``xMin``, ``xMax``, ``yMin``, and ``yMax``, which represent + the extremal coordinates of the box. They are relative to + the image size, and the point 0,0 is in the top left of the + image. 
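+            For example, a box covering the left half of
+            the image would be given as ``[0.0, 0.5, 0.0,
+            1.0]``, i.e. ``xMin=0.0``, ``xMax=0.5``,
+            ``yMin=0.0``, ``yMax=1.0``.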
+    """
+
+    ids = proto.RepeatedField(
+        proto.INT64,
+        number=1,
+    )
+    display_names = proto.RepeatedField(
+        proto.STRING,
+        number=2,
+    )
+    confidences = proto.RepeatedField(
+        proto.FLOAT,
+        number=3,
+    )
+    bboxes = proto.RepeatedField(
+        proto.MESSAGE,
+        number=4,
+        message=struct_pb2.ListValue,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py
new file mode 100644
index 0000000000..92cc20720c
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1.schema.predict.prediction',
+    manifest={
+        'ImageSegmentationPredictionResult',
+    },
+)
+
+
+class ImageSegmentationPredictionResult(proto.Message):
+    r"""Prediction output format for Image Segmentation.
+    Attributes:
+        category_mask (str):
+            A PNG image where each pixel in the mask
+            represents the category to which the pixel in
+            the original image was predicted to belong.
+            The size of this image will be the same as the
+            original image. The mapping between the
+            AnnotationSpec and the color can be found in
+            the model's metadata. The model will choose
+            the most likely category and if none of the
+            categories reach the confidence threshold, the
+            pixel will be marked as background.
+        confidence_mask (str):
+            A one-channel image which is encoded as an
+            8-bit lossless PNG. The size of the image will
+            be the same as the original image. For a
+            specific pixel, darker color means less
+            confidence in correctness of the category in
+            the categoryMask for the corresponding pixel.
+            Black means no confidence and white means
+            complete confidence.
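+
+    For example, with a hypothetical confidence threshold of 0.5,
+    a pixel whose most likely category only reaches a confidence
+    of 0.3 would be marked as background in the category mask.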
+ """ + + category_mask = proto.Field( + proto.STRING, + number=1, + ) + confidence_mask = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py new file mode 100644 index 0000000000..8a437022fd --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'TabularClassificationPredictionResult', + }, +) + + +class TabularClassificationPredictionResult(proto.Message): + r"""Prediction output format for Tabular Classification. + Attributes: + classes (Sequence[str]): + The name of the classes being classified, + contains all possible values of the target + column. + scores (Sequence[float]): + The model's confidence in each class being + correct, higher value means higher confidence. + The N-th score corresponds to the N-th class in + classes. + """ + + classes = proto.RepeatedField( + proto.STRING, + number=1, + ) + scores = proto.RepeatedField( + proto.FLOAT, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py new file mode 100644 index 0000000000..a49f6f55ce --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'TabularRegressionPredictionResult', + }, +) + + +class TabularRegressionPredictionResult(proto.Message): + r"""Prediction output format for Tabular Regression. + Attributes: + value (float): + The regression value. + lower_bound (float): + The lower bound of the prediction interval. + upper_bound (float): + The upper bound of the prediction interval. + """ + + value = proto.Field( + proto.FLOAT, + number=1, + ) + lower_bound = proto.Field( + proto.FLOAT, + number=2, + ) + upper_bound = proto.Field( + proto.FLOAT, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py new file mode 100644 index 0000000000..a92d9caefa --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'TextExtractionPredictionResult', + }, +) + + +class TextExtractionPredictionResult(proto.Message): + r"""Prediction output format for Text Extraction. + Attributes: + ids (Sequence[int]): + The resource IDs of the AnnotationSpecs that + had been identified, ordered by the confidence + score descendingly. + display_names (Sequence[str]): + The display names of the AnnotationSpecs that + had been identified, order matches the IDs. + text_segment_start_offsets (Sequence[int]): + The start offsets, inclusive, of the text + segment in which the AnnotationSpec has been + identified. Expressed as a zero-based number of + characters as measured from the start of the + text snippet. + text_segment_end_offsets (Sequence[int]): + The end offsets, inclusive, of the text + segment in which the AnnotationSpec has been + identified. Expressed as a zero-based number of + characters as measured from the start of the + text snippet. + confidences (Sequence[float]): + The Model's confidences in correctness of the + predicted IDs, higher value means higher + confidence. Order matches the Ids. 
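+
+    For example, in the snippet ``"Hello world"`` an extracted
+    segment ``world`` would have a start offset of 6 and an end
+    offset of 10, both zero-based and inclusive.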
+    """
+
+    ids = proto.RepeatedField(
+        proto.INT64,
+        number=1,
+    )
+    display_names = proto.RepeatedField(
+        proto.STRING,
+        number=2,
+    )
+    text_segment_start_offsets = proto.RepeatedField(
+        proto.INT64,
+        number=3,
+    )
+    text_segment_end_offsets = proto.RepeatedField(
+        proto.INT64,
+        number=4,
+    )
+    confidences = proto.RepeatedField(
+        proto.FLOAT,
+        number=5,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py
new file mode 100644
index 0000000000..4967b02aae
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1.schema.predict.prediction',
+    manifest={
+        'TextSentimentPredictionResult',
+    },
+)
+
+
+class TextSentimentPredictionResult(proto.Message):
+    r"""Prediction output format for Text Sentiment.
+    Attributes:
+        sentiment (int):
+            The integer sentiment label, between 0
+            (inclusive) and sentimentMax (inclusive),
+            where 0 maps to the least positive sentiment
+            and sentimentMax maps to the most positive
+            one. The higher the score, the more positive
+            the sentiment in the text snippet. Note:
+            sentimentMax is an integer value between 1
+            (inclusive) and 10 (inclusive).
+    """
+
+    sentiment = proto.Field(
+        proto.INT32,
+        number=1,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py
new file mode 100644
index 0000000000..67a3cd9dff
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1.schema.predict.prediction',
+    manifest={
+        'TimeSeriesForecastingPredictionResult',
+    },
+)
+
+
+class TimeSeriesForecastingPredictionResult(proto.Message):
+    r"""Prediction output format for Time Series Forecasting.
+    Attributes:
+        value (float):
+            The regression value.
+    """
+
+    value = proto.Field(
+        proto.FLOAT,
+        number=1,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py
new file mode 100644
index 0000000000..bc53328da4
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.protobuf import duration_pb2  # type: ignore
+from google.protobuf import wrappers_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1.schema.predict.prediction',
+    manifest={
+        'VideoActionRecognitionPredictionResult',
+    },
+)
+
+
+class VideoActionRecognitionPredictionResult(proto.Message):
+    r"""Prediction output format for Video Action Recognition.
+    Attributes:
+        id (str):
+            The resource ID of the AnnotationSpec that
+            had been identified.
+        display_name (str):
+            The display name of the AnnotationSpec that
+            had been identified.
+        time_segment_start (google.protobuf.duration_pb2.Duration):
+            The beginning, inclusive, of the video's time
+            segment in which the AnnotationSpec has been
+            identified. Expressed as a number of seconds as
+            measured from the start of the video, with
+            fractions up to a microsecond precision, and
+            with "s" appended at the end.
+        time_segment_end (google.protobuf.duration_pb2.Duration):
+            The end, exclusive, of the video's time
+            segment in which the AnnotationSpec has been
+            identified. Expressed as a number of seconds as
+            measured from the start of the video, with
+            fractions up to a microsecond precision, and
+            with "s" appended at the end.
+        confidence (google.protobuf.wrappers_pb2.FloatValue):
+            The Model's confidence in correctness of this
+            prediction, higher value means higher
+            confidence.
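+
+    For example, an action recognized between 1.5 and 2.75
+    seconds into the video would have its time segment expressed
+    as ``"1.5s"`` and ``"2.75s"``, following the Duration
+    encoding described above.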
+ """ + + id = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.MESSAGE, + number=4, + message=duration_pb2.Duration, + ) + time_segment_end = proto.Field( + proto.MESSAGE, + number=5, + message=duration_pb2.Duration, + ) + confidence = proto.Field( + proto.MESSAGE, + number=6, + message=wrappers_pb2.FloatValue, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py new file mode 100644 index 0000000000..95439add5e --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py @@ -0,0 +1,102 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import wrappers_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', + manifest={ + 'VideoClassificationPredictionResult', + }, +) + + +class VideoClassificationPredictionResult(proto.Message): + r"""Prediction output format for Video Classification. + Attributes: + id (str): + The resource ID of the AnnotationSpec that + had been identified. + display_name (str): + The display name of the AnnotationSpec that + had been identified. + type_ (str): + The type of the prediction. The requested + types can be configured via parameters. This + will be one of - segment-classification + - shot-classification + - one-sec-interval-classification + time_segment_start (google.protobuf.duration_pb2.Duration): + The beginning, inclusive, of the video's time + segment in which the AnnotationSpec has been + identified. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. Note that for + 'segment-classification' prediction type, this + equals the original 'timeSegmentStart' from the + input instance, for other types it is the start + of a shot or a 1 second interval respectively. + time_segment_end (google.protobuf.duration_pb2.Duration): + The end, exclusive, of the video's time + segment in which the AnnotationSpec has been + identified. Expressed as a number of seconds as + measured from the start of the video, with + fractions up to a microsecond precision, and + with "s" appended at the end. Note that for + 'segment-classification' prediction type, this + equals the original 'timeSegmentEnd' from the + input instance, for other types it is the end of + a shot or a 1 second interval respectively. 
+        confidence (google.protobuf.wrappers_pb2.FloatValue):
+            The Model's confidence in correctness of this
+            prediction, higher value means higher
+            confidence.
+    """
+
+    id = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    display_name = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    type_ = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    time_segment_start = proto.Field(
+        proto.MESSAGE,
+        number=4,
+        message=duration_pb2.Duration,
+    )
+    time_segment_end = proto.Field(
+        proto.MESSAGE,
+        number=5,
+        message=duration_pb2.Duration,
+    )
+    confidence = proto.Field(
+        proto.MESSAGE,
+        number=6,
+        message=wrappers_pb2.FloatValue,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py
new file mode 100644
index 0000000000..34cf7ab1b9
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py
@@ -0,0 +1,144 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto  # type: ignore
+
+from google.protobuf import duration_pb2  # type: ignore
+from google.protobuf import wrappers_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.cloud.aiplatform.v1beta1.schema.predict.prediction',
+    manifest={
+        'VideoObjectTrackingPredictionResult',
+    },
+)
+
+
+class VideoObjectTrackingPredictionResult(proto.Message):
+    r"""Prediction output format for Video Object Tracking.
+    Attributes:
+        id (str):
+            The resource ID of the AnnotationSpec that
+            had been identified.
+        display_name (str):
+            The display name of the AnnotationSpec that
+            had been identified.
+        time_segment_start (google.protobuf.duration_pb2.Duration):
+            The beginning, inclusive, of the video's time
+            segment in which the object instance has been
+            detected. Expressed as a number of seconds as
+            measured from the start of the video, with
+            fractions up to a microsecond precision, and
+            with "s" appended at the end.
+        time_segment_end (google.protobuf.duration_pb2.Duration):
+            The end, inclusive, of the video's time
+            segment in which the object instance has been
+            detected. Expressed as a number of seconds as
+            measured from the start of the video, with
+            fractions up to a microsecond precision, and
+            with "s" appended at the end.
+        confidence (google.protobuf.wrappers_pb2.FloatValue):
+            The Model's confidence in correctness of this
+            prediction, higher value means higher
+            confidence.
+        frames (Sequence[google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.VideoObjectTrackingPredictionResult.Frame]):
+            All of the frames of the video in which a
+            single object instance has been detected. The
+            bounding boxes in the frames identify the same
+            object.
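+
+    For example, an object tracked from the start of the video
+    to 2 seconds in would have a time segment of ``"0s"`` to
+    ``"2s"``, with one ``Frame`` entry, carrying its own bounding
+    box, for each timestamp at which the object was detected.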
+ """ + + class Frame(proto.Message): + r"""The fields ``xMin``, ``xMax``, ``yMin``, and ``yMax`` refer to a + bounding box, i.e. the rectangle over the video frame pinpointing + the found AnnotationSpec. The coordinates are relative to the frame + size, and the point 0,0 is in the top left of the frame. + + Attributes: + time_offset (google.protobuf.duration_pb2.Duration): + A time (frame) of a video in which the object + has been detected. Expressed as a number of + seconds as measured from the start of the video, + with fractions up to a microsecond precision, + and with "s" appended at the end. + x_min (google.protobuf.wrappers_pb2.FloatValue): + The leftmost coordinate of the bounding box. + x_max (google.protobuf.wrappers_pb2.FloatValue): + The rightmost coordinate of the bounding box. + y_min (google.protobuf.wrappers_pb2.FloatValue): + The topmost coordinate of the bounding box. + y_max (google.protobuf.wrappers_pb2.FloatValue): + The bottommost coordinate of the bounding + box. + """ + + time_offset = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + x_min = proto.Field( + proto.MESSAGE, + number=2, + message=wrappers_pb2.FloatValue, + ) + x_max = proto.Field( + proto.MESSAGE, + number=3, + message=wrappers_pb2.FloatValue, + ) + y_min = proto.Field( + proto.MESSAGE, + number=4, + message=wrappers_pb2.FloatValue, + ) + y_max = proto.Field( + proto.MESSAGE, + number=5, + message=wrappers_pb2.FloatValue, + ) + + id = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + time_segment_start = proto.Field( + proto.MESSAGE, + number=3, + message=duration_pb2.Duration, + ) + time_segment_end = proto.Field( + proto.MESSAGE, + number=4, + message=duration_pb2.Duration, + ) + confidence = proto.Field( + proto.MESSAGE, + number=5, + message=wrappers_pb2.FloatValue, + ) + frames = proto.RepeatedField( + proto.MESSAGE, + number=6, + message=Frame, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/mypy.ini b/owl-bot-staging/v1beta1/schema/predict/prediction/mypy.ini new file mode 100644 index 0000000000..4505b48543 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/prediction/mypy.ini @@ -0,0 +1,3 @@ +[mypy] +python_version = 3.6 +namespace_packages = True diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/noxfile.py b/owl-bot-staging/v1beta1/schema/predict/prediction/noxfile.py new file mode 100644 index 0000000000..112aeac644 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/prediction/noxfile.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import os
+import pathlib
+import shutil
+import subprocess
+import sys
+
+
+import nox  # type: ignore
+
+CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
+
+LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt"
+PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8").strip()
+
+
+nox.options.sessions = [
+    "unit",
+    "cover",
+    "mypy",
+    "check_lower_bounds",
+    # exclude update_lower_bounds from default
+    "docs",
+]
+
+@nox.session(python=['3.6', '3.7', '3.8', '3.9'])
+def unit(session):
+    """Run the unit test suite."""
+
+    session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio')
+    session.install('-e', '.')
+
+    session.run(
+        'py.test',
+        '--quiet',
+        '--cov=google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/',
+        '--cov-config=.coveragerc',
+        '--cov-report=term',
+        '--cov-report=html',
+        os.path.join('tests', 'unit', ''.join(session.posargs))
+    )
+
+
+@nox.session(python='3.7')
+def cover(session):
+    """Run the final coverage report.
+    This outputs the coverage report aggregating coverage from the unit
+    test runs (not system test runs), and then erases coverage data.
+    """
+    session.install("coverage", "pytest-cov")
+    session.run("coverage", "report", "--show-missing", "--fail-under=100")
+
+    session.run("coverage", "erase")
+
+
+@nox.session(python=['3.6', '3.7'])
+def mypy(session):
+    """Run the type checker."""
+    session.install('mypy', 'types-pkg_resources')
+    session.install('.')
+    session.run(
+        'mypy',
+        '--explicit-package-bases',
+        'google',
+    )
+
+
+@nox.session
+def update_lower_bounds(session):
+    """Update lower bounds in constraints.txt to match setup.py"""
+    session.install('google-cloud-testutils')
+    session.install('.')
+
+    session.run(
+        'lower-bound-checker',
+        'update',
+        '--package-name',
+        PACKAGE_NAME,
+        '--constraints-file',
+        str(LOWER_BOUND_CONSTRAINTS_FILE),
+    )
+
+
+@nox.session
+def check_lower_bounds(session):
+    """Check lower bounds in setup.py are reflected in constraints file"""
+    session.install('google-cloud-testutils')
+    session.install('.')
+
+    session.run(
+        'lower-bound-checker',
+        'check',
+        '--package-name',
+        PACKAGE_NAME,
+        '--constraints-file',
+        str(LOWER_BOUND_CONSTRAINTS_FILE),
+    )
+
+@nox.session(python='3.6')
+def docs(session):
+    """Build the docs for this library."""
+
+    session.install("-e", ".")
+    session.install("sphinx<3.0.0", "alabaster", "recommonmark")
+
+    shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
+    session.run(
+        "sphinx-build",
+        "-W",  # warnings as errors
+        "-T",  # show full traceback on exception
+        "-N",  # no colors
+        "-b",
+        "html",
+        "-d",
+        os.path.join("docs", "_build", "doctrees", ""),
+        os.path.join("docs", ""),
+        os.path.join("docs", "_build", "html", ""),
+    )
diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/scripts/fixup_prediction_v1beta1_keywords.py b/owl-bot-staging/v1beta1/schema/predict/prediction/scripts/fixup_prediction_v1beta1_keywords.py
new file mode 100644
index 0000000000..7860bb63cf
--- /dev/null
+++ b/owl-bot-staging/v1beta1/schema/predict/prediction/scripts/fixup_prediction_v1beta1_keywords.py
@@ -0,0 +1,175 @@
+#! /usr/bin/env python3
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class predictionCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=predictionCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the prediction client library. 
+ +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. +""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/setup.py b/owl-bot-staging/v1beta1/schema/predict/prediction/setup.py new file mode 100644 index 0000000000..4044a5396a --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/prediction/setup.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import io +import os +import setuptools # type: ignore + +version = '0.1.0' + +package_root = os.path.abspath(os.path.dirname(__file__)) + +readme_filename = os.path.join(package_root, 'README.rst') +with io.open(readme_filename, encoding='utf-8') as readme_file: + readme = readme_file.read() + +setuptools.setup( + name='google-cloud-aiplatform-v1beta1-schema-predict-prediction', + version=version, + long_description=readme, + packages=setuptools.PEP420PackageFinder.find(), + namespace_packages=('google', 'google.cloud', 'google.cloud.aiplatform', 'google.cloud.aiplatform.v1beta1', 'google.cloud.aiplatform.v1beta1.schema', 'google.cloud.aiplatform.v1beta1.schema.predict'), + platforms='Posix; MacOS X; Windows', + include_package_data=True, + install_requires=( + 'google-api-core[grpc] >= 1.27.0, < 2.0.0dev', + 'libcst >= 0.2.5', + 'proto-plus >= 1.15.0', + 'packaging >= 14.3', ), + python_requires='>=3.6', + classifiers=[ + 'Development Status :: 3 - Alpha', + 'Intended Audience :: Developers', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Topic :: Internet', + 'Topic :: Software Development :: Libraries :: Python Modules', + ], + zip_safe=False, +) diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/tests/__init__.py b/owl-bot-staging/v1beta1/schema/predict/prediction/tests/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/prediction/tests/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/tests/unit/__init__.py b/owl-bot-staging/v1beta1/schema/predict/prediction/tests/unit/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/prediction/tests/unit/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/tests/unit/gapic/__init__.py b/owl-bot-staging/v1beta1/schema/predict/prediction/tests/unit/gapic/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/prediction/tests/unit/gapic/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/tests/unit/gapic/prediction_v1beta1/__init__.py b/owl-bot-staging/v1beta1/schema/predict/prediction/tests/unit/gapic/prediction_v1beta1/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1beta1/schema/predict/prediction/tests/unit/gapic/prediction_v1beta1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/scripts/fixup_aiplatform_v1beta1_keywords.py b/owl-bot-staging/v1beta1/scripts/fixup_aiplatform_v1beta1_keywords.py new file mode 100644 index 0000000000..8fdb92d15f --- /dev/null +++ b/owl-bot-staging/v1beta1/scripts/fixup_aiplatform_v1beta1_keywords.py @@ -0,0 +1,348 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class aiplatformCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'add_context_artifacts_and_executions': ('context', 'artifacts', 'executions', ), + 'add_context_children': ('context', 'child_contexts', ), + 'add_execution_events': ('execution', 'events', ), + 'add_trial_measurement': ('trial_name', 'measurement', ), + 'batch_create_features': ('parent', 'requests', ), + 'batch_migrate_resources': ('parent', 'migrate_resource_requests', ), + 'batch_read_feature_values': ('featurestore', 'destination', 'entity_type_specs', 'csv_read_instances', ), + 'cancel_batch_prediction_job': ('name', ), + 'cancel_custom_job': ('name', ), + 'cancel_data_labeling_job': ('name', ), + 'cancel_hyperparameter_tuning_job': ('name', ), + 'cancel_pipeline_job': ('name', ), + 'cancel_training_pipeline': ('name', ), + 'check_trial_early_stopping_state': ('trial_name', ), + 'complete_trial': ('name', 'final_measurement', 'trial_infeasible', 'infeasible_reason', ), + 'create_artifact': ('parent', 'artifact', 'artifact_id', ), + 'create_batch_prediction_job': ('parent', 'batch_prediction_job', ), + 'create_context': ('parent', 'context', 'context_id', ), + 'create_custom_job': ('parent', 'custom_job', ), + 'create_data_labeling_job': ('parent', 'data_labeling_job', ), + 'create_dataset': ('parent', 'dataset', ), + 'create_endpoint': ('parent', 'endpoint', ), + 'create_entity_type': ('parent', 'entity_type_id', 'entity_type', ), + 'create_execution': ('parent', 'execution', 'execution_id', ), + 'create_feature': ('parent', 'feature', 'feature_id', ), + 'create_featurestore': ('parent', 'featurestore', 'featurestore_id', ), + 'create_hyperparameter_tuning_job': ('parent', 'hyperparameter_tuning_job', ), + 'create_index': ('parent', 'index', ), + 'create_index_endpoint': ('parent', 'index_endpoint', ), + 'create_metadata_schema': ('parent', 'metadata_schema', 'metadata_schema_id', ), + 'create_metadata_store': ('parent', 'metadata_store', 'metadata_store_id', ), + 'create_model_deployment_monitoring_job': ('parent', 'model_deployment_monitoring_job', ), + 'create_pipeline_job': ('parent', 'pipeline_job', 'pipeline_job_id', ), + 'create_specialist_pool': ('parent', 'specialist_pool', ), + 'create_study': ('parent', 'study', ), + 'create_tensorboard': ('parent', 'tensorboard', ), + 'create_tensorboard_experiment': ('parent', 'tensorboard_experiment_id', 'tensorboard_experiment', ), + 'create_tensorboard_run': ('parent', 'tensorboard_run', 'tensorboard_run_id', ), + 'create_tensorboard_time_series': ('parent', 'tensorboard_time_series', 'tensorboard_time_series_id', ), + 'create_training_pipeline': ('parent', 'training_pipeline', ), + 'create_trial': ('parent', 'trial', ), + 'delete_batch_prediction_job': ('name', ), + 'delete_context': ('name', 'force', ), + 'delete_custom_job': ('name', ), + 'delete_data_labeling_job': ('name', ), + 'delete_dataset': ('name', ), + 'delete_endpoint': ('name', ), + 'delete_entity_type': ('name', 'force', ), + 'delete_feature': ('name', ), + 
'delete_featurestore': ('name', 'force', ), + 'delete_hyperparameter_tuning_job': ('name', ), + 'delete_index': ('name', ), + 'delete_index_endpoint': ('name', ), + 'delete_metadata_store': ('name', 'force', ), + 'delete_model': ('name', ), + 'delete_model_deployment_monitoring_job': ('name', ), + 'delete_pipeline_job': ('name', ), + 'delete_specialist_pool': ('name', 'force', ), + 'delete_study': ('name', ), + 'delete_tensorboard': ('name', ), + 'delete_tensorboard_experiment': ('name', ), + 'delete_tensorboard_run': ('name', ), + 'delete_tensorboard_time_series': ('name', ), + 'delete_training_pipeline': ('name', ), + 'delete_trial': ('name', ), + 'deploy_index': ('index_endpoint', 'deployed_index', ), + 'deploy_model': ('endpoint', 'deployed_model', 'traffic_split', ), + 'explain': ('endpoint', 'instances', 'parameters', 'explanation_spec_override', 'deployed_model_id', ), + 'export_data': ('name', 'export_config', ), + 'export_feature_values': ('entity_type', 'destination', 'feature_selector', 'snapshot_export', 'settings', ), + 'export_model': ('name', 'output_config', ), + 'export_tensorboard_time_series_data': ('tensorboard_time_series', 'filter', 'page_size', 'page_token', 'order_by', ), + 'get_annotation_spec': ('name', 'read_mask', ), + 'get_artifact': ('name', ), + 'get_batch_prediction_job': ('name', ), + 'get_context': ('name', ), + 'get_custom_job': ('name', ), + 'get_data_labeling_job': ('name', ), + 'get_dataset': ('name', 'read_mask', ), + 'get_endpoint': ('name', ), + 'get_entity_type': ('name', ), + 'get_execution': ('name', ), + 'get_feature': ('name', ), + 'get_featurestore': ('name', ), + 'get_hyperparameter_tuning_job': ('name', ), + 'get_index': ('name', ), + 'get_index_endpoint': ('name', ), + 'get_metadata_schema': ('name', ), + 'get_metadata_store': ('name', ), + 'get_model': ('name', ), + 'get_model_deployment_monitoring_job': ('name', ), + 'get_model_evaluation': ('name', ), + 'get_model_evaluation_slice': ('name', ), + 'get_pipeline_job': ('name', ), + 'get_specialist_pool': ('name', ), + 'get_study': ('name', ), + 'get_tensorboard': ('name', ), + 'get_tensorboard_experiment': ('name', ), + 'get_tensorboard_run': ('name', ), + 'get_tensorboard_time_series': ('name', ), + 'get_training_pipeline': ('name', ), + 'get_trial': ('name', ), + 'import_data': ('name', 'import_configs', ), + 'import_feature_values': ('entity_type', 'feature_specs', 'avro_source', 'bigquery_source', 'csv_source', 'feature_time_field', 'feature_time', 'entity_id_field', 'disable_online_serving', 'worker_count', ), + 'list_annotations': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), + 'list_artifacts': ('parent', 'page_size', 'page_token', 'filter', ), + 'list_batch_prediction_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), + 'list_contexts': ('parent', 'page_size', 'page_token', 'filter', ), + 'list_custom_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), + 'list_data_items': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), + 'list_data_labeling_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), + 'list_datasets': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), + 'list_endpoints': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), + 'list_entity_types': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), + 'list_executions': ('parent', 'page_size', 'page_token', 'filter', ), + 'list_features': 
('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', 'latest_stats_count', ), + 'list_featurestores': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), + 'list_hyperparameter_tuning_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), + 'list_index_endpoints': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), + 'list_indexes': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), + 'list_metadata_schemas': ('parent', 'page_size', 'page_token', 'filter', ), + 'list_metadata_stores': ('parent', 'page_size', 'page_token', ), + 'list_model_deployment_monitoring_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), + 'list_model_evaluations': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), + 'list_model_evaluation_slices': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), + 'list_models': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), + 'list_optimal_trials': ('parent', ), + 'list_pipeline_jobs': ('parent', 'filter', 'page_size', 'page_token', ), + 'list_specialist_pools': ('parent', 'page_size', 'page_token', 'read_mask', ), + 'list_studies': ('parent', 'page_token', 'page_size', ), + 'list_tensorboard_experiments': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), + 'list_tensorboard_runs': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), + 'list_tensorboards': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), + 'list_tensorboard_time_series': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), + 'list_training_pipelines': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), + 'list_trials': ('parent', 'page_token', 'page_size', ), + 'lookup_study': ('parent', 'display_name', ), + 'pause_model_deployment_monitoring_job': ('name', ), + 'predict': ('endpoint', 'instances', 'parameters', ), + 'query_artifact_lineage_subgraph': ('artifact', 'max_hops', 'filter', ), + 'query_context_lineage_subgraph': ('context', ), + 'query_execution_inputs_and_outputs': ('execution', ), + 'read_feature_values': ('entity_type', 'entity_id', 'feature_selector', ), + 'read_tensorboard_blob_data': ('time_series', 'blob_ids', ), + 'read_tensorboard_time_series_data': ('tensorboard_time_series', 'max_data_points', 'filter', ), + 'resume_model_deployment_monitoring_job': ('name', ), + 'search_features': ('location', 'query', 'page_size', 'page_token', ), + 'search_migratable_resources': ('parent', 'page_size', 'page_token', 'filter', ), + 'search_model_deployment_monitoring_stats_anomalies': ('model_deployment_monitoring_job', 'deployed_model_id', 'objectives', 'feature_display_name', 'page_size', 'page_token', 'start_time', 'end_time', ), + 'stop_trial': ('name', ), + 'streaming_read_feature_values': ('entity_type', 'entity_ids', 'feature_selector', ), + 'suggest_trials': ('parent', 'suggestion_count', 'client_id', ), + 'undeploy_index': ('index_endpoint', 'deployed_index_id', ), + 'undeploy_model': ('endpoint', 'deployed_model_id', 'traffic_split', ), + 'update_artifact': ('artifact', 'update_mask', 'allow_missing', ), + 'update_context': ('context', 'update_mask', 'allow_missing', ), + 'update_dataset': ('dataset', 'update_mask', ), + 'update_endpoint': ('endpoint', 'update_mask', ), + 'update_entity_type': ('entity_type', 'update_mask', ), + 'update_execution': ('execution', 'update_mask', 'allow_missing', ), + 'update_feature': ('feature', 
'update_mask', ), + 'update_featurestore': ('featurestore', 'update_mask', ), + 'update_index': ('index', 'update_mask', ), + 'update_index_endpoint': ('index_endpoint', 'update_mask', ), + 'update_model': ('model', 'update_mask', ), + 'update_model_deployment_monitoring_job': ('model_deployment_monitoring_job', 'update_mask', ), + 'update_specialist_pool': ('specialist_pool', 'update_mask', ), + 'update_tensorboard': ('update_mask', 'tensorboard', ), + 'update_tensorboard_experiment': ('update_mask', 'tensorboard_experiment', ), + 'update_tensorboard_run': ('update_mask', 'tensorboard_run', ), + 'update_tensorboard_time_series': ('update_mask', 'tensorboard_time_series', ), + 'upload_model': ('parent', 'model', ), + 'write_tensorboard_run_data': ('tensorboard_run', 'time_series_data', ), + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=aiplatformCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the aiplatform client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. 
+ Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. +""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta1/scripts/fixup_definition_v1beta1_keywords.py b/owl-bot-staging/v1beta1/scripts/fixup_definition_v1beta1_keywords.py new file mode 100644 index 0000000000..4759d2731c --- /dev/null +++ b/owl-bot-staging/v1beta1/scripts/fixup_definition_v1beta1_keywords.py @@ -0,0 +1,175 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class definitionCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. 
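The guard directly above is what makes these fixup scripts safe to re-run: any call that already passes a request keyword is returned untouched, so a second pass over already-fixed sources is a no-op. A quick check of that behavior, using a hypothetical subclass with one mapped method (run in the same module, with libcst installed):

    import libcst as cst

    class _Demo(definitionCallTransformer):
        METHOD_TO_PARAMS = {'create_dataset': ('parent', 'dataset')}

    src = "client.create_dataset(request={'parent': 'p'}, retry=None)\n"
    # The 'request' kwarg short-circuits the rewrite: output equals input.
    assert cst.parse_module(src).visit(_Demo()).code == src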
+ return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=definitionCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the definition client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
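Worth spelling out for this particular script: METHOD_TO_PARAMS above is empty, because the definition schema package exposes message types rather than client methods. Every call therefore takes the KeyError branch in leave_Call and the sources are copied through verbatim. A sketch, under the same in-module assumption:

    import libcst as cst

    src = "anything.create_dataset('positional', 'args')\n"
    # Empty METHOD_TO_PARAMS -> KeyError -> early return: nothing is rewritten.
    assert cst.parse_module(src).visit(definitionCallTransformer()).code == src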
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta1/scripts/fixup_instance_v1beta1_keywords.py b/owl-bot-staging/v1beta1/scripts/fixup_instance_v1beta1_keywords.py new file mode 100644 index 0000000000..d9e8bd0b17 --- /dev/null +++ b/owl-bot-staging/v1beta1/scripts/fixup_instance_v1beta1_keywords.py @@ -0,0 +1,175 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class instanceCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. 
+ return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=instanceCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the instance client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
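Both the CLI below and programmatic use land in fix_files above, which mirrors every .py file from the input tree into the output tree with call sites rewritten. A minimal driver equivalent to the command-line form (paths hypothetical; per the __main__ checks, the output directory must already exist and be empty):

    import pathlib

    fix_files(pathlib.Path('src'), pathlib.Path('src_fixed'))
    # Equivalent CLI run:
    #   python fixup_instance_v1beta1_keywords.py -d src -o src_fixed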
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta1/scripts/fixup_params_v1beta1_keywords.py b/owl-bot-staging/v1beta1/scripts/fixup_params_v1beta1_keywords.py new file mode 100644 index 0000000000..17915abed8 --- /dev/null +++ b/owl-bot-staging/v1beta1/scripts/fixup_params_v1beta1_keywords.py @@ -0,0 +1,175 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class paramsCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. 
+ return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=paramsCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the params client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
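The false-positive caveat above is easy to reproduce: matching happens on the attribute name alone, so a same-named method on an unrelated object gets rewritten too. A hypothetical sketch (one mapped method, in-module, libcst installed):

    import libcst as cst

    class _Demo(paramsCallTransformer):
        METHOD_TO_PARAMS = {'predict': ('endpoint', 'instances')}

    src = "not_a_client.predict('a', 'b')\n"
    print(cst.parse_module(src).visit(_Demo()).code)
    # Rewritten to the request={...} form anyway -- such sites need hand review.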
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta1/scripts/fixup_prediction_v1beta1_keywords.py b/owl-bot-staging/v1beta1/scripts/fixup_prediction_v1beta1_keywords.py new file mode 100644 index 0000000000..7860bb63cf --- /dev/null +++ b/owl-bot-staging/v1beta1/scripts/fixup_prediction_v1beta1_keywords.py @@ -0,0 +1,175 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class predictionCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. 
+ return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=predictionCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the prediction client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
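And caveat B above in miniature: binding the method to a name first leaves a plain Name call at the call site, so original.func has no .attr, the AttributeError branch fires, and the call is skipped (a false negative). Sketch, same assumptions as the earlier ones:

    import libcst as cst

    class _Demo(predictionCallTransformer):
        METHOD_TO_PARAMS = {'predict': ('endpoint', 'instances')}

    src = "pred = client.predict\npred('e', instances)\n"
    # The aliased call is left untouched; only attribute-style calls are rewritten.
    assert cst.parse_module(src).visit(_Demo()).code == src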
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta1/setup.py b/owl-bot-staging/v1beta1/setup.py new file mode 100644 index 0000000000..601333bef6 --- /dev/null +++ b/owl-bot-staging/v1beta1/setup.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import io +import os +import setuptools # type: ignore + +version = '0.1.0' + +package_root = os.path.abspath(os.path.dirname(__file__)) + +readme_filename = os.path.join(package_root, 'README.rst') +with io.open(readme_filename, encoding='utf-8') as readme_file: + readme = readme_file.read() + +setuptools.setup( + name='google-cloud-aiplatform-v1beta1-schema-trainingjob-definition', + version=version, + long_description=readme, + packages=setuptools.PEP420PackageFinder.find(), + namespace_packages=('google', 'google.cloud', 'google.cloud.aiplatform', 'google.cloud.aiplatform.v1beta1', 'google.cloud.aiplatform.v1beta1.schema', 'google.cloud.aiplatform.v1beta1.schema.trainingjob'), + platforms='Posix; MacOS X; Windows', + include_package_data=True, + install_requires=( + 'google-api-core[grpc] >= 1.27.0, < 2.0.0dev', + 'libcst >= 0.2.5', + 'proto-plus >= 1.15.0', + 'packaging >= 14.3', ), + python_requires='>=3.6', + classifiers=[ + 'Development Status :: 3 - Alpha', + 'Intended Audience :: Developers', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Topic :: Internet', + 'Topic :: Software Development :: Libraries :: Python Modules', + ], + zip_safe=False, +) diff --git a/owl-bot-staging/v1beta1/tests/__init__.py b/owl-bot-staging/v1beta1/tests/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/tests/unit/__init__.py b/owl-bot-staging/v1beta1/tests/unit/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py new file mode 100644 index 0000000000..e66a15462d --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py @@ -0,0 +1,3983 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.dataset_service import DatasetServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.dataset_service import DatasetServiceClient +from google.cloud.aiplatform_v1beta1.services.dataset_service import pagers +from google.cloud.aiplatform_v1beta1.services.dataset_service import transports +from google.cloud.aiplatform_v1beta1.services.dataset_service.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.aiplatform_v1beta1.types import annotation +from google.cloud.aiplatform_v1beta1.types import annotation_spec +from google.cloud.aiplatform_v1beta1.types import data_item +from google.cloud.aiplatform_v1beta1.types import dataset +from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset +from google.cloud.aiplatform_v1beta1.types import dataset_service +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). 
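The two skipif markers defined next pin a test to one side of the google-auth 1.25.0 boundary and are applied as plain decorators. Hypothetical usage:

    @requires_google_auth_gte_1_25_0
    def test_something_on_new_auth():
        ...  # skipped unless google-auth >= 1.25.0 is installed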
+requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert DatasetServiceClient._get_default_mtls_endpoint(None) is None + assert DatasetServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert DatasetServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert DatasetServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert DatasetServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert DatasetServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + DatasetServiceClient, + DatasetServiceAsyncClient, +]) +def test_dataset_service_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +@pytest.mark.parametrize("client_class", [ + DatasetServiceClient, + DatasetServiceAsyncClient, +]) +def test_dataset_service_client_service_account_always_use_jwt(client_class): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.DatasetServiceGrpcTransport, "grpc"), + (transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_dataset_service_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + DatasetServiceClient, + DatasetServiceAsyncClient, +]) +def test_dataset_service_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 
'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +def test_dataset_service_client_get_transport_class(): + transport = DatasetServiceClient.get_transport_class() + available_transports = [ + transports.DatasetServiceGrpcTransport, + ] + assert transport in available_transports + + transport = DatasetServiceClient.get_transport_class("grpc") + assert transport == transports.DatasetServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc"), + (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(DatasetServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceClient)) +@mock.patch.object(DatasetServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceAsyncClient)) +def test_dataset_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(DatasetServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(DatasetServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
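Taken together, the environment-variable branches in this test (the "always" case follows just below) pin down a small decision table for endpoint selection. A plain-Python paraphrase of that contract, not library code -- note the real client raises MutualTLSChannelError, not ValueError, on unsupported values:

    def expected_endpoint(mtls_env: str, has_client_cert: bool, default: str, mtls: str) -> str:
        # GOOGLE_API_USE_MTLS_ENDPOINT handling as exercised by these cases.
        if mtls_env == "never":
            return default
        if mtls_env == "always":
            return mtls
        if mtls_env == "auto":
            return mtls if has_client_cert else default
        raise ValueError(f"unsupported GOOGLE_API_USE_MTLS_ENDPOINT: {mtls_env}")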
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc", "true"), + (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc", "false"), + (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(DatasetServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceClient)) +@mock.patch.object(DatasetServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_dataset_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. 
Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc"), + (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_dataset_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc"), + (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_dataset_service_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_dataset_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1beta1.services.dataset_service.transports.DatasetServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = DatasetServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_dataset(transport: str = 'grpc', request_type=dataset_service.CreateDatasetRequest): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.CreateDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_dataset_from_dict(): + test_create_dataset(request_type=dict) + + +def test_create_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + client.create_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.CreateDatasetRequest() + + +@pytest.mark.asyncio +async def test_create_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.CreateDatasetRequest): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. 
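+        # (Async variant of the sync test above: the mocked stub must return an
+        # awaitable, hence the grpc_helpers_async.FakeUnaryUnaryCall wrapper on
+        # the next line, where the sync tests return the Operation directly.)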
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.CreateDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_dataset_async_from_dict(): + await test_create_dataset_async(request_type=dict) + + +def test_create_dataset_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.CreateDatasetRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_dataset_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.CreateDatasetRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_create_dataset_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_dataset( + parent='parent_value', + dataset=gca_dataset.Dataset(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].dataset == gca_dataset.Dataset(name='name_value') + + +def test_create_dataset_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_dataset( + dataset_service.CreateDatasetRequest(), + parent='parent_value', + dataset=gca_dataset.Dataset(name='name_value'), + ) + + +@pytest.mark.asyncio +async def test_create_dataset_flattened_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_dataset( + parent='parent_value', + dataset=gca_dataset.Dataset(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].dataset == gca_dataset.Dataset(name='name_value') + + +@pytest.mark.asyncio +async def test_create_dataset_flattened_error_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_dataset( + dataset_service.CreateDatasetRequest(), + parent='parent_value', + dataset=gca_dataset.Dataset(name='name_value'), + ) + + +def test_get_dataset(transport: str = 'grpc', request_type=dataset_service.GetDatasetRequest): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset.Dataset( + name='name_value', + display_name='display_name_value', + metadata_schema_uri='metadata_schema_uri_value', + etag='etag_value', + ) + response = client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.GetDatasetRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.etag == 'etag_value' + + +def test_get_dataset_from_dict(): + test_get_dataset(request_type=dict) + + +def test_get_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + client.get_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.GetDatasetRequest() + + +@pytest.mark.asyncio +async def test_get_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.GetDatasetRequest): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset( + name='name_value', + display_name='display_name_value', + metadata_schema_uri='metadata_schema_uri_value', + etag='etag_value', + )) + response = await client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.GetDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_get_dataset_async_from_dict(): + await test_get_dataset_async(request_type=dict) + + +def test_get_dataset_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.GetDatasetRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_dataset), + '__call__') as call: + call.return_value = dataset.Dataset() + client.get_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_dataset_field_headers_async():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = dataset_service.GetDatasetRequest()
+
+ request.name = 'name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_dataset),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset())
+ await client.get_dataset(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+def test_get_dataset_flattened():
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_dataset),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = dataset.Dataset()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_dataset(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+def test_get_dataset_flattened_error():
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_dataset(
+ dataset_service.GetDatasetRequest(),
+ name='name_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_dataset_flattened_async():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_dataset),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_dataset(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_dataset_flattened_error_async():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
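+ # The ValueError is raised client-side, before any RPC is attempted.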
+ with pytest.raises(ValueError):
+ await client.get_dataset(
+ dataset_service.GetDatasetRequest(),
+ name='name_value',
+ )
+
+
+def test_update_dataset(transport: str = 'grpc', request_type=dataset_service.UpdateDatasetRequest):
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_dataset),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_dataset.Dataset(
+ name='name_value',
+ display_name='display_name_value',
+ metadata_schema_uri='metadata_schema_uri_value',
+ etag='etag_value',
+ )
+ response = client.update_dataset(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == dataset_service.UpdateDatasetRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_dataset.Dataset)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.metadata_schema_uri == 'metadata_schema_uri_value'
+ assert response.etag == 'etag_value'
+
+
+def test_update_dataset_from_dict():
+ test_update_dataset(request_type=dict)
+
+
+def test_update_dataset_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_dataset),
+ '__call__') as call:
+ client.update_dataset()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == dataset_service.UpdateDatasetRequest()
+
+
+@pytest.mark.asyncio
+async def test_update_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.UpdateDatasetRequest):
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_dataset),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset(
+ name='name_value',
+ display_name='display_name_value',
+ metadata_schema_uri='metadata_schema_uri_value',
+ etag='etag_value',
+ ))
+ response = await client.update_dataset(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == dataset_service.UpdateDatasetRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_dataset.Dataset) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.metadata_schema_uri == 'metadata_schema_uri_value' + assert response.etag == 'etag_value' + + +@pytest.mark.asyncio +async def test_update_dataset_async_from_dict(): + await test_update_dataset_async(request_type=dict) + + +def test_update_dataset_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.UpdateDatasetRequest() + + request.dataset.name = 'dataset.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + call.return_value = gca_dataset.Dataset() + client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'dataset.name=dataset.name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_dataset_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.UpdateDatasetRequest() + + request.dataset.name = 'dataset.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) + await client.update_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'dataset.name=dataset.name/value', + ) in kw['metadata'] + + +def test_update_dataset_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_dataset.Dataset() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_dataset( + dataset=gca_dataset.Dataset(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].dataset == gca_dataset.Dataset(name='name_value') + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) + + +def test_update_dataset_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError):
+ client.update_dataset(
+ dataset_service.UpdateDatasetRequest(),
+ dataset=gca_dataset.Dataset(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+
+@pytest.mark.asyncio
+async def test_update_dataset_flattened_async():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_dataset),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.update_dataset(
+ dataset=gca_dataset.Dataset(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].dataset == gca_dataset.Dataset(name='name_value')
+ assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+@pytest.mark.asyncio
+async def test_update_dataset_flattened_error_async():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.update_dataset(
+ dataset_service.UpdateDatasetRequest(),
+ dataset=gca_dataset.Dataset(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+
+def test_list_datasets(transport: str = 'grpc', request_type=dataset_service.ListDatasetsRequest):
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_datasets),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = dataset_service.ListDatasetsResponse(
+ next_page_token='next_page_token_value',
+ )
+ response = client.list_datasets(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == dataset_service.ListDatasetsRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListDatasetsPager)
+ assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_datasets_from_dict():
+ test_list_datasets(request_type=dict)
+
+
+def test_list_datasets_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_datasets),
+ '__call__') as call:
+ client.list_datasets()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == dataset_service.ListDatasetsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_datasets_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ListDatasetsRequest):
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_datasets),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDatasetsResponse(
+ next_page_token='next_page_token_value',
+ ))
+ response = await client.list_datasets(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == dataset_service.ListDatasetsRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListDatasetsAsyncPager)
+ assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_datasets_async_from_dict():
+ await test_list_datasets_async(request_type=dict)
+
+
+def test_list_datasets_field_headers():
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = dataset_service.ListDatasetsRequest()
+
+ request.parent = 'parent/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_datasets),
+ '__call__') as call:
+ call.return_value = dataset_service.ListDatasetsResponse()
+ client.list_datasets(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'parent=parent/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_datasets_field_headers_async():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = dataset_service.ListDatasetsRequest()
+
+ request.parent = 'parent/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_datasets),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDatasetsResponse())
+ await client.list_datasets(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'parent=parent/value',
+ ) in kw['metadata']
+
+
+def test_list_datasets_flattened():
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_datasets),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = dataset_service.ListDatasetsResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_datasets(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+
+
+def test_list_datasets_flattened_error():
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_datasets(
+ dataset_service.ListDatasetsRequest(),
+ parent='parent_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_datasets_flattened_async():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_datasets),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDatasetsResponse())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_datasets(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_datasets_flattened_error_async():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_datasets(
+ dataset_service.ListDatasetsRequest(),
+ parent='parent_value',
+ )
+
+
+def test_list_datasets_pager():
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_datasets),
+ '__call__') as call:
+ # Set the response to a series of pages.
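+ # Each response below is returned by one successive stub call; the
+ # trailing RuntimeError ensures the pager never fetches past the
+ # final page.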
+ call.side_effect = (
+ dataset_service.ListDatasetsResponse(
+ datasets=[
+ dataset.Dataset(),
+ dataset.Dataset(),
+ dataset.Dataset(),
+ ],
+ next_page_token='abc',
+ ),
+ dataset_service.ListDatasetsResponse(
+ datasets=[],
+ next_page_token='def',
+ ),
+ dataset_service.ListDatasetsResponse(
+ datasets=[
+ dataset.Dataset(),
+ ],
+ next_page_token='ghi',
+ ),
+ dataset_service.ListDatasetsResponse(
+ datasets=[
+ dataset.Dataset(),
+ dataset.Dataset(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ('parent', ''),
+ )),
+ )
+ pager = client.list_datasets(request={})
+
+ assert pager._metadata == metadata
+
+ results = [i for i in pager]
+ assert len(results) == 6
+ assert all(isinstance(i, dataset.Dataset)
+ for i in results)
+
+def test_list_datasets_pages():
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_datasets),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ dataset_service.ListDatasetsResponse(
+ datasets=[
+ dataset.Dataset(),
+ dataset.Dataset(),
+ dataset.Dataset(),
+ ],
+ next_page_token='abc',
+ ),
+ dataset_service.ListDatasetsResponse(
+ datasets=[],
+ next_page_token='def',
+ ),
+ dataset_service.ListDatasetsResponse(
+ datasets=[
+ dataset.Dataset(),
+ ],
+ next_page_token='ghi',
+ ),
+ dataset_service.ListDatasetsResponse(
+ datasets=[
+ dataset.Dataset(),
+ dataset.Dataset(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_datasets(request={}).pages)
+ for page_, token in zip(pages, ['abc','def','ghi', '']):
+ assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_datasets_async_pager():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_datasets),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ dataset_service.ListDatasetsResponse(
+ datasets=[
+ dataset.Dataset(),
+ dataset.Dataset(),
+ dataset.Dataset(),
+ ],
+ next_page_token='abc',
+ ),
+ dataset_service.ListDatasetsResponse(
+ datasets=[],
+ next_page_token='def',
+ ),
+ dataset_service.ListDatasetsResponse(
+ datasets=[
+ dataset.Dataset(),
+ ],
+ next_page_token='ghi',
+ ),
+ dataset_service.ListDatasetsResponse(
+ datasets=[
+ dataset.Dataset(),
+ dataset.Dataset(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_datasets(request={},)
+ assert async_pager.next_page_token == 'abc'
+ responses = []
+ async for response in async_pager:
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, dataset.Dataset)
+ for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_datasets_async_pages():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_datasets),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = ( + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + dataset.Dataset(), + ], + next_page_token='abc', + ), + dataset_service.ListDatasetsResponse( + datasets=[], + next_page_token='def', + ), + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + ], + next_page_token='ghi', + ), + dataset_service.ListDatasetsResponse( + datasets=[ + dataset.Dataset(), + dataset.Dataset(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_datasets(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +def test_delete_dataset(transport: str = 'grpc', request_type=dataset_service.DeleteDatasetRequest): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.DeleteDatasetRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_dataset_from_dict(): + test_delete_dataset(request_type=dict) + + +def test_delete_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + client.delete_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.DeleteDatasetRequest() + + +@pytest.mark.asyncio +async def test_delete_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.DeleteDatasetRequest): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.DeleteDatasetRequest() + + # Establish that the response is the type that we expect. 
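+ # Long-running methods return an operation future wrapping the LRO.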
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_dataset_async_from_dict(): + await test_delete_dataset_async(request_type=dict) + + +def test_delete_dataset_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.DeleteDatasetRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_dataset_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.DeleteDatasetRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_dataset(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_delete_dataset_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_dataset), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_dataset( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_delete_dataset_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_dataset( + dataset_service.DeleteDatasetRequest(), + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_delete_dataset_flattened_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
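+ # Patching '__call__' on the transport callable's type intercepts the
+ # stub invocation without any network I/O.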
+ with mock.patch.object(
+ type(client.transport.delete_dataset),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name='operations/spam')
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_dataset(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_dataset_flattened_error_async():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.delete_dataset(
+ dataset_service.DeleteDatasetRequest(),
+ name='name_value',
+ )
+
+
+def test_import_data(transport: str = 'grpc', request_type=dataset_service.ImportDataRequest):
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.import_data),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name='operations/spam')
+ response = client.import_data(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == dataset_service.ImportDataRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_import_data_from_dict():
+ test_import_data(request_type=dict)
+
+
+def test_import_data_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.import_data),
+ '__call__') as call:
+ client.import_data()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == dataset_service.ImportDataRequest()
+
+
+@pytest.mark.asyncio
+async def test_import_data_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ImportDataRequest):
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.import_data),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ImportDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_import_data_async_from_dict(): + await test_import_data_async(request_type=dict) + + +def test_import_data_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ImportDataRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_import_data_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ImportDataRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.import_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_import_data_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.import_data( + name='name_value', + import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
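+ # (Each flattened keyword argument is copied into the request message.)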
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+ assert args[0].import_configs == [dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))]
+
+
+def test_import_data_flattened_error():
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.import_data(
+ dataset_service.ImportDataRequest(),
+ name='name_value',
+ import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))],
+ )
+
+
+@pytest.mark.asyncio
+async def test_import_data_flattened_async():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.import_data),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name='operations/spam')
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.import_data(
+ name='name_value',
+ import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))],
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+ assert args[0].import_configs == [dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))]
+
+
+@pytest.mark.asyncio
+async def test_import_data_flattened_error_async():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.import_data(
+ dataset_service.ImportDataRequest(),
+ name='name_value',
+ import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))],
+ )
+
+
+def test_export_data(transport: str = 'grpc', request_type=dataset_service.ExportDataRequest):
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.export_data),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name='operations/spam')
+ response = client.export_data(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == dataset_service.ExportDataRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_export_data_from_dict():
+ test_export_data(request_type=dict)
+
+
+def test_export_data_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. 
request == None and no flattened fields passed, work. + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + client.export_data() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ExportDataRequest() + + +@pytest.mark.asyncio +async def test_export_data_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ExportDataRequest): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.export_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == dataset_service.ExportDataRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_export_data_async_from_dict(): + await test_export_data_async(request_type=dict) + + +def test_export_data_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ExportDataRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.export_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_export_data_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ExportDataRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_data), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.export_data(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+def test_export_data_flattened():
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.export_data),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name='operations/op')
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.export_data(
+ name='name_value',
+ export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+ assert args[0].export_config == dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value'))
+
+
+def test_export_data_flattened_error():
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.export_data(
+ dataset_service.ExportDataRequest(),
+ name='name_value',
+ export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')),
+ )
+
+
+@pytest.mark.asyncio
+async def test_export_data_flattened_async():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.export_data),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name='operations/spam')
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.export_data(
+ name='name_value',
+ export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+ assert args[0].export_config == dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value'))
+
+
+@pytest.mark.asyncio
+async def test_export_data_flattened_error_async():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.export_data(
+ dataset_service.ExportDataRequest(),
+ name='name_value',
+ export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')),
+ )
+
+
+def test_list_data_items(transport: str = 'grpc', request_type=dataset_service.ListDataItemsRequest):
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_items),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = dataset_service.ListDataItemsResponse(
+ next_page_token='next_page_token_value',
+ )
+ response = client.list_data_items(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == dataset_service.ListDataItemsRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListDataItemsPager)
+ assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_data_items_from_dict():
+ test_list_data_items(request_type=dict)
+
+
+def test_list_data_items_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_items),
+ '__call__') as call:
+ client.list_data_items()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == dataset_service.ListDataItemsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_data_items_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ListDataItemsRequest):
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_items),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDataItemsResponse(
+ next_page_token='next_page_token_value',
+ ))
+ response = await client.list_data_items(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == dataset_service.ListDataItemsRequest()
+
+ # Establish that the response is the type that we expect.
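+ # The async client wraps the raw response in an async pager.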
+ assert isinstance(response, pagers.ListDataItemsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_data_items_async_from_dict(): + await test_list_data_items_async(request_type=dict) + + +def test_list_data_items_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ListDataItemsRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_items), + '__call__') as call: + call.return_value = dataset_service.ListDataItemsResponse() + client.list_data_items(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_data_items_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ListDataItemsRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_items), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDataItemsResponse()) + await client.list_data_items(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_list_data_items_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_data_items), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset_service.ListDataItemsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_data_items( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +def test_list_data_items_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError):
+ client.list_data_items(
+ dataset_service.ListDataItemsRequest(),
+ parent='parent_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_data_items_flattened_async():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_items),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDataItemsResponse())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_data_items(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_data_items_flattened_error_async():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_data_items(
+ dataset_service.ListDataItemsRequest(),
+ parent='parent_value',
+ )
+
+
+def test_list_data_items_pager():
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_items),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ dataset_service.ListDataItemsResponse(
+ data_items=[
+ data_item.DataItem(),
+ data_item.DataItem(),
+ data_item.DataItem(),
+ ],
+ next_page_token='abc',
+ ),
+ dataset_service.ListDataItemsResponse(
+ data_items=[],
+ next_page_token='def',
+ ),
+ dataset_service.ListDataItemsResponse(
+ data_items=[
+ data_item.DataItem(),
+ ],
+ next_page_token='ghi',
+ ),
+ dataset_service.ListDataItemsResponse(
+ data_items=[
+ data_item.DataItem(),
+ data_item.DataItem(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ('parent', ''),
+ )),
+ )
+ pager = client.list_data_items(request={})
+
+ assert pager._metadata == metadata
+
+ results = [i for i in pager]
+ assert len(results) == 6
+ assert all(isinstance(i, data_item.DataItem)
+ for i in results)
+
+def test_list_data_items_pages():
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_items),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ dataset_service.ListDataItemsResponse(
+ data_items=[
+ data_item.DataItem(),
+ data_item.DataItem(),
+ data_item.DataItem(),
+ ],
+ next_page_token='abc',
+ ),
+ dataset_service.ListDataItemsResponse(
+ data_items=[],
+ next_page_token='def',
+ ),
+ dataset_service.ListDataItemsResponse(
+ data_items=[
+ data_item.DataItem(),
+ ],
+ next_page_token='ghi',
+ ),
+ dataset_service.ListDataItemsResponse(
+ data_items=[
+ data_item.DataItem(),
+ data_item.DataItem(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_data_items(request={}).pages)
+ for page_, token in zip(pages, ['abc','def','ghi', '']):
+ assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_data_items_async_pager():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_items),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ dataset_service.ListDataItemsResponse(
+ data_items=[
+ data_item.DataItem(),
+ data_item.DataItem(),
+ data_item.DataItem(),
+ ],
+ next_page_token='abc',
+ ),
+ dataset_service.ListDataItemsResponse(
+ data_items=[],
+ next_page_token='def',
+ ),
+ dataset_service.ListDataItemsResponse(
+ data_items=[
+ data_item.DataItem(),
+ ],
+ next_page_token='ghi',
+ ),
+ dataset_service.ListDataItemsResponse(
+ data_items=[
+ data_item.DataItem(),
+ data_item.DataItem(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_data_items(request={},)
+ assert async_pager.next_page_token == 'abc'
+ responses = []
+ async for response in async_pager:
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, data_item.DataItem)
+ for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_data_items_async_pages():
+ client = DatasetServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_data_items),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ dataset_service.ListDataItemsResponse(
+ data_items=[
+ data_item.DataItem(),
+ data_item.DataItem(),
+ data_item.DataItem(),
+ ],
+ next_page_token='abc',
+ ),
+ dataset_service.ListDataItemsResponse(
+ data_items=[],
+ next_page_token='def',
+ ),
+ dataset_service.ListDataItemsResponse(
+ data_items=[
+ data_item.DataItem(),
+ ],
+ next_page_token='ghi',
+ ),
+ dataset_service.ListDataItemsResponse(
+ data_items=[
+ data_item.DataItem(),
+ data_item.DataItem(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = []
+ async for page_ in (await client.list_data_items(request={})).pages:
+ pages.append(page_)
+ for page_, token in zip(pages, ['abc','def','ghi', '']):
+ assert page_.raw_page.next_page_token == token
+
+def test_get_annotation_spec(transport: str = 'grpc', request_type=dataset_service.GetAnnotationSpecRequest):
+ client = DatasetServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_annotation_spec),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = annotation_spec.AnnotationSpec(
+            name='name_value',
+            display_name='display_name_value',
+            etag='etag_value',
+        )
+        response = client.get_annotation_spec(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == dataset_service.GetAnnotationSpecRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, annotation_spec.AnnotationSpec)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.etag == 'etag_value'
+
+
+def test_get_annotation_spec_from_dict():
+    test_get_annotation_spec(request_type=dict)
+
+
+def test_get_annotation_spec_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = DatasetServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_annotation_spec),
+            '__call__') as call:
+        client.get_annotation_spec()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == dataset_service.GetAnnotationSpecRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_annotation_spec_async(transport: str = 'grpc_asyncio', request_type=dataset_service.GetAnnotationSpecRequest):
+    client = DatasetServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_annotation_spec),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec(
+            name='name_value',
+            display_name='display_name_value',
+            etag='etag_value',
+        ))
+        response = await client.get_annotation_spec(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == dataset_service.GetAnnotationSpecRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, annotation_spec.AnnotationSpec)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.etag == 'etag_value'
+
+
+@pytest.mark.asyncio
+async def test_get_annotation_spec_async_from_dict():
+    await test_get_annotation_spec_async(request_type=dict)
+
+
+def test_get_annotation_spec_field_headers():
+    client = DatasetServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = dataset_service.GetAnnotationSpecRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
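+    # The client is expected to mirror request.name into an
+    # x-goog-request-params metadata entry (used for server-side routing),
+    # which is exactly what the assertion at the end checks.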
+    with mock.patch.object(
+            type(client.transport.get_annotation_spec),
+            '__call__') as call:
+        call.return_value = annotation_spec.AnnotationSpec()
+        client.get_annotation_spec(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_annotation_spec_field_headers_async():
+    client = DatasetServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = dataset_service.GetAnnotationSpecRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_annotation_spec),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec())
+        await client.get_annotation_spec(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_get_annotation_spec_flattened():
+    client = DatasetServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_annotation_spec),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = annotation_spec.AnnotationSpec()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_annotation_spec(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_get_annotation_spec_flattened_error():
+    client = DatasetServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_annotation_spec(
+            dataset_service.GetAnnotationSpecRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_annotation_spec_flattened_async():
+    client = DatasetServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_annotation_spec),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_annotation_spec(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
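+        # (The flattened `name` keyword must have been folded into the
+        # request message that was actually sent.)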
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_annotation_spec_flattened_error_async():
+    client = DatasetServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_annotation_spec(
+            dataset_service.GetAnnotationSpecRequest(),
+            name='name_value',
+        )
+
+
+def test_list_annotations(transport: str = 'grpc', request_type=dataset_service.ListAnnotationsRequest):
+    client = DatasetServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_annotations),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = dataset_service.ListAnnotationsResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_annotations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == dataset_service.ListAnnotationsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListAnnotationsPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_annotations_from_dict():
+    test_list_annotations(request_type=dict)
+
+
+def test_list_annotations_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = DatasetServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_annotations),
+            '__call__') as call:
+        client.list_annotations()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == dataset_service.ListAnnotationsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_annotations_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ListAnnotationsRequest):
+    client = DatasetServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_annotations),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListAnnotationsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_annotations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == dataset_service.ListAnnotationsRequest()
+
+    # Establish that the response is the type that we expect.
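+    # The raw ListAnnotationsResponse is wrapped in an async pager, which
+    # still proxies simple fields such as next_page_token.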
+ assert isinstance(response, pagers.ListAnnotationsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_annotations_async_from_dict(): + await test_list_annotations_async(request_type=dict) + + +def test_list_annotations_field_headers(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ListAnnotationsRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_annotations), + '__call__') as call: + call.return_value = dataset_service.ListAnnotationsResponse() + client.list_annotations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_annotations_field_headers_async(): + client = DatasetServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = dataset_service.ListAnnotationsRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_annotations), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListAnnotationsResponse()) + await client.list_annotations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_list_annotations_flattened(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_annotations), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = dataset_service.ListAnnotationsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_annotations( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +def test_list_annotations_flattened_error(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.list_annotations(
+            dataset_service.ListAnnotationsRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_annotations_flattened_async():
+    client = DatasetServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_annotations),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListAnnotationsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_annotations(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_annotations_flattened_error_async():
+    client = DatasetServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_annotations(
+            dataset_service.ListAnnotationsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_annotations_pager():
+    client = DatasetServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_annotations),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            dataset_service.ListAnnotationsResponse(
+                annotations=[
+                    annotation.Annotation(),
+                    annotation.Annotation(),
+                    annotation.Annotation(),
+                ],
+                next_page_token='abc',
+            ),
+            dataset_service.ListAnnotationsResponse(
+                annotations=[],
+                next_page_token='def',
+            ),
+            dataset_service.ListAnnotationsResponse(
+                annotations=[
+                    annotation.Annotation(),
+                ],
+                next_page_token='ghi',
+            ),
+            dataset_service.ListAnnotationsResponse(
+                annotations=[
+                    annotation.Annotation(),
+                    annotation.Annotation(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_annotations(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, annotation.Annotation)
+                   for i in results)
+
+def test_list_annotations_pages():
+    client = DatasetServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_annotations),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            dataset_service.ListAnnotationsResponse(
+                annotations=[
+                    annotation.Annotation(),
+                    annotation.Annotation(),
+                    annotation.Annotation(),
+                ],
+                next_page_token='abc',
+            ),
+            dataset_service.ListAnnotationsResponse(
+                annotations=[],
+                next_page_token='def',
+            ),
+            dataset_service.ListAnnotationsResponse(
+                annotations=[
+                    annotation.Annotation(),
+                ],
+                next_page_token='ghi',
+            ),
+            dataset_service.ListAnnotationsResponse(
+                annotations=[
+                    annotation.Annotation(),
+                    annotation.Annotation(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_annotations(request={}).pages)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_annotations_async_pager():
+    client = DatasetServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_annotations),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            dataset_service.ListAnnotationsResponse(
+                annotations=[
+                    annotation.Annotation(),
+                    annotation.Annotation(),
+                    annotation.Annotation(),
+                ],
+                next_page_token='abc',
+            ),
+            dataset_service.ListAnnotationsResponse(
+                annotations=[],
+                next_page_token='def',
+            ),
+            dataset_service.ListAnnotationsResponse(
+                annotations=[
+                    annotation.Annotation(),
+                ],
+                next_page_token='ghi',
+            ),
+            dataset_service.ListAnnotationsResponse(
+                annotations=[
+                    annotation.Annotation(),
+                    annotation.Annotation(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_annotations(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, annotation.Annotation)
+                   for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_annotations_async_pages():
+    client = DatasetServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_annotations),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            dataset_service.ListAnnotationsResponse(
+                annotations=[
+                    annotation.Annotation(),
+                    annotation.Annotation(),
+                    annotation.Annotation(),
+                ],
+                next_page_token='abc',
+            ),
+            dataset_service.ListAnnotationsResponse(
+                annotations=[],
+                next_page_token='def',
+            ),
+            dataset_service.ListAnnotationsResponse(
+                annotations=[
+                    annotation.Annotation(),
+                ],
+                next_page_token='ghi',
+            ),
+            dataset_service.ListAnnotationsResponse(
+                annotations=[
+                    annotation.Annotation(),
+                    annotation.Annotation(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_annotations(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+
+def test_credentials_transport_error():
+    # It is an error to provide credentials and a transport instance.
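+    # A transport instance already carries its own credentials, so supplying
+    # a second set on the client would be ambiguous and must be rejected.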
+ transport = transports.DatasetServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.DatasetServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = DatasetServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.DatasetServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = DatasetServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.DatasetServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = DatasetServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.DatasetServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.DatasetServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.DatasetServiceGrpcTransport, + transports.DatasetServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.DatasetServiceGrpcTransport, + ) + +def test_dataset_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.DatasetServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_dataset_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1beta1.services.dataset_service.transports.DatasetServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.DatasetServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+    methods = (
+        'create_dataset',
+        'get_dataset',
+        'update_dataset',
+        'list_datasets',
+        'delete_dataset',
+        'import_data',
+        'export_data',
+        'list_data_items',
+        'get_annotation_spec',
+        'list_annotations',
+    )
+    for method in methods:
+        with pytest.raises(NotImplementedError):
+            getattr(transport, method)(request=object())
+
+    # Additionally, the LRO client (a property) should
+    # also raise NotImplementedError
+    with pytest.raises(NotImplementedError):
+        transport.operations_client
+
+
+@requires_google_auth_gte_1_25_0
+def test_dataset_service_base_transport_with_credentials_file():
+    # Instantiate the base transport with a credentials file
+    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.dataset_service.transports.DatasetServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.DatasetServiceTransport(
+            credentials_file="credentials.json",
+            quota_project_id="octopus",
+        )
+        load_creds.assert_called_once_with(
+            "credentials.json",
+            scopes=None,
+            default_scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id="octopus",
+        )
+
+
+@requires_google_auth_lt_1_25_0
+def test_dataset_service_base_transport_with_credentials_file_old_google_auth():
+    # Instantiate the base transport with a credentials file
+    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.dataset_service.transports.DatasetServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.DatasetServiceTransport(
+            credentials_file="credentials.json",
+            quota_project_id="octopus",
+        )
+        load_creds.assert_called_once_with(
+            "credentials.json",
+            scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id="octopus",
+        )
+
+
+def test_dataset_service_base_transport_with_adc():
+    # Test the default credentials are used if credentials and credentials_file are None.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.dataset_service.transports.DatasetServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.DatasetServiceTransport()
+        adc.assert_called_once()
+
+
+@requires_google_auth_gte_1_25_0
+def test_dataset_service_auth_adc():
+    # If no credentials are provided, we should use ADC credentials.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        DatasetServiceClient()
+        adc.assert_called_once_with(
+            scopes=None,
+            default_scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id=None,
+        )
+
+
+@requires_google_auth_lt_1_25_0
+def test_dataset_service_auth_adc_old_google_auth():
+    # If no credentials are provided, we should use ADC credentials.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        DatasetServiceClient()
+        adc.assert_called_once_with(
+            scopes=('https://www.googleapis.com/auth/cloud-platform',),
+            quota_project_id=None,
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.DatasetServiceGrpcTransport,
+        transports.DatasetServiceGrpcAsyncIOTransport,
+    ],
+)
+@requires_google_auth_gte_1_25_0
+def test_dataset_service_transport_auth_adc(transport_class):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport_class(quota_project_id="octopus", scopes=["1", "2"])
+        adc.assert_called_once_with(
+            scopes=["1", "2"],
+            default_scopes=('https://www.googleapis.com/auth/cloud-platform',),
+            quota_project_id="octopus",
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.DatasetServiceGrpcTransport,
+        transports.DatasetServiceGrpcAsyncIOTransport,
+    ],
+)
+@requires_google_auth_lt_1_25_0
+def test_dataset_service_transport_auth_adc_old_google_auth(transport_class):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
+    with mock.patch.object(google.auth, "default", autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport_class(quota_project_id="octopus")
+        adc.assert_called_once_with(
+            scopes=('https://www.googleapis.com/auth/cloud-platform',),
+            quota_project_id="octopus",
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class,grpc_helpers",
+    [
+        (transports.DatasetServiceGrpcTransport, grpc_helpers),
+        (transports.DatasetServiceGrpcAsyncIOTransport, grpc_helpers_async)
+    ],
+)
+def test_dataset_service_transport_create_channel(transport_class, grpc_helpers):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
+    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
+        grpc_helpers, "create_channel", autospec=True
+    ) as create_channel:
+        creds = ga_credentials.AnonymousCredentials()
+        adc.return_value = (creds, None)
+        transport_class(
+            quota_project_id="octopus",
+            scopes=["1", "2"]
+        )
+
+        create_channel.assert_called_with(
+            "aiplatform.googleapis.com:443",
+            credentials=creds,
+            credentials_file=None,
+            quota_project_id="octopus",
+            default_scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            scopes=["1", "2"],
+            default_host="aiplatform.googleapis.com",
+            ssl_credentials=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+
+@pytest.mark.parametrize("transport_class", [transports.DatasetServiceGrpcTransport, transports.DatasetServiceGrpcAsyncIOTransport])
+def test_dataset_service_grpc_transport_client_cert_source_for_mtls(
+    transport_class
+):
+    cred = ga_credentials.AnonymousCredentials()
+
+    # Check ssl_channel_credentials is used if provided.
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +def test_dataset_service_host_no_port(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + ) + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +def test_dataset_service_host_with_port(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + ) + assert client.transport._host == 'aiplatform.googleapis.com:8000' + +def test_dataset_service_grpc_transport_channel(): + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.DatasetServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_dataset_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.DatasetServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize("transport_class", [transports.DatasetServiceGrpcTransport, transports.DatasetServiceGrpcAsyncIOTransport]) +def test_dataset_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.DatasetServiceGrpcTransport, transports.DatasetServiceGrpcAsyncIOTransport]) +def test_dataset_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_dataset_service_grpc_lro_client(): + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. 
+    assert transport.operations_client is transport.operations_client
+
+
+def test_dataset_service_grpc_lro_async_client():
+    client = DatasetServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc_asyncio',
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(
+        transport.operations_client,
+        operations_v1.OperationsAsyncClient,
+    )
+
+    # Ensure that subsequent calls to the property return the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_annotation_path():
+    project = "squid"
+    location = "clam"
+    dataset = "whelk"
+    data_item = "octopus"
+    annotation = "oyster"
+    expected = "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}/annotations/{annotation}".format(project=project, location=location, dataset=dataset, data_item=data_item, annotation=annotation, )
+    actual = DatasetServiceClient.annotation_path(project, location, dataset, data_item, annotation)
+    assert expected == actual
+
+
+def test_parse_annotation_path():
+    expected = {
+        "project": "nudibranch",
+        "location": "cuttlefish",
+        "dataset": "mussel",
+        "data_item": "winkle",
+        "annotation": "nautilus",
+    }
+    path = DatasetServiceClient.annotation_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = DatasetServiceClient.parse_annotation_path(path)
+    assert expected == actual
+
+def test_annotation_spec_path():
+    project = "scallop"
+    location = "abalone"
+    dataset = "squid"
+    annotation_spec = "clam"
+    expected = "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(project=project, location=location, dataset=dataset, annotation_spec=annotation_spec, )
+    actual = DatasetServiceClient.annotation_spec_path(project, location, dataset, annotation_spec)
+    assert expected == actual
+
+
+def test_parse_annotation_spec_path():
+    expected = {
+        "project": "whelk",
+        "location": "octopus",
+        "dataset": "oyster",
+        "annotation_spec": "nudibranch",
+    }
+    path = DatasetServiceClient.annotation_spec_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = DatasetServiceClient.parse_annotation_spec_path(path)
+    assert expected == actual
+
+def test_data_item_path():
+    project = "cuttlefish"
+    location = "mussel"
+    dataset = "winkle"
+    data_item = "nautilus"
+    expected = "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}".format(project=project, location=location, dataset=dataset, data_item=data_item, )
+    actual = DatasetServiceClient.data_item_path(project, location, dataset, data_item)
+    assert expected == actual
+
+
+def test_parse_data_item_path():
+    expected = {
+        "project": "scallop",
+        "location": "abalone",
+        "dataset": "squid",
+        "data_item": "clam",
+    }
+    path = DatasetServiceClient.data_item_path(**expected)
+
+    # Check that the path construction is reversible.
+ actual = DatasetServiceClient.parse_data_item_path(path) + assert expected == actual + +def test_dataset_path(): + project = "whelk" + location = "octopus" + dataset = "oyster" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) + actual = DatasetServiceClient.dataset_path(project, location, dataset) + assert expected == actual + + +def test_parse_dataset_path(): + expected = { + "project": "nudibranch", + "location": "cuttlefish", + "dataset": "mussel", + } + path = DatasetServiceClient.dataset_path(**expected) + + # Check that the path construction is reversible. + actual = DatasetServiceClient.parse_dataset_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "winkle" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = DatasetServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nautilus", + } + path = DatasetServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = DatasetServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "scallop" + expected = "folders/{folder}".format(folder=folder, ) + actual = DatasetServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "abalone", + } + path = DatasetServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = DatasetServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "squid" + expected = "organizations/{organization}".format(organization=organization, ) + actual = DatasetServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "clam", + } + path = DatasetServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = DatasetServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "whelk" + expected = "projects/{project}".format(project=project, ) + actual = DatasetServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "octopus", + } + path = DatasetServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = DatasetServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "oyster" + location = "nudibranch" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = DatasetServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "cuttlefish", + "location": "mussel", + } + path = DatasetServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = DatasetServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.DatasetServiceTransport, '_prep_wrapped_messages') as prep: + client = DatasetServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.DatasetServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = DatasetServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py new file mode 100644 index 0000000000..6d26f8c489 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py @@ -0,0 +1,2868 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.endpoint_service import EndpointServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.endpoint_service import EndpointServiceClient +from google.cloud.aiplatform_v1beta1.services.endpoint_service import pagers +from google.cloud.aiplatform_v1beta1.services.endpoint_service import transports +from google.cloud.aiplatform_v1beta1.services.endpoint_service.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.aiplatform_v1beta1.types import accelerator_type +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import endpoint +from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint +from google.cloud.aiplatform_v1beta1.types import endpoint_service +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import explanation_metadata +from google.cloud.aiplatform_v1beta1.types import machine_resources +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.longrunning import operations_pb2 +from 
google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert EndpointServiceClient._get_default_mtls_endpoint(None) is None + assert EndpointServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert EndpointServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert EndpointServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert EndpointServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert EndpointServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + EndpointServiceClient, + EndpointServiceAsyncClient, +]) +def test_endpoint_service_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +@pytest.mark.parametrize("client_class", [ + EndpointServiceClient, + EndpointServiceAsyncClient, +]) +def test_endpoint_service_client_service_account_always_use_jwt(client_class): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.EndpointServiceGrpcTransport, "grpc"), + (transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_endpoint_service_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', 
create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + EndpointServiceClient, + EndpointServiceAsyncClient, +]) +def test_endpoint_service_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +def test_endpoint_service_client_get_transport_class(): + transport = EndpointServiceClient.get_transport_class() + available_transports = [ + transports.EndpointServiceGrpcTransport, + ] + assert transport in available_transports + + transport = EndpointServiceClient.get_transport_class("grpc") + assert transport == transports.EndpointServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"), + (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(EndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceClient)) +@mock.patch.object(EndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceAsyncClient)) +def test_endpoint_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(EndpointServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(EndpointServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
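+    # "never" pins the client to the plain endpoint even when a client
+    # certificate would otherwise be available.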
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc", "true"), + (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc", "false"), + (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(EndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceClient)) +@mock.patch.object(EndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_endpoint_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
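+    # Under "auto", the endpoint only switches to DEFAULT_MTLS_ENDPOINT when
+    # a client certificate is actually configured.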
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"), + (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_endpoint_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
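+    # User-supplied scopes must be forwarded to the transport verbatim.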
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"), + (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_endpoint_service_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_endpoint_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1beta1.services.endpoint_service.transports.EndpointServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = EndpointServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_endpoint(transport: str = 'grpc', request_type=endpoint_service.CreateEndpointRequest): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.CreateEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_endpoint_from_dict(): + test_create_endpoint(request_type=dict) + + +def test_create_endpoint_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
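As the from_dict test above shows, client options may be passed either as a ClientOptions instance or as a plain dict with the same keys (hypothetical regional host shown; the two spellings are interchangeable):

from google.api_core import client_options

opts_obj = client_options.ClientOptions(api_endpoint="us-central1-aiplatform.googleapis.com")
opts_dict = {"api_endpoint": "us-central1-aiplatform.googleapis.com"}
# EndpointServiceClient(client_options=opts_obj) and
# EndpointServiceClient(client_options=opts_dict) configure the same host.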
+ with mock.patch.object( + type(client.transport.create_endpoint), + '__call__') as call: + client.create_endpoint() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.CreateEndpointRequest() + + +@pytest.mark.asyncio +async def test_create_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.CreateEndpointRequest): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.CreateEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_endpoint_async_from_dict(): + await test_create_endpoint_async(request_type=dict) + + +def test_create_endpoint_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.CreateEndpointRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_endpoint), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_endpoint_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.CreateEndpointRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_endpoint), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
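The field-header tests here and below check the x-goog-request-params metadata entry that gapic_v1.routing_header builds from the request's resource fields; for example, with values mirroring the assertions in these tests:

from google.api_core import gapic_v1

header = gapic_v1.routing_header.to_grpc_metadata((('parent', 'parent/value'),))
assert header == ('x-goog-request-params', 'parent=parent/value')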
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_create_endpoint_flattened():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_endpoint),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.create_endpoint(
+            parent='parent_value',
+            endpoint=gca_endpoint.Endpoint(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].endpoint == gca_endpoint.Endpoint(name='name_value')
+
+
+def test_create_endpoint_flattened_error():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_endpoint(
+            endpoint_service.CreateEndpointRequest(),
+            parent='parent_value',
+            endpoint=gca_endpoint.Endpoint(name='name_value'),
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_endpoint_flattened_async():
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_endpoint),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_endpoint(
+            parent='parent_value',
+            endpoint=gca_endpoint.Endpoint(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].endpoint == gca_endpoint.Endpoint(name='name_value')
+
+
+@pytest.mark.asyncio
+async def test_create_endpoint_flattened_error_async():
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.create_endpoint(
+            endpoint_service.CreateEndpointRequest(),
+            parent='parent_value',
+            endpoint=gca_endpoint.Endpoint(name='name_value'),
+        )
+
+
+def test_get_endpoint(transport: str = 'grpc', request_type=endpoint_service.GetEndpointRequest):
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
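The flattened tests above establish that parent and endpoint may be passed as keyword arguments instead of a request object, but never alongside one. A usage sketch against the real service, assuming the aiplatform_v1 surface added in this patch, application default credentials, and a hypothetical project and location:

from google.cloud import aiplatform_v1

client = aiplatform_v1.EndpointServiceClient()
operation = client.create_endpoint(
    parent='projects/my-project/locations/us-central1',
    endpoint=aiplatform_v1.Endpoint(display_name='my-endpoint'),
)
created = operation.result()  # block until the long-running operation finishes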
+    with mock.patch.object(
+            type(client.transport.get_endpoint),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = endpoint.Endpoint(
+            name='name_value',
+            display_name='display_name_value',
+            description='description_value',
+            etag='etag_value',
+        )
+        response = client.get_endpoint(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == endpoint_service.GetEndpointRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, endpoint.Endpoint)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.description == 'description_value'
+    assert response.etag == 'etag_value'
+
+
+def test_get_endpoint_from_dict():
+    test_get_endpoint(request_type=dict)
+
+
+def test_get_endpoint_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_endpoint),
+            '__call__') as call:
+        client.get_endpoint()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == endpoint_service.GetEndpointRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.GetEndpointRequest):
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_endpoint),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint(
+            name='name_value',
+            display_name='display_name_value',
+            description='description_value',
+            etag='etag_value',
+        ))
+        response = await client.get_endpoint(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == endpoint_service.GetEndpointRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, endpoint.Endpoint)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.description == 'description_value'
+    assert response.etag == 'etag_value'
+
+
+@pytest.mark.asyncio
+async def test_get_endpoint_async_from_dict():
+    await test_get_endpoint_async(request_type=dict)
+
+
+def test_get_endpoint_field_headers():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = endpoint_service.GetEndpointRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
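The async variants wrap every canned response in grpc_helpers_async.FakeUnaryUnaryCall because the async client awaits the stub's result; a bare message would fail with "object is not awaitable". A stripped-down equivalent of that wrapper (a sketch, not the api-core implementation):

class FakeCall:
    """Awaitable that resolves immediately to a canned response."""

    def __init__(self, response):
        self._response = response

    def __await__(self):
        async def _resolve():
            return self._response
        return _resolve().__await__()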
+    with mock.patch.object(
+            type(client.transport.get_endpoint),
+            '__call__') as call:
+        call.return_value = endpoint.Endpoint()
+        client.get_endpoint(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_endpoint_field_headers_async():
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = endpoint_service.GetEndpointRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_endpoint),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint())
+        await client.get_endpoint(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_get_endpoint_flattened():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_endpoint),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = endpoint.Endpoint()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_endpoint(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_get_endpoint_flattened_error():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_endpoint(
+            endpoint_service.GetEndpointRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_endpoint_flattened_async():
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_endpoint),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_endpoint(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_endpoint_flattened_error_async():
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_endpoint(
+            endpoint_service.GetEndpointRequest(),
+            name='name_value',
+        )
+
+
+def test_list_endpoints(transport: str = 'grpc', request_type=endpoint_service.ListEndpointsRequest):
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_endpoints),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = endpoint_service.ListEndpointsResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_endpoints(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == endpoint_service.ListEndpointsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListEndpointsPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_endpoints_from_dict():
+    test_list_endpoints(request_type=dict)
+
+
+def test_list_endpoints_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_endpoints),
+            '__call__') as call:
+        client.list_endpoints()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == endpoint_service.ListEndpointsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_endpoints_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.ListEndpointsRequest):
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_endpoints),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint_service.ListEndpointsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_endpoints(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == endpoint_service.ListEndpointsRequest()
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListEndpointsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_endpoints_async_from_dict(): + await test_list_endpoints_async(request_type=dict) + + +def test_list_endpoints_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.ListEndpointsRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_endpoints), + '__call__') as call: + call.return_value = endpoint_service.ListEndpointsResponse() + client.list_endpoints(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_endpoints_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.ListEndpointsRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_endpoints), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint_service.ListEndpointsResponse()) + await client.list_endpoints(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_list_endpoints_flattened(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_endpoints), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = endpoint_service.ListEndpointsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_endpoints( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +def test_list_endpoints_flattened_error(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.list_endpoints(
+            endpoint_service.ListEndpointsRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_endpoints_flattened_async():
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_endpoints),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint_service.ListEndpointsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_endpoints(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_endpoints_flattened_error_async():
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_endpoints(
+            endpoint_service.ListEndpointsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_endpoints_pager():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_endpoints),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[
+                    endpoint.Endpoint(),
+                    endpoint.Endpoint(),
+                    endpoint.Endpoint(),
+                ],
+                next_page_token='abc',
+            ),
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[],
+                next_page_token='def',
+            ),
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[
+                    endpoint.Endpoint(),
+                ],
+                next_page_token='ghi',
+            ),
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[
+                    endpoint.Endpoint(),
+                    endpoint.Endpoint(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_endpoints(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, endpoint.Endpoint)
+                   for i in results)
+
+def test_list_endpoints_pages():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_endpoints),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[
+                    endpoint.Endpoint(),
+                    endpoint.Endpoint(),
+                    endpoint.Endpoint(),
+                ],
+                next_page_token='abc',
+            ),
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[],
+                next_page_token='def',
+            ),
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[
+                    endpoint.Endpoint(),
+                ],
+                next_page_token='ghi',
+            ),
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[
+                    endpoint.Endpoint(),
+                    endpoint.Endpoint(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_endpoints(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_endpoints_async_pager():
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_endpoints),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[
+                    endpoint.Endpoint(),
+                    endpoint.Endpoint(),
+                    endpoint.Endpoint(),
+                ],
+                next_page_token='abc',
+            ),
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[],
+                next_page_token='def',
+            ),
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[
+                    endpoint.Endpoint(),
+                ],
+                next_page_token='ghi',
+            ),
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[
+                    endpoint.Endpoint(),
+                    endpoint.Endpoint(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_endpoints(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, endpoint.Endpoint)
+                   for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_endpoints_async_pages():
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_endpoints),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[
+                    endpoint.Endpoint(),
+                    endpoint.Endpoint(),
+                    endpoint.Endpoint(),
+                ],
+                next_page_token='abc',
+            ),
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[],
+                next_page_token='def',
+            ),
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[
+                    endpoint.Endpoint(),
+                ],
+                next_page_token='ghi',
+            ),
+            endpoint_service.ListEndpointsResponse(
+                endpoints=[
+                    endpoint.Endpoint(),
+                    endpoint.Endpoint(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_endpoints(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+def test_update_endpoint(transport: str = 'grpc', request_type=endpoint_service.UpdateEndpointRequest):
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
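The pager tests above verify that iteration spans page boundaries transparently and that .pages yields each raw ListEndpointsResponse. In application code the same surface looks like this (a sketch, assuming the aiplatform_v1 surface, default credentials, and a hypothetical parent):

from google.cloud import aiplatform_v1

client = aiplatform_v1.EndpointServiceClient()
parent = 'projects/my-project/locations/us-central1'

# Item-by-item: the pager fetches further pages on demand.
for ep in client.list_endpoints(parent=parent):
    print(ep.name)

# Page-by-page: each page exposes the raw response fields.
for page in client.list_endpoints(parent=parent).pages:
    print(page.next_page_token, len(page.endpoints))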
+    with mock.patch.object(
+            type(client.transport.update_endpoint),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gca_endpoint.Endpoint(
+            name='name_value',
+            display_name='display_name_value',
+            description='description_value',
+            etag='etag_value',
+        )
+        response = client.update_endpoint(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == endpoint_service.UpdateEndpointRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, gca_endpoint.Endpoint)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.description == 'description_value'
+    assert response.etag == 'etag_value'
+
+
+def test_update_endpoint_from_dict():
+    test_update_endpoint(request_type=dict)
+
+
+def test_update_endpoint_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_endpoint),
+            '__call__') as call:
+        client.update_endpoint()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == endpoint_service.UpdateEndpointRequest()
+
+
+@pytest.mark.asyncio
+async def test_update_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.UpdateEndpointRequest):
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_endpoint),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_endpoint.Endpoint(
+            name='name_value',
+            display_name='display_name_value',
+            description='description_value',
+            etag='etag_value',
+        ))
+        response = await client.update_endpoint(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == endpoint_service.UpdateEndpointRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, gca_endpoint.Endpoint)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.description == 'description_value'
+    assert response.etag == 'etag_value'
+
+
+@pytest.mark.asyncio
+async def test_update_endpoint_async_from_dict():
+    await test_update_endpoint_async(request_type=dict)
+
+
+def test_update_endpoint_field_headers():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = endpoint_service.UpdateEndpointRequest()
+
+    request.endpoint.name = 'endpoint.name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.update_endpoint), + '__call__') as call: + call.return_value = gca_endpoint.Endpoint() + client.update_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint.name=endpoint.name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_endpoint_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.UpdateEndpointRequest() + + request.endpoint.name = 'endpoint.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_endpoint), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_endpoint.Endpoint()) + await client.update_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint.name=endpoint.name/value', + ) in kw['metadata'] + + +def test_update_endpoint_flattened(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_endpoint.Endpoint() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_endpoint( + endpoint=gca_endpoint.Endpoint(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].endpoint == gca_endpoint.Endpoint(name='name_value') + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) + + +def test_update_endpoint_flattened_error(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_endpoint( + endpoint_service.UpdateEndpointRequest(), + endpoint=gca_endpoint.Endpoint(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.asyncio +async def test_update_endpoint_flattened_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. 
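update_endpoint pairs the new Endpoint values with a FieldMask naming exactly the fields to overwrite; 'paths_value' in these tests is a placeholder, whereas a realistic call masks concrete fields (a sketch, assuming the aiplatform_v1 surface, default credentials, and a hypothetical endpoint name):

from google.cloud import aiplatform_v1
from google.protobuf import field_mask_pb2

client = aiplatform_v1.EndpointServiceClient()
updated = client.update_endpoint(
    endpoint=aiplatform_v1.Endpoint(
        name='projects/my-project/locations/us-central1/endpoints/123',
        display_name='renamed-endpoint',
    ),
    update_mask=field_mask_pb2.FieldMask(paths=['display_name']),
)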
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_endpoint.Endpoint())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.update_endpoint(
+            endpoint=gca_endpoint.Endpoint(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].endpoint == gca_endpoint.Endpoint(name='name_value')
+        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+@pytest.mark.asyncio
+async def test_update_endpoint_flattened_error_async():
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.update_endpoint(
+            endpoint_service.UpdateEndpointRequest(),
+            endpoint=gca_endpoint.Endpoint(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+
+def test_delete_endpoint(transport: str = 'grpc', request_type=endpoint_service.DeleteEndpointRequest):
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_endpoint),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.delete_endpoint(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == endpoint_service.DeleteEndpointRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+def test_delete_endpoint_from_dict():
+    test_delete_endpoint(request_type=dict)
+
+
+def test_delete_endpoint_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_endpoint),
+            '__call__') as call:
+        client.delete_endpoint()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == endpoint_service.DeleteEndpointRequest()
+
+
+@pytest.mark.asyncio
+async def test_delete_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.DeleteEndpointRequest):
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.delete_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.DeleteEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_endpoint_async_from_dict(): + await test_delete_endpoint_async(request_type=dict) + + +def test_delete_endpoint_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.DeleteEndpointRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_endpoint), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_endpoint_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.DeleteEndpointRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_endpoint), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_delete_endpoint_flattened(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_endpoint( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_delete_endpoint_flattened_error():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_endpoint(
+            endpoint_service.DeleteEndpointRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_endpoint_flattened_async():
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_endpoint),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_endpoint(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_endpoint_flattened_error_async():
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.delete_endpoint(
+            endpoint_service.DeleteEndpointRequest(),
+            name='name_value',
+        )
+
+
+def test_deploy_model(transport: str = 'grpc', request_type=endpoint_service.DeployModelRequest):
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.deploy_model),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.deploy_model(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == endpoint_service.DeployModelRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+def test_deploy_model_from_dict():
+    test_deploy_model(request_type=dict)
+
+
+def test_deploy_model_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + client.deploy_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.DeployModelRequest() + + +@pytest.mark.asyncio +async def test_deploy_model_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.DeployModelRequest): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.deploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.DeployModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_deploy_model_async_from_dict(): + await test_deploy_model_async(request_type=dict) + + +def test_deploy_model_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.DeployModelRequest() + + request.endpoint = 'endpoint/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.deploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_deploy_model_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.DeployModelRequest() + + request.endpoint = 'endpoint/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.deploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'endpoint=endpoint/value',
+    ) in kw['metadata']
+
+
+def test_deploy_model_flattened():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.deploy_model),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.deploy_model(
+            endpoint='endpoint_value',
+            deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))),
+            traffic_split={'key_value': 541},
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].endpoint == 'endpoint_value'
+        assert args[0].deployed_model == gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value')))
+        assert args[0].traffic_split == {'key_value': 541}
+
+
+def test_deploy_model_flattened_error():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.deploy_model(
+            endpoint_service.DeployModelRequest(),
+            endpoint='endpoint_value',
+            deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))),
+            traffic_split={'key_value': 541},
+        )
+
+
+@pytest.mark.asyncio
+async def test_deploy_model_flattened_async():
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.deploy_model),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.deploy_model(
+            endpoint='endpoint_value',
+            deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))),
+            traffic_split={'key_value': 541},
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
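In these tests traffic_split={'key_value': 541} is only an arbitrary mapping; in practice the keys are deployed-model IDs ('0' refers to the model being deployed in the same request) and the values are traffic percentages. A realistic sketch, assuming the aiplatform_v1 surface, default credentials, and hypothetical resource names:

from google.cloud import aiplatform_v1

client = aiplatform_v1.EndpointServiceClient()
operation = client.deploy_model(
    endpoint='projects/my-project/locations/us-central1/endpoints/123',
    deployed_model=aiplatform_v1.DeployedModel(
        model='projects/my-project/locations/us-central1/models/456',
        dedicated_resources=aiplatform_v1.DedicatedResources(
            machine_spec=aiplatform_v1.MachineSpec(machine_type='n1-standard-4'),
            min_replica_count=1,
        ),
    ),
    traffic_split={'0': 100},  # send all traffic to the new deployment
)
operation.result()  # block until the long-running operation finishes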
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].endpoint == 'endpoint_value' + assert args[0].deployed_model == gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))) + assert args[0].traffic_split == {'key_value': 541} + + +@pytest.mark.asyncio +async def test_deploy_model_flattened_error_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.deploy_model( + endpoint_service.DeployModelRequest(), + endpoint='endpoint_value', + deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))), + traffic_split={'key_value': 541}, + ) + + +def test_undeploy_model(transport: str = 'grpc', request_type=endpoint_service.UndeployModelRequest): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.undeploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.UndeployModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_undeploy_model_from_dict(): + test_undeploy_model(request_type=dict) + + +def test_undeploy_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + client.undeploy_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.UndeployModelRequest() + + +@pytest.mark.asyncio +async def test_undeploy_model_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.UndeployModelRequest): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.undeploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == endpoint_service.UndeployModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_undeploy_model_async_from_dict(): + await test_undeploy_model_async(request_type=dict) + + +def test_undeploy_model_field_headers(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.UndeployModelRequest() + + request.endpoint = 'endpoint/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.undeploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_undeploy_model_field_headers_async(): + client = EndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = endpoint_service.UndeployModelRequest() + + request.endpoint = 'endpoint/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.undeploy_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint/value', + ) in kw['metadata'] + + +def test_undeploy_model_flattened(): + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.undeploy_model( + endpoint='endpoint_value', + deployed_model_id='deployed_model_id_value', + traffic_split={'key_value': 541}, + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0].endpoint == 'endpoint_value'
+    assert args[0].deployed_model_id == 'deployed_model_id_value'
+    assert args[0].traffic_split == {'key_value': 541}
+
+
+def test_undeploy_model_flattened_error():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.undeploy_model(
+            endpoint_service.UndeployModelRequest(),
+            endpoint='endpoint_value',
+            deployed_model_id='deployed_model_id_value',
+            traffic_split={'key_value': 541},
+        )
+
+
+@pytest.mark.asyncio
+async def test_undeploy_model_flattened_async():
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.undeploy_model),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.undeploy_model(
+            endpoint='endpoint_value',
+            deployed_model_id='deployed_model_id_value',
+            traffic_split={'key_value': 541},
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].endpoint == 'endpoint_value'
+        assert args[0].deployed_model_id == 'deployed_model_id_value'
+        assert args[0].traffic_split == {'key_value': 541}
+
+
+@pytest.mark.asyncio
+async def test_undeploy_model_flattened_error_async():
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.undeploy_model(
+            endpoint_service.UndeployModelRequest(),
+            endpoint='endpoint_value',
+            deployed_model_id='deployed_model_id_value',
+            traffic_split={'key_value': 541},
+        )
+
+
+def test_credentials_transport_error():
+    # It is an error to provide credentials and a transport instance.
+    transport = transports.EndpointServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = EndpointServiceClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport,
+        )
+
+    # It is an error to provide a credentials file and a transport instance.
+    transport = transports.EndpointServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = EndpointServiceClient(
+            client_options={"credentials_file": "credentials.json"},
+            transport=transport,
+        )
+
+    # It is an error to provide scopes and a transport instance.
+    transport = transports.EndpointServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = EndpointServiceClient(
+            client_options={"scopes": ["1", "2"]},
+            transport=transport,
+        )
+
+
+def test_transport_instance():
+    # A client may be instantiated with a custom transport instance.
+ transport = transports.EndpointServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = EndpointServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.EndpointServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.EndpointServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.EndpointServiceGrpcTransport, + transports.EndpointServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.EndpointServiceGrpcTransport, + ) + +def test_endpoint_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.EndpointServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_endpoint_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1beta1.services.endpoint_service.transports.EndpointServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.EndpointServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
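+    # Editorial note: the base transport class only declares the method
+    # surface; the concrete gRPC transports are expected to override each
+    # of the stubs checked below.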
+ methods = ( + 'create_endpoint', + 'get_endpoint', + 'list_endpoints', + 'update_endpoint', + 'delete_endpoint', + 'deploy_model', + 'undeploy_model', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +@requires_google_auth_gte_1_25_0 +def test_endpoint_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.EndpointServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_endpoint_service_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.EndpointServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + ), + quota_project_id="octopus", + ) + + +def test_endpoint_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.EndpointServiceTransport() + adc.assert_called_once() + + +@requires_google_auth_gte_1_25_0 +def test_endpoint_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + EndpointServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_endpoint_service_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. 
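+    # Editorial note: google-auth releases before 1.25.0 predate the
+    # default_scopes argument, so the library passes its default scopes
+    # through the scopes argument instead, as asserted below.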
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + EndpointServiceClient() + adc.assert_called_once_with( + scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.EndpointServiceGrpcTransport, + transports.EndpointServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_endpoint_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.EndpointServiceGrpcTransport, + transports.EndpointServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_endpoint_service_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") + adc.assert_called_once_with(scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.EndpointServiceGrpcTransport, grpc_helpers), + (transports.EndpointServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_endpoint_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport]) +def test_endpoint_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
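+    # Editorial note: an explicitly supplied ssl_channel_credentials takes
+    # precedence; client_cert_source_for_mtls is only consulted when it is
+    # absent, as the second case below verifies.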
+    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+        mock_ssl_channel_creds = mock.Mock()
+        transport_class(
+            host="squid.clam.whelk",
+            credentials=cred,
+            ssl_channel_credentials=mock_ssl_channel_creds
+        )
+        mock_create_channel.assert_called_once_with(
+            "squid.clam.whelk:443",
+            credentials=cred,
+            credentials_file=None,
+            scopes=None,
+            ssl_credentials=mock_ssl_channel_creds,
+            quota_project_id=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+    # Check that if ssl_channel_credentials is not provided, then
+    # client_cert_source_for_mtls is used.
+    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+            transport_class(
+                credentials=cred,
+                client_cert_source_for_mtls=client_cert_source_callback
+            )
+            expected_cert, expected_key = client_cert_source_callback()
+            mock_ssl_cred.assert_called_once_with(
+                certificate_chain=expected_cert,
+                private_key=expected_key
+            )
+
+
+def test_endpoint_service_host_no_port():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
+    )
+    assert client.transport._host == 'aiplatform.googleapis.com:443'
+
+
+def test_endpoint_service_host_with_port():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
+    )
+    assert client.transport._host == 'aiplatform.googleapis.com:8000'
+
+def test_endpoint_service_grpc_transport_channel():
+    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.EndpointServiceGrpcTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+def test_endpoint_service_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.EndpointServiceGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport])
+def test_endpoint_service_transport_channel_mtls_with_client_cert_source(
+    transport_class
+):
+    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
+        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+            mock_ssl_cred = mock.Mock()
+            grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+
+            cred = ga_credentials.AnonymousCredentials()
+            with pytest.warns(DeprecationWarning):
+                with mock.patch.object(google.auth, 'default') as adc:
+                    adc.return_value = (cred, None)
+                    transport = transport_class(
+                        host="squid.clam.whelk",
+                        api_mtls_endpoint="mtls.squid.clam.whelk",
+                        client_cert_source=client_cert_source_callback,
+                    )
+                    adc.assert_called_once()
+
+            grpc_ssl_channel_cred.assert_called_once_with(
+                certificate_chain=b"cert bytes", private_key=b"key bytes"
+            )
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+            assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport])
+def test_endpoint_service_transport_channel_mtls_with_adc(
+    transport_class
+):
+    mock_ssl_cred = mock.Mock()
+    with mock.patch.multiple(
+        "google.auth.transport.grpc.SslCredentials",
+        __init__=mock.Mock(return_value=None),
+        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+    ):
+        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+            mock_cred = mock.Mock()
+
+            with pytest.warns(DeprecationWarning):
+                transport = transport_class(
+                    host="squid.clam.whelk",
+                    credentials=mock_cred,
+                    api_mtls_endpoint="mtls.squid.clam.whelk",
+                    client_cert_source=None,
+                )
+
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=mock_cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_endpoint_service_grpc_lro_client():
+    client = EndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(
+        transport.operations_client,
+        operations_v1.OperationsClient,
+    )
+
+    # Ensure that subsequent calls to the property send the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_endpoint_service_grpc_lro_async_client():
+    client = EndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc_asyncio',
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(
+        transport.operations_client,
+        operations_v1.OperationsAsyncClient,
+    )
+
+    # Ensure that subsequent calls to the property send the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_endpoint_path():
+    project = "squid"
+    location = "clam"
+    endpoint = "whelk"
+    expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, )
+    actual = EndpointServiceClient.endpoint_path(project, location, endpoint)
+    assert expected == actual
+
+
+def test_parse_endpoint_path():
+    expected = {
+        "project": "octopus",
+        "location": "oyster",
+        "endpoint": "nudibranch",
+    }
+    path = EndpointServiceClient.endpoint_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = EndpointServiceClient.parse_endpoint_path(path)
+    assert expected == actual
+
+def test_model_path():
+    project = "cuttlefish"
+    location = "mussel"
+    model = "winkle"
+    expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, )
+    actual = EndpointServiceClient.model_path(project, location, model)
+    assert expected == actual
+
+
+def test_parse_model_path():
+    expected = {
+        "project": "nautilus",
+        "location": "scallop",
+        "model": "abalone",
+    }
+    path = EndpointServiceClient.model_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = EndpointServiceClient.parse_model_path(path)
+    assert expected == actual
+
+def test_common_billing_account_path():
+    billing_account = "squid"
+    expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+    actual = EndpointServiceClient.common_billing_account_path(billing_account)
+    assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+    expected = {
+        "billing_account": "clam",
+    }
+    path = EndpointServiceClient.common_billing_account_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = EndpointServiceClient.parse_common_billing_account_path(path)
+    assert expected == actual
+
+def test_common_folder_path():
+    folder = "whelk"
+    expected = "folders/{folder}".format(folder=folder, )
+    actual = EndpointServiceClient.common_folder_path(folder)
+    assert expected == actual
+
+
+def test_parse_common_folder_path():
+    expected = {
+        "folder": "octopus",
+    }
+    path = EndpointServiceClient.common_folder_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = EndpointServiceClient.parse_common_folder_path(path)
+    assert expected == actual
+
+def test_common_organization_path():
+    organization = "oyster"
+    expected = "organizations/{organization}".format(organization=organization, )
+    actual = EndpointServiceClient.common_organization_path(organization)
+    assert expected == actual
+
+
+def test_parse_common_organization_path():
+    expected = {
+        "organization": "nudibranch",
+    }
+    path = EndpointServiceClient.common_organization_path(**expected)
+
+    # Check that the path construction is reversible.
+ actual = EndpointServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = EndpointServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = EndpointServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = EndpointServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = EndpointServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = EndpointServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = EndpointServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.EndpointServiceTransport, '_prep_wrapped_messages') as prep: + client = EndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.EndpointServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = EndpointServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py new file mode 100644 index 0000000000..323b59d601 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py @@ -0,0 +1,1391 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service import FeaturestoreOnlineServingServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service import FeaturestoreOnlineServingServiceClient +from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service import transports +from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.aiplatform_v1beta1.types import feature_selector +from google.cloud.aiplatform_v1beta1.types import featurestore_online_service +from google.oauth2 import service_account +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
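+# Editorial note: the mTLS variant inserts ".mtls" into the hostname, e.g.
+# "example.googleapis.com" -> "example.mtls.googleapis.com", as exercised by
+# test__get_default_mtls_endpoint below.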
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(None) is None + assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + FeaturestoreOnlineServingServiceClient, + FeaturestoreOnlineServingServiceAsyncClient, +]) +def test_featurestore_online_serving_service_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +@pytest.mark.parametrize("client_class", [ + FeaturestoreOnlineServingServiceClient, + FeaturestoreOnlineServingServiceAsyncClient, +]) +def test_featurestore_online_serving_service_client_service_account_always_use_jwt(client_class): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"), + (transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_featurestore_online_serving_service_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + FeaturestoreOnlineServingServiceClient, + FeaturestoreOnlineServingServiceAsyncClient, +]) +def test_featurestore_online_serving_service_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + 
+    assert isinstance(client, client_class)
+
+    assert client.transport._host == 'aiplatform.googleapis.com:443'
+
+
+def test_featurestore_online_serving_service_client_get_transport_class():
+    transport = FeaturestoreOnlineServingServiceClient.get_transport_class()
+    available_transports = [
+        transports.FeaturestoreOnlineServingServiceGrpcTransport,
+    ]
+    assert transport in available_transports
+
+    transport = FeaturestoreOnlineServingServiceClient.get_transport_class("grpc")
+    assert transport == transports.FeaturestoreOnlineServingServiceGrpcTransport
+
+
+@pytest.mark.parametrize("client_class,transport_class,transport_name", [
+    (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"),
+    (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio"),
+])
+@mock.patch.object(FeaturestoreOnlineServingServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceClient))
+@mock.patch.object(FeaturestoreOnlineServingServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceAsyncClient))
+def test_featurestore_online_serving_service_client_client_options(client_class, transport_class, transport_name):
+    # Check that if channel is provided we won't create a new one.
+    with mock.patch.object(FeaturestoreOnlineServingServiceClient, 'get_transport_class') as gtc:
+        transport = transport_class(
+            credentials=ga_credentials.AnonymousCredentials()
+        )
+        client = client_class(transport=transport)
+        gtc.assert_not_called()
+
+    # Check that if channel is provided via str we will create a new one.
+    with mock.patch.object(FeaturestoreOnlineServingServiceClient, 'get_transport_class') as gtc:
+        client = client_class(transport=transport_name)
+        gtc.assert_called()
+
+    # Check the case api_endpoint is provided.
+    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
+    with mock.patch.object(transport_class, '__init__') as patched:
+        patched.return_value = None
+        client = client_class(client_options=options)
+        patched.assert_called_once_with(
+            credentials=None,
+            credentials_file=None,
+            host="squid.clam.whelk",
+            scopes=None,
+            client_cert_source_for_mtls=None,
+            quota_project_id=None,
+            client_info=transports.base.DEFAULT_CLIENT_INFO,
+        )
+
+    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+    # "never".
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+        with mock.patch.object(transport_class, '__init__') as patched:
+            patched.return_value = None
+            client = client_class()
+            patched.assert_called_once_with(
+                credentials=None,
+                credentials_file=None,
+                host=client.DEFAULT_ENDPOINT,
+                scopes=None,
+                client_cert_source_for_mtls=None,
+                quota_project_id=None,
+                client_info=transports.base.DEFAULT_CLIENT_INFO,
+            )
+
+    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+    # "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc", "true"), + (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc", "false"), + (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(FeaturestoreOnlineServingServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceClient)) +@mock.patch.object(FeaturestoreOnlineServingServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_featurestore_online_serving_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
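+    # Editorial note (illustrative, not part of the generated test): a user
+    # opts in to client certificates by setting the environment variable
+    # GOOGLE_API_USE_CLIENT_CERTIFICATE=true and supplying a cert source via
+    # client_options.ClientOptions(client_cert_source=...).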
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"), + (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_featurestore_online_serving_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"), + (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_featurestore_online_serving_service_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_featurestore_online_serving_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = FeaturestoreOnlineServingServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_read_feature_values(transport: str = 'grpc', request_type=featurestore_online_service.ReadFeatureValuesRequest): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_online_service.ReadFeatureValuesResponse( + ) + response = client.read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_online_service.ReadFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, featurestore_online_service.ReadFeatureValuesResponse) + + +def test_read_feature_values_from_dict(): + test_read_feature_values(request_type=dict) + + +def test_read_feature_values_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+    client = FeaturestoreOnlineServingServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_feature_values),
+            '__call__') as call:
+        client.read_feature_values()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == featurestore_online_service.ReadFeatureValuesRequest()
+
+
+@pytest.mark.asyncio
+async def test_read_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_online_service.ReadFeatureValuesRequest):
+    client = FeaturestoreOnlineServingServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_feature_values),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_online_service.ReadFeatureValuesResponse(
+        ))
+        response = await client.read_feature_values(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == featurestore_online_service.ReadFeatureValuesRequest()
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, featurestore_online_service.ReadFeatureValuesResponse)
+
+
+@pytest.mark.asyncio
+async def test_read_feature_values_async_from_dict():
+    await test_read_feature_values_async(request_type=dict)
+
+
+def test_read_feature_values_field_headers():
+    client = FeaturestoreOnlineServingServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = featurestore_online_service.ReadFeatureValuesRequest()
+
+    request.entity_type = 'entity_type/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_feature_values),
+            '__call__') as call:
+        call.return_value = featurestore_online_service.ReadFeatureValuesResponse()
+        client.read_feature_values(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+        _, _, kw = call.mock_calls[0]
+        assert (
+            'x-goog-request-params',
+            'entity_type=entity_type/value',
+        ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_read_feature_values_field_headers_async():
+    client = FeaturestoreOnlineServingServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = featurestore_online_service.ReadFeatureValuesRequest()
+
+    request.entity_type = 'entity_type/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_feature_values),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_online_service.ReadFeatureValuesResponse())
+        await client.read_feature_values(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+        _, _, kw = call.mock_calls[0]
+        assert (
+            'x-goog-request-params',
+            'entity_type=entity_type/value',
+        ) in kw['metadata']
+
+
+def test_read_feature_values_flattened():
+    client = FeaturestoreOnlineServingServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_feature_values),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = featurestore_online_service.ReadFeatureValuesResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.read_feature_values(
+            entity_type='entity_type_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].entity_type == 'entity_type_value'
+
+
+def test_read_feature_values_flattened_error():
+    client = FeaturestoreOnlineServingServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.read_feature_values(
+            featurestore_online_service.ReadFeatureValuesRequest(),
+            entity_type='entity_type_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_read_feature_values_flattened_async():
+    client = FeaturestoreOnlineServingServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_feature_values),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_online_service.ReadFeatureValuesResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.read_feature_values(
+            entity_type='entity_type_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].entity_type == 'entity_type_value'
+
+
+@pytest.mark.asyncio
+async def test_read_feature_values_flattened_error_async():
+    client = FeaturestoreOnlineServingServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.read_feature_values( + featurestore_online_service.ReadFeatureValuesRequest(), + entity_type='entity_type_value', + ) + + +def test_streaming_read_feature_values(transport: str = 'grpc', request_type=featurestore_online_service.StreamingReadFeatureValuesRequest): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.streaming_read_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = iter([featurestore_online_service.ReadFeatureValuesResponse()]) + response = client.streaming_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest() + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, featurestore_online_service.ReadFeatureValuesResponse) + + +def test_streaming_read_feature_values_from_dict(): + test_streaming_read_feature_values(request_type=dict) + + +def test_streaming_read_feature_values_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.streaming_read_feature_values), + '__call__') as call: + client.streaming_read_feature_values() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest() + + +@pytest.mark.asyncio +async def test_streaming_read_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_online_service.StreamingReadFeatureValuesRequest): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.streaming_read_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock(side_effect=[featurestore_online_service.ReadFeatureValuesResponse()]) + response = await client.streaming_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest() + + # Establish that the response is the type that we expect. 
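+    # Editorial note: the stream is mocked as an aio.UnaryStreamCall whose
+    # read() yields a single ReadFeatureValuesResponse; awaiting read() below
+    # pulls that one message.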
+ message = await response.read() + assert isinstance(message, featurestore_online_service.ReadFeatureValuesResponse) + + +@pytest.mark.asyncio +async def test_streaming_read_feature_values_async_from_dict(): + await test_streaming_read_feature_values_async(request_type=dict) + + +def test_streaming_read_feature_values_field_headers(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_online_service.StreamingReadFeatureValuesRequest() + + request.entity_type = 'entity_type/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.streaming_read_feature_values), + '__call__') as call: + call.return_value = iter([featurestore_online_service.ReadFeatureValuesResponse()]) + client.streaming_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'entity_type=entity_type/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_streaming_read_feature_values_field_headers_async(): + client = FeaturestoreOnlineServingServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_online_service.StreamingReadFeatureValuesRequest() + + request.entity_type = 'entity_type/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.streaming_read_feature_values), + '__call__') as call: + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock(side_effect=[featurestore_online_service.ReadFeatureValuesResponse()]) + await client.streaming_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'entity_type=entity_type/value', + ) in kw['metadata'] + + +def test_streaming_read_feature_values_flattened(): + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.streaming_read_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = iter([featurestore_online_service.ReadFeatureValuesResponse()]) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.streaming_read_feature_values( + entity_type='entity_type_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0].entity_type == 'entity_type_value'
+
+
+def test_streaming_read_feature_values_flattened_error():
+    client = FeaturestoreOnlineServingServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.streaming_read_feature_values(
+            featurestore_online_service.StreamingReadFeatureValuesRequest(),
+            entity_type='entity_type_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_streaming_read_feature_values_flattened_async():
+    client = FeaturestoreOnlineServingServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.streaming_read_feature_values),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.streaming_read_feature_values(
+            entity_type='entity_type_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].entity_type == 'entity_type_value'
+
+
+@pytest.mark.asyncio
+async def test_streaming_read_feature_values_flattened_error_async():
+    client = FeaturestoreOnlineServingServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.streaming_read_feature_values(
+            featurestore_online_service.StreamingReadFeatureValuesRequest(),
+            entity_type='entity_type_value',
+        )
+
+
+def test_credentials_transport_error():
+    # It is an error to provide credentials and a transport instance.
+    transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = FeaturestoreOnlineServingServiceClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport,
+        )
+
+    # It is an error to provide a credentials file and a transport instance.
+    transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = FeaturestoreOnlineServingServiceClient(
+            client_options={"credentials_file": "credentials.json"},
+            transport=transport,
+        )
+
+    # It is an error to provide scopes and a transport instance.
+    transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = FeaturestoreOnlineServingServiceClient(
+            client_options={"scopes": ["1", "2"]},
+            transport=transport,
+        )
+
+
+def test_transport_instance():
+    # A client may be instantiated with a custom transport instance.
+ transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = FeaturestoreOnlineServingServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.FeaturestoreOnlineServingServiceGrpcTransport, + transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.FeaturestoreOnlineServingServiceGrpcTransport, + ) + +def test_featurestore_online_serving_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.FeaturestoreOnlineServingServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_featurestore_online_serving_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.FeaturestoreOnlineServingServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
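+    # (The base class only declares the RPC surface; the concrete gRPC
+    #  transports override each of these names with real stubs.)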
+    methods = (
+        'read_feature_values',
+        'streaming_read_feature_values',
+    )
+    for method in methods:
+        with pytest.raises(NotImplementedError):
+            getattr(transport, method)(request=object())
+
+
+@requires_google_auth_gte_1_25_0
+def test_featurestore_online_serving_service_base_transport_with_credentials_file():
+    # Instantiate the base transport with a credentials file
+    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.FeaturestoreOnlineServingServiceTransport(
+            credentials_file="credentials.json",
+            quota_project_id="octopus",
+        )
+        load_creds.assert_called_once_with("credentials.json",
+            scopes=None,
+            default_scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id="octopus",
+        )
+
+
+@requires_google_auth_lt_1_25_0
+def test_featurestore_online_serving_service_base_transport_with_credentials_file_old_google_auth():
+    # Instantiate the base transport with a credentials file
+    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.FeaturestoreOnlineServingServiceTransport(
+            credentials_file="credentials.json",
+            quota_project_id="octopus",
+        )
+        load_creds.assert_called_once_with("credentials.json",
+            scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id="octopus",
+        )
+
+
+def test_featurestore_online_serving_service_base_transport_with_adc():
+    # Test the default credentials are used if credentials and credentials_file are None.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.FeaturestoreOnlineServingServiceTransport()
+        adc.assert_called_once()
+
+
+@requires_google_auth_gte_1_25_0
+def test_featurestore_online_serving_service_auth_adc():
+    # If no credentials are provided, we should use ADC credentials.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        FeaturestoreOnlineServingServiceClient()
+        adc.assert_called_once_with(
+            scopes=None,
+            default_scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id=None,
+        )
+
+
+@requires_google_auth_lt_1_25_0
+def test_featurestore_online_serving_service_auth_adc_old_google_auth():
+    # If no credentials are provided, we should use ADC credentials.
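+    # (ADC = Application Default Credentials, resolved via google.auth.default();
+    #  the patch below substitutes anonymous credentials for that lookup so no
+    #  real auth flow runs.)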
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        FeaturestoreOnlineServingServiceClient()
+        adc.assert_called_once_with(
+            scopes=('https://www.googleapis.com/auth/cloud-platform',),
+            quota_project_id=None,
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.FeaturestoreOnlineServingServiceGrpcTransport,
+        transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
+    ],
+)
+@requires_google_auth_gte_1_25_0
+def test_featurestore_online_serving_service_transport_auth_adc(transport_class):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport_class(quota_project_id="octopus", scopes=["1", "2"])
+        adc.assert_called_once_with(
+            scopes=["1", "2"],
+            default_scopes=('https://www.googleapis.com/auth/cloud-platform',),
+            quota_project_id="octopus",
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.FeaturestoreOnlineServingServiceGrpcTransport,
+        transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport,
+    ],
+)
+@requires_google_auth_lt_1_25_0
+def test_featurestore_online_serving_service_transport_auth_adc_old_google_auth(transport_class):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
+    with mock.patch.object(google.auth, "default", autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport_class(quota_project_id="octopus")
+        adc.assert_called_once_with(
+            scopes=('https://www.googleapis.com/auth/cloud-platform',),
+            quota_project_id="octopus",
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class,grpc_helpers",
+    [
+        (transports.FeaturestoreOnlineServingServiceGrpcTransport, grpc_helpers),
+        (transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, grpc_helpers_async)
+    ],
+)
+def test_featurestore_online_serving_service_transport_create_channel(transport_class, grpc_helpers):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
+    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
+        grpc_helpers, "create_channel", autospec=True
+    ) as create_channel:
+        creds = ga_credentials.AnonymousCredentials()
+        adc.return_value = (creds, None)
+        transport_class(
+            quota_project_id="octopus",
+            scopes=["1", "2"]
+        )
+
+        create_channel.assert_called_with(
+            "aiplatform.googleapis.com:443",
+            credentials=creds,
+            credentials_file=None,
+            quota_project_id="octopus",
+            default_scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            scopes=["1", "2"],
+            default_host="aiplatform.googleapis.com",
+            ssl_credentials=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+
+@pytest.mark.parametrize("transport_class", [transports.FeaturestoreOnlineServingServiceGrpcTransport, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport])
+def test_featurestore_online_serving_service_grpc_transport_client_cert_source_for_mtls(
+    transport_class
+):
+    cred = ga_credentials.AnonymousCredentials()
+
+    # Check ssl_channel_credentials is used if provided.
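+    # (Precedence, per the two blocks below: an explicit ssl_channel_credentials
+    #  wins; otherwise client_cert_source_for_mtls is used to mint channel
+    #  credentials through grpc.ssl_channel_credentials().)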
+    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+        mock_ssl_channel_creds = mock.Mock()
+        transport_class(
+            host="squid.clam.whelk",
+            credentials=cred,
+            ssl_channel_credentials=mock_ssl_channel_creds
+        )
+        mock_create_channel.assert_called_once_with(
+            "squid.clam.whelk:443",
+            credentials=cred,
+            credentials_file=None,
+            scopes=None,
+            ssl_credentials=mock_ssl_channel_creds,
+            quota_project_id=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+    # is used.
+    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+            transport_class(
+                credentials=cred,
+                client_cert_source_for_mtls=client_cert_source_callback
+            )
+            expected_cert, expected_key = client_cert_source_callback()
+            mock_ssl_cred.assert_called_once_with(
+                certificate_chain=expected_cert,
+                private_key=expected_key
+            )
+
+
+def test_featurestore_online_serving_service_host_no_port():
+    client = FeaturestoreOnlineServingServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
+    )
+    assert client.transport._host == 'aiplatform.googleapis.com:443'
+
+
+def test_featurestore_online_serving_service_host_with_port():
+    client = FeaturestoreOnlineServingServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
+    )
+    assert client.transport._host == 'aiplatform.googleapis.com:8000'
+
+def test_featurestore_online_serving_service_grpc_transport_channel():
+    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+def test_featurestore_online_serving_service_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
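+# (Both arguments predate the client_options-based mTLS flow; the tests below
+#  pin the legacy behavior, including the DeprecationWarning, until removal.)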
+@pytest.mark.parametrize("transport_class", [transports.FeaturestoreOnlineServingServiceGrpcTransport, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport]) +def test_featurestore_online_serving_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.FeaturestoreOnlineServingServiceGrpcTransport, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport]) +def test_featurestore_online_serving_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_entity_type_path(): + project = "squid" + location = "clam" + featurestore = "whelk" + entity_type = "octopus" + expected = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, ) + actual = FeaturestoreOnlineServingServiceClient.entity_type_path(project, location, featurestore, entity_type) + assert expected == actual + + +def test_parse_entity_type_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "featurestore": "cuttlefish", + 
"entity_type": "mussel", + } + path = FeaturestoreOnlineServingServiceClient.entity_type_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreOnlineServingServiceClient.parse_entity_type_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "winkle" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = FeaturestoreOnlineServingServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nautilus", + } + path = FeaturestoreOnlineServingServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreOnlineServingServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "scallop" + expected = "folders/{folder}".format(folder=folder, ) + actual = FeaturestoreOnlineServingServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "abalone", + } + path = FeaturestoreOnlineServingServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreOnlineServingServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "squid" + expected = "organizations/{organization}".format(organization=organization, ) + actual = FeaturestoreOnlineServingServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "clam", + } + path = FeaturestoreOnlineServingServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreOnlineServingServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "whelk" + expected = "projects/{project}".format(project=project, ) + actual = FeaturestoreOnlineServingServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "octopus", + } + path = FeaturestoreOnlineServingServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreOnlineServingServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "oyster" + location = "nudibranch" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = FeaturestoreOnlineServingServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "cuttlefish", + "location": "mussel", + } + path = FeaturestoreOnlineServingServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FeaturestoreOnlineServingServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.FeaturestoreOnlineServingServiceTransport, '_prep_wrapped_messages') as prep: + client = FeaturestoreOnlineServingServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.FeaturestoreOnlineServingServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = FeaturestoreOnlineServingServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py new file mode 100644 index 0000000000..99d37926b2 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py @@ -0,0 +1,6489 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.featurestore_service import FeaturestoreServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.featurestore_service import FeaturestoreServiceClient +from google.cloud.aiplatform_v1beta1.services.featurestore_service import pagers +from google.cloud.aiplatform_v1beta1.services.featurestore_service import transports +from google.cloud.aiplatform_v1beta1.services.featurestore_service.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.aiplatform_v1beta1.types import entity_type +from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type +from google.cloud.aiplatform_v1beta1.types import feature +from google.cloud.aiplatform_v1beta1.types import feature as gca_feature +from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats +from google.cloud.aiplatform_v1beta1.types import feature_selector +from google.cloud.aiplatform_v1beta1.types import featurestore +from google.cloud.aiplatform_v1beta1.types import featurestore as gca_featurestore +from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring +from google.cloud.aiplatform_v1beta1.types import featurestore_service +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
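+# (Convention under test: _get_default_mtls_endpoint rewrites
+#  "<name>.googleapis.com" to "<name>.mtls.googleapis.com", which
+#  test__get_default_mtls_endpoint below exercises case by case.)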
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert FeaturestoreServiceClient._get_default_mtls_endpoint(None) is None + assert FeaturestoreServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert FeaturestoreServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert FeaturestoreServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert FeaturestoreServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert FeaturestoreServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + FeaturestoreServiceClient, + FeaturestoreServiceAsyncClient, +]) +def test_featurestore_service_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +@pytest.mark.parametrize("client_class", [ + FeaturestoreServiceClient, + FeaturestoreServiceAsyncClient, +]) +def test_featurestore_service_client_service_account_always_use_jwt(client_class): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.FeaturestoreServiceGrpcTransport, "grpc"), + (transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_featurestore_service_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + FeaturestoreServiceClient, + FeaturestoreServiceAsyncClient, +]) +def test_featurestore_service_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +def test_featurestore_service_client_get_transport_class(): + transport = FeaturestoreServiceClient.get_transport_class() + 
available_transports = [ + transports.FeaturestoreServiceGrpcTransport, + ] + assert transport in available_transports + + transport = FeaturestoreServiceClient.get_transport_class("grpc") + assert transport == transports.FeaturestoreServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc"), + (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(FeaturestoreServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreServiceClient)) +@mock.patch.object(FeaturestoreServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreServiceAsyncClient)) +def test_featurestore_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(FeaturestoreServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(FeaturestoreServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc", "true"), + (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc", "false"), + (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(FeaturestoreServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreServiceClient)) +@mock.patch.object(FeaturestoreServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_featurestore_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc"), + (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_featurestore_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc"), + (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_featurestore_service_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_featurestore_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_service.transports.FeaturestoreServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = FeaturestoreServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_featurestore(transport: str = 'grpc', request_type=featurestore_service.CreateFeaturestoreRequest): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateFeaturestoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_featurestore_from_dict(): + test_create_featurestore(request_type=dict) + + +def test_create_featurestore_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_featurestore), + '__call__') as call: + client.create_featurestore() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateFeaturestoreRequest() + + +@pytest.mark.asyncio +async def test_create_featurestore_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.CreateFeaturestoreRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
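+    # (grpc_helpers_async.FakeUnaryUnaryCall wraps the canned response in an
+    #  awaitable, standing in for a real aio.UnaryUnaryCall.)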
+ with mock.patch.object( + type(client.transport.create_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateFeaturestoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_featurestore_async_from_dict(): + await test_create_featurestore_async(request_type=dict) + + +def test_create_featurestore_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.CreateFeaturestoreRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_featurestore), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_featurestore_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.CreateFeaturestoreRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_featurestore), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_create_featurestore_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_featurestore( + parent='parent_value', + featurestore=gca_featurestore.Featurestore(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].featurestore == gca_featurestore.Featurestore(name='name_value')
+
+
+def test_create_featurestore_flattened_error():
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_featurestore(
+            featurestore_service.CreateFeaturestoreRequest(),
+            parent='parent_value',
+            featurestore=gca_featurestore.Featurestore(name='name_value'),
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_featurestore_flattened_async():
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_featurestore),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_featurestore(
+            parent='parent_value',
+            featurestore=gca_featurestore.Featurestore(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].featurestore == gca_featurestore.Featurestore(name='name_value')
+
+
+@pytest.mark.asyncio
+async def test_create_featurestore_flattened_error_async():
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.create_featurestore(
+            featurestore_service.CreateFeaturestoreRequest(),
+            parent='parent_value',
+            featurestore=gca_featurestore.Featurestore(name='name_value'),
+        )
+
+
+def test_get_featurestore(transport: str = 'grpc', request_type=featurestore_service.GetFeaturestoreRequest):
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_featurestore),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = featurestore.Featurestore(
+            name='name_value',
+            etag='etag_value',
+            state=featurestore.Featurestore.State.STABLE,
+        )
+        response = client.get_featurestore(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == featurestore_service.GetFeaturestoreRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, featurestore.Featurestore)
+    assert response.name == 'name_value'
+    assert response.etag == 'etag_value'
+    assert response.state == featurestore.Featurestore.State.STABLE
+
+
+def test_get_featurestore_from_dict():
+    test_get_featurestore(request_type=dict)
+
+
+def test_get_featurestore_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_featurestore),
+            '__call__') as call:
+        client.get_featurestore()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == featurestore_service.GetFeaturestoreRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_featurestore_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.GetFeaturestoreRequest):
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_featurestore),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore.Featurestore(
+            name='name_value',
+            etag='etag_value',
+            state=featurestore.Featurestore.State.STABLE,
+        ))
+        response = await client.get_featurestore(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == featurestore_service.GetFeaturestoreRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, featurestore.Featurestore)
+    assert response.name == 'name_value'
+    assert response.etag == 'etag_value'
+    assert response.state == featurestore.Featurestore.State.STABLE
+
+
+@pytest.mark.asyncio
+async def test_get_featurestore_async_from_dict():
+    await test_get_featurestore_async(request_type=dict)
+
+
+def test_get_featurestore_field_headers():
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = featurestore_service.GetFeaturestoreRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_featurestore),
+            '__call__') as call:
+        call.return_value = featurestore.Featurestore()
+        client.get_featurestore(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_featurestore_field_headers_async():
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = featurestore_service.GetFeaturestoreRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_featurestore),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore.Featurestore())
+        await client.get_featurestore(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_get_featurestore_flattened():
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_featurestore),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = featurestore.Featurestore()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_featurestore(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_get_featurestore_flattened_error():
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_featurestore(
+            featurestore_service.GetFeaturestoreRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_featurestore_flattened_async():
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_featurestore),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore.Featurestore())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_featurestore(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_featurestore_flattened_error_async():
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_featurestore(
+            featurestore_service.GetFeaturestoreRequest(),
+            name='name_value',
+        )
+
+
+def test_list_featurestores(transport: str = 'grpc', request_type=featurestore_service.ListFeaturestoresRequest):
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_featurestores),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = featurestore_service.ListFeaturestoresResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_featurestores(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == featurestore_service.ListFeaturestoresRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListFeaturestoresPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_featurestores_from_dict():
+    test_list_featurestores(request_type=dict)
+
+
+def test_list_featurestores_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_featurestores),
+            '__call__') as call:
+        client.list_featurestores()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == featurestore_service.ListFeaturestoresRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_featurestores_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ListFeaturestoresRequest):
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_featurestores),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturestoresResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_featurestores(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == featurestore_service.ListFeaturestoresRequest()
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListFeaturestoresAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_featurestores_async_from_dict(): + await test_list_featurestores_async(request_type=dict) + + +def test_list_featurestores_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ListFeaturestoresRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + '__call__') as call: + call.return_value = featurestore_service.ListFeaturestoresResponse() + client.list_featurestores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_featurestores_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ListFeaturestoresRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturestoresResponse()) + await client.list_featurestores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_list_featurestores_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_featurestores), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListFeaturestoresResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_featurestores( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +def test_list_featurestores_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError):
+ client.list_featurestores(
+ featurestore_service.ListFeaturestoresRequest(),
+ parent='parent_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_featurestores_flattened_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_featurestores),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturestoresResponse())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_featurestores(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_featurestores_flattened_error_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_featurestores(
+ featurestore_service.ListFeaturestoresRequest(),
+ parent='parent_value',
+ )
+
+
+def test_list_featurestores_pager():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_featurestores),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ featurestore_service.ListFeaturestoresResponse(
+ featurestores=[
+ featurestore.Featurestore(),
+ featurestore.Featurestore(),
+ featurestore.Featurestore(),
+ ],
+ next_page_token='abc',
+ ),
+ featurestore_service.ListFeaturestoresResponse(
+ featurestores=[],
+ next_page_token='def',
+ ),
+ featurestore_service.ListFeaturestoresResponse(
+ featurestores=[
+ featurestore.Featurestore(),
+ ],
+ next_page_token='ghi',
+ ),
+ featurestore_service.ListFeaturestoresResponse(
+ featurestores=[
+ featurestore.Featurestore(),
+ featurestore.Featurestore(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ('parent', ''),
+ )),
+ )
+ pager = client.list_featurestores(request={})
+
+ assert pager._metadata == metadata
+
+ results = [i for i in pager]
+ assert len(results) == 6
+ assert all(isinstance(i, featurestore.Featurestore)
+ for i in results)
+
+def test_list_featurestores_pages():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_featurestores),
+ '__call__') as call:
+ # Set the response to a series of pages.
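+ # Each call on the mocked stub pops the next staged response; the trailing
+ # RuntimeError is a sentinel that fails the test if the pager ever requests
+ # more pages than were staged.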
+ call.side_effect = (
+ featurestore_service.ListFeaturestoresResponse(
+ featurestores=[
+ featurestore.Featurestore(),
+ featurestore.Featurestore(),
+ featurestore.Featurestore(),
+ ],
+ next_page_token='abc',
+ ),
+ featurestore_service.ListFeaturestoresResponse(
+ featurestores=[],
+ next_page_token='def',
+ ),
+ featurestore_service.ListFeaturestoresResponse(
+ featurestores=[
+ featurestore.Featurestore(),
+ ],
+ next_page_token='ghi',
+ ),
+ featurestore_service.ListFeaturestoresResponse(
+ featurestores=[
+ featurestore.Featurestore(),
+ featurestore.Featurestore(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_featurestores(request={}).pages)
+ for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+ assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_featurestores_async_pager():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_featurestores),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ featurestore_service.ListFeaturestoresResponse(
+ featurestores=[
+ featurestore.Featurestore(),
+ featurestore.Featurestore(),
+ featurestore.Featurestore(),
+ ],
+ next_page_token='abc',
+ ),
+ featurestore_service.ListFeaturestoresResponse(
+ featurestores=[],
+ next_page_token='def',
+ ),
+ featurestore_service.ListFeaturestoresResponse(
+ featurestores=[
+ featurestore.Featurestore(),
+ ],
+ next_page_token='ghi',
+ ),
+ featurestore_service.ListFeaturestoresResponse(
+ featurestores=[
+ featurestore.Featurestore(),
+ featurestore.Featurestore(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_featurestores(request={},)
+ assert async_pager.next_page_token == 'abc'
+ responses = []
+ async for response in async_pager:
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, featurestore.Featurestore)
+ for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_featurestores_async_pages():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_featurestores),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = ( + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + next_page_token='abc', + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[], + next_page_token='def', + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + ], + next_page_token='ghi', + ), + featurestore_service.ListFeaturestoresResponse( + featurestores=[ + featurestore.Featurestore(), + featurestore.Featurestore(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_featurestores(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +def test_update_featurestore(transport: str = 'grpc', request_type=featurestore_service.UpdateFeaturestoreRequest): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.update_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateFeaturestoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_featurestore_from_dict(): + test_update_featurestore(request_type=dict) + + +def test_update_featurestore_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), + '__call__') as call: + client.update_featurestore() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateFeaturestoreRequest() + + +@pytest.mark.asyncio +async def test_update_featurestore_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.UpdateFeaturestoreRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.update_featurestore(request) + + # Establish that the underlying gRPC stub method was called. 
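+ # (update_featurestore is a long-running operation; only the initial RPC is
+ # verified here, and the returned operation future is never resolved.)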
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateFeaturestoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_featurestore_async_from_dict(): + await test_update_featurestore_async(request_type=dict) + + +def test_update_featurestore_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.UpdateFeaturestoreRequest() + + request.featurestore.name = 'featurestore.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.update_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'featurestore.name=featurestore.name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_featurestore_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.UpdateFeaturestoreRequest() + + request.featurestore.name = 'featurestore.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.update_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'featurestore.name=featurestore.name/value', + ) in kw['metadata'] + + +def test_update_featurestore_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_featurestore( + featurestore=gca_featurestore.Featurestore(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].featurestore == gca_featurestore.Featurestore(name='name_value')
+ assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+def test_update_featurestore_flattened_error():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.update_featurestore(
+ featurestore_service.UpdateFeaturestoreRequest(),
+ featurestore=gca_featurestore.Featurestore(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+
+@pytest.mark.asyncio
+async def test_update_featurestore_flattened_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_featurestore),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name='operations/spam')
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.update_featurestore(
+ featurestore=gca_featurestore.Featurestore(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].featurestore == gca_featurestore.Featurestore(name='name_value')
+ assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+@pytest.mark.asyncio
+async def test_update_featurestore_flattened_error_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.update_featurestore(
+ featurestore_service.UpdateFeaturestoreRequest(),
+ featurestore=gca_featurestore.Featurestore(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+
+def test_delete_featurestore(transport: str = 'grpc', request_type=featurestore_service.DeleteFeaturestoreRequest):
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_featurestore),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name='operations/spam')
+ response = client.delete_featurestore(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == featurestore_service.DeleteFeaturestoreRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future) + + +def test_delete_featurestore_from_dict(): + test_delete_featurestore(request_type=dict) + + +def test_delete_featurestore_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_featurestore), + '__call__') as call: + client.delete_featurestore() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteFeaturestoreRequest() + + +@pytest.mark.asyncio +async def test_delete_featurestore_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.DeleteFeaturestoreRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_featurestore), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteFeaturestoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_featurestore_async_from_dict(): + await test_delete_featurestore_async(request_type=dict) + + +def test_delete_featurestore_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteFeaturestoreRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_featurestore), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_featurestore(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_featurestore_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteFeaturestoreRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. 
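+ # (Patching the transport's bound method keeps the test hermetic: no channel
+ # is opened and the anonymous credentials are never exercised.)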
+ with mock.patch.object(
+ type(client.transport.delete_featurestore),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
+ await client.delete_featurestore(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+def test_delete_featurestore_flattened():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_featurestore),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name='operations/op')
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.delete_featurestore(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+def test_delete_featurestore_flattened_error():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_featurestore(
+ featurestore_service.DeleteFeaturestoreRequest(),
+ name='name_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_delete_featurestore_flattened_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_featurestore),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name='operations/spam')
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_featurestore(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_featurestore_flattened_error_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.delete_featurestore(
+ featurestore_service.DeleteFeaturestoreRequest(),
+ name='name_value',
+ )
+
+
+def test_create_entity_type(transport: str = 'grpc', request_type=featurestore_service.CreateEntityTypeRequest):
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_entity_type_from_dict(): + test_create_entity_type(request_type=dict) + + +def test_create_entity_type_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_entity_type), + '__call__') as call: + client.create_entity_type() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateEntityTypeRequest() + + +@pytest.mark.asyncio +async def test_create_entity_type_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.CreateEntityTypeRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_entity_type_async_from_dict(): + await test_create_entity_type_async(request_type=dict) + + +def test_create_entity_type_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.CreateEntityTypeRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_entity_type), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
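+ # (Routing information travels in gRPC metadata under the
+ # 'x-goog-request-params' key rather than in the request payload itself.)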
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'parent=parent/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_create_entity_type_field_headers_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = featurestore_service.CreateEntityTypeRequest()
+
+ request.parent = 'parent/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_entity_type),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
+ await client.create_entity_type(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'parent=parent/value',
+ ) in kw['metadata']
+
+
+def test_create_entity_type_flattened():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_entity_type),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name='operations/op')
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.create_entity_type(
+ parent='parent_value',
+ entity_type=gca_entity_type.EntityType(name='name_value'),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+ assert args[0].entity_type == gca_entity_type.EntityType(name='name_value')
+
+
+def test_create_entity_type_flattened_error():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_entity_type(
+ featurestore_service.CreateEntityTypeRequest(),
+ parent='parent_value',
+ entity_type=gca_entity_type.EntityType(name='name_value'),
+ )
+
+
+@pytest.mark.asyncio
+async def test_create_entity_type_flattened_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_entity_type),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name='operations/spam')
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.create_entity_type(
+ parent='parent_value',
+ entity_type=gca_entity_type.EntityType(name='name_value'),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].entity_type == gca_entity_type.EntityType(name='name_value') + + +@pytest.mark.asyncio +async def test_create_entity_type_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_entity_type( + featurestore_service.CreateEntityTypeRequest(), + parent='parent_value', + entity_type=gca_entity_type.EntityType(name='name_value'), + ) + + +def test_get_entity_type(transport: str = 'grpc', request_type=featurestore_service.GetEntityTypeRequest): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = entity_type.EntityType( + name='name_value', + description='description_value', + etag='etag_value', + ) + response = client.get_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.GetEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, entity_type.EntityType) + assert response.name == 'name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + + +def test_get_entity_type_from_dict(): + test_get_entity_type(request_type=dict) + + +def test_get_entity_type_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_entity_type), + '__call__') as call: + client.get_entity_type() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.GetEntityTypeRequest() + + +@pytest.mark.asyncio +async def test_get_entity_type_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.GetEntityTypeRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. 
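+ # (FakeUnaryUnaryCall wraps the response so the mocked stub is awaitable,
+ # standing in for a real grpc.aio unary-unary call.)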
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(entity_type.EntityType(
+ name='name_value',
+ description='description_value',
+ etag='etag_value',
+ ))
+ response = await client.get_entity_type(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == featurestore_service.GetEntityTypeRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, entity_type.EntityType)
+ assert response.name == 'name_value'
+ assert response.description == 'description_value'
+ assert response.etag == 'etag_value'
+
+
+@pytest.mark.asyncio
+async def test_get_entity_type_async_from_dict():
+ await test_get_entity_type_async(request_type=dict)
+
+
+def test_get_entity_type_field_headers():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = featurestore_service.GetEntityTypeRequest()
+
+ request.name = 'name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_entity_type),
+ '__call__') as call:
+ call.return_value = entity_type.EntityType()
+ client.get_entity_type(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_entity_type_field_headers_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = featurestore_service.GetEntityTypeRequest()
+
+ request.name = 'name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_entity_type),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(entity_type.EntityType())
+ await client.get_entity_type(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+def test_get_entity_type_flattened():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_entity_type),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = entity_type.EntityType()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_entity_type(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+def test_get_entity_type_flattened_error():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_entity_type(
+ featurestore_service.GetEntityTypeRequest(),
+ name='name_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_entity_type_flattened_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_entity_type),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(entity_type.EntityType())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_entity_type(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_entity_type_flattened_error_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_entity_type(
+ featurestore_service.GetEntityTypeRequest(),
+ name='name_value',
+ )
+
+
+def test_list_entity_types(transport: str = 'grpc', request_type=featurestore_service.ListEntityTypesRequest):
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_entity_types),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = featurestore_service.ListEntityTypesResponse(
+ next_page_token='next_page_token_value',
+ )
+ response = client.list_entity_types(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == featurestore_service.ListEntityTypesRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListEntityTypesPager)
+ assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_entity_types_from_dict():
+ test_list_entity_types(request_type=dict)
+
+
+def test_list_entity_types_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
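+ # (With no request object and no flattened fields, the client falls back to
+ # a default ListEntityTypesRequest, as asserted below.)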
+ with mock.patch.object(
+ type(client.transport.list_entity_types),
+ '__call__') as call:
+ client.list_entity_types()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == featurestore_service.ListEntityTypesRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_entity_types_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ListEntityTypesRequest):
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_entity_types),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListEntityTypesResponse(
+ next_page_token='next_page_token_value',
+ ))
+ response = await client.list_entity_types(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == featurestore_service.ListEntityTypesRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListEntityTypesAsyncPager)
+ assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_entity_types_async_from_dict():
+ await test_list_entity_types_async(request_type=dict)
+
+
+def test_list_entity_types_field_headers():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = featurestore_service.ListEntityTypesRequest()
+
+ request.parent = 'parent/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_entity_types),
+ '__call__') as call:
+ call.return_value = featurestore_service.ListEntityTypesResponse()
+ client.list_entity_types(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'parent=parent/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_entity_types_field_headers_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = featurestore_service.ListEntityTypesRequest()
+
+ request.parent = 'parent/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_entity_types),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListEntityTypesResponse())
+ await client.list_entity_types(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'parent=parent/value',
+ ) in kw['metadata']
+
+
+def test_list_entity_types_flattened():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_entity_types),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = featurestore_service.ListEntityTypesResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_entity_types(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+
+
+def test_list_entity_types_flattened_error():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_entity_types(
+ featurestore_service.ListEntityTypesRequest(),
+ parent='parent_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_entity_types_flattened_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_entity_types),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListEntityTypesResponse())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_entity_types(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_entity_types_flattened_error_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_entity_types(
+ featurestore_service.ListEntityTypesRequest(),
+ parent='parent_value',
+ )
+
+
+def test_list_entity_types_pager():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_entity_types),
+ '__call__') as call:
+ # Set the response to a series of pages.
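+ # A minimal sketch of how a caller would consume the pages staged below
+ # (illustrative only; not executed by this test):
+ #
+ #     for entity_type_ in client.list_entity_types(request={}):
+ #         ...  # yields the 6 EntityType results across all four pages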
+ call.side_effect = (
+ featurestore_service.ListEntityTypesResponse(
+ entity_types=[
+ entity_type.EntityType(),
+ entity_type.EntityType(),
+ entity_type.EntityType(),
+ ],
+ next_page_token='abc',
+ ),
+ featurestore_service.ListEntityTypesResponse(
+ entity_types=[],
+ next_page_token='def',
+ ),
+ featurestore_service.ListEntityTypesResponse(
+ entity_types=[
+ entity_type.EntityType(),
+ ],
+ next_page_token='ghi',
+ ),
+ featurestore_service.ListEntityTypesResponse(
+ entity_types=[
+ entity_type.EntityType(),
+ entity_type.EntityType(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ('parent', ''),
+ )),
+ )
+ pager = client.list_entity_types(request={})
+
+ assert pager._metadata == metadata
+
+ results = [i for i in pager]
+ assert len(results) == 6
+ assert all(isinstance(i, entity_type.EntityType)
+ for i in results)
+
+def test_list_entity_types_pages():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_entity_types),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ featurestore_service.ListEntityTypesResponse(
+ entity_types=[
+ entity_type.EntityType(),
+ entity_type.EntityType(),
+ entity_type.EntityType(),
+ ],
+ next_page_token='abc',
+ ),
+ featurestore_service.ListEntityTypesResponse(
+ entity_types=[],
+ next_page_token='def',
+ ),
+ featurestore_service.ListEntityTypesResponse(
+ entity_types=[
+ entity_type.EntityType(),
+ ],
+ next_page_token='ghi',
+ ),
+ featurestore_service.ListEntityTypesResponse(
+ entity_types=[
+ entity_type.EntityType(),
+ entity_type.EntityType(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_entity_types(request={}).pages)
+ for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+ assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_entity_types_async_pager():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_entity_types),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ featurestore_service.ListEntityTypesResponse(
+ entity_types=[
+ entity_type.EntityType(),
+ entity_type.EntityType(),
+ entity_type.EntityType(),
+ ],
+ next_page_token='abc',
+ ),
+ featurestore_service.ListEntityTypesResponse(
+ entity_types=[],
+ next_page_token='def',
+ ),
+ featurestore_service.ListEntityTypesResponse(
+ entity_types=[
+ entity_type.EntityType(),
+ ],
+ next_page_token='ghi',
+ ),
+ featurestore_service.ListEntityTypesResponse(
+ entity_types=[
+ entity_type.EntityType(),
+ entity_type.EntityType(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_entity_types(request={},)
+ assert async_pager.next_page_token == 'abc'
+ responses = []
+ async for response in async_pager:
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, entity_type.EntityType)
+ for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_entity_types_async_pages():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.list_entity_types), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + featurestore_service.ListEntityTypesResponse( + entity_types=[ + entity_type.EntityType(), + entity_type.EntityType(), + entity_type.EntityType(), + ], + next_page_token='abc', + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[], + next_page_token='def', + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[ + entity_type.EntityType(), + ], + next_page_token='ghi', + ), + featurestore_service.ListEntityTypesResponse( + entity_types=[ + entity_type.EntityType(), + entity_type.EntityType(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_entity_types(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +def test_update_entity_type(transport: str = 'grpc', request_type=featurestore_service.UpdateEntityTypeRequest): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_entity_type.EntityType( + name='name_value', + description='description_value', + etag='etag_value', + ) + response = client.update_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_entity_type.EntityType) + assert response.name == 'name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + + +def test_update_entity_type_from_dict(): + test_update_entity_type(request_type=dict) + + +def test_update_entity_type_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_entity_type), + '__call__') as call: + client.update_entity_type() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.UpdateEntityTypeRequest() + + +@pytest.mark.asyncio +async def test_update_entity_type_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.UpdateEntityTypeRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
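+ # (entity_type and gca_entity_type presumably alias the same EntityType
+ # message module; the gca_-prefixed import is the one used for the
+ # request/response side of mutating RPCs in this suite.)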
+ with mock.patch.object(
+ type(client.transport.update_entity_type),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_entity_type.EntityType(
+ name='name_value',
+ description='description_value',
+ etag='etag_value',
+ ))
+ response = await client.update_entity_type(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == featurestore_service.UpdateEntityTypeRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_entity_type.EntityType)
+ assert response.name == 'name_value'
+ assert response.description == 'description_value'
+ assert response.etag == 'etag_value'
+
+
+@pytest.mark.asyncio
+async def test_update_entity_type_async_from_dict():
+ await test_update_entity_type_async(request_type=dict)
+
+
+def test_update_entity_type_field_headers():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = featurestore_service.UpdateEntityTypeRequest()
+
+ request.entity_type.name = 'entity_type.name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_entity_type),
+ '__call__') as call:
+ call.return_value = gca_entity_type.EntityType()
+ client.update_entity_type(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'entity_type.name=entity_type.name/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_update_entity_type_field_headers_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = featurestore_service.UpdateEntityTypeRequest()
+
+ request.entity_type.name = 'entity_type.name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_entity_type),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_entity_type.EntityType())
+ await client.update_entity_type(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'entity_type.name=entity_type.name/value',
+ ) in kw['metadata']
+
+
+def test_update_entity_type_flattened():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_entity_type),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_entity_type.EntityType()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.update_entity_type(
+ entity_type=gca_entity_type.EntityType(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].entity_type == gca_entity_type.EntityType(name='name_value')
+ assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+def test_update_entity_type_flattened_error():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.update_entity_type(
+ featurestore_service.UpdateEntityTypeRequest(),
+ entity_type=gca_entity_type.EntityType(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+
+@pytest.mark.asyncio
+async def test_update_entity_type_flattened_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_entity_type),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_entity_type.EntityType())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.update_entity_type(
+ entity_type=gca_entity_type.EntityType(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].entity_type == gca_entity_type.EntityType(name='name_value')
+ assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+@pytest.mark.asyncio
+async def test_update_entity_type_flattened_error_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.update_entity_type(
+ featurestore_service.UpdateEntityTypeRequest(),
+ entity_type=gca_entity_type.EntityType(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+
+def test_delete_entity_type(transport: str = 'grpc', request_type=featurestore_service.DeleteEntityTypeRequest):
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_entity_type),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name='operations/spam')
+ response = client.delete_entity_type(request)
+
+ # Establish that the underlying gRPC stub method was called.
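+ # delete_entity_type is a long-running operation: the transport returns a
+ # raw operations_pb2.Operation, which the client wraps in an api_core
+ # operation future (hence the future.Future check below).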
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_entity_type_from_dict(): + test_delete_entity_type(request_type=dict) + + +def test_delete_entity_type_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_entity_type), + '__call__') as call: + client.delete_entity_type() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteEntityTypeRequest() + + +@pytest.mark.asyncio +async def test_delete_entity_type_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.DeleteEntityTypeRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_entity_type), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteEntityTypeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_entity_type_async_from_dict(): + await test_delete_entity_type_async(request_type=dict) + + +def test_delete_entity_type_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteEntityTypeRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_entity_type), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_entity_type(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_entity_type_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = featurestore_service.DeleteEntityTypeRequest()
+
+ request.name = 'name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_entity_type),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
+ await client.delete_entity_type(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+def test_delete_entity_type_flattened():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_entity_type),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name='operations/op')
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.delete_entity_type(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+def test_delete_entity_type_flattened_error():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_entity_type(
+ featurestore_service.DeleteEntityTypeRequest(),
+ name='name_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_delete_entity_type_flattened_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_entity_type),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name='operations/spam')
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_entity_type(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_entity_type_flattened_error_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError): + await client.delete_entity_type( + featurestore_service.DeleteEntityTypeRequest(), + name='name_value', + ) + + +def test_create_feature(transport: str = 'grpc', request_type=featurestore_service.CreateFeatureRequest): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateFeatureRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_feature_from_dict(): + test_create_feature(request_type=dict) + + +def test_create_feature_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_feature), + '__call__') as call: + client.create_feature() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateFeatureRequest() + + +@pytest.mark.asyncio +async def test_create_feature_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.CreateFeatureRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.CreateFeatureRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_feature_async_from_dict(): + await test_create_feature_async(request_type=dict) + + +def test_create_feature_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = featurestore_service.CreateFeatureRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_feature), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_feature_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.CreateFeatureRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_feature), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_create_feature_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_feature( + parent='parent_value', + feature=gca_feature.Feature(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].feature == gca_feature.Feature(name='name_value') + + +def test_create_feature_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_feature( + featurestore_service.CreateFeatureRequest(), + parent='parent_value', + feature=gca_feature.Feature(name='name_value'), + ) + + +@pytest.mark.asyncio +async def test_create_feature_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_feature), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name='operations/spam')
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.create_feature(
+ parent='parent_value',
+ feature=gca_feature.Feature(name='name_value'),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+ assert args[0].feature == gca_feature.Feature(name='name_value')
+
+
+@pytest.mark.asyncio
+async def test_create_feature_flattened_error_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.create_feature(
+ featurestore_service.CreateFeatureRequest(),
+ parent='parent_value',
+ feature=gca_feature.Feature(name='name_value'),
+ )
+
+
+def test_batch_create_features(transport: str = 'grpc', request_type=featurestore_service.BatchCreateFeaturesRequest):
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_create_features),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name='operations/spam')
+ response = client.batch_create_features(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == featurestore_service.BatchCreateFeaturesRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_batch_create_features_from_dict():
+ test_batch_create_features(request_type=dict)
+
+
+def test_batch_create_features_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_create_features),
+ '__call__') as call:
+ client.batch_create_features()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == featurestore_service.BatchCreateFeaturesRequest()
+
+
+@pytest.mark.asyncio
+async def test_batch_create_features_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.BatchCreateFeaturesRequest):
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.batch_create_features), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.batch_create_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.BatchCreateFeaturesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_batch_create_features_async_from_dict(): + await test_batch_create_features_async(request_type=dict) + + +def test_batch_create_features_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.BatchCreateFeaturesRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_features), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.batch_create_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_batch_create_features_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.BatchCreateFeaturesRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_features), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.batch_create_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_batch_create_features_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_create_features), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.batch_create_features(
+ parent='parent_value',
+ requests=[featurestore_service.CreateFeatureRequest(parent='parent_value')],
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+ assert args[0].requests == [featurestore_service.CreateFeatureRequest(parent='parent_value')]
+
+
+def test_batch_create_features_flattened_error():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.batch_create_features(
+ featurestore_service.BatchCreateFeaturesRequest(),
+ parent='parent_value',
+ requests=[featurestore_service.CreateFeatureRequest(parent='parent_value')],
+ )
+
+
+@pytest.mark.asyncio
+async def test_batch_create_features_flattened_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.batch_create_features),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name='operations/spam')
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.batch_create_features(
+ parent='parent_value',
+ requests=[featurestore_service.CreateFeatureRequest(parent='parent_value')],
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+ assert args[0].requests == [featurestore_service.CreateFeatureRequest(parent='parent_value')]
+
+
+@pytest.mark.asyncio
+async def test_batch_create_features_flattened_error_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.batch_create_features(
+ featurestore_service.BatchCreateFeaturesRequest(),
+ parent='parent_value',
+ requests=[featurestore_service.CreateFeatureRequest(parent='parent_value')],
+ )
+
+
+def test_get_feature(transport: str = 'grpc', request_type=featurestore_service.GetFeatureRequest):
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_feature),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = feature.Feature(
+ name='name_value',
+ description='description_value',
+ value_type=feature.Feature.ValueType.BOOL,
+ etag='etag_value',
+ )
+ response = client.get_feature(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == featurestore_service.GetFeatureRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, feature.Feature)
+ assert response.name == 'name_value'
+ assert response.description == 'description_value'
+ assert response.value_type == feature.Feature.ValueType.BOOL
+ assert response.etag == 'etag_value'
+
+
+def test_get_feature_from_dict():
+ test_get_feature(request_type=dict)
+
+
+def test_get_feature_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_feature),
+ '__call__') as call:
+ client.get_feature()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == featurestore_service.GetFeatureRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_feature_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.GetFeatureRequest):
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_feature),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(feature.Feature(
+ name='name_value',
+ description='description_value',
+ value_type=feature.Feature.ValueType.BOOL,
+ etag='etag_value',
+ ))
+ response = await client.get_feature(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == featurestore_service.GetFeatureRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, feature.Feature)
+ assert response.name == 'name_value'
+ assert response.description == 'description_value'
+ assert response.value_type == feature.Feature.ValueType.BOOL
+ assert response.etag == 'etag_value'
+
+
+@pytest.mark.asyncio
+async def test_get_feature_async_from_dict():
+ await test_get_feature_async(request_type=dict)
+
+
+def test_get_feature_field_headers():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = featurestore_service.GetFeatureRequest()
+
+ request.name = 'name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_feature),
+ '__call__') as call:
+ call.return_value = feature.Feature()
+ client.get_feature(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_feature_field_headers_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = featurestore_service.GetFeatureRequest()
+
+ request.name = 'name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_feature),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(feature.Feature())
+ await client.get_feature(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+def test_get_feature_flattened():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_feature),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = feature.Feature()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_feature(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+def test_get_feature_flattened_error():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_feature(
+ featurestore_service.GetFeatureRequest(),
+ name='name_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_feature_flattened_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_feature),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(feature.Feature())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_feature(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_feature_flattened_error_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_feature(
+ featurestore_service.GetFeatureRequest(),
+ name='name_value',
+ )
+
+
+def test_list_features(transport: str = 'grpc', request_type=featurestore_service.ListFeaturesRequest):
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_features),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = featurestore_service.ListFeaturesResponse(
+ next_page_token='next_page_token_value',
+ )
+ response = client.list_features(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == featurestore_service.ListFeaturesRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListFeaturesPager)
+ assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_features_from_dict():
+ test_list_features(request_type=dict)
+
+
+def test_list_features_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_features),
+ '__call__') as call:
+ client.list_features()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == featurestore_service.ListFeaturesRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_features_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ListFeaturesRequest):
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_features),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturesResponse(
+ next_page_token='next_page_token_value',
+ ))
+ response = await client.list_features(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == featurestore_service.ListFeaturesRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListFeaturesAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_features_async_from_dict(): + await test_list_features_async(request_type=dict) + + +def test_list_features_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ListFeaturesRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_features), + '__call__') as call: + call.return_value = featurestore_service.ListFeaturesResponse() + client.list_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_features_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ListFeaturesRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_features), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturesResponse()) + await client.list_features(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_list_features_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_features), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = featurestore_service.ListFeaturesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_features( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +def test_list_features_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError):
+ client.list_features(
+ featurestore_service.ListFeaturesRequest(),
+ parent='parent_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_features_flattened_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_features),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturesResponse())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_features(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_features_flattened_error_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_features(
+ featurestore_service.ListFeaturesRequest(),
+ parent='parent_value',
+ )
+
+
+def test_list_features_pager():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_features),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ featurestore_service.ListFeaturesResponse(
+ features=[
+ feature.Feature(),
+ feature.Feature(),
+ feature.Feature(),
+ ],
+ next_page_token='abc',
+ ),
+ featurestore_service.ListFeaturesResponse(
+ features=[],
+ next_page_token='def',
+ ),
+ featurestore_service.ListFeaturesResponse(
+ features=[
+ feature.Feature(),
+ ],
+ next_page_token='ghi',
+ ),
+ featurestore_service.ListFeaturesResponse(
+ features=[
+ feature.Feature(),
+ feature.Feature(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ('parent', ''),
+ )),
+ )
+ pager = client.list_features(request={})
+
+ assert pager._metadata == metadata
+
+ results = [i for i in pager]
+ assert len(results) == 6
+ assert all(isinstance(i, feature.Feature)
+ for i in results)
+
+def test_list_features_pages():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_features),
+ '__call__') as call:
+ # Set the response to a series of pages.
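+ # Each response below is one page; the trailing RuntimeError is a sentinel
+ # that would surface if the pager issued an extra RPC after the final page
+ # (whose next_page_token is empty).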
+ call.side_effect = (
+ featurestore_service.ListFeaturesResponse(
+ features=[
+ feature.Feature(),
+ feature.Feature(),
+ feature.Feature(),
+ ],
+ next_page_token='abc',
+ ),
+ featurestore_service.ListFeaturesResponse(
+ features=[],
+ next_page_token='def',
+ ),
+ featurestore_service.ListFeaturesResponse(
+ features=[
+ feature.Feature(),
+ ],
+ next_page_token='ghi',
+ ),
+ featurestore_service.ListFeaturesResponse(
+ features=[
+ feature.Feature(),
+ feature.Feature(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_features(request={}).pages)
+ for page_, token in zip(pages, ['abc','def','ghi', '']):
+ assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_features_async_pager():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_features),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ featurestore_service.ListFeaturesResponse(
+ features=[
+ feature.Feature(),
+ feature.Feature(),
+ feature.Feature(),
+ ],
+ next_page_token='abc',
+ ),
+ featurestore_service.ListFeaturesResponse(
+ features=[],
+ next_page_token='def',
+ ),
+ featurestore_service.ListFeaturesResponse(
+ features=[
+ feature.Feature(),
+ ],
+ next_page_token='ghi',
+ ),
+ featurestore_service.ListFeaturesResponse(
+ features=[
+ feature.Feature(),
+ feature.Feature(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_features(request={},)
+ assert async_pager.next_page_token == 'abc'
+ responses = []
+ async for response in async_pager:
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, feature.Feature)
+ for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_features_async_pages():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_features),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ featurestore_service.ListFeaturesResponse(
+ features=[
+ feature.Feature(),
+ feature.Feature(),
+ feature.Feature(),
+ ],
+ next_page_token='abc',
+ ),
+ featurestore_service.ListFeaturesResponse(
+ features=[],
+ next_page_token='def',
+ ),
+ featurestore_service.ListFeaturesResponse(
+ features=[
+ feature.Feature(),
+ ],
+ next_page_token='ghi',
+ ),
+ featurestore_service.ListFeaturesResponse(
+ features=[
+ feature.Feature(),
+ feature.Feature(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = []
+ async for page_ in (await client.list_features(request={})).pages:
+ pages.append(page_)
+ for page_, token in zip(pages, ['abc','def','ghi', '']):
+ assert page_.raw_page.next_page_token == token
+
+def test_update_feature(transport: str = 'grpc', request_type=featurestore_service.UpdateFeatureRequest):
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_feature),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_feature.Feature(
+ name='name_value',
+ description='description_value',
+ value_type=gca_feature.Feature.ValueType.BOOL,
+ etag='etag_value',
+ )
+ response = client.update_feature(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == featurestore_service.UpdateFeatureRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_feature.Feature)
+ assert response.name == 'name_value'
+ assert response.description == 'description_value'
+ assert response.value_type == gca_feature.Feature.ValueType.BOOL
+ assert response.etag == 'etag_value'
+
+
+def test_update_feature_from_dict():
+ test_update_feature(request_type=dict)
+
+
+def test_update_feature_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_feature),
+ '__call__') as call:
+ client.update_feature()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == featurestore_service.UpdateFeatureRequest()
+
+
+@pytest.mark.asyncio
+async def test_update_feature_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.UpdateFeatureRequest):
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_feature),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_feature.Feature(
+ name='name_value',
+ description='description_value',
+ value_type=gca_feature.Feature.ValueType.BOOL,
+ etag='etag_value',
+ ))
+ response = await client.update_feature(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == featurestore_service.UpdateFeatureRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_feature.Feature)
+ assert response.name == 'name_value'
+ assert response.description == 'description_value'
+ assert response.value_type == gca_feature.Feature.ValueType.BOOL
+ assert response.etag == 'etag_value'
+
+
+@pytest.mark.asyncio
+async def test_update_feature_async_from_dict():
+ await test_update_feature_async(request_type=dict)
+
+
+def test_update_feature_field_headers():
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = featurestore_service.UpdateFeatureRequest() + + request.feature.name = 'feature.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_feature), + '__call__') as call: + call.return_value = gca_feature.Feature() + client.update_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'feature.name=feature.name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_feature_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.UpdateFeatureRequest() + + request.feature.name = 'feature.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_feature), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_feature.Feature()) + await client.update_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'feature.name=feature.name/value', + ) in kw['metadata'] + + +def test_update_feature_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_feature.Feature() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_feature( + feature=gca_feature.Feature(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].feature == gca_feature.Feature(name='name_value') + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) + + +def test_update_feature_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_feature( + featurestore_service.UpdateFeatureRequest(), + feature=gca_feature.Feature(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.asyncio +async def test_update_feature_flattened_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_feature), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_feature.Feature())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.update_feature(
+ feature=gca_feature.Feature(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].feature == gca_feature.Feature(name='name_value')
+ assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+@pytest.mark.asyncio
+async def test_update_feature_flattened_error_async():
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.update_feature(
+ featurestore_service.UpdateFeatureRequest(),
+ feature=gca_feature.Feature(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+
+def test_delete_feature(transport: str = 'grpc', request_type=featurestore_service.DeleteFeatureRequest):
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_feature),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name='operations/spam')
+ response = client.delete_feature(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == featurestore_service.DeleteFeatureRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_delete_feature_from_dict():
+ test_delete_feature(request_type=dict)
+
+
+def test_delete_feature_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = FeaturestoreServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_feature),
+ '__call__') as call:
+ client.delete_feature()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == featurestore_service.DeleteFeatureRequest()
+
+
+@pytest.mark.asyncio
+async def test_delete_feature_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.DeleteFeatureRequest):
+ client = FeaturestoreServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.delete_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.DeleteFeatureRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_feature_async_from_dict(): + await test_delete_feature_async(request_type=dict) + + +def test_delete_feature_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteFeatureRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_feature), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_feature_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.DeleteFeatureRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_feature), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_feature(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_delete_feature_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_feature), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_feature( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
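+        # (The flattened keyword arguments are packed into a single request proto.)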
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_delete_feature_flattened_error():
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_feature(
+            featurestore_service.DeleteFeatureRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_feature_flattened_async():
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_feature),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_feature(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_feature_flattened_error_async():
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.delete_feature(
+            featurestore_service.DeleteFeatureRequest(),
+            name='name_value',
+        )
+
+
+def test_import_feature_values(transport: str = 'grpc', request_type=featurestore_service.ImportFeatureValuesRequest):
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.import_feature_values),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.import_feature_values(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == featurestore_service.ImportFeatureValuesRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+def test_import_feature_values_from_dict():
+    test_import_feature_values(request_type=dict)
+
+
+def test_import_feature_values_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.import_feature_values), + '__call__') as call: + client.import_feature_values() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ImportFeatureValuesRequest() + + +@pytest.mark.asyncio +async def test_import_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ImportFeatureValuesRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.import_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ImportFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_import_feature_values_async_from_dict(): + await test_import_feature_values_async(request_type=dict) + + +def test_import_feature_values_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ImportFeatureValuesRequest() + + request.entity_type = 'entity_type/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.import_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'entity_type=entity_type/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_import_feature_values_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ImportFeatureValuesRequest() + + request.entity_type = 'entity_type/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.import_feature_values), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.import_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
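+    # (The x-goog-request-params value is derived from the request's
+    # entity_type field.)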
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'entity_type=entity_type/value',
+    ) in kw['metadata']
+
+
+def test_import_feature_values_flattened():
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.import_feature_values),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.import_feature_values(
+            entity_type='entity_type_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].entity_type == 'entity_type_value'
+
+
+def test_import_feature_values_flattened_error():
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.import_feature_values(
+            featurestore_service.ImportFeatureValuesRequest(),
+            entity_type='entity_type_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_import_feature_values_flattened_async():
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.import_feature_values),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.import_feature_values(
+            entity_type='entity_type_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].entity_type == 'entity_type_value'
+
+
+@pytest.mark.asyncio
+async def test_import_feature_values_flattened_error_async():
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.import_feature_values(
+            featurestore_service.ImportFeatureValuesRequest(),
+            entity_type='entity_type_value',
+        )
+
+
+def test_batch_read_feature_values(transport: str = 'grpc', request_type=featurestore_service.BatchReadFeatureValuesRequest):
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.batch_read_feature_values),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
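+        # (The mocked operation name is arbitrary; only the response type matters here.)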
+ call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.batch_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.BatchReadFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_batch_read_feature_values_from_dict(): + test_batch_read_feature_values(request_type=dict) + + +def test_batch_read_feature_values_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_feature_values), + '__call__') as call: + client.batch_read_feature_values() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.BatchReadFeatureValuesRequest() + + +@pytest.mark.asyncio +async def test_batch_read_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.BatchReadFeatureValuesRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.batch_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.BatchReadFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_batch_read_feature_values_async_from_dict(): + await test_batch_read_feature_values_async(request_type=dict) + + +def test_batch_read_feature_values_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.BatchReadFeatureValuesRequest() + + request.featurestore = 'featurestore/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_read_feature_values), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.batch_read_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
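+    # (Routing metadata is passed to the stub via the `metadata` keyword.)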
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'featurestore=featurestore/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_batch_read_feature_values_field_headers_async():
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = featurestore_service.BatchReadFeatureValuesRequest()
+
+    request.featurestore = 'featurestore/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.batch_read_feature_values),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
+        await client.batch_read_feature_values(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'featurestore=featurestore/value',
+    ) in kw['metadata']
+
+
+def test_batch_read_feature_values_flattened():
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.batch_read_feature_values),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.batch_read_feature_values(
+            featurestore='featurestore_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].featurestore == 'featurestore_value'
+
+
+def test_batch_read_feature_values_flattened_error():
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.batch_read_feature_values(
+            featurestore_service.BatchReadFeatureValuesRequest(),
+            featurestore='featurestore_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_batch_read_feature_values_flattened_async():
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.batch_read_feature_values),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.batch_read_feature_values(
+            featurestore='featurestore_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].featurestore == 'featurestore_value' + + +@pytest.mark.asyncio +async def test_batch_read_feature_values_flattened_error_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.batch_read_feature_values( + featurestore_service.BatchReadFeatureValuesRequest(), + featurestore='featurestore_value', + ) + + +def test_export_feature_values(transport: str = 'grpc', request_type=featurestore_service.ExportFeatureValuesRequest): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.export_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ExportFeatureValuesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_export_feature_values_from_dict(): + test_export_feature_values(request_type=dict) + + +def test_export_feature_values_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_feature_values), + '__call__') as call: + client.export_feature_values() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ExportFeatureValuesRequest() + + +@pytest.mark.asyncio +async def test_export_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ExportFeatureValuesRequest): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.export_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == featurestore_service.ExportFeatureValuesRequest() + + # Establish that the response is the type that we expect. 
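+    # (Export is also a long-running operation, surfaced as a future.)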
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_export_feature_values_async_from_dict(): + await test_export_feature_values_async(request_type=dict) + + +def test_export_feature_values_field_headers(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ExportFeatureValuesRequest() + + request.entity_type = 'entity_type/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_feature_values), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.export_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'entity_type=entity_type/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_export_feature_values_field_headers_async(): + client = FeaturestoreServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = featurestore_service.ExportFeatureValuesRequest() + + request.entity_type = 'entity_type/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_feature_values), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.export_feature_values(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'entity_type=entity_type/value', + ) in kw['metadata'] + + +def test_export_feature_values_flattened(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.export_feature_values), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.export_feature_values( + entity_type='entity_type_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].entity_type == 'entity_type_value' + + +def test_export_feature_values_flattened_error(): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
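+    # (A request object and flattened arguments are mutually exclusive.)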
+    with pytest.raises(ValueError):
+        client.export_feature_values(
+            featurestore_service.ExportFeatureValuesRequest(),
+            entity_type='entity_type_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_export_feature_values_flattened_async():
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.export_feature_values),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.export_feature_values(
+            entity_type='entity_type_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].entity_type == 'entity_type_value'
+
+
+@pytest.mark.asyncio
+async def test_export_feature_values_flattened_error_async():
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.export_feature_values(
+            featurestore_service.ExportFeatureValuesRequest(),
+            entity_type='entity_type_value',
+        )
+
+
+def test_search_features(transport: str = 'grpc', request_type=featurestore_service.SearchFeaturesRequest):
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_features),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = featurestore_service.SearchFeaturesResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.search_features(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == featurestore_service.SearchFeaturesRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.SearchFeaturesPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_search_features_from_dict():
+    test_search_features(request_type=dict)
+
+
+def test_search_features_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_features),
+            '__call__') as call:
+        client.search_features()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == featurestore_service.SearchFeaturesRequest()
+
+
+@pytest.mark.asyncio
+async def test_search_features_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.SearchFeaturesRequest):
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_features),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.SearchFeaturesResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.search_features(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == featurestore_service.SearchFeaturesRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.SearchFeaturesAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_search_features_async_from_dict():
+    await test_search_features_async(request_type=dict)
+
+
+def test_search_features_field_headers():
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = featurestore_service.SearchFeaturesRequest()
+
+    request.location = 'location/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_features),
+            '__call__') as call:
+        call.return_value = featurestore_service.SearchFeaturesResponse()
+        client.search_features(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'location=location/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_search_features_field_headers_async():
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = featurestore_service.SearchFeaturesRequest()
+
+    request.location = 'location/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_features),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.SearchFeaturesResponse())
+        await client.search_features(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'location=location/value',
+    ) in kw['metadata']
+
+
+def test_search_features_flattened():
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_features),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = featurestore_service.SearchFeaturesResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.search_features(
+            location='location_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].location == 'location_value'
+
+
+def test_search_features_flattened_error():
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.search_features(
+            featurestore_service.SearchFeaturesRequest(),
+            location='location_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_search_features_flattened_async():
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_features),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.SearchFeaturesResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.search_features(
+            location='location_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].location == 'location_value'
+
+
+@pytest.mark.asyncio
+async def test_search_features_flattened_error_async():
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.search_features(
+            featurestore_service.SearchFeaturesRequest(),
+            location='location_value',
+        )
+
+
+def test_search_features_pager():
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_features),
+            '__call__') as call:
+        # Set the response to a series of pages.
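+        # (Each side_effect entry is one fake page; the trailing RuntimeError
+        # would fail the test if the pager fetched more pages than provided.)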
+        call.side_effect = (
+            featurestore_service.SearchFeaturesResponse(
+                features=[
+                    feature.Feature(),
+                    feature.Feature(),
+                    feature.Feature(),
+                ],
+                next_page_token='abc',
+            ),
+            featurestore_service.SearchFeaturesResponse(
+                features=[],
+                next_page_token='def',
+            ),
+            featurestore_service.SearchFeaturesResponse(
+                features=[
+                    feature.Feature(),
+                ],
+                next_page_token='ghi',
+            ),
+            featurestore_service.SearchFeaturesResponse(
+                features=[
+                    feature.Feature(),
+                    feature.Feature(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('location', ''),
+            )),
+        )
+        pager = client.search_features(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, feature.Feature)
+                   for i in results)
+
+def test_search_features_pages():
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_features),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            featurestore_service.SearchFeaturesResponse(
+                features=[
+                    feature.Feature(),
+                    feature.Feature(),
+                    feature.Feature(),
+                ],
+                next_page_token='abc',
+            ),
+            featurestore_service.SearchFeaturesResponse(
+                features=[],
+                next_page_token='def',
+            ),
+            featurestore_service.SearchFeaturesResponse(
+                features=[
+                    feature.Feature(),
+                ],
+                next_page_token='ghi',
+            ),
+            featurestore_service.SearchFeaturesResponse(
+                features=[
+                    feature.Feature(),
+                    feature.Feature(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.search_features(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_search_features_async_pager():
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_features),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            featurestore_service.SearchFeaturesResponse(
+                features=[
+                    feature.Feature(),
+                    feature.Feature(),
+                    feature.Feature(),
+                ],
+                next_page_token='abc',
+            ),
+            featurestore_service.SearchFeaturesResponse(
+                features=[],
+                next_page_token='def',
+            ),
+            featurestore_service.SearchFeaturesResponse(
+                features=[
+                    feature.Feature(),
+                ],
+                next_page_token='ghi',
+            ),
+            featurestore_service.SearchFeaturesResponse(
+                features=[
+                    feature.Feature(),
+                    feature.Feature(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.search_features(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, feature.Feature)
+                   for i in responses)
+
+@pytest.mark.asyncio
+async def test_search_features_async_pages():
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_features),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
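+        # (The async paginator exposes the same fake pages through `.pages`.)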
+ call.side_effect = ( + featurestore_service.SearchFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + feature.Feature(), + ], + next_page_token='abc', + ), + featurestore_service.SearchFeaturesResponse( + features=[], + next_page_token='def', + ), + featurestore_service.SearchFeaturesResponse( + features=[ + feature.Feature(), + ], + next_page_token='ghi', + ), + featurestore_service.SearchFeaturesResponse( + features=[ + feature.Feature(), + feature.Feature(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.search_features(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.FeaturestoreServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.FeaturestoreServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FeaturestoreServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.FeaturestoreServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FeaturestoreServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.FeaturestoreServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = FeaturestoreServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.FeaturestoreServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.FeaturestoreServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.FeaturestoreServiceGrpcTransport, + transports.FeaturestoreServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. 
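+    # (No transport argument is supplied, so the client falls back to gRPC.)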
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    assert isinstance(
+        client.transport,
+        transports.FeaturestoreServiceGrpcTransport,
+    )
+
+def test_featurestore_service_base_transport_error():
+    # Passing both a credentials object and credentials_file should raise an error
+    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
+        transport = transports.FeaturestoreServiceTransport(
+            credentials=ga_credentials.AnonymousCredentials(),
+            credentials_file="credentials.json"
+        )
+
+
+def test_featurestore_service_base_transport():
+    # Instantiate the base transport.
+    with mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_service.transports.FeaturestoreServiceTransport.__init__') as Transport:
+        Transport.return_value = None
+        transport = transports.FeaturestoreServiceTransport(
+            credentials=ga_credentials.AnonymousCredentials(),
+        )
+
+    # Every method on the transport should just blindly
+    # raise NotImplementedError.
+    methods = (
+        'create_featurestore',
+        'get_featurestore',
+        'list_featurestores',
+        'update_featurestore',
+        'delete_featurestore',
+        'create_entity_type',
+        'get_entity_type',
+        'list_entity_types',
+        'update_entity_type',
+        'delete_entity_type',
+        'create_feature',
+        'batch_create_features',
+        'get_feature',
+        'list_features',
+        'update_feature',
+        'delete_feature',
+        'import_feature_values',
+        'batch_read_feature_values',
+        'export_feature_values',
+        'search_features',
+    )
+    for method in methods:
+        with pytest.raises(NotImplementedError):
+            getattr(transport, method)(request=object())
+
+    # Additionally, the LRO client (a property) should
+    # also raise NotImplementedError
+    with pytest.raises(NotImplementedError):
+        transport.operations_client
+
+
+@requires_google_auth_gte_1_25_0
+def test_featurestore_service_base_transport_with_credentials_file():
+    # Instantiate the base transport with a credentials file
+    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_service.transports.FeaturestoreServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.FeaturestoreServiceTransport(
+            credentials_file="credentials.json",
+            quota_project_id="octopus",
+        )
+        load_creds.assert_called_once_with("credentials.json",
+            scopes=None,
+            default_scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id="octopus",
+        )
+
+
+@requires_google_auth_lt_1_25_0
+def test_featurestore_service_base_transport_with_credentials_file_old_google_auth():
+    # Instantiate the base transport with a credentials file
+    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_service.transports.FeaturestoreServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.FeaturestoreServiceTransport(
+            credentials_file="credentials.json",
+            quota_project_id="octopus",
+        )
+        load_creds.assert_called_once_with("credentials.json",
+            scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id="octopus",
+        )
+
+
+def test_featurestore_service_base_transport_with_adc():
+    # Test the default credentials are used if credentials and
+    # credentials_file are None.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_service.transports.FeaturestoreServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.FeaturestoreServiceTransport()
+        adc.assert_called_once()
+
+
+@requires_google_auth_gte_1_25_0
+def test_featurestore_service_auth_adc():
+    # If no credentials are provided, we should use ADC credentials.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        FeaturestoreServiceClient()
+        adc.assert_called_once_with(
+            scopes=None,
+            default_scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id=None,
+        )
+
+
+@requires_google_auth_lt_1_25_0
+def test_featurestore_service_auth_adc_old_google_auth():
+    # If no credentials are provided, we should use ADC credentials.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        FeaturestoreServiceClient()
+        adc.assert_called_once_with(
+            scopes=('https://www.googleapis.com/auth/cloud-platform',),
+            quota_project_id=None,
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.FeaturestoreServiceGrpcTransport,
+        transports.FeaturestoreServiceGrpcAsyncIOTransport,
+    ],
+)
+@requires_google_auth_gte_1_25_0
+def test_featurestore_service_transport_auth_adc(transport_class):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport_class(quota_project_id="octopus", scopes=["1", "2"])
+        adc.assert_called_once_with(
+            scopes=["1", "2"],
+            default_scopes=('https://www.googleapis.com/auth/cloud-platform',),
+            quota_project_id="octopus",
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.FeaturestoreServiceGrpcTransport,
+        transports.FeaturestoreServiceGrpcAsyncIOTransport,
+    ],
+)
+@requires_google_auth_lt_1_25_0
+def test_featurestore_service_transport_auth_adc_old_google_auth(transport_class):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
+    with mock.patch.object(google.auth, "default", autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport_class(quota_project_id="octopus")
+        adc.assert_called_once_with(
+            scopes=('https://www.googleapis.com/auth/cloud-platform',),
+            quota_project_id="octopus",
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class,grpc_helpers",
+    [
+        (transports.FeaturestoreServiceGrpcTransport, grpc_helpers),
+        (transports.FeaturestoreServiceGrpcAsyncIOTransport, grpc_helpers_async)
+    ],
+)
+def test_featurestore_service_transport_create_channel(transport_class, grpc_helpers):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
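+    # (Both google.auth.default and create_channel are mocked so the exact
+    # keyword arguments forwarded to create_channel can be asserted below.)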
+    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
+        grpc_helpers, "create_channel", autospec=True
+    ) as create_channel:
+        creds = ga_credentials.AnonymousCredentials()
+        adc.return_value = (creds, None)
+        transport_class(
+            quota_project_id="octopus",
+            scopes=["1", "2"]
+        )
+
+        create_channel.assert_called_with(
+            "aiplatform.googleapis.com:443",
+            credentials=creds,
+            credentials_file=None,
+            quota_project_id="octopus",
+            default_scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            scopes=["1", "2"],
+            default_host="aiplatform.googleapis.com",
+            ssl_credentials=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+
+@pytest.mark.parametrize("transport_class", [transports.FeaturestoreServiceGrpcTransport, transports.FeaturestoreServiceGrpcAsyncIOTransport])
+def test_featurestore_service_grpc_transport_client_cert_source_for_mtls(
+    transport_class
+):
+    cred = ga_credentials.AnonymousCredentials()
+
+    # Check ssl_channel_credentials is used if provided.
+    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+        mock_ssl_channel_creds = mock.Mock()
+        transport_class(
+            host="squid.clam.whelk",
+            credentials=cred,
+            ssl_channel_credentials=mock_ssl_channel_creds
+        )
+        mock_create_channel.assert_called_once_with(
+            "squid.clam.whelk:443",
+            credentials=cred,
+            credentials_file=None,
+            scopes=None,
+            ssl_credentials=mock_ssl_channel_creds,
+            quota_project_id=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+    # is used.
+    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+            transport_class(
+                credentials=cred,
+                client_cert_source_for_mtls=client_cert_source_callback
+            )
+            expected_cert, expected_key = client_cert_source_callback()
+            mock_ssl_cred.assert_called_once_with(
+                certificate_chain=expected_cert,
+                private_key=expected_key
+            )
+
+
+def test_featurestore_service_host_no_port():
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
+    )
+    assert client.transport._host == 'aiplatform.googleapis.com:443'
+
+
+def test_featurestore_service_host_with_port():
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
+    )
+    assert client.transport._host == 'aiplatform.googleapis.com:8000'
+
+def test_featurestore_service_grpc_transport_channel():
+    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.FeaturestoreServiceGrpcTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+def test_featurestore_service_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
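+    # (An explicitly supplied channel bypasses create_channel entirely.)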
+    transport = transports.FeaturestoreServiceGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.FeaturestoreServiceGrpcTransport, transports.FeaturestoreServiceGrpcAsyncIOTransport])
+def test_featurestore_service_transport_channel_mtls_with_client_cert_source(
+    transport_class
+):
+    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
+        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+            mock_ssl_cred = mock.Mock()
+            grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+
+            cred = ga_credentials.AnonymousCredentials()
+            with pytest.warns(DeprecationWarning):
+                with mock.patch.object(google.auth, 'default') as adc:
+                    adc.return_value = (cred, None)
+                    transport = transport_class(
+                        host="squid.clam.whelk",
+                        api_mtls_endpoint="mtls.squid.clam.whelk",
+                        client_cert_source=client_cert_source_callback,
+                    )
+                    adc.assert_called_once()
+
+            grpc_ssl_channel_cred.assert_called_once_with(
+                certificate_chain=b"cert bytes", private_key=b"key bytes"
+            )
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+            assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.FeaturestoreServiceGrpcTransport, transports.FeaturestoreServiceGrpcAsyncIOTransport])
+def test_featurestore_service_transport_channel_mtls_with_adc(
+    transport_class
+):
+    mock_ssl_cred = mock.Mock()
+    with mock.patch.multiple(
+        "google.auth.transport.grpc.SslCredentials",
+        __init__=mock.Mock(return_value=None),
+        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+    ):
+        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+            mock_cred = mock.Mock()
+
+            with pytest.warns(DeprecationWarning):
+                transport = transport_class(
+                    host="squid.clam.whelk",
+                    credentials=mock_cred,
+                    api_mtls_endpoint="mtls.squid.clam.whelk",
+                    client_cert_source=None,
+                )
+
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=mock_cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_featurestore_service_grpc_lro_client():
+    client = FeaturestoreServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
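+    # (operations_client is a cached property; repeated access must return
+    # the exact same instance, as asserted below.)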
+    assert isinstance(
+        transport.operations_client,
+        operations_v1.OperationsClient,
+    )
+
+    # Ensure that subsequent calls to the property return the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_featurestore_service_grpc_lro_async_client():
+    client = FeaturestoreServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc_asyncio',
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(
+        transport.operations_client,
+        operations_v1.OperationsAsyncClient,
+    )
+
+    # Ensure that subsequent calls to the property return the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_entity_type_path():
+    project = "squid"
+    location = "clam"
+    featurestore = "whelk"
+    entity_type = "octopus"
+    expected = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, )
+    actual = FeaturestoreServiceClient.entity_type_path(project, location, featurestore, entity_type)
+    assert expected == actual
+
+
+def test_parse_entity_type_path():
+    expected = {
+        "project": "oyster",
+        "location": "nudibranch",
+        "featurestore": "cuttlefish",
+        "entity_type": "mussel",
+    }
+    path = FeaturestoreServiceClient.entity_type_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = FeaturestoreServiceClient.parse_entity_type_path(path)
+    assert expected == actual
+
+def test_feature_path():
+    project = "winkle"
+    location = "nautilus"
+    featurestore = "scallop"
+    entity_type = "abalone"
+    feature = "squid"
+    expected = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, feature=feature, )
+    actual = FeaturestoreServiceClient.feature_path(project, location, featurestore, entity_type, feature)
+    assert expected == actual
+
+
+def test_parse_feature_path():
+    expected = {
+        "project": "clam",
+        "location": "whelk",
+        "featurestore": "octopus",
+        "entity_type": "oyster",
+        "feature": "nudibranch",
+    }
+    path = FeaturestoreServiceClient.feature_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = FeaturestoreServiceClient.parse_feature_path(path)
+    assert expected == actual
+
+def test_featurestore_path():
+    project = "cuttlefish"
+    location = "mussel"
+    featurestore = "winkle"
+    expected = "projects/{project}/locations/{location}/featurestores/{featurestore}".format(project=project, location=location, featurestore=featurestore, )
+    actual = FeaturestoreServiceClient.featurestore_path(project, location, featurestore)
+    assert expected == actual
+
+
+def test_parse_featurestore_path():
+    expected = {
+        "project": "nautilus",
+        "location": "scallop",
+        "featurestore": "abalone",
+    }
+    path = FeaturestoreServiceClient.featurestore_path(**expected)
+
+    # Check that the path construction is reversible.
+ actual = FeaturestoreServiceClient.parse_featurestore_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = FeaturestoreServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = FeaturestoreServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = FeaturestoreServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = FeaturestoreServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = FeaturestoreServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = FeaturestoreServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = FeaturestoreServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = FeaturestoreServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = FeaturestoreServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = FeaturestoreServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = FeaturestoreServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FeaturestoreServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.FeaturestoreServiceTransport, '_prep_wrapped_messages') as prep: + client = FeaturestoreServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.FeaturestoreServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = FeaturestoreServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py new file mode 100644 index 0000000000..d5d3870fb6 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py @@ -0,0 +1,2859 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import IndexEndpointServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import IndexEndpointServiceClient +from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import pagers +from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import transports +from google.cloud.aiplatform_v1beta1.services.index_endpoint_service.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.aiplatform_v1beta1.types import index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint +from google.cloud.aiplatform_v1beta1.types import index_endpoint_service +from google.cloud.aiplatform_v1beta1.types import machine_resources +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
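+# For example, a localhost default would be rewritten to "foo.googleapis.com", while a
+# production default such as "aiplatform.googleapis.com" is returned unchanged.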
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert IndexEndpointServiceClient._get_default_mtls_endpoint(None) is None + assert IndexEndpointServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert IndexEndpointServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert IndexEndpointServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert IndexEndpointServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert IndexEndpointServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + IndexEndpointServiceClient, + IndexEndpointServiceAsyncClient, +]) +def test_index_endpoint_service_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +@pytest.mark.parametrize("client_class", [ + IndexEndpointServiceClient, + IndexEndpointServiceAsyncClient, +]) +def test_index_endpoint_service_client_service_account_always_use_jwt(client_class): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.IndexEndpointServiceGrpcTransport, "grpc"), + (transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_index_endpoint_service_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + IndexEndpointServiceClient, + IndexEndpointServiceAsyncClient, +]) +def test_index_endpoint_service_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +def test_index_endpoint_service_client_get_transport_class(): + transport = 
IndexEndpointServiceClient.get_transport_class() + available_transports = [ + transports.IndexEndpointServiceGrpcTransport, + ] + assert transport in available_transports + + transport = IndexEndpointServiceClient.get_transport_class("grpc") + assert transport == transports.IndexEndpointServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc"), + (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(IndexEndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexEndpointServiceClient)) +@mock.patch.object(IndexEndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexEndpointServiceAsyncClient)) +def test_index_endpoint_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(IndexEndpointServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(IndexEndpointServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
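+    # (only "true" and "false" are valid values, so anything else is expected to raise ValueError)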
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc", "true"), + (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc", "false"), + (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(IndexEndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexEndpointServiceClient)) +@mock.patch.object(IndexEndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexEndpointServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_index_endpoint_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc"), + (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_index_endpoint_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc"), + (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_index_endpoint_service_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
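+    # (the path should be forwarded to the transport unchanged; this test never reads the file)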
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_index_endpoint_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1beta1.services.index_endpoint_service.transports.IndexEndpointServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = IndexEndpointServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_index_endpoint(transport: str = 'grpc', request_type=index_endpoint_service.CreateIndexEndpointRequest): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.CreateIndexEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_index_endpoint_from_dict(): + test_create_index_endpoint(request_type=dict) + + +def test_create_index_endpoint_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index_endpoint), + '__call__') as call: + client.create_index_endpoint() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.CreateIndexEndpointRequest() + + +@pytest.mark.asyncio +async def test_create_index_endpoint_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.CreateIndexEndpointRequest): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
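+    # (async stubs must return an awaitable, hence the FakeUnaryUnaryCall wrapper below)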
+ with mock.patch.object( + type(client.transport.create_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.CreateIndexEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_index_endpoint_async_from_dict(): + await test_create_index_endpoint_async(request_type=dict) + + +def test_create_index_endpoint_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.CreateIndexEndpointRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index_endpoint), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_index_endpoint_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.CreateIndexEndpointRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index_endpoint), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_create_index_endpoint_flattened(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
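+        # (each flattened keyword is expected to be copied onto the generated request message)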
+        client.create_index_endpoint(
+            parent='parent_value',
+            index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].index_endpoint == gca_index_endpoint.IndexEndpoint(name='name_value')
+
+
+def test_create_index_endpoint_flattened_error():
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_index_endpoint(
+            index_endpoint_service.CreateIndexEndpointRequest(),
+            parent='parent_value',
+            index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'),
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_index_endpoint_flattened_async():
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_index_endpoint),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_index_endpoint(
+            parent='parent_value',
+            index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].index_endpoint == gca_index_endpoint.IndexEndpoint(name='name_value')
+
+
+@pytest.mark.asyncio
+async def test_create_index_endpoint_flattened_error_async():
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.create_index_endpoint(
+            index_endpoint_service.CreateIndexEndpointRequest(),
+            parent='parent_value',
+            index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'),
+        )
+
+
+def test_get_index_endpoint(transport: str = 'grpc', request_type=index_endpoint_service.GetIndexEndpointRequest):
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_index_endpoint),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = index_endpoint.IndexEndpoint(
+            name='name_value',
+            display_name='display_name_value',
+            description='description_value',
+            etag='etag_value',
+            network='network_value',
+        )
+        response = client.get_index_endpoint(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == index_endpoint_service.GetIndexEndpointRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, index_endpoint.IndexEndpoint)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.description == 'description_value'
+    assert response.etag == 'etag_value'
+    assert response.network == 'network_value'
+
+
+def test_get_index_endpoint_from_dict():
+    test_get_index_endpoint(request_type=dict)
+
+
+def test_get_index_endpoint_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_index_endpoint),
+            '__call__') as call:
+        client.get_index_endpoint()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == index_endpoint_service.GetIndexEndpointRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_index_endpoint_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.GetIndexEndpointRequest):
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_index_endpoint),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint.IndexEndpoint(
+            name='name_value',
+            display_name='display_name_value',
+            description='description_value',
+            etag='etag_value',
+            network='network_value',
+        ))
+        response = await client.get_index_endpoint(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == index_endpoint_service.GetIndexEndpointRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, index_endpoint.IndexEndpoint)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.description == 'description_value'
+    assert response.etag == 'etag_value'
+    assert response.network == 'network_value'
+
+
+@pytest.mark.asyncio
+async def test_get_index_endpoint_async_from_dict():
+    await test_get_index_endpoint_async(request_type=dict)
+
+
+def test_get_index_endpoint_field_headers():
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = index_endpoint_service.GetIndexEndpointRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_index_endpoint),
+            '__call__') as call:
+        call.return_value = index_endpoint.IndexEndpoint()
+        client.get_index_endpoint(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_index_endpoint_field_headers_async():
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = index_endpoint_service.GetIndexEndpointRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_index_endpoint),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint.IndexEndpoint())
+        await client.get_index_endpoint(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_get_index_endpoint_flattened():
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_index_endpoint),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = index_endpoint.IndexEndpoint()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_index_endpoint(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_get_index_endpoint_flattened_error():
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_index_endpoint(
+            index_endpoint_service.GetIndexEndpointRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_index_endpoint_flattened_async():
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_index_endpoint),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint.IndexEndpoint())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_index_endpoint(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_index_endpoint_flattened_error_async():
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_index_endpoint(
+            index_endpoint_service.GetIndexEndpointRequest(),
+            name='name_value',
+        )
+
+
+def test_list_index_endpoints(transport: str = 'grpc', request_type=index_endpoint_service.ListIndexEndpointsRequest):
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_index_endpoints),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = index_endpoint_service.ListIndexEndpointsResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_index_endpoints(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == index_endpoint_service.ListIndexEndpointsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListIndexEndpointsPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_index_endpoints_from_dict():
+    test_list_index_endpoints(request_type=dict)
+
+
+def test_list_index_endpoints_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_index_endpoints),
+            '__call__') as call:
+        client.list_index_endpoints()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == index_endpoint_service.ListIndexEndpointsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_index_endpoints_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.ListIndexEndpointsRequest):
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_index_endpoints),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint_service.ListIndexEndpointsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_index_endpoints(request)
+
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.ListIndexEndpointsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListIndexEndpointsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_index_endpoints_async_from_dict(): + await test_list_index_endpoints_async(request_type=dict) + + +def test_list_index_endpoints_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.ListIndexEndpointsRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), + '__call__') as call: + call.return_value = index_endpoint_service.ListIndexEndpointsResponse() + client.list_index_endpoints(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_index_endpoints_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.ListIndexEndpointsRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint_service.ListIndexEndpointsResponse()) + await client.list_index_endpoints(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_list_index_endpoints_flattened(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_index_endpoints), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = index_endpoint_service.ListIndexEndpointsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_index_endpoints( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+def test_list_index_endpoints_flattened_error():
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_index_endpoints(
+            index_endpoint_service.ListIndexEndpointsRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_index_endpoints_flattened_async():
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_index_endpoints),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint_service.ListIndexEndpointsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_index_endpoints(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_index_endpoints_flattened_error_async():
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_index_endpoints(
+            index_endpoint_service.ListIndexEndpointsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_index_endpoints_pager():
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_index_endpoints),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            index_endpoint_service.ListIndexEndpointsResponse(
+                index_endpoints=[
+                    index_endpoint.IndexEndpoint(),
+                    index_endpoint.IndexEndpoint(),
+                    index_endpoint.IndexEndpoint(),
+                ],
+                next_page_token='abc',
+            ),
+            index_endpoint_service.ListIndexEndpointsResponse(
+                index_endpoints=[],
+                next_page_token='def',
+            ),
+            index_endpoint_service.ListIndexEndpointsResponse(
+                index_endpoints=[
+                    index_endpoint.IndexEndpoint(),
+                ],
+                next_page_token='ghi',
+            ),
+            index_endpoint_service.ListIndexEndpointsResponse(
+                index_endpoints=[
+                    index_endpoint.IndexEndpoint(),
+                    index_endpoint.IndexEndpoint(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_index_endpoints(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, index_endpoint.IndexEndpoint)
+                   for i in results)
+
+def test_list_index_endpoints_pages():
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_index_endpoints),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            index_endpoint_service.ListIndexEndpointsResponse(
+                index_endpoints=[
+                    index_endpoint.IndexEndpoint(),
+                    index_endpoint.IndexEndpoint(),
+                    index_endpoint.IndexEndpoint(),
+                ],
+                next_page_token='abc',
+            ),
+            index_endpoint_service.ListIndexEndpointsResponse(
+                index_endpoints=[],
+                next_page_token='def',
+            ),
+            index_endpoint_service.ListIndexEndpointsResponse(
+                index_endpoints=[
+                    index_endpoint.IndexEndpoint(),
+                ],
+                next_page_token='ghi',
+            ),
+            index_endpoint_service.ListIndexEndpointsResponse(
+                index_endpoints=[
+                    index_endpoint.IndexEndpoint(),
+                    index_endpoint.IndexEndpoint(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_index_endpoints(request={}).pages)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_index_endpoints_async_pager():
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_index_endpoints),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            index_endpoint_service.ListIndexEndpointsResponse(
+                index_endpoints=[
+                    index_endpoint.IndexEndpoint(),
+                    index_endpoint.IndexEndpoint(),
+                    index_endpoint.IndexEndpoint(),
+                ],
+                next_page_token='abc',
+            ),
+            index_endpoint_service.ListIndexEndpointsResponse(
+                index_endpoints=[],
+                next_page_token='def',
+            ),
+            index_endpoint_service.ListIndexEndpointsResponse(
+                index_endpoints=[
+                    index_endpoint.IndexEndpoint(),
+                ],
+                next_page_token='ghi',
+            ),
+            index_endpoint_service.ListIndexEndpointsResponse(
+                index_endpoints=[
+                    index_endpoint.IndexEndpoint(),
+                    index_endpoint.IndexEndpoint(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_index_endpoints(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, index_endpoint.IndexEndpoint)
+                   for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_index_endpoints_async_pages():
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_index_endpoints),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
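+        # (pages of 3, 0, 1 and 2 items; the trailing RuntimeError fails the test if the pager over-fetches)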
+ call.side_effect = ( + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + ], + next_page_token='abc', + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[], + next_page_token='def', + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + ], + next_page_token='ghi', + ), + index_endpoint_service.ListIndexEndpointsResponse( + index_endpoints=[ + index_endpoint.IndexEndpoint(), + index_endpoint.IndexEndpoint(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_index_endpoints(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +def test_update_index_endpoint(transport: str = 'grpc', request_type=index_endpoint_service.UpdateIndexEndpointRequest): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_index_endpoint.IndexEndpoint( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + network='network_value', + ) + response = client.update_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.UpdateIndexEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_index_endpoint.IndexEndpoint) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.network == 'network_value' + + +def test_update_index_endpoint_from_dict(): + test_update_index_endpoint(request_type=dict) + + +def test_update_index_endpoint_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index_endpoint), + '__call__') as call: + client.update_index_endpoint() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.UpdateIndexEndpointRequest() + + +@pytest.mark.asyncio +async def test_update_index_endpoint_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.UpdateIndexEndpointRequest): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_index_endpoint),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_index_endpoint.IndexEndpoint(
+            name='name_value',
+            display_name='display_name_value',
+            description='description_value',
+            etag='etag_value',
+            network='network_value',
+        ))
+        response = await client.update_index_endpoint(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == index_endpoint_service.UpdateIndexEndpointRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, gca_index_endpoint.IndexEndpoint)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.description == 'description_value'
+    assert response.etag == 'etag_value'
+    assert response.network == 'network_value'
+
+
+@pytest.mark.asyncio
+async def test_update_index_endpoint_async_from_dict():
+    await test_update_index_endpoint_async(request_type=dict)
+
+
+def test_update_index_endpoint_field_headers():
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = index_endpoint_service.UpdateIndexEndpointRequest()
+
+    request.index_endpoint.name = 'index_endpoint.name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_index_endpoint),
+            '__call__') as call:
+        call.return_value = gca_index_endpoint.IndexEndpoint()
+        client.update_index_endpoint(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'index_endpoint.name=index_endpoint.name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_update_index_endpoint_field_headers_async():
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = index_endpoint_service.UpdateIndexEndpointRequest()
+
+    request.index_endpoint.name = 'index_endpoint.name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_index_endpoint),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_index_endpoint.IndexEndpoint())
+        await client.update_index_endpoint(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'index_endpoint.name=index_endpoint.name/value',
+    ) in kw['metadata']
+
+
+def test_update_index_endpoint_flattened():
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_index_endpoint),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gca_index_endpoint.IndexEndpoint()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.update_index_endpoint(
+            index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].index_endpoint == gca_index_endpoint.IndexEndpoint(name='name_value')
+        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+def test_update_index_endpoint_flattened_error():
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.update_index_endpoint(
+            index_endpoint_service.UpdateIndexEndpointRequest(),
+            index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+
+@pytest.mark.asyncio
+async def test_update_index_endpoint_flattened_async():
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_index_endpoint),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_index_endpoint.IndexEndpoint())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.update_index_endpoint(
+            index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].index_endpoint == gca_index_endpoint.IndexEndpoint(name='name_value')
+        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+@pytest.mark.asyncio
+async def test_update_index_endpoint_flattened_error_async():
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
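+    # For reference, the two mutually exclusive call styles look like this
+    # (argument values are placeholders):
+    #   client.update_index_endpoint(request=request)                       # request object
+    #   client.update_index_endpoint(index_endpoint=..., update_mask=...)   # flattened
+    # Passing both at once, as below, should raise ValueError.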
+ with pytest.raises(ValueError): + await client.update_index_endpoint( + index_endpoint_service.UpdateIndexEndpointRequest(), + index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +def test_delete_index_endpoint(transport: str = 'grpc', request_type=index_endpoint_service.DeleteIndexEndpointRequest): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.DeleteIndexEndpointRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_index_endpoint_from_dict(): + test_delete_index_endpoint(request_type=dict) + + +def test_delete_index_endpoint_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index_endpoint), + '__call__') as call: + client.delete_index_endpoint() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.DeleteIndexEndpointRequest() + + +@pytest.mark.asyncio +async def test_delete_index_endpoint_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.DeleteIndexEndpointRequest): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.DeleteIndexEndpointRequest() + + # Establish that the response is the type that we expect. 
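+    # Note: delete is a long-running operation, so the client returns a future
+    # wrapping an operations_pb2.Operation; a real caller would typically wait
+    # on response.result(), which is not exercised here because the RPC is mocked.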
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_index_endpoint_async_from_dict(): + await test_delete_index_endpoint_async(request_type=dict) + + +def test_delete_index_endpoint_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.DeleteIndexEndpointRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index_endpoint), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_index_endpoint_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.DeleteIndexEndpointRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index_endpoint), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_index_endpoint(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_delete_index_endpoint_flattened(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index_endpoint), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_index_endpoint( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_delete_index_endpoint_flattened_error(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.delete_index_endpoint(
+            index_endpoint_service.DeleteIndexEndpointRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_index_endpoint_flattened_async():
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_index_endpoint),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_index_endpoint(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_index_endpoint_flattened_error_async():
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.delete_index_endpoint(
+            index_endpoint_service.DeleteIndexEndpointRequest(),
+            name='name_value',
+        )
+
+
+def test_deploy_index(transport: str = 'grpc', request_type=index_endpoint_service.DeployIndexRequest):
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.deploy_index),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.deploy_index(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == index_endpoint_service.DeployIndexRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+def test_deploy_index_from_dict():
+    test_deploy_index(request_type=dict)
+
+
+def test_deploy_index_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
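+    # A short note on the mocking strategy: type(client.transport.deploy_index)
+    # is the class of the gRPC stub's callable, so patching its __call__
+    # intercepts the RPC without opening a real channel.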
+ with mock.patch.object( + type(client.transport.deploy_index), + '__call__') as call: + client.deploy_index() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.DeployIndexRequest() + + +@pytest.mark.asyncio +async def test_deploy_index_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.DeployIndexRequest): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.deploy_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.DeployIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_deploy_index_async_from_dict(): + await test_deploy_index_async(request_type=dict) + + +def test_deploy_index_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.DeployIndexRequest() + + request.index_endpoint = 'index_endpoint/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_index), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.deploy_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'index_endpoint=index_endpoint/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_deploy_index_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.DeployIndexRequest() + + request.index_endpoint = 'index_endpoint/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.deploy_index), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.deploy_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'index_endpoint=index_endpoint/value',
+    ) in kw['metadata']
+
+
+def test_deploy_index_flattened():
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.deploy_index),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.deploy_index(
+            index_endpoint='index_endpoint_value',
+            deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].index_endpoint == 'index_endpoint_value'
+        assert args[0].deployed_index == gca_index_endpoint.DeployedIndex(id='id_value')
+
+
+def test_deploy_index_flattened_error():
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.deploy_index(
+            index_endpoint_service.DeployIndexRequest(),
+            index_endpoint='index_endpoint_value',
+            deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'),
+        )
+
+
+@pytest.mark.asyncio
+async def test_deploy_index_flattened_async():
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.deploy_index),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.deploy_index(
+            index_endpoint='index_endpoint_value',
+            deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].index_endpoint == 'index_endpoint_value'
+        assert args[0].deployed_index == gca_index_endpoint.DeployedIndex(id='id_value')
+
+
+@pytest.mark.asyncio
+async def test_deploy_index_flattened_error_async():
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.deploy_index( + index_endpoint_service.DeployIndexRequest(), + index_endpoint='index_endpoint_value', + deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), + ) + + +def test_undeploy_index(transport: str = 'grpc', request_type=index_endpoint_service.UndeployIndexRequest): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.undeploy_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.UndeployIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_undeploy_index_from_dict(): + test_undeploy_index(request_type=dict) + + +def test_undeploy_index_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_index), + '__call__') as call: + client.undeploy_index() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.UndeployIndexRequest() + + +@pytest.mark.asyncio +async def test_undeploy_index_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.UndeployIndexRequest): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.undeploy_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.UndeployIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_undeploy_index_async_from_dict(): + await test_undeploy_index_async(request_type=dict) + + +def test_undeploy_index_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = index_endpoint_service.UndeployIndexRequest() + + request.index_endpoint = 'index_endpoint/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_index), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.undeploy_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'index_endpoint=index_endpoint/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_undeploy_index_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.UndeployIndexRequest() + + request.index_endpoint = 'index_endpoint/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_index), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.undeploy_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'index_endpoint=index_endpoint/value', + ) in kw['metadata'] + + +def test_undeploy_index_flattened(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.undeploy_index( + index_endpoint='index_endpoint_value', + deployed_index_id='deployed_index_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].index_endpoint == 'index_endpoint_value' + assert args[0].deployed_index_id == 'deployed_index_id_value' + + +def test_undeploy_index_flattened_error(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.undeploy_index( + index_endpoint_service.UndeployIndexRequest(), + index_endpoint='index_endpoint_value', + deployed_index_id='deployed_index_id_value', + ) + + +@pytest.mark.asyncio +async def test_undeploy_index_flattened_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undeploy_index), + '__call__') as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.undeploy_index(
+            index_endpoint='index_endpoint_value',
+            deployed_index_id='deployed_index_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].index_endpoint == 'index_endpoint_value'
+        assert args[0].deployed_index_id == 'deployed_index_id_value'
+
+
+@pytest.mark.asyncio
+async def test_undeploy_index_flattened_error_async():
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.undeploy_index(
+            index_endpoint_service.UndeployIndexRequest(),
+            index_endpoint='index_endpoint_value',
+            deployed_index_id='deployed_index_id_value',
+        )
+
+
+def test_credentials_transport_error():
+    # It is an error to provide credentials and a transport instance.
+    transport = transports.IndexEndpointServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = IndexEndpointServiceClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport,
+        )
+
+    # It is an error to provide a credentials file and a transport instance.
+    transport = transports.IndexEndpointServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = IndexEndpointServiceClient(
+            client_options={"credentials_file": "credentials.json"},
+            transport=transport,
+        )
+
+    # It is an error to provide scopes and a transport instance.
+    transport = transports.IndexEndpointServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = IndexEndpointServiceClient(
+            client_options={"scopes": ["1", "2"]},
+            transport=transport,
+        )
+
+
+def test_transport_instance():
+    # A client may be instantiated with a custom transport instance.
+    transport = transports.IndexEndpointServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    client = IndexEndpointServiceClient(transport=transport)
+    assert client.transport is transport
+
+def test_transport_get_channel():
+    # A client may be instantiated with a custom transport instance.
+    transport = transports.IndexEndpointServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    channel = transport.grpc_channel
+    assert channel
+
+    transport = transports.IndexEndpointServiceGrpcAsyncIOTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    channel = transport.grpc_channel
+    assert channel
+
+@pytest.mark.parametrize("transport_class", [
+    transports.IndexEndpointServiceGrpcTransport,
+    transports.IndexEndpointServiceGrpcAsyncIOTransport,
+])
+def test_transport_adc(transport_class):
+    # Test default credentials are used if not provided.
+    with mock.patch.object(google.auth, 'default') as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport_class()
+        adc.assert_called_once()
+
+def test_transport_grpc_default():
+    # A client should use the gRPC transport by default.
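+    # (That is, when no transport argument is supplied, get_transport_class()
+    # should resolve to the synchronous gRPC transport asserted below.)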
+ client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.IndexEndpointServiceGrpcTransport, + ) + +def test_index_endpoint_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.IndexEndpointServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_index_endpoint_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1beta1.services.index_endpoint_service.transports.IndexEndpointServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.IndexEndpointServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'create_index_endpoint', + 'get_index_endpoint', + 'list_index_endpoints', + 'update_index_endpoint', + 'delete_index_endpoint', + 'deploy_index', + 'undeploy_index', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +@requires_google_auth_gte_1_25_0 +def test_index_endpoint_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.index_endpoint_service.transports.IndexEndpointServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.IndexEndpointServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_index_endpoint_service_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.index_endpoint_service.transports.IndexEndpointServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.IndexEndpointServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + ), + quota_project_id="octopus", + ) + + +def test_index_endpoint_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
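+    # Background note: "ADC" is Application Default Credentials; in real use
+    # google.auth.default() consults the environment (for example
+    # GOOGLE_APPLICATION_CREDENTIALS), but here it is mocked to return
+    # anonymous credentials so no real lookup happens.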
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.index_endpoint_service.transports.IndexEndpointServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.IndexEndpointServiceTransport() + adc.assert_called_once() + + +@requires_google_auth_gte_1_25_0 +def test_index_endpoint_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + IndexEndpointServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_index_endpoint_service_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + IndexEndpointServiceClient() + adc.assert_called_once_with( + scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.IndexEndpointServiceGrpcTransport, + transports.IndexEndpointServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_index_endpoint_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.IndexEndpointServiceGrpcTransport, + transports.IndexEndpointServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_index_endpoint_service_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") + adc.assert_called_once_with(scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.IndexEndpointServiceGrpcTransport, grpc_helpers), + (transports.IndexEndpointServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_index_endpoint_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
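+    # The expectation sketched below: the transport should call
+    # grpc_helpers.create_channel() against the service's default host,
+    # aiplatform.googleapis.com:443, using the mocked ADC credentials.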
+    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
+        grpc_helpers, "create_channel", autospec=True
+    ) as create_channel:
+        creds = ga_credentials.AnonymousCredentials()
+        adc.return_value = (creds, None)
+        transport_class(
+            quota_project_id="octopus",
+            scopes=["1", "2"]
+        )
+
+        create_channel.assert_called_with(
+            "aiplatform.googleapis.com:443",
+            credentials=creds,
+            credentials_file=None,
+            quota_project_id="octopus",
+            default_scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            scopes=["1", "2"],
+            default_host="aiplatform.googleapis.com",
+            ssl_credentials=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+
+@pytest.mark.parametrize("transport_class", [transports.IndexEndpointServiceGrpcTransport, transports.IndexEndpointServiceGrpcAsyncIOTransport])
+def test_index_endpoint_service_grpc_transport_client_cert_source_for_mtls(
+    transport_class
+):
+    cred = ga_credentials.AnonymousCredentials()
+
+    # Check ssl_channel_credentials is used if provided.
+    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+        mock_ssl_channel_creds = mock.Mock()
+        transport_class(
+            host="squid.clam.whelk",
+            credentials=cred,
+            ssl_channel_credentials=mock_ssl_channel_creds
+        )
+        mock_create_channel.assert_called_once_with(
+            "squid.clam.whelk:443",
+            credentials=cred,
+            credentials_file=None,
+            scopes=None,
+            ssl_credentials=mock_ssl_channel_creds,
+            quota_project_id=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+    # is used.
+    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+            transport_class(
+                credentials=cred,
+                client_cert_source_for_mtls=client_cert_source_callback
+            )
+            expected_cert, expected_key = client_cert_source_callback()
+            mock_ssl_cred.assert_called_once_with(
+                certificate_chain=expected_cert,
+                private_key=expected_key
+            )
+
+
+def test_index_endpoint_service_host_no_port():
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
+    )
+    assert client.transport._host == 'aiplatform.googleapis.com:443'
+
+
+def test_index_endpoint_service_host_with_port():
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
+    )
+    assert client.transport._host == 'aiplatform.googleapis.com:8000'
+
+def test_index_endpoint_service_grpc_transport_channel():
+    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.IndexEndpointServiceGrpcTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+def test_index_endpoint_service_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.IndexEndpointServiceGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.IndexEndpointServiceGrpcTransport, transports.IndexEndpointServiceGrpcAsyncIOTransport])
+def test_index_endpoint_service_transport_channel_mtls_with_client_cert_source(
+    transport_class
+):
+    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
+        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+            mock_ssl_cred = mock.Mock()
+            grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+
+            cred = ga_credentials.AnonymousCredentials()
+            with pytest.warns(DeprecationWarning):
+                with mock.patch.object(google.auth, 'default') as adc:
+                    adc.return_value = (cred, None)
+                    transport = transport_class(
+                        host="squid.clam.whelk",
+                        api_mtls_endpoint="mtls.squid.clam.whelk",
+                        client_cert_source=client_cert_source_callback,
+                    )
+                    adc.assert_called_once()
+
+            grpc_ssl_channel_cred.assert_called_once_with(
+                certificate_chain=b"cert bytes", private_key=b"key bytes"
+            )
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+            assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.IndexEndpointServiceGrpcTransport, transports.IndexEndpointServiceGrpcAsyncIOTransport])
+def test_index_endpoint_service_transport_channel_mtls_with_adc(
+    transport_class
+):
+    mock_ssl_cred = mock.Mock()
+    with mock.patch.multiple(
+        "google.auth.transport.grpc.SslCredentials",
+        __init__=mock.Mock(return_value=None),
+        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+    ):
+        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+            mock_cred = mock.Mock()
+
+            with pytest.warns(DeprecationWarning):
+                transport = transport_class(
+                    host="squid.clam.whelk",
+                    credentials=mock_cred,
+                    api_mtls_endpoint="mtls.squid.clam.whelk",
+                    client_cert_source=None,
+                )
+
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=mock_cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_index_endpoint_service_grpc_lro_client():
+    client = IndexEndpointServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
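+    # Note: the operations client is created lazily on first access and then
+    # cached on the transport, which is why the identity assertion below holds.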
+    assert isinstance(
+        transport.operations_client,
+        operations_v1.OperationsClient,
+    )
+
+    # Ensure that subsequent calls to the property return the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_index_endpoint_service_grpc_lro_async_client():
+    client = IndexEndpointServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc_asyncio',
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(
+        transport.operations_client,
+        operations_v1.OperationsAsyncClient,
+    )
+
+    # Ensure that subsequent calls to the property return the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_index_path():
+    project = "squid"
+    location = "clam"
+    index = "whelk"
+    expected = "projects/{project}/locations/{location}/indexes/{index}".format(project=project, location=location, index=index, )
+    actual = IndexEndpointServiceClient.index_path(project, location, index)
+    assert expected == actual
+
+
+def test_parse_index_path():
+    expected = {
+        "project": "octopus",
+        "location": "oyster",
+        "index": "nudibranch",
+    }
+    path = IndexEndpointServiceClient.index_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = IndexEndpointServiceClient.parse_index_path(path)
+    assert expected == actual
+
+def test_index_endpoint_path():
+    project = "cuttlefish"
+    location = "mussel"
+    index_endpoint = "winkle"
+    expected = "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format(project=project, location=location, index_endpoint=index_endpoint, )
+    actual = IndexEndpointServiceClient.index_endpoint_path(project, location, index_endpoint)
+    assert expected == actual
+
+
+def test_parse_index_endpoint_path():
+    expected = {
+        "project": "nautilus",
+        "location": "scallop",
+        "index_endpoint": "abalone",
+    }
+    path = IndexEndpointServiceClient.index_endpoint_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = IndexEndpointServiceClient.parse_index_endpoint_path(path)
+    assert expected == actual
+
+def test_common_billing_account_path():
+    billing_account = "squid"
+    expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+    actual = IndexEndpointServiceClient.common_billing_account_path(billing_account)
+    assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+    expected = {
+        "billing_account": "clam",
+    }
+    path = IndexEndpointServiceClient.common_billing_account_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = IndexEndpointServiceClient.parse_common_billing_account_path(path)
+    assert expected == actual
+
+def test_common_folder_path():
+    folder = "whelk"
+    expected = "folders/{folder}".format(folder=folder, )
+    actual = IndexEndpointServiceClient.common_folder_path(folder)
+    assert expected == actual
+
+
+def test_parse_common_folder_path():
+    expected = {
+        "folder": "octopus",
+    }
+    path = IndexEndpointServiceClient.common_folder_path(**expected)
+
+    # Check that the path construction is reversible.
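+    # For reference, the round trip being checked looks like this
+    # (values are the arbitrary test placeholders):
+    #   common_folder_path("octopus")                -> "folders/octopus"
+    #   parse_common_folder_path("folders/octopus")  -> {"folder": "octopus"}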
+ actual = IndexEndpointServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = IndexEndpointServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = IndexEndpointServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = IndexEndpointServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = IndexEndpointServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = IndexEndpointServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = IndexEndpointServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = IndexEndpointServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = IndexEndpointServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = IndexEndpointServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.IndexEndpointServiceTransport, '_prep_wrapped_messages') as prep: + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.IndexEndpointServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = IndexEndpointServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py new file mode 100644 index 0000000000..18f0fe79b9 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py @@ -0,0 +1,2370 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.index_service import IndexServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.index_service import IndexServiceClient +from google.cloud.aiplatform_v1beta1.services.index_service import pagers +from google.cloud.aiplatform_v1beta1.services.index_service import transports +from google.cloud.aiplatform_v1beta1.services.index_service.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.aiplatform_v1beta1.types import deployed_index_ref +from google.cloud.aiplatform_v1beta1.types import index +from google.cloud.aiplatform_v1beta1.types import index as gca_index +from google.cloud.aiplatform_v1beta1.types import index_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client):
+    return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
+
+
+def test__get_default_mtls_endpoint():
+    api_endpoint = "example.googleapis.com"
+    api_mtls_endpoint = "example.mtls.googleapis.com"
+    sandbox_endpoint = "example.sandbox.googleapis.com"
+    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
+    non_googleapi = "api.example.com"
+
+    assert IndexServiceClient._get_default_mtls_endpoint(None) is None
+    assert IndexServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
+    assert IndexServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
+    assert IndexServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
+    assert IndexServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
+    assert IndexServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
+
+
+@pytest.mark.parametrize("client_class", [
+    IndexServiceClient,
+    IndexServiceAsyncClient,
+])
+def test_index_service_client_from_service_account_info(client_class):
+    creds = ga_credentials.AnonymousCredentials()
+    with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
+        factory.return_value = creds
+        info = {"valid": True}
+        client = client_class.from_service_account_info(info)
+        assert client.transport._credentials == creds
+        assert isinstance(client, client_class)
+
+        assert client.transport._host == 'aiplatform.googleapis.com:443'
+
+
+@pytest.mark.parametrize("client_class", [
+    IndexServiceClient,
+    IndexServiceAsyncClient,
+])
+def test_index_service_client_service_account_always_use_jwt(client_class):
+    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
+        creds = service_account.Credentials(None, None, None)
+        client = client_class(credentials=creds)
+        use_jwt.assert_not_called()
+
+
+@pytest.mark.parametrize("transport_class,transport_name", [
+    (transports.IndexServiceGrpcTransport, "grpc"),
+    (transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio"),
+])
+def test_index_service_client_service_account_always_use_jwt_true(transport_class, transport_name):
+    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
+        creds = service_account.Credentials(None, None, None)
+        transport = transport_class(credentials=creds, always_use_jwt_access=True)
+        use_jwt.assert_called_once_with(True)
+
+
+@pytest.mark.parametrize("client_class", [
+    IndexServiceClient,
+    IndexServiceAsyncClient,
+])
+def test_index_service_client_from_service_account_file(client_class):
+    creds = ga_credentials.AnonymousCredentials()
+    with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
+        factory.return_value = creds
+        client = client_class.from_service_account_file("dummy/file/path.json")
+        assert client.transport._credentials == creds
+        assert isinstance(client, client_class)
+
+        client = client_class.from_service_account_json("dummy/file/path.json")
+        assert client.transport._credentials == creds
+        assert isinstance(client, client_class)
+
+        assert client.transport._host == 'aiplatform.googleapis.com:443'
+
+
+def test_index_service_client_get_transport_class():
+    transport = IndexServiceClient.get_transport_class()
+    available_transports = [
+        transports.IndexServiceGrpcTransport,
+    ]
+    assert transport in available_transports
+
+    transport = IndexServiceClient.get_transport_class("grpc")
+    assert transport == transports.IndexServiceGrpcTransport
+
+
+@pytest.mark.parametrize("client_class,transport_class,transport_name", [
+    (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc"),
+    (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio"),
+])
+@mock.patch.object(IndexServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceClient))
+@mock.patch.object(IndexServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceAsyncClient))
+def test_index_service_client_client_options(client_class, transport_class, transport_name):
+    # Check that if channel is provided we won't create a new one.
+    with mock.patch.object(IndexServiceClient, 'get_transport_class') as gtc:
+        transport = transport_class(
+            credentials=ga_credentials.AnonymousCredentials()
+        )
+        client = client_class(transport=transport)
+        gtc.assert_not_called()
+
+    # Check that if channel is provided via str we will create a new one.
+    with mock.patch.object(IndexServiceClient, 'get_transport_class') as gtc:
+        client = client_class(transport=transport_name)
+        gtc.assert_called()
+
+    # Check the case api_endpoint is provided.
+    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
+    with mock.patch.object(transport_class, '__init__') as patched:
+        patched.return_value = None
+        client = client_class(client_options=options)
+        patched.assert_called_once_with(
+            credentials=None,
+            credentials_file=None,
+            host="squid.clam.whelk",
+            scopes=None,
+            client_cert_source_for_mtls=None,
+            quota_project_id=None,
+            client_info=transports.base.DEFAULT_CLIENT_INFO,
+        )
+
+    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+    # "never".
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+        with mock.patch.object(transport_class, '__init__') as patched:
+            patched.return_value = None
+            client = client_class()
+            patched.assert_called_once_with(
+                credentials=None,
+                credentials_file=None,
+                host=client.DEFAULT_ENDPOINT,
+                scopes=None,
+                client_cert_source_for_mtls=None,
+                quota_project_id=None,
+                client_info=transports.base.DEFAULT_CLIENT_INFO,
+            )
+
+    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+    # "always".
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+        with mock.patch.object(transport_class, '__init__') as patched:
+            patched.return_value = None
+            client = client_class()
+            patched.assert_called_once_with(
+                credentials=None,
+                credentials_file=None,
+                host=client.DEFAULT_MTLS_ENDPOINT,
+                scopes=None,
+                client_cert_source_for_mtls=None,
+                quota_project_id=None,
+                client_info=transports.base.DEFAULT_CLIENT_INFO,
+            )
+
+    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+    # unsupported value.
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+        with pytest.raises(MutualTLSChannelError):
+            client = client_class()
+
+    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
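+    # Note: the only values the client accepts for this variable are "true" and
+    # "false"; anything else should make construction raise ValueError, as
+    # checked below.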
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc", "true"), + (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc", "false"), + (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(IndexServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceClient)) +@mock.patch.object(IndexServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_index_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
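+    # ("ADC" is Application Default Credentials. The client consults
+    # google.auth.transport.mtls.has_default_client_cert_source() to decide
+    # whether a default certificate exists, which is why both mtls helpers
+    # are patched in the block below.)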
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc"), + (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_index_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc"), + (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_index_service_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
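+    # The client does not load the file itself: it forwards the path to the
+    # transport (note credentials=None but credentials_file="credentials.json"
+    # in the assertion below), and the transport resolves it via google.auth.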
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_index_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1beta1.services.index_service.transports.IndexServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = IndexServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_index(transport: str = 'grpc', request_type=index_service.CreateIndexRequest): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.CreateIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_index_from_dict(): + test_create_index(request_type=dict) + + +def test_create_index_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index), + '__call__') as call: + client.create_index() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.CreateIndexRequest() + + +@pytest.mark.asyncio +async def test_create_index_async(transport: str = 'grpc_asyncio', request_type=index_service.CreateIndexRequest): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index), + '__call__') as call: + # Designate an appropriate return value for the call. 
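+        # grpc_helpers_async.FakeUnaryUnaryCall wraps the message in an
+        # awaitable test fake, mirroring how a real async gRPC call behaves.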
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.CreateIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_index_async_from_dict(): + await test_create_index_async(request_type=dict) + + +def test_create_index_field_headers(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.CreateIndexRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_index_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.CreateIndexRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_create_index_flattened(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_index( + parent='parent_value', + index=gca_index.Index(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
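+        # (The flattened keyword arguments are packed into a single
+        # CreateIndexRequest before the transport is invoked, so the fields
+        # are inspected on the positional request argument.)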
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].index == gca_index.Index(name='name_value')
+
+
+def test_create_index_flattened_error():
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_index(
+            index_service.CreateIndexRequest(),
+            parent='parent_value',
+            index=gca_index.Index(name='name_value'),
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_index_flattened_async():
+    client = IndexServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_index),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_index(
+            parent='parent_value',
+            index=gca_index.Index(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].index == gca_index.Index(name='name_value')
+
+
+@pytest.mark.asyncio
+async def test_create_index_flattened_error_async():
+    client = IndexServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.create_index(
+            index_service.CreateIndexRequest(),
+            parent='parent_value',
+            index=gca_index.Index(name='name_value'),
+        )
+
+
+def test_get_index(transport: str = 'grpc', request_type=index_service.GetIndexRequest):
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_index),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = index.Index(
+            name='name_value',
+            display_name='display_name_value',
+            description='description_value',
+            metadata_schema_uri='metadata_schema_uri_value',
+            etag='etag_value',
+        )
+        response = client.get_index(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == index_service.GetIndexRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, index.Index)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.description == 'description_value'
+    assert response.metadata_schema_uri == 'metadata_schema_uri_value'
+    assert response.etag == 'etag_value'
+
+
+def test_get_index_from_dict():
+    test_get_index(request_type=dict)
+
+
+def test_get_index_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_index),
+            '__call__') as call:
+        client.get_index()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == index_service.GetIndexRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_index_async(transport: str = 'grpc_asyncio', request_type=index_service.GetIndexRequest):
+    client = IndexServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_index),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index.Index(
+            name='name_value',
+            display_name='display_name_value',
+            description='description_value',
+            metadata_schema_uri='metadata_schema_uri_value',
+            etag='etag_value',
+        ))
+        response = await client.get_index(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == index_service.GetIndexRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, index.Index)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.description == 'description_value'
+    assert response.metadata_schema_uri == 'metadata_schema_uri_value'
+    assert response.etag == 'etag_value'
+
+
+@pytest.mark.asyncio
+async def test_get_index_async_from_dict():
+    await test_get_index_async(request_type=dict)
+
+
+def test_get_index_field_headers():
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = index_service.GetIndexRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_index),
+            '__call__') as call:
+        call.return_value = index.Index()
+        client.get_index(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
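+    # The routing header travels as gRPC metadata: the x-goog-request-params
+    # value is derived from the resource name set on the request (here
+    # "name=name/value").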
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_index_field_headers_async():
+    client = IndexServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = index_service.GetIndexRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_index),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index.Index())
+        await client.get_index(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_get_index_flattened():
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_index),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = index.Index()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_index(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_get_index_flattened_error():
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_index(
+            index_service.GetIndexRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_index_flattened_async():
+    client = IndexServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_index),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index.Index())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_index(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_index_flattened_error_async():
+    client = IndexServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_index(
+            index_service.GetIndexRequest(),
+            name='name_value',
+        )
+
+
+def test_list_indexes(transport: str = 'grpc', request_type=index_service.ListIndexesRequest):
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_indexes),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = index_service.ListIndexesResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_indexes(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == index_service.ListIndexesRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListIndexesPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_indexes_from_dict():
+    test_list_indexes(request_type=dict)
+
+
+def test_list_indexes_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_indexes),
+            '__call__') as call:
+        client.list_indexes()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == index_service.ListIndexesRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_indexes_async(transport: str = 'grpc_asyncio', request_type=index_service.ListIndexesRequest):
+    client = IndexServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_indexes),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_service.ListIndexesResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_indexes(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == index_service.ListIndexesRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListIndexesAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_indexes_async_from_dict():
+    await test_list_indexes_async(request_type=dict)
+
+
+def test_list_indexes_field_headers():
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = index_service.ListIndexesRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_indexes),
+            '__call__') as call:
+        call.return_value = index_service.ListIndexesResponse()
+        client.list_indexes(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_indexes_field_headers_async():
+    client = IndexServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = index_service.ListIndexesRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_indexes),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_service.ListIndexesResponse())
+        await client.list_indexes(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_list_indexes_flattened():
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_indexes),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = index_service.ListIndexesResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_indexes(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+def test_list_indexes_flattened_error():
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_indexes(
+            index_service.ListIndexesRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_indexes_flattened_async():
+    client = IndexServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_indexes),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_service.ListIndexesResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_indexes(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_indexes_flattened_error_async():
+    client = IndexServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_indexes(
+            index_service.ListIndexesRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_indexes_pager():
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_indexes),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            index_service.ListIndexesResponse(
+                indexes=[
+                    index.Index(),
+                    index.Index(),
+                    index.Index(),
+                ],
+                next_page_token='abc',
+            ),
+            index_service.ListIndexesResponse(
+                indexes=[],
+                next_page_token='def',
+            ),
+            index_service.ListIndexesResponse(
+                indexes=[
+                    index.Index(),
+                ],
+                next_page_token='ghi',
+            ),
+            index_service.ListIndexesResponse(
+                indexes=[
+                    index.Index(),
+                    index.Index(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_indexes(request={})
+
+        assert pager._metadata == metadata
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, index.Index)
+                   for i in results)
+
+
+def test_list_indexes_pages():
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_indexes),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            index_service.ListIndexesResponse(
+                indexes=[
+                    index.Index(),
+                    index.Index(),
+                    index.Index(),
+                ],
+                next_page_token='abc',
+            ),
+            index_service.ListIndexesResponse(
+                indexes=[],
+                next_page_token='def',
+            ),
+            index_service.ListIndexesResponse(
+                indexes=[
+                    index.Index(),
+                ],
+                next_page_token='ghi',
+            ),
+            index_service.ListIndexesResponse(
+                indexes=[
+                    index.Index(),
+                    index.Index(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_indexes(request={}).pages)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_indexes_async_pager():
+    client = IndexServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_indexes),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
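+        # Each stub invocation pops the next entry from side_effect; the
+        # trailing RuntimeError guards against the pager fetching more pages
+        # than the test supplies.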
+        call.side_effect = (
+            index_service.ListIndexesResponse(
+                indexes=[
+                    index.Index(),
+                    index.Index(),
+                    index.Index(),
+                ],
+                next_page_token='abc',
+            ),
+            index_service.ListIndexesResponse(
+                indexes=[],
+                next_page_token='def',
+            ),
+            index_service.ListIndexesResponse(
+                indexes=[
+                    index.Index(),
+                ],
+                next_page_token='ghi',
+            ),
+            index_service.ListIndexesResponse(
+                indexes=[
+                    index.Index(),
+                    index.Index(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_indexes(request={})
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, index.Index)
+                   for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_indexes_async_pages():
+    client = IndexServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_indexes),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            index_service.ListIndexesResponse(
+                indexes=[
+                    index.Index(),
+                    index.Index(),
+                    index.Index(),
+                ],
+                next_page_token='abc',
+            ),
+            index_service.ListIndexesResponse(
+                indexes=[],
+                next_page_token='def',
+            ),
+            index_service.ListIndexesResponse(
+                indexes=[
+                    index.Index(),
+                ],
+                next_page_token='ghi',
+            ),
+            index_service.ListIndexesResponse(
+                indexes=[
+                    index.Index(),
+                    index.Index(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_indexes(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+
+def test_update_index(transport: str = 'grpc', request_type=index_service.UpdateIndexRequest):
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_index),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.update_index(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == index_service.UpdateIndexRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+def test_update_index_from_dict():
+    test_update_index(request_type=dict)
+
+
+def test_update_index_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.update_index), + '__call__') as call: + client.update_index() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.UpdateIndexRequest() + + +@pytest.mark.asyncio +async def test_update_index_async(transport: str = 'grpc_asyncio', request_type=index_service.UpdateIndexRequest): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.update_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.UpdateIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_index_async_from_dict(): + await test_update_index_async(request_type=dict) + + +def test_update_index_field_headers(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.UpdateIndexRequest() + + request.index.name = 'index.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.update_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'index.name=index.name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_index_field_headers_async(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.UpdateIndexRequest() + + request.index.name = 'index.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_index), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.update_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'index.name=index.name/value',
+    ) in kw['metadata']
+
+
+def test_update_index_flattened():
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_index),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.update_index(
+            index=gca_index.Index(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].index == gca_index.Index(name='name_value')
+        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+def test_update_index_flattened_error():
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.update_index(
+            index_service.UpdateIndexRequest(),
+            index=gca_index.Index(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+
+@pytest.mark.asyncio
+async def test_update_index_flattened_async():
+    client = IndexServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_index),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.update_index(
+            index=gca_index.Index(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].index == gca_index.Index(name='name_value')
+        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+@pytest.mark.asyncio
+async def test_update_index_flattened_error_async():
+    client = IndexServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.update_index(
+            index_service.UpdateIndexRequest(),
+            index=gca_index.Index(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+
+def test_delete_index(transport: str = 'grpc', request_type=index_service.DeleteIndexRequest):
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.DeleteIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_index_from_dict(): + test_delete_index(request_type=dict) + + +def test_delete_index_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index), + '__call__') as call: + client.delete_index() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.DeleteIndexRequest() + + +@pytest.mark.asyncio +async def test_delete_index_async(transport: str = 'grpc_asyncio', request_type=index_service.DeleteIndexRequest): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_service.DeleteIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_index_async_from_dict(): + await test_delete_index_async(request_type=dict) + + +def test_delete_index_field_headers(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_service.DeleteIndexRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_index), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_delete_index_field_headers_async():
+    client = IndexServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = index_service.DeleteIndexRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_index),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
+        await client.delete_index(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_delete_index_flattened():
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_index),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_index(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_delete_index_flattened_error():
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_index(
+            index_service.DeleteIndexRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_index_flattened_async():
+    client = IndexServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_index),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_index(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_index_flattened_error_async():
+    client = IndexServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.delete_index( + index_service.DeleteIndexRequest(), + name='name_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.IndexServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.IndexServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = IndexServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.IndexServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = IndexServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.IndexServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = IndexServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.IndexServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.IndexServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.IndexServiceGrpcTransport, + transports.IndexServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.IndexServiceGrpcTransport, + ) + +def test_index_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.IndexServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_index_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1beta1.services.index_service.transports.IndexServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.IndexServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
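+    # IndexServiceTransport is only an abstract interface; the concrete grpc
+    # and grpc_asyncio transports override these stubs with real RPC wrappers.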
+ methods = ( + 'create_index', + 'get_index', + 'list_indexes', + 'update_index', + 'delete_index', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +@requires_google_auth_gte_1_25_0 +def test_index_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.index_service.transports.IndexServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.IndexServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_index_service_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.index_service.transports.IndexServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.IndexServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + ), + quota_project_id="octopus", + ) + + +def test_index_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.index_service.transports.IndexServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.IndexServiceTransport() + adc.assert_called_once() + + +@requires_google_auth_gte_1_25_0 +def test_index_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + IndexServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_index_service_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. 
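+    # (The gte/lt 1.25.0 variants exist because google-auth 1.25.0 introduced
+    # the separate default_scopes argument; older releases only understand
+    # scopes, which is what this test asserts.)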
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + IndexServiceClient() + adc.assert_called_once_with( + scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.IndexServiceGrpcTransport, + transports.IndexServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_index_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.IndexServiceGrpcTransport, + transports.IndexServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_index_service_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") + adc.assert_called_once_with(scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.IndexServiceGrpcTransport, grpc_helpers), + (transports.IndexServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_index_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.IndexServiceGrpcTransport, transports.IndexServiceGrpcAsyncIOTransport]) +def test_index_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
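+    # A minimal sketch of the callback contract, assuming the
+    # client_cert_source_callback fixture defined near the top of this module:
+    #
+    #     def client_cert_source_callback():
+    #         return b"cert bytes", b"key bytes"
+    #
+    # An explicit ssl_channel_credentials object takes precedence; the
+    # callback is only consulted when it is absent (second block below).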
+    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+        mock_ssl_channel_creds = mock.Mock()
+        transport_class(
+            host="squid.clam.whelk",
+            credentials=cred,
+            ssl_channel_credentials=mock_ssl_channel_creds
+        )
+        mock_create_channel.assert_called_once_with(
+            "squid.clam.whelk:443",
+            credentials=cred,
+            credentials_file=None,
+            scopes=None,
+            ssl_credentials=mock_ssl_channel_creds,
+            quota_project_id=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+    # is used.
+    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+            transport_class(
+                credentials=cred,
+                client_cert_source_for_mtls=client_cert_source_callback
+            )
+            expected_cert, expected_key = client_cert_source_callback()
+            mock_ssl_cred.assert_called_once_with(
+                certificate_chain=expected_cert,
+                private_key=expected_key
+            )
+
+
+def test_index_service_host_no_port():
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
+    )
+    assert client.transport._host == 'aiplatform.googleapis.com:443'
+
+
+def test_index_service_host_with_port():
+    client = IndexServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
+    )
+    assert client.transport._host == 'aiplatform.googleapis.com:8000'
+
+def test_index_service_grpc_transport_channel():
+    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.IndexServiceGrpcTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+def test_index_service_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.IndexServiceGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.IndexServiceGrpcTransport, transports.IndexServiceGrpcAsyncIOTransport]) +def test_index_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.IndexServiceGrpcTransport, transports.IndexServiceGrpcAsyncIOTransport]) +def test_index_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_index_service_grpc_lro_client(): + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_index_service_grpc_lro_async_client(): + client = IndexServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. 
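+    # The operations client is what the generated LRO methods use to poll
+    # operation status, e.g. (illustrative):
+    #
+    #     op = transport.operations_client.get_operation('operations/spam')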
+ assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_index_path(): + project = "squid" + location = "clam" + index = "whelk" + expected = "projects/{project}/locations/{location}/indexes/{index}".format(project=project, location=location, index=index, ) + actual = IndexServiceClient.index_path(project, location, index) + assert expected == actual + + +def test_parse_index_path(): + expected = { + "project": "octopus", + "location": "oyster", + "index": "nudibranch", + } + path = IndexServiceClient.index_path(**expected) + + # Check that the path construction is reversible. + actual = IndexServiceClient.parse_index_path(path) + assert expected == actual + +def test_index_endpoint_path(): + project = "cuttlefish" + location = "mussel" + index_endpoint = "winkle" + expected = "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format(project=project, location=location, index_endpoint=index_endpoint, ) + actual = IndexServiceClient.index_endpoint_path(project, location, index_endpoint) + assert expected == actual + + +def test_parse_index_endpoint_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "index_endpoint": "abalone", + } + path = IndexServiceClient.index_endpoint_path(**expected) + + # Check that the path construction is reversible. + actual = IndexServiceClient.parse_index_endpoint_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = IndexServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = IndexServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = IndexServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = IndexServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = IndexServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = IndexServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = IndexServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = IndexServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
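+    # That is, parse_* should invert the builder (illustrative):
+    #
+    #     IndexServiceClient.parse_common_organization_path(
+    #         "organizations/nudibranch") == {"organization": "nudibranch"}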
+ actual = IndexServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = IndexServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = IndexServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = IndexServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = IndexServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = IndexServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = IndexServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.IndexServiceTransport, '_prep_wrapped_messages') as prep: + client = IndexServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.IndexServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = IndexServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py new file mode 100644 index 0000000000..d9d43b0ec5 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py @@ -0,0 +1,9019 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.job_service import JobServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.job_service import JobServiceClient +from google.cloud.aiplatform_v1beta1.services.job_service import pagers +from google.cloud.aiplatform_v1beta1.services.job_service import transports +from google.cloud.aiplatform_v1beta1.services.job_service.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.aiplatform_v1beta1.types import accelerator_type +from google.cloud.aiplatform_v1beta1.types import batch_prediction_job +from google.cloud.aiplatform_v1beta1.types import batch_prediction_job as gca_batch_prediction_job +from google.cloud.aiplatform_v1beta1.types import completion_stats +from google.cloud.aiplatform_v1beta1.types import custom_job +from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job +from google.cloud.aiplatform_v1beta1.types import data_labeling_job +from google.cloud.aiplatform_v1beta1.types import data_labeling_job as gca_data_labeling_job +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import explanation_metadata +from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job +from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import job_service +from google.cloud.aiplatform_v1beta1.types import job_state +from google.cloud.aiplatform_v1beta1.types import machine_resources +from google.cloud.aiplatform_v1beta1.types import manual_batch_tuning_parameters +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job +from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job +from google.cloud.aiplatform_v1beta1.types import model_monitoring +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import study +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import any_pb2 # type: ignore +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from google.type import money_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal 
to" tests the default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert JobServiceClient._get_default_mtls_endpoint(None) is None + assert JobServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert JobServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert JobServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert JobServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert JobServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + JobServiceClient, + JobServiceAsyncClient, +]) +def test_job_service_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +@pytest.mark.parametrize("client_class", [ + JobServiceClient, + JobServiceAsyncClient, +]) +def test_job_service_client_service_account_always_use_jwt(client_class): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.JobServiceGrpcTransport, "grpc"), + (transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_job_service_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + JobServiceClient, + JobServiceAsyncClient, +]) +def test_job_service_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + 
factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +def test_job_service_client_get_transport_class(): + transport = JobServiceClient.get_transport_class() + available_transports = [ + transports.JobServiceGrpcTransport, + ] + assert transport in available_transports + + transport = JobServiceClient.get_transport_class("grpc") + assert transport == transports.JobServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (JobServiceClient, transports.JobServiceGrpcTransport, "grpc"), + (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient)) +@mock.patch.object(JobServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceAsyncClient)) +def test_job_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(JobServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(JobServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. 
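+    # GOOGLE_API_USE_MTLS_ENDPOINT accepts "never", "auto", or "always"; any
+    # other value should surface as MutualTLSChannelError at construction
+    # time (illustrative):
+    #
+    #     os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "Unsupported"
+    #     client_class()  # raises MutualTLSChannelError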
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", "true"), + (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", "false"), + (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient)) +@mock.patch.object(JobServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_job_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
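+    # "ADC client cert" means a certificate discovered through the mTLS
+    # helpers in google-auth, roughly (illustrative sketch):
+    #
+    #     from google.auth.transport import mtls
+    #     if mtls.has_default_client_cert_source():
+    #         cert_source = mtls.default_client_cert_source()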
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (JobServiceClient, transports.JobServiceGrpcTransport, "grpc"), + (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_job_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (JobServiceClient, transports.JobServiceGrpcTransport, "grpc"), + (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_job_service_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
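+    # Typical caller-side usage being modelled here (illustrative):
+    #
+    #     options = client_options.ClientOptions(
+    #         credentials_file="credentials.json")
+    #     client = JobServiceClient(client_options=options)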
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_job_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1beta1.services.job_service.transports.JobServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = JobServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_custom_job(transport: str = 'grpc', request_type=job_service.CreateCustomJobRequest): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_custom_job.CustomJob( + name='name_value', + display_name='display_name_value', + state=job_state.JobState.JOB_STATE_QUEUED, + ) + response = client.create_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateCustomJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_custom_job.CustomJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +def test_create_custom_job_from_dict(): + test_create_custom_job(request_type=dict) + + +def test_create_custom_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_custom_job), + '__call__') as call: + client.create_custom_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateCustomJobRequest() + + +@pytest.mark.asyncio +async def test_create_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateCustomJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
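+    # For the async client the mocked stub must return an awaitable, so the
+    # canned response is wrapped in grpc_helpers_async.FakeUnaryUnaryCall,
+    # roughly (illustrative):
+    #
+    #     call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+    #         gca_custom_job.CustomJob(name='name_value'))
+    #     response = await client.create_custom_job(request)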
+ with mock.patch.object( + type(client.transport.create_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_custom_job.CustomJob( + name='name_value', + display_name='display_name_value', + state=job_state.JobState.JOB_STATE_QUEUED, + )) + response = await client.create_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateCustomJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_custom_job.CustomJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +@pytest.mark.asyncio +async def test_create_custom_job_async_from_dict(): + await test_create_custom_job_async(request_type=dict) + + +def test_create_custom_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CreateCustomJobRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_custom_job), + '__call__') as call: + call.return_value = gca_custom_job.CustomJob() + client.create_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_custom_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CreateCustomJobRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_custom_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_custom_job.CustomJob()) + await client.create_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_create_custom_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_custom_job.CustomJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
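+        # Flattened arguments are sugar for building the request object; the
+        # call below is roughly equivalent to (illustrative):
+        #
+        #     client.create_custom_job(
+        #         request=job_service.CreateCustomJobRequest(
+        #             parent='parent_value',
+        #             custom_job=gca_custom_job.CustomJob(name='name_value')))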
+ client.create_custom_job( + parent='parent_value', + custom_job=gca_custom_job.CustomJob(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].custom_job == gca_custom_job.CustomJob(name='name_value') + + +def test_create_custom_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_custom_job( + job_service.CreateCustomJobRequest(), + parent='parent_value', + custom_job=gca_custom_job.CustomJob(name='name_value'), + ) + + +@pytest.mark.asyncio +async def test_create_custom_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_custom_job.CustomJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_custom_job.CustomJob()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_custom_job( + parent='parent_value', + custom_job=gca_custom_job.CustomJob(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].custom_job == gca_custom_job.CustomJob(name='name_value') + + +@pytest.mark.asyncio +async def test_create_custom_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_custom_job( + job_service.CreateCustomJobRequest(), + parent='parent_value', + custom_job=gca_custom_job.CustomJob(name='name_value'), + ) + + +def test_get_custom_job(transport: str = 'grpc', request_type=job_service.GetCustomJobRequest): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = custom_job.CustomJob( + name='name_value', + display_name='display_name_value', + state=job_state.JobState.JOB_STATE_QUEUED, + ) + response = client.get_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetCustomJobRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, custom_job.CustomJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +def test_get_custom_job_from_dict(): + test_get_custom_job(request_type=dict) + + +def test_get_custom_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_custom_job), + '__call__') as call: + client.get_custom_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetCustomJobRequest() + + +@pytest.mark.asyncio +async def test_get_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetCustomJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(custom_job.CustomJob( + name='name_value', + display_name='display_name_value', + state=job_state.JobState.JOB_STATE_QUEUED, + )) + response = await client.get_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetCustomJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, custom_job.CustomJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +@pytest.mark.asyncio +async def test_get_custom_job_async_from_dict(): + await test_get_custom_job_async(request_type=dict) + + +def test_get_custom_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetCustomJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_custom_job), + '__call__') as call: + call.return_value = custom_job.CustomJob() + client.get_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_custom_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
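+    # The routing header is plain gRPC metadata, e.g. (illustrative):
+    #
+    #     ('x-goog-request-params', 'name=name/value')
+    #
+    # so the API frontend can route the request without inspecting the body.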
+ request = job_service.GetCustomJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_custom_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(custom_job.CustomJob()) + await client.get_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_get_custom_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = custom_job.CustomJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_custom_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_get_custom_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_custom_job( + job_service.GetCustomJobRequest(), + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_get_custom_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = custom_job.CustomJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(custom_job.CustomJob()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_custom_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_get_custom_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_custom_job( + job_service.GetCustomJobRequest(), + name='name_value', + ) + + +def test_list_custom_jobs(transport: str = 'grpc', request_type=job_service.ListCustomJobsRequest): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_custom_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListCustomJobsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_custom_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListCustomJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListCustomJobsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_custom_jobs_from_dict(): + test_list_custom_jobs(request_type=dict) + + +def test_list_custom_jobs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_custom_jobs), + '__call__') as call: + client.list_custom_jobs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListCustomJobsRequest() + + +@pytest.mark.asyncio +async def test_list_custom_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListCustomJobsRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_custom_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListCustomJobsResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_custom_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ListCustomJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListCustomJobsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_custom_jobs_async_from_dict(): + await test_list_custom_jobs_async(request_type=dict) + + +def test_list_custom_jobs_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.ListCustomJobsRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_custom_jobs), + '__call__') as call: + call.return_value = job_service.ListCustomJobsResponse() + client.list_custom_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_custom_jobs_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.ListCustomJobsRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_custom_jobs), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListCustomJobsResponse()) + await client.list_custom_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_list_custom_jobs_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_custom_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListCustomJobsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_custom_jobs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +def test_list_custom_jobs_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_custom_jobs( + job_service.ListCustomJobsRequest(), + parent='parent_value', + ) + + +@pytest.mark.asyncio +async def test_list_custom_jobs_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_custom_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = job_service.ListCustomJobsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListCustomJobsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_custom_jobs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +@pytest.mark.asyncio +async def test_list_custom_jobs_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        await client.list_custom_jobs(
+            job_service.ListCustomJobsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_custom_jobs_pager():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_custom_jobs),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_custom_jobs(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, custom_job.CustomJob)
+                   for i in results)
+
+def test_list_custom_jobs_pages():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_custom_jobs),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_custom_jobs(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_custom_jobs_async_pager():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_custom_jobs),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
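+        # Each response below is one fake "page"; the async pager should walk
+        # the page boundaries transparently (illustrative usage):
+        #
+        #     async_pager = await client.list_custom_jobs(request={})
+        #     async for job in async_pager:
+        #         ...  # yields 6 CustomJob items across all pages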
+        call.side_effect = (
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_custom_jobs(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, custom_job.CustomJob)
+                   for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_custom_jobs_async_pages():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_custom_jobs),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListCustomJobsResponse(
+                custom_jobs=[
+                    custom_job.CustomJob(),
+                    custom_job.CustomJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_custom_jobs(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+def test_delete_custom_job(transport: str = 'grpc', request_type=job_service.DeleteCustomJobRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_custom_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.delete_custom_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.DeleteCustomJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+def test_delete_custom_job_from_dict():
+    test_delete_custom_job(request_type=dict)
+
+
+def test_delete_custom_job_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.delete_custom_job), + '__call__') as call: + client.delete_custom_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteCustomJobRequest() + + +@pytest.mark.asyncio +async def test_delete_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteCustomJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteCustomJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_custom_job_async_from_dict(): + await test_delete_custom_job_async(request_type=dict) + + +def test_delete_custom_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteCustomJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_custom_job), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_custom_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteCustomJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_custom_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_delete_custom_job_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_custom_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_custom_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_delete_custom_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_custom_job(
+            job_service.DeleteCustomJobRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_custom_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_custom_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_custom_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_custom_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.delete_custom_job(
+            job_service.DeleteCustomJobRequest(),
+            name='name_value',
+        )
+
+
+def test_cancel_custom_job(transport: str = 'grpc', request_type=job_service.CancelCustomJobRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_custom_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.cancel_custom_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelCustomJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_custom_job_from_dict(): + test_cancel_custom_job(request_type=dict) + + +def test_cancel_custom_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_custom_job), + '__call__') as call: + client.cancel_custom_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelCustomJobRequest() + + +@pytest.mark.asyncio +async def test_cancel_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelCustomJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_custom_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelCustomJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_custom_job_async_from_dict(): + await test_cancel_custom_job_async(request_type=dict) + + +def test_cancel_custom_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CancelCustomJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_custom_job), + '__call__') as call: + call.return_value = None + client.cancel_custom_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_cancel_custom_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CancelCustomJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.cancel_custom_job),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        await client.cancel_custom_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_cancel_custom_job_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_custom_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.cancel_custom_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_cancel_custom_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.cancel_custom_job(
+            job_service.CancelCustomJobRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_cancel_custom_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_custom_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.cancel_custom_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_cancel_custom_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.cancel_custom_job(
+            job_service.CancelCustomJobRequest(),
+            name='name_value',
+        )
+
+
+def test_create_data_labeling_job(transport: str = 'grpc', request_type=job_service.CreateDataLabelingJobRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_data_labeling_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
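+        # The fixture values below are arbitrary; the test only verifies
+        # that they round-trip unchanged through the client surface.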
+        call.return_value = gca_data_labeling_job.DataLabelingJob(
+            name='name_value',
+            display_name='display_name_value',
+            datasets=['datasets_value'],
+            labeler_count=1375,
+            instruction_uri='instruction_uri_value',
+            inputs_schema_uri='inputs_schema_uri_value',
+            state=job_state.JobState.JOB_STATE_QUEUED,
+            labeling_progress=1810,
+            specialist_pools=['specialist_pools_value'],
+        )
+        response = client.create_data_labeling_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.CreateDataLabelingJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, gca_data_labeling_job.DataLabelingJob)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.datasets == ['datasets_value']
+    assert response.labeler_count == 1375
+    assert response.instruction_uri == 'instruction_uri_value'
+    assert response.inputs_schema_uri == 'inputs_schema_uri_value'
+    assert response.state == job_state.JobState.JOB_STATE_QUEUED
+    assert response.labeling_progress == 1810
+    assert response.specialist_pools == ['specialist_pools_value']
+
+
+def test_create_data_labeling_job_from_dict():
+    test_create_data_labeling_job(request_type=dict)
+
+
+def test_create_data_labeling_job_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_data_labeling_job),
+            '__call__') as call:
+        client.create_data_labeling_job()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.CreateDataLabelingJobRequest()
+
+
+@pytest.mark.asyncio
+async def test_create_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateDataLabelingJobRequest):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_data_labeling_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_data_labeling_job.DataLabelingJob(
+            name='name_value',
+            display_name='display_name_value',
+            datasets=['datasets_value'],
+            labeler_count=1375,
+            instruction_uri='instruction_uri_value',
+            inputs_schema_uri='inputs_schema_uri_value',
+            state=job_state.JobState.JOB_STATE_QUEUED,
+            labeling_progress=1810,
+            specialist_pools=['specialist_pools_value'],
+        ))
+        response = await client.create_data_labeling_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.CreateDataLabelingJobRequest()
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_data_labeling_job.DataLabelingJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.datasets == ['datasets_value'] + assert response.labeler_count == 1375 + assert response.instruction_uri == 'instruction_uri_value' + assert response.inputs_schema_uri == 'inputs_schema_uri_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.labeling_progress == 1810 + assert response.specialist_pools == ['specialist_pools_value'] + + +@pytest.mark.asyncio +async def test_create_data_labeling_job_async_from_dict(): + await test_create_data_labeling_job_async(request_type=dict) + + +def test_create_data_labeling_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CreateDataLabelingJobRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_data_labeling_job), + '__call__') as call: + call.return_value = gca_data_labeling_job.DataLabelingJob() + client.create_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_data_labeling_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CreateDataLabelingJobRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_data_labeling_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_data_labeling_job.DataLabelingJob()) + await client.create_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_create_data_labeling_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_data_labeling_job.DataLabelingJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_data_labeling_job( + parent='parent_value', + data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
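+        # The flattened keyword arguments should have been merged into a
+        # single CreateDataLabelingJobRequest before reaching the transport.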
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].data_labeling_job == gca_data_labeling_job.DataLabelingJob(name='name_value')
+
+
+def test_create_data_labeling_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_data_labeling_job(
+            job_service.CreateDataLabelingJobRequest(),
+            parent='parent_value',
+            data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'),
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_data_labeling_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_data_labeling_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_data_labeling_job.DataLabelingJob())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_data_labeling_job(
+            parent='parent_value',
+            data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].data_labeling_job == gca_data_labeling_job.DataLabelingJob(name='name_value')
+
+
+@pytest.mark.asyncio
+async def test_create_data_labeling_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.create_data_labeling_job(
+            job_service.CreateDataLabelingJobRequest(),
+            parent='parent_value',
+            data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'),
+        )
+
+
+def test_get_data_labeling_job(transport: str = 'grpc', request_type=job_service.GetDataLabelingJobRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_data_labeling_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = data_labeling_job.DataLabelingJob(
+            name='name_value',
+            display_name='display_name_value',
+            datasets=['datasets_value'],
+            labeler_count=1375,
+            instruction_uri='instruction_uri_value',
+            inputs_schema_uri='inputs_schema_uri_value',
+            state=job_state.JobState.JOB_STATE_QUEUED,
+            labeling_progress=1810,
+            specialist_pools=['specialist_pools_value'],
+        )
+        response = client.get_data_labeling_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.GetDataLabelingJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, data_labeling_job.DataLabelingJob)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.datasets == ['datasets_value']
+    assert response.labeler_count == 1375
+    assert response.instruction_uri == 'instruction_uri_value'
+    assert response.inputs_schema_uri == 'inputs_schema_uri_value'
+    assert response.state == job_state.JobState.JOB_STATE_QUEUED
+    assert response.labeling_progress == 1810
+    assert response.specialist_pools == ['specialist_pools_value']
+
+
+def test_get_data_labeling_job_from_dict():
+    test_get_data_labeling_job(request_type=dict)
+
+
+def test_get_data_labeling_job_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_data_labeling_job),
+            '__call__') as call:
+        client.get_data_labeling_job()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.GetDataLabelingJobRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetDataLabelingJobRequest):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_data_labeling_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(data_labeling_job.DataLabelingJob(
+            name='name_value',
+            display_name='display_name_value',
+            datasets=['datasets_value'],
+            labeler_count=1375,
+            instruction_uri='instruction_uri_value',
+            inputs_schema_uri='inputs_schema_uri_value',
+            state=job_state.JobState.JOB_STATE_QUEUED,
+            labeling_progress=1810,
+            specialist_pools=['specialist_pools_value'],
+        ))
+        response = await client.get_data_labeling_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.GetDataLabelingJobRequest()
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, data_labeling_job.DataLabelingJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.datasets == ['datasets_value'] + assert response.labeler_count == 1375 + assert response.instruction_uri == 'instruction_uri_value' + assert response.inputs_schema_uri == 'inputs_schema_uri_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.labeling_progress == 1810 + assert response.specialist_pools == ['specialist_pools_value'] + + +@pytest.mark.asyncio +async def test_get_data_labeling_job_async_from_dict(): + await test_get_data_labeling_job_async(request_type=dict) + + +def test_get_data_labeling_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetDataLabelingJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_data_labeling_job), + '__call__') as call: + call.return_value = data_labeling_job.DataLabelingJob() + client.get_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_data_labeling_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetDataLabelingJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_data_labeling_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(data_labeling_job.DataLabelingJob()) + await client.get_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_get_data_labeling_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = data_labeling_job.DataLabelingJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_data_labeling_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_get_data_labeling_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_data_labeling_job(
+            job_service.GetDataLabelingJobRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_data_labeling_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_data_labeling_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(data_labeling_job.DataLabelingJob())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_data_labeling_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_data_labeling_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_data_labeling_job(
+            job_service.GetDataLabelingJobRequest(),
+            name='name_value',
+        )
+
+
+def test_list_data_labeling_jobs(transport: str = 'grpc', request_type=job_service.ListDataLabelingJobsRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_data_labeling_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = job_service.ListDataLabelingJobsResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_data_labeling_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.ListDataLabelingJobsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListDataLabelingJobsPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_data_labeling_jobs_from_dict():
+    test_list_data_labeling_jobs(request_type=dict)
+
+
+def test_list_data_labeling_jobs_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_data_labeling_jobs),
+            '__call__') as call:
+        client.list_data_labeling_jobs()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.ListDataLabelingJobsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_data_labeling_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListDataLabelingJobsRequest):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_data_labeling_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListDataLabelingJobsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_data_labeling_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.ListDataLabelingJobsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListDataLabelingJobsAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_data_labeling_jobs_async_from_dict():
+    await test_list_data_labeling_jobs_async(request_type=dict)
+
+
+def test_list_data_labeling_jobs_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.ListDataLabelingJobsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_data_labeling_jobs),
+            '__call__') as call:
+        call.return_value = job_service.ListDataLabelingJobsResponse()
+        client.list_data_labeling_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_data_labeling_jobs_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.ListDataLabelingJobsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_data_labeling_jobs),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListDataLabelingJobsResponse())
+        await client.list_data_labeling_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
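+    # Routing headers travel as (key, value) tuples inside the call's
+    # metadata keyword argument.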
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_list_data_labeling_jobs_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_data_labeling_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = job_service.ListDataLabelingJobsResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_data_labeling_jobs(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+def test_list_data_labeling_jobs_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_data_labeling_jobs(
+            job_service.ListDataLabelingJobsRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_data_labeling_jobs_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_data_labeling_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListDataLabelingJobsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_data_labeling_jobs(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_data_labeling_jobs_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_data_labeling_jobs(
+            job_service.ListDataLabelingJobsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_data_labeling_jobs_pager():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_data_labeling_jobs),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[
+                    data_labeling_job.DataLabelingJob(),
+                    data_labeling_job.DataLabelingJob(),
+                    data_labeling_job.DataLabelingJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[
+                    data_labeling_job.DataLabelingJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[
+                    data_labeling_job.DataLabelingJob(),
+                    data_labeling_job.DataLabelingJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_data_labeling_jobs(request={})
+
+        assert pager._metadata == metadata
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, data_labeling_job.DataLabelingJob)
+                   for i in results)
+
+
+def test_list_data_labeling_jobs_pages():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_data_labeling_jobs),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[
+                    data_labeling_job.DataLabelingJob(),
+                    data_labeling_job.DataLabelingJob(),
+                    data_labeling_job.DataLabelingJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[
+                    data_labeling_job.DataLabelingJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[
+                    data_labeling_job.DataLabelingJob(),
+                    data_labeling_job.DataLabelingJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_data_labeling_jobs(request={}).pages)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_data_labeling_jobs_async_pager():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_data_labeling_jobs),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[
+                    data_labeling_job.DataLabelingJob(),
+                    data_labeling_job.DataLabelingJob(),
+                    data_labeling_job.DataLabelingJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[
+                    data_labeling_job.DataLabelingJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[
+                    data_labeling_job.DataLabelingJob(),
+                    data_labeling_job.DataLabelingJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_data_labeling_jobs(request={})
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, data_labeling_job.DataLabelingJob)
+                   for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_data_labeling_jobs_async_pages():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_data_labeling_jobs),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[
+                    data_labeling_job.DataLabelingJob(),
+                    data_labeling_job.DataLabelingJob(),
+                    data_labeling_job.DataLabelingJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[
+                    data_labeling_job.DataLabelingJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListDataLabelingJobsResponse(
+                data_labeling_jobs=[
+                    data_labeling_job.DataLabelingJob(),
+                    data_labeling_job.DataLabelingJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_data_labeling_jobs(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+
+def test_delete_data_labeling_job(transport: str = 'grpc', request_type=job_service.DeleteDataLabelingJobRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_data_labeling_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.delete_data_labeling_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.DeleteDataLabelingJobRequest()
+
+    # Establish that the response is the type that we expect.
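+    # Delete is a long-running operation, so the client returns an
+    # operation future rather than the final response.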
+ assert isinstance(response, future.Future) + + +def test_delete_data_labeling_job_from_dict(): + test_delete_data_labeling_job(request_type=dict) + + +def test_delete_data_labeling_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_data_labeling_job), + '__call__') as call: + client.delete_data_labeling_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteDataLabelingJobRequest() + + +@pytest.mark.asyncio +async def test_delete_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteDataLabelingJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteDataLabelingJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_data_labeling_job_async_from_dict(): + await test_delete_data_labeling_job_async(request_type=dict) + + +def test_delete_data_labeling_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteDataLabelingJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_data_labeling_job), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_data_labeling_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteDataLabelingJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.delete_data_labeling_job),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
+        await client.delete_data_labeling_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_delete_data_labeling_job_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_data_labeling_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_data_labeling_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_delete_data_labeling_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_data_labeling_job(
+            job_service.DeleteDataLabelingJobRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_data_labeling_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_data_labeling_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_data_labeling_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_data_labeling_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.delete_data_labeling_job(
+            job_service.DeleteDataLabelingJobRequest(),
+            name='name_value',
+        )
+
+
+def test_cancel_data_labeling_job(transport: str = 'grpc', request_type=job_service.CancelDataLabelingJobRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
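+    # (Unset proto3 fields simply carry their default values on the wire.)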
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelDataLabelingJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_data_labeling_job_from_dict(): + test_cancel_data_labeling_job(request_type=dict) + + +def test_cancel_data_labeling_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_data_labeling_job), + '__call__') as call: + client.cancel_data_labeling_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelDataLabelingJobRequest() + + +@pytest.mark.asyncio +async def test_cancel_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelDataLabelingJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_data_labeling_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelDataLabelingJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_data_labeling_job_async_from_dict(): + await test_cancel_data_labeling_job_async(request_type=dict) + + +def test_cancel_data_labeling_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CancelDataLabelingJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_data_labeling_job), + '__call__') as call: + call.return_value = None + client.cancel_data_labeling_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_cancel_data_labeling_job_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.CancelDataLabelingJobRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_data_labeling_job),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        await client.cancel_data_labeling_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_cancel_data_labeling_job_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_data_labeling_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.cancel_data_labeling_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_cancel_data_labeling_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.cancel_data_labeling_job(
+            job_service.CancelDataLabelingJobRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_cancel_data_labeling_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_data_labeling_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.cancel_data_labeling_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_cancel_data_labeling_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
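+    # Mixing the two calling conventions would make field precedence
+    # ambiguous, so the client rejects it up front.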
+ with pytest.raises(ValueError): + await client.cancel_data_labeling_job( + job_service.CancelDataLabelingJobRequest(), + name='name_value', + ) + + +def test_create_hyperparameter_tuning_job(transport: str = 'grpc', request_type=job_service.CreateHyperparameterTuningJobRequest): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob( + name='name_value', + display_name='display_name_value', + max_trial_count=1609, + parallel_trial_count=2128, + max_failed_trial_count=2317, + state=job_state.JobState.JOB_STATE_QUEUED, + ) + response = client.create_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateHyperparameterTuningJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_hyperparameter_tuning_job.HyperparameterTuningJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.max_trial_count == 1609 + assert response.parallel_trial_count == 2128 + assert response.max_failed_trial_count == 2317 + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +def test_create_hyperparameter_tuning_job_from_dict(): + test_create_hyperparameter_tuning_job(request_type=dict) + + +def test_create_hyperparameter_tuning_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_hyperparameter_tuning_job), + '__call__') as call: + client.create_hyperparameter_tuning_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CreateHyperparameterTuningJobRequest() + + +@pytest.mark.asyncio +async def test_create_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateHyperparameterTuningJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
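+        # Note: the async client awaits the mocked stub, so a bare proto message
+        # is not awaitable. grpc_helpers_async.FakeUnaryUnaryCall wraps the message
+        # in an awaitable call object; a minimal hand-rolled equivalent (a sketch,
+        # assuming only the mocked stub above) would be:
+        #   async def fake_call():
+        #       return gca_hyperparameter_tuning_job.HyperparameterTuningJob()
+        #   call.return_value = fake_call()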
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_hyperparameter_tuning_job.HyperparameterTuningJob(
+            name='name_value',
+            display_name='display_name_value',
+            max_trial_count=1609,
+            parallel_trial_count=2128,
+            max_failed_trial_count=2317,
+            state=job_state.JobState.JOB_STATE_QUEUED,
+        ))
+        response = await client.create_hyperparameter_tuning_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.CreateHyperparameterTuningJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, gca_hyperparameter_tuning_job.HyperparameterTuningJob)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.max_trial_count == 1609
+    assert response.parallel_trial_count == 2128
+    assert response.max_failed_trial_count == 2317
+    assert response.state == job_state.JobState.JOB_STATE_QUEUED
+
+
+@pytest.mark.asyncio
+async def test_create_hyperparameter_tuning_job_async_from_dict():
+    await test_create_hyperparameter_tuning_job_async(request_type=dict)
+
+
+def test_create_hyperparameter_tuning_job_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.CreateHyperparameterTuningJobRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_hyperparameter_tuning_job),
+            '__call__') as call:
+        call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob()
+        client.create_hyperparameter_tuning_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_create_hyperparameter_tuning_job_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.CreateHyperparameterTuningJobRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_hyperparameter_tuning_job),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_hyperparameter_tuning_job.HyperparameterTuningJob())
+        await client.create_hyperparameter_tuning_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_create_hyperparameter_tuning_job_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_hyperparameter_tuning_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.create_hyperparameter_tuning_job(
+            parent='parent_value',
+            hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].hyperparameter_tuning_job == gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value')
+
+
+def test_create_hyperparameter_tuning_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_hyperparameter_tuning_job(
+            job_service.CreateHyperparameterTuningJobRequest(),
+            parent='parent_value',
+            hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'),
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_hyperparameter_tuning_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_hyperparameter_tuning_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_hyperparameter_tuning_job.HyperparameterTuningJob())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_hyperparameter_tuning_job(
+            parent='parent_value',
+            hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].hyperparameter_tuning_job == gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value')
+
+
+@pytest.mark.asyncio
+async def test_create_hyperparameter_tuning_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.create_hyperparameter_tuning_job(
+            job_service.CreateHyperparameterTuningJobRequest(),
+            parent='parent_value',
+            hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'),
+        )
+
+
+def test_get_hyperparameter_tuning_job(transport: str = 'grpc', request_type=job_service.GetHyperparameterTuningJobRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_hyperparameter_tuning_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob(
+            name='name_value',
+            display_name='display_name_value',
+            max_trial_count=1609,
+            parallel_trial_count=2128,
+            max_failed_trial_count=2317,
+            state=job_state.JobState.JOB_STATE_QUEUED,
+        )
+        response = client.get_hyperparameter_tuning_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.GetHyperparameterTuningJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, hyperparameter_tuning_job.HyperparameterTuningJob)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.max_trial_count == 1609
+    assert response.parallel_trial_count == 2128
+    assert response.max_failed_trial_count == 2317
+    assert response.state == job_state.JobState.JOB_STATE_QUEUED
+
+
+def test_get_hyperparameter_tuning_job_from_dict():
+    test_get_hyperparameter_tuning_job(request_type=dict)
+
+
+def test_get_hyperparameter_tuning_job_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_hyperparameter_tuning_job),
+            '__call__') as call:
+        client.get_hyperparameter_tuning_job()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.GetHyperparameterTuningJobRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetHyperparameterTuningJobRequest):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_hyperparameter_tuning_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(hyperparameter_tuning_job.HyperparameterTuningJob(
+            name='name_value',
+            display_name='display_name_value',
+            max_trial_count=1609,
+            parallel_trial_count=2128,
+            max_failed_trial_count=2317,
+            state=job_state.JobState.JOB_STATE_QUEUED,
+        ))
+        response = await client.get_hyperparameter_tuning_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.GetHyperparameterTuningJobRequest()
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, hyperparameter_tuning_job.HyperparameterTuningJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.max_trial_count == 1609 + assert response.parallel_trial_count == 2128 + assert response.max_failed_trial_count == 2317 + assert response.state == job_state.JobState.JOB_STATE_QUEUED + + +@pytest.mark.asyncio +async def test_get_hyperparameter_tuning_job_async_from_dict(): + await test_get_hyperparameter_tuning_job_async(request_type=dict) + + +def test_get_hyperparameter_tuning_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetHyperparameterTuningJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_hyperparameter_tuning_job), + '__call__') as call: + call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob() + client.get_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_hyperparameter_tuning_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetHyperparameterTuningJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_hyperparameter_tuning_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(hyperparameter_tuning_job.HyperparameterTuningJob()) + await client.get_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_get_hyperparameter_tuning_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_hyperparameter_tuning_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
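+        # Note: flattened keyword arguments are folded into a single request
+        # message by the client, so the assertions below read the populated
+        # fields back off args[0] rather than off separate parameters.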
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_get_hyperparameter_tuning_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_hyperparameter_tuning_job(
+            job_service.GetHyperparameterTuningJobRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_hyperparameter_tuning_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_hyperparameter_tuning_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(hyperparameter_tuning_job.HyperparameterTuningJob())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_hyperparameter_tuning_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_hyperparameter_tuning_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_hyperparameter_tuning_job(
+            job_service.GetHyperparameterTuningJobRequest(),
+            name='name_value',
+        )
+
+
+def test_list_hyperparameter_tuning_jobs(transport: str = 'grpc', request_type=job_service.ListHyperparameterTuningJobsRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_hyperparameter_tuning_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = job_service.ListHyperparameterTuningJobsResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_hyperparameter_tuning_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.ListHyperparameterTuningJobsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListHyperparameterTuningJobsPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_hyperparameter_tuning_jobs_from_dict():
+    test_list_hyperparameter_tuning_jobs(request_type=dict)
+
+
+def test_list_hyperparameter_tuning_jobs_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
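+    # Note: with neither a request object nor flattened fields, the client is
+    # expected to fall back to a default (empty) request message; the final
+    # assertion of this test checks exactly that.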
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_hyperparameter_tuning_jobs),
+            '__call__') as call:
+        client.list_hyperparameter_tuning_jobs()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.ListHyperparameterTuningJobsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_hyperparameter_tuning_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListHyperparameterTuningJobsRequest):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_hyperparameter_tuning_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListHyperparameterTuningJobsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_hyperparameter_tuning_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.ListHyperparameterTuningJobsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListHyperparameterTuningJobsAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_hyperparameter_tuning_jobs_async_from_dict():
+    await test_list_hyperparameter_tuning_jobs_async(request_type=dict)
+
+
+def test_list_hyperparameter_tuning_jobs_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.ListHyperparameterTuningJobsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_hyperparameter_tuning_jobs),
+            '__call__') as call:
+        call.return_value = job_service.ListHyperparameterTuningJobsResponse()
+        client.list_hyperparameter_tuning_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_hyperparameter_tuning_jobs_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.ListHyperparameterTuningJobsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_hyperparameter_tuning_jobs),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListHyperparameterTuningJobsResponse())
+        await client.list_hyperparameter_tuning_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_list_hyperparameter_tuning_jobs_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_hyperparameter_tuning_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = job_service.ListHyperparameterTuningJobsResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_hyperparameter_tuning_jobs(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+def test_list_hyperparameter_tuning_jobs_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_hyperparameter_tuning_jobs(
+            job_service.ListHyperparameterTuningJobsRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_hyperparameter_tuning_jobs_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_hyperparameter_tuning_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListHyperparameterTuningJobsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_hyperparameter_tuning_jobs(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_hyperparameter_tuning_jobs_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_hyperparameter_tuning_jobs(
+            job_service.ListHyperparameterTuningJobsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_hyperparameter_tuning_jobs_pager():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
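+    # Note: mock's side_effect yields one item per stub invocation, so each
+    # page below is served by one simulated RPC. The pages hold 3 + 0 + 1 + 2
+    # jobs (6 in total), and the trailing RuntimeError would surface if the
+    # pager ever requested a page beyond the final, empty-token response.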
+    with mock.patch.object(
+            type(client.transport.list_hyperparameter_tuning_jobs),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_hyperparameter_tuning_jobs(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, hyperparameter_tuning_job.HyperparameterTuningJob)
+                   for i in results)
+
+def test_list_hyperparameter_tuning_jobs_pages():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_hyperparameter_tuning_jobs),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_hyperparameter_tuning_jobs(request={}).pages)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_hyperparameter_tuning_jobs_async_pager():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_hyperparameter_tuning_jobs),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_hyperparameter_tuning_jobs(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, hyperparameter_tuning_job.HyperparameterTuningJob)
+                   for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_hyperparameter_tuning_jobs_async_pages():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_hyperparameter_tuning_jobs),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListHyperparameterTuningJobsResponse(
+                hyperparameter_tuning_jobs=[
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                    hyperparameter_tuning_job.HyperparameterTuningJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_hyperparameter_tuning_jobs(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+def test_delete_hyperparameter_tuning_job(transport: str = 'grpc', request_type=job_service.DeleteHyperparameterTuningJobRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_hyperparameter_tuning_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.delete_hyperparameter_tuning_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
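+        # Note: Delete is a long-running operation, so the transport returns a
+        # raw operations_pb2.Operation and the client wraps it in an api_core
+        # future; the isinstance check a few lines below verifies that wrapper.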
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteHyperparameterTuningJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_hyperparameter_tuning_job_from_dict(): + test_delete_hyperparameter_tuning_job(request_type=dict) + + +def test_delete_hyperparameter_tuning_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_hyperparameter_tuning_job), + '__call__') as call: + client.delete_hyperparameter_tuning_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteHyperparameterTuningJobRequest() + + +@pytest.mark.asyncio +async def test_delete_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteHyperparameterTuningJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteHyperparameterTuningJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_hyperparameter_tuning_job_async_from_dict(): + await test_delete_hyperparameter_tuning_job_async(request_type=dict) + + +def test_delete_hyperparameter_tuning_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteHyperparameterTuningJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_hyperparameter_tuning_job), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_delete_hyperparameter_tuning_job_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.DeleteHyperparameterTuningJobRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_hyperparameter_tuning_job),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
+        await client.delete_hyperparameter_tuning_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_delete_hyperparameter_tuning_job_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_hyperparameter_tuning_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_hyperparameter_tuning_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_delete_hyperparameter_tuning_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_hyperparameter_tuning_job(
+            job_service.DeleteHyperparameterTuningJobRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_hyperparameter_tuning_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_hyperparameter_tuning_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_hyperparameter_tuning_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_delete_hyperparameter_tuning_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_hyperparameter_tuning_job( + job_service.DeleteHyperparameterTuningJobRequest(), + name='name_value', + ) + + +def test_cancel_hyperparameter_tuning_job(transport: str = 'grpc', request_type=job_service.CancelHyperparameterTuningJobRequest): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelHyperparameterTuningJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_hyperparameter_tuning_job_from_dict(): + test_cancel_hyperparameter_tuning_job(request_type=dict) + + +def test_cancel_hyperparameter_tuning_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_hyperparameter_tuning_job), + '__call__') as call: + client.cancel_hyperparameter_tuning_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelHyperparameterTuningJobRequest() + + +@pytest.mark.asyncio +async def test_cancel_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelHyperparameterTuningJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.CancelHyperparameterTuningJobRequest() + + # Establish that the response is the type that we expect. 
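+    # Note: Cancel RPCs return google.protobuf.Empty on the wire, which the
+    # generated client surfaces as None, hence the identity check below.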
+ assert response is None + + +@pytest.mark.asyncio +async def test_cancel_hyperparameter_tuning_job_async_from_dict(): + await test_cancel_hyperparameter_tuning_job_async(request_type=dict) + + +def test_cancel_hyperparameter_tuning_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CancelHyperparameterTuningJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_hyperparameter_tuning_job), + '__call__') as call: + call.return_value = None + client.cancel_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_cancel_hyperparameter_tuning_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.CancelHyperparameterTuningJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_hyperparameter_tuning_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_hyperparameter_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_cancel_hyperparameter_tuning_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_hyperparameter_tuning_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.cancel_hyperparameter_tuning_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_cancel_hyperparameter_tuning_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.cancel_hyperparameter_tuning_job(
+            job_service.CancelHyperparameterTuningJobRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_cancel_hyperparameter_tuning_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_hyperparameter_tuning_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.cancel_hyperparameter_tuning_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_cancel_hyperparameter_tuning_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.cancel_hyperparameter_tuning_job(
+            job_service.CancelHyperparameterTuningJobRequest(),
+            name='name_value',
+        )
+
+
+def test_create_batch_prediction_job(transport: str = 'grpc', request_type=job_service.CreateBatchPredictionJobRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_batch_prediction_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gca_batch_prediction_job.BatchPredictionJob(
+            name='name_value',
+            display_name='display_name_value',
+            model='model_value',
+            generate_explanation=True,
+            state=job_state.JobState.JOB_STATE_QUEUED,
+        )
+        response = client.create_batch_prediction_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.CreateBatchPredictionJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, gca_batch_prediction_job.BatchPredictionJob)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.model == 'model_value'
+    assert response.generate_explanation is True
+    assert response.state == job_state.JobState.JOB_STATE_QUEUED
+
+
+def test_create_batch_prediction_job_from_dict():
+    test_create_batch_prediction_job(request_type=dict)
+
+
+def test_create_batch_prediction_job_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_batch_prediction_job),
+            '__call__') as call:
+        client.create_batch_prediction_job()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.CreateBatchPredictionJobRequest()
+
+
+@pytest.mark.asyncio
+async def test_create_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateBatchPredictionJobRequest):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_batch_prediction_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_batch_prediction_job.BatchPredictionJob(
+            name='name_value',
+            display_name='display_name_value',
+            model='model_value',
+            generate_explanation=True,
+            state=job_state.JobState.JOB_STATE_QUEUED,
+        ))
+        response = await client.create_batch_prediction_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.CreateBatchPredictionJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, gca_batch_prediction_job.BatchPredictionJob)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.model == 'model_value'
+    assert response.generate_explanation is True
+    assert response.state == job_state.JobState.JOB_STATE_QUEUED
+
+
+@pytest.mark.asyncio
+async def test_create_batch_prediction_job_async_from_dict():
+    await test_create_batch_prediction_job_async(request_type=dict)
+
+
+def test_create_batch_prediction_job_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.CreateBatchPredictionJobRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_batch_prediction_job),
+            '__call__') as call:
+        call.return_value = gca_batch_prediction_job.BatchPredictionJob()
+        client.create_batch_prediction_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_create_batch_prediction_job_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.CreateBatchPredictionJobRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_batch_prediction_job),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_batch_prediction_job.BatchPredictionJob())
+        await client.create_batch_prediction_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_create_batch_prediction_job_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_batch_prediction_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gca_batch_prediction_job.BatchPredictionJob()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.create_batch_prediction_job(
+            parent='parent_value',
+            batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].batch_prediction_job == gca_batch_prediction_job.BatchPredictionJob(name='name_value')
+
+
+def test_create_batch_prediction_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_batch_prediction_job(
+            job_service.CreateBatchPredictionJobRequest(),
+            parent='parent_value',
+            batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'),
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_batch_prediction_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_batch_prediction_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_batch_prediction_job.BatchPredictionJob())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_batch_prediction_job(
+            parent='parent_value',
+            batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].batch_prediction_job == gca_batch_prediction_job.BatchPredictionJob(name='name_value')
+
+
+@pytest.mark.asyncio
+async def test_create_batch_prediction_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.create_batch_prediction_job(
+            job_service.CreateBatchPredictionJobRequest(),
+            parent='parent_value',
+            batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'),
+        )
+
+
+def test_get_batch_prediction_job(transport: str = 'grpc', request_type=job_service.GetBatchPredictionJobRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_batch_prediction_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = batch_prediction_job.BatchPredictionJob(
+            name='name_value',
+            display_name='display_name_value',
+            model='model_value',
+            generate_explanation=True,
+            state=job_state.JobState.JOB_STATE_QUEUED,
+        )
+        response = client.get_batch_prediction_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.GetBatchPredictionJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, batch_prediction_job.BatchPredictionJob)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.model == 'model_value'
+    assert response.generate_explanation is True
+    assert response.state == job_state.JobState.JOB_STATE_QUEUED
+
+
+def test_get_batch_prediction_job_from_dict():
+    test_get_batch_prediction_job(request_type=dict)
+
+
+def test_get_batch_prediction_job_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_batch_prediction_job),
+            '__call__') as call:
+        client.get_batch_prediction_job()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.GetBatchPredictionJobRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetBatchPredictionJobRequest):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_batch_prediction_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(batch_prediction_job.BatchPredictionJob(
+            name='name_value',
+            display_name='display_name_value',
+            model='model_value',
+            generate_explanation=True,
+            state=job_state.JobState.JOB_STATE_QUEUED,
+        ))
+        response = await client.get_batch_prediction_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.GetBatchPredictionJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, batch_prediction_job.BatchPredictionJob)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.model == 'model_value'
+    assert response.generate_explanation is True
+    assert response.state == job_state.JobState.JOB_STATE_QUEUED
+
+
+@pytest.mark.asyncio
+async def test_get_batch_prediction_job_async_from_dict():
+    await test_get_batch_prediction_job_async(request_type=dict)
+
+
+def test_get_batch_prediction_job_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.GetBatchPredictionJobRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_batch_prediction_job),
+            '__call__') as call:
+        call.return_value = batch_prediction_job.BatchPredictionJob()
+        client.get_batch_prediction_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_batch_prediction_job_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.GetBatchPredictionJobRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_batch_prediction_job),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(batch_prediction_job.BatchPredictionJob())
+        await client.get_batch_prediction_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_get_batch_prediction_job_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_batch_prediction_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = batch_prediction_job.BatchPredictionJob()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_batch_prediction_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_get_batch_prediction_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_batch_prediction_job(
+            job_service.GetBatchPredictionJobRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_batch_prediction_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_batch_prediction_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(batch_prediction_job.BatchPredictionJob())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_batch_prediction_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_batch_prediction_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_batch_prediction_job(
+            job_service.GetBatchPredictionJobRequest(),
+            name='name_value',
+        )
+
+
+def test_list_batch_prediction_jobs(transport: str = 'grpc', request_type=job_service.ListBatchPredictionJobsRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_batch_prediction_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = job_service.ListBatchPredictionJobsResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_batch_prediction_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.ListBatchPredictionJobsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListBatchPredictionJobsPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_batch_prediction_jobs_from_dict():
+    test_list_batch_prediction_jobs(request_type=dict)
+
+
+def test_list_batch_prediction_jobs_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_batch_prediction_jobs),
+            '__call__') as call:
+        client.list_batch_prediction_jobs()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.ListBatchPredictionJobsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListBatchPredictionJobsRequest):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_batch_prediction_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListBatchPredictionJobsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_batch_prediction_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.ListBatchPredictionJobsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListBatchPredictionJobsAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_async_from_dict():
+    await test_list_batch_prediction_jobs_async(request_type=dict)
+
+
+def test_list_batch_prediction_jobs_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.ListBatchPredictionJobsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_batch_prediction_jobs),
+            '__call__') as call:
+        call.return_value = job_service.ListBatchPredictionJobsResponse()
+        client.list_batch_prediction_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.ListBatchPredictionJobsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_batch_prediction_jobs),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListBatchPredictionJobsResponse())
+        await client.list_batch_prediction_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_list_batch_prediction_jobs_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_batch_prediction_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = job_service.ListBatchPredictionJobsResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_batch_prediction_jobs(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+def test_list_batch_prediction_jobs_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_batch_prediction_jobs(
+            job_service.ListBatchPredictionJobsRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_batch_prediction_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListBatchPredictionJobsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_batch_prediction_jobs(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_batch_prediction_jobs(
+            job_service.ListBatchPredictionJobsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_batch_prediction_jobs_pager():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_batch_prediction_jobs),
+            '__call__') as call:
+        # Set the response to a series of pages.
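+        # Four pages totalling six jobs (3 + 0 + 1 + 2); the trailing
+        # RuntimeError is a sentinel that would surface if the pager asked for
+        # a page beyond the last one.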
+        call.side_effect = (
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_batch_prediction_jobs(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, batch_prediction_job.BatchPredictionJob)
+                   for i in results)
+
+def test_list_batch_prediction_jobs_pages():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_batch_prediction_jobs),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_batch_prediction_jobs(request={}).pages)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_async_pager():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_batch_prediction_jobs),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
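+        # Same four-page fixture as the sync pager test; new_callable=mock.AsyncMock
+        # makes each side_effect response awaitable for the async pager.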
+        call.side_effect = (
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_batch_prediction_jobs(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, batch_prediction_job.BatchPredictionJob)
+                   for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_batch_prediction_jobs_async_pages():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_batch_prediction_jobs),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListBatchPredictionJobsResponse(
+                batch_prediction_jobs=[
+                    batch_prediction_job.BatchPredictionJob(),
+                    batch_prediction_job.BatchPredictionJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_batch_prediction_jobs(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+def test_delete_batch_prediction_job(transport: str = 'grpc', request_type=job_service.DeleteBatchPredictionJobRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_batch_prediction_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.delete_batch_prediction_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.DeleteBatchPredictionJobRequest()
+
+    # Establish that the response is the type that we expect.
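+    # Delete is a long-running operation: the client wraps the returned
+    # operations_pb2.Operation in a google.api_core future, hence the
+    # future.Future check below.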
+    assert isinstance(response, future.Future)
+
+
+def test_delete_batch_prediction_job_from_dict():
+    test_delete_batch_prediction_job(request_type=dict)
+
+
+def test_delete_batch_prediction_job_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_batch_prediction_job),
+            '__call__') as call:
+        client.delete_batch_prediction_job()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.DeleteBatchPredictionJobRequest()
+
+
+@pytest.mark.asyncio
+async def test_delete_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteBatchPredictionJobRequest):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_batch_prediction_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        response = await client.delete_batch_prediction_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.DeleteBatchPredictionJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_delete_batch_prediction_job_async_from_dict():
+    await test_delete_batch_prediction_job_async(request_type=dict)
+
+
+def test_delete_batch_prediction_job_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.DeleteBatchPredictionJobRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_batch_prediction_job),
+            '__call__') as call:
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        client.delete_batch_prediction_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_delete_batch_prediction_job_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.DeleteBatchPredictionJobRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_batch_prediction_job),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
+        await client.delete_batch_prediction_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_delete_batch_prediction_job_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_batch_prediction_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_batch_prediction_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_delete_batch_prediction_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_batch_prediction_job(
+            job_service.DeleteBatchPredictionJobRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_batch_prediction_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_batch_prediction_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_batch_prediction_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_batch_prediction_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.delete_batch_prediction_job(
+            job_service.DeleteBatchPredictionJobRequest(),
+            name='name_value',
+        )
+
+
+def test_cancel_batch_prediction_job(transport: str = 'grpc', request_type=job_service.CancelBatchPredictionJobRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_batch_prediction_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.cancel_batch_prediction_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.CancelBatchPredictionJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+
+def test_cancel_batch_prediction_job_from_dict():
+    test_cancel_batch_prediction_job(request_type=dict)
+
+
+def test_cancel_batch_prediction_job_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_batch_prediction_job),
+            '__call__') as call:
+        client.cancel_batch_prediction_job()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.CancelBatchPredictionJobRequest()
+
+
+@pytest.mark.asyncio
+async def test_cancel_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelBatchPredictionJobRequest):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_batch_prediction_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        response = await client.cancel_batch_prediction_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.CancelBatchPredictionJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+
+@pytest.mark.asyncio
+async def test_cancel_batch_prediction_job_async_from_dict():
+    await test_cancel_batch_prediction_job_async(request_type=dict)
+
+
+def test_cancel_batch_prediction_job_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
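+    # The client is expected to copy request.name into the
+    # x-goog-request-params metadata entry asserted at the end of this test.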
+    request = job_service.CancelBatchPredictionJobRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_batch_prediction_job),
+            '__call__') as call:
+        call.return_value = None
+        client.cancel_batch_prediction_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_cancel_batch_prediction_job_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.CancelBatchPredictionJobRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_batch_prediction_job),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        await client.cancel_batch_prediction_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_cancel_batch_prediction_job_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_batch_prediction_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.cancel_batch_prediction_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_cancel_batch_prediction_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.cancel_batch_prediction_job(
+            job_service.CancelBatchPredictionJobRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_cancel_batch_prediction_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_batch_prediction_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
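+        # The flattened kwargs below are packed into a
+        # CancelBatchPredictionJobRequest by the client; the assertions that
+        # follow inspect that request via args[0].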
+        response = await client.cancel_batch_prediction_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_cancel_batch_prediction_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.cancel_batch_prediction_job(
+            job_service.CancelBatchPredictionJobRequest(),
+            name='name_value',
+        )
+
+
+def test_create_model_deployment_monitoring_job(transport: str = 'grpc', request_type=job_service.CreateModelDeploymentMonitoringJobRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_model_deployment_monitoring_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+            name='name_value',
+            display_name='display_name_value',
+            endpoint='endpoint_value',
+            state=job_state.JobState.JOB_STATE_QUEUED,
+            schedule_state=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING,
+            predict_instance_schema_uri='predict_instance_schema_uri_value',
+            analysis_instance_schema_uri='analysis_instance_schema_uri_value',
+        )
+        response = client.create_model_deployment_monitoring_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.endpoint == 'endpoint_value'
+    assert response.state == job_state.JobState.JOB_STATE_QUEUED
+    assert response.schedule_state == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING
+    assert response.predict_instance_schema_uri == 'predict_instance_schema_uri_value'
+    assert response.analysis_instance_schema_uri == 'analysis_instance_schema_uri_value'
+
+
+def test_create_model_deployment_monitoring_job_from_dict():
+    test_create_model_deployment_monitoring_job(request_type=dict)
+
+
+def test_create_model_deployment_monitoring_job_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
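+    # With no request object and no flattened fields, the client should send a
+    # default CreateModelDeploymentMonitoringJobRequest(), as asserted below.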
+    with mock.patch.object(
+            type(client.transport.create_model_deployment_monitoring_job),
+            '__call__') as call:
+        client.create_model_deployment_monitoring_job()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest()
+
+
+@pytest.mark.asyncio
+async def test_create_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateModelDeploymentMonitoringJobRequest):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_model_deployment_monitoring_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+            name='name_value',
+            display_name='display_name_value',
+            endpoint='endpoint_value',
+            state=job_state.JobState.JOB_STATE_QUEUED,
+            schedule_state=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING,
+            predict_instance_schema_uri='predict_instance_schema_uri_value',
+            analysis_instance_schema_uri='analysis_instance_schema_uri_value',
+        ))
+        response = await client.create_model_deployment_monitoring_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.endpoint == 'endpoint_value'
+    assert response.state == job_state.JobState.JOB_STATE_QUEUED
+    assert response.schedule_state == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING
+    assert response.predict_instance_schema_uri == 'predict_instance_schema_uri_value'
+    assert response.analysis_instance_schema_uri == 'analysis_instance_schema_uri_value'
+
+
+@pytest.mark.asyncio
+async def test_create_model_deployment_monitoring_job_async_from_dict():
+    await test_create_model_deployment_monitoring_job_async(request_type=dict)
+
+
+def test_create_model_deployment_monitoring_job_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.CreateModelDeploymentMonitoringJobRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_model_deployment_monitoring_job),
+            '__call__') as call:
+        call.return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
+        client.create_model_deployment_monitoring_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_create_model_deployment_monitoring_job_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.CreateModelDeploymentMonitoringJobRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_model_deployment_monitoring_job),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob())
+        await client.create_model_deployment_monitoring_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_create_model_deployment_monitoring_job_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_model_deployment_monitoring_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.create_model_deployment_monitoring_job(
+            parent='parent_value',
+            model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].model_deployment_monitoring_job == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value')
+
+
+def test_create_model_deployment_monitoring_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_model_deployment_monitoring_job(
+            job_service.CreateModelDeploymentMonitoringJobRequest(),
+            parent='parent_value',
+            model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'),
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_model_deployment_monitoring_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_model_deployment_monitoring_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
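+        # The async transport awaits the mocked call, so the designated return
+        # value must be awaitable; FakeUnaryUnaryCall provides that wrapper.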
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_model_deployment_monitoring_job(
+            parent='parent_value',
+            model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].model_deployment_monitoring_job == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value')
+
+
+@pytest.mark.asyncio
+async def test_create_model_deployment_monitoring_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.create_model_deployment_monitoring_job(
+            job_service.CreateModelDeploymentMonitoringJobRequest(),
+            parent='parent_value',
+            model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'),
+        )
+
+
+def test_search_model_deployment_monitoring_stats_anomalies(transport: str = 'grpc', request_type=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.search_model_deployment_monitoring_stats_anomalies(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_search_model_deployment_monitoring_stats_anomalies_from_dict():
+    test_search_model_deployment_monitoring_stats_anomalies(request_type=dict)
+
+
+def test_search_model_deployment_monitoring_stats_anomalies_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+            '__call__') as call:
+        client.search_model_deployment_monitoring_stats_anomalies()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
+
+
+@pytest.mark.asyncio
+async def test_search_model_deployment_monitoring_stats_anomalies_async(transport: str = 'grpc_asyncio', request_type=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.search_model_deployment_monitoring_stats_anomalies(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_search_model_deployment_monitoring_stats_anomalies_async_from_dict():
+    await test_search_model_deployment_monitoring_stats_anomalies_async(request_type=dict)
+
+
+def test_search_model_deployment_monitoring_stats_anomalies_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
+
+    request.model_deployment_monitoring_job = 'model_deployment_monitoring_job/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+            '__call__') as call:
+        call.return_value = job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()
+        client.search_model_deployment_monitoring_stats_anomalies(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'model_deployment_monitoring_job=model_deployment_monitoring_job/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_search_model_deployment_monitoring_stats_anomalies_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
+
+    request.model_deployment_monitoring_job = 'model_deployment_monitoring_job/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse())
+        await client.search_model_deployment_monitoring_stats_anomalies(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'model_deployment_monitoring_job=model_deployment_monitoring_job/value',
+    ) in kw['metadata']
+
+
+def test_search_model_deployment_monitoring_stats_anomalies_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.search_model_deployment_monitoring_stats_anomalies(
+            model_deployment_monitoring_job='model_deployment_monitoring_job_value',
+            deployed_model_id='deployed_model_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].model_deployment_monitoring_job == 'model_deployment_monitoring_job_value'
+        assert args[0].deployed_model_id == 'deployed_model_id_value'
+
+
+def test_search_model_deployment_monitoring_stats_anomalies_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.search_model_deployment_monitoring_stats_anomalies(
+            job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(),
+            model_deployment_monitoring_job='model_deployment_monitoring_job_value',
+            deployed_model_id='deployed_model_id_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_search_model_deployment_monitoring_stats_anomalies_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.search_model_deployment_monitoring_stats_anomalies(
+            model_deployment_monitoring_job='model_deployment_monitoring_job_value',
+            deployed_model_id='deployed_model_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].model_deployment_monitoring_job == 'model_deployment_monitoring_job_value'
+        assert args[0].deployed_model_id == 'deployed_model_id_value'
+
+
+@pytest.mark.asyncio
+async def test_search_model_deployment_monitoring_stats_anomalies_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.search_model_deployment_monitoring_stats_anomalies(
+            job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(),
+            model_deployment_monitoring_job='model_deployment_monitoring_job_value',
+            deployed_model_id='deployed_model_id_value',
+        )
+
+
+def test_search_model_deployment_monitoring_stats_anomalies_pager():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+                monitoring_stats=[
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+                monitoring_stats=[],
+                next_page_token='def',
+            ),
+            job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+                monitoring_stats=[
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+                monitoring_stats=[
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('model_deployment_monitoring_job', ''),
+            )),
+        )
+        pager = client.search_model_deployment_monitoring_stats_anomalies(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies)
+                   for i in results)
+
+def test_search_model_deployment_monitoring_stats_anomalies_pages():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+            '__call__') as call:
+        # Set the response to a series of pages.
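+        # (With an iterable side_effect, each call to the mocked stub returns
+        # the next item in sequence; the trailing RuntimeError would only be
+        # raised if the pager asked for a page beyond the last one.)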
+        call.side_effect = (
+            job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+                monitoring_stats=[
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+                monitoring_stats=[],
+                next_page_token='def',
+            ),
+            job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+                monitoring_stats=[
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+                monitoring_stats=[
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.search_model_deployment_monitoring_stats_anomalies(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_search_model_deployment_monitoring_stats_anomalies_async_pager():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+                monitoring_stats=[
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+                monitoring_stats=[],
+                next_page_token='def',
+            ),
+            job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+                monitoring_stats=[
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
+                monitoring_stats=[
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                    gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.search_model_deployment_monitoring_stats_anomalies(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies)
+                   for i in responses)
+
+@pytest.mark.asyncio
+async def test_search_model_deployment_monitoring_stats_anomalies_async_pages():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_model_deployment_monitoring_stats_anomalies),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+ call.side_effect = ( + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[ + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + ], + next_page_token='abc', + ), + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[], + next_page_token='def', + ), + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[ + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + ], + next_page_token='ghi', + ), + job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( + monitoring_stats=[ + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.search_model_deployment_monitoring_stats_anomalies(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +def test_get_model_deployment_monitoring_job(transport: str = 'grpc', request_type=job_service.GetModelDeploymentMonitoringJobRequest): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob( + name='name_value', + display_name='display_name_value', + endpoint='endpoint_value', + state=job_state.JobState.JOB_STATE_QUEUED, + schedule_state=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, + predict_instance_schema_uri='predict_instance_schema_uri_value', + analysis_instance_schema_uri='analysis_instance_schema_uri_value', + ) + response = client.get_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.GetModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, model_deployment_monitoring_job.ModelDeploymentMonitoringJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.endpoint == 'endpoint_value' + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.schedule_state == model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING + assert response.predict_instance_schema_uri == 'predict_instance_schema_uri_value' + assert response.analysis_instance_schema_uri == 'analysis_instance_schema_uri_value' + + +def test_get_model_deployment_monitoring_job_from_dict(): + test_get_model_deployment_monitoring_job(request_type=dict) + + +def test_get_model_deployment_monitoring_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model_deployment_monitoring_job),
+            '__call__') as call:
+        client.get_model_deployment_monitoring_job()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.GetModelDeploymentMonitoringJobRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetModelDeploymentMonitoringJobRequest):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model_deployment_monitoring_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
+            name='name_value',
+            display_name='display_name_value',
+            endpoint='endpoint_value',
+            state=job_state.JobState.JOB_STATE_QUEUED,
+            schedule_state=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING,
+            predict_instance_schema_uri='predict_instance_schema_uri_value',
+            analysis_instance_schema_uri='analysis_instance_schema_uri_value',
+        ))
+        response = await client.get_model_deployment_monitoring_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.GetModelDeploymentMonitoringJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, model_deployment_monitoring_job.ModelDeploymentMonitoringJob)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.endpoint == 'endpoint_value'
+    assert response.state == job_state.JobState.JOB_STATE_QUEUED
+    assert response.schedule_state == model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING
+    assert response.predict_instance_schema_uri == 'predict_instance_schema_uri_value'
+    assert response.analysis_instance_schema_uri == 'analysis_instance_schema_uri_value'
+
+
+@pytest.mark.asyncio
+async def test_get_model_deployment_monitoring_job_async_from_dict():
+    await test_get_model_deployment_monitoring_job_async(request_type=dict)
+
+
+def test_get_model_deployment_monitoring_job_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.GetModelDeploymentMonitoringJobRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.get_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob() + client.get_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_model_deployment_monitoring_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.GetModelDeploymentMonitoringJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_deployment_monitoring_job.ModelDeploymentMonitoringJob()) + await client.get_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_get_model_deployment_monitoring_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_model_deployment_monitoring_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_get_model_deployment_monitoring_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_model_deployment_monitoring_job( + job_service.GetModelDeploymentMonitoringJobRequest(), + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_get_model_deployment_monitoring_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. 
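+        # (FakeUnaryUnaryCall is the api_core test helper that wraps a
+        # response message in an already-completed awaitable call object,
+        # mimicking what a real async gRPC stub returns.)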
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_deployment_monitoring_job.ModelDeploymentMonitoringJob())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_model_deployment_monitoring_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_model_deployment_monitoring_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_model_deployment_monitoring_job(
+            job_service.GetModelDeploymentMonitoringJobRequest(),
+            name='name_value',
+        )
+
+
+def test_list_model_deployment_monitoring_jobs(transport: str = 'grpc', request_type=job_service.ListModelDeploymentMonitoringJobsRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_deployment_monitoring_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_model_deployment_monitoring_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.ListModelDeploymentMonitoringJobsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListModelDeploymentMonitoringJobsPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_model_deployment_monitoring_jobs_from_dict():
+    test_list_model_deployment_monitoring_jobs(request_type=dict)
+
+
+def test_list_model_deployment_monitoring_jobs_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_deployment_monitoring_jobs),
+            '__call__') as call:
+        client.list_model_deployment_monitoring_jobs()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.ListModelDeploymentMonitoringJobsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_model_deployment_monitoring_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListModelDeploymentMonitoringJobsRequest):
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_deployment_monitoring_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListModelDeploymentMonitoringJobsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_model_deployment_monitoring_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.ListModelDeploymentMonitoringJobsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListModelDeploymentMonitoringJobsAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_model_deployment_monitoring_jobs_async_from_dict():
+    await test_list_model_deployment_monitoring_jobs_async(request_type=dict)
+
+
+def test_list_model_deployment_monitoring_jobs_field_headers():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.ListModelDeploymentMonitoringJobsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_deployment_monitoring_jobs),
+            '__call__') as call:
+        call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse()
+        client.list_model_deployment_monitoring_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_model_deployment_monitoring_jobs_field_headers_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = job_service.ListModelDeploymentMonitoringJobsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_deployment_monitoring_jobs),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListModelDeploymentMonitoringJobsResponse())
+        await client.list_model_deployment_monitoring_jobs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_list_model_deployment_monitoring_jobs_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_deployment_monitoring_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_model_deployment_monitoring_jobs(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+def test_list_model_deployment_monitoring_jobs_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_model_deployment_monitoring_jobs(
+            job_service.ListModelDeploymentMonitoringJobsRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_model_deployment_monitoring_jobs_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_deployment_monitoring_jobs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListModelDeploymentMonitoringJobsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_model_deployment_monitoring_jobs(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_model_deployment_monitoring_jobs_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_model_deployment_monitoring_jobs(
+            job_service.ListModelDeploymentMonitoringJobsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_model_deployment_monitoring_jobs_pager():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_deployment_monitoring_jobs),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListModelDeploymentMonitoringJobsResponse(
+                model_deployment_monitoring_jobs=[
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListModelDeploymentMonitoringJobsResponse(
+                model_deployment_monitoring_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListModelDeploymentMonitoringJobsResponse(
+                model_deployment_monitoring_jobs=[
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListModelDeploymentMonitoringJobsResponse(
+                model_deployment_monitoring_jobs=[
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_model_deployment_monitoring_jobs(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, model_deployment_monitoring_job.ModelDeploymentMonitoringJob)
+                   for i in results)
+
+def test_list_model_deployment_monitoring_jobs_pages():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_deployment_monitoring_jobs),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListModelDeploymentMonitoringJobsResponse(
+                model_deployment_monitoring_jobs=[
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListModelDeploymentMonitoringJobsResponse(
+                model_deployment_monitoring_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListModelDeploymentMonitoringJobsResponse(
+                model_deployment_monitoring_jobs=[
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListModelDeploymentMonitoringJobsResponse(
+                model_deployment_monitoring_jobs=[
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_model_deployment_monitoring_jobs(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_model_deployment_monitoring_jobs_async_pager():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_deployment_monitoring_jobs),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            job_service.ListModelDeploymentMonitoringJobsResponse(
+                model_deployment_monitoring_jobs=[
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                ],
+                next_page_token='abc',
+            ),
+            job_service.ListModelDeploymentMonitoringJobsResponse(
+                model_deployment_monitoring_jobs=[],
+                next_page_token='def',
+            ),
+            job_service.ListModelDeploymentMonitoringJobsResponse(
+                model_deployment_monitoring_jobs=[
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                ],
+                next_page_token='ghi',
+            ),
+            job_service.ListModelDeploymentMonitoringJobsResponse(
+                model_deployment_monitoring_jobs=[
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_model_deployment_monitoring_jobs(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, model_deployment_monitoring_job.ModelDeploymentMonitoringJob)
+                   for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_model_deployment_monitoring_jobs_async_pages():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_deployment_monitoring_jobs),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+ call.side_effect = ( + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + next_page_token='abc', + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[], + next_page_token='def', + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + next_page_token='ghi', + ), + job_service.ListModelDeploymentMonitoringJobsResponse( + model_deployment_monitoring_jobs=[ + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_model_deployment_monitoring_jobs(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +def test_update_model_deployment_monitoring_job(transport: str = 'grpc', request_type=job_service.UpdateModelDeploymentMonitoringJobRequest): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.update_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.UpdateModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_model_deployment_monitoring_job_from_dict(): + test_update_model_deployment_monitoring_job(request_type=dict) + + +def test_update_model_deployment_monitoring_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_deployment_monitoring_job), + '__call__') as call: + client.update_model_deployment_monitoring_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.UpdateModelDeploymentMonitoringJobRequest() + + +@pytest.mark.asyncio +async def test_update_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.UpdateModelDeploymentMonitoringJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.update_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.UpdateModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_model_deployment_monitoring_job_async_from_dict(): + await test_update_model_deployment_monitoring_job_async(request_type=dict) + + +def test_update_model_deployment_monitoring_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.UpdateModelDeploymentMonitoringJobRequest() + + request.model_deployment_monitoring_job.name = 'model_deployment_monitoring_job.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.update_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'model_deployment_monitoring_job.name=model_deployment_monitoring_job.name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_model_deployment_monitoring_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.UpdateModelDeploymentMonitoringJobRequest() + + request.model_deployment_monitoring_job.name = 'model_deployment_monitoring_job.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.update_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'model_deployment_monitoring_job.name=model_deployment_monitoring_job.name/value', + ) in kw['metadata'] + + +def test_update_model_deployment_monitoring_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.update_model_deployment_monitoring_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.update_model_deployment_monitoring_job(
+            model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].model_deployment_monitoring_job == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value')
+        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+def test_update_model_deployment_monitoring_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.update_model_deployment_monitoring_job(
+            job_service.UpdateModelDeploymentMonitoringJobRequest(),
+            model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+
+@pytest.mark.asyncio
+async def test_update_model_deployment_monitoring_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_model_deployment_monitoring_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.update_model_deployment_monitoring_job(
+            model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].model_deployment_monitoring_job == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value')
+        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+@pytest.mark.asyncio
+async def test_update_model_deployment_monitoring_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.update_model_deployment_monitoring_job( + job_service.UpdateModelDeploymentMonitoringJobRequest(), + model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +def test_delete_model_deployment_monitoring_job(transport: str = 'grpc', request_type=job_service.DeleteModelDeploymentMonitoringJobRequest): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_model_deployment_monitoring_job_from_dict(): + test_delete_model_deployment_monitoring_job(request_type=dict) + + +def test_delete_model_deployment_monitoring_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_deployment_monitoring_job), + '__call__') as call: + client.delete_model_deployment_monitoring_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteModelDeploymentMonitoringJobRequest() + + +@pytest.mark.asyncio +async def test_delete_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteModelDeploymentMonitoringJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.DeleteModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. 
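+    # (Long-running methods return the raw Operation wrapped in an api_core
+    # operation future; calling its result() would poll until completion.)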
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_model_deployment_monitoring_job_async_from_dict(): + await test_delete_model_deployment_monitoring_job_async(request_type=dict) + + +def test_delete_model_deployment_monitoring_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteModelDeploymentMonitoringJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_model_deployment_monitoring_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.DeleteModelDeploymentMonitoringJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_delete_model_deployment_monitoring_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_model_deployment_monitoring_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_delete_model_deployment_monitoring_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.delete_model_deployment_monitoring_job(
+            job_service.DeleteModelDeploymentMonitoringJobRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_model_deployment_monitoring_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_model_deployment_monitoring_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_model_deployment_monitoring_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_model_deployment_monitoring_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.delete_model_deployment_monitoring_job(
+            job_service.DeleteModelDeploymentMonitoringJobRequest(),
+            name='name_value',
+        )
+
+
+def test_pause_model_deployment_monitoring_job(transport: str = 'grpc', request_type=job_service.PauseModelDeploymentMonitoringJobRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.pause_model_deployment_monitoring_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.pause_model_deployment_monitoring_job(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == job_service.PauseModelDeploymentMonitoringJobRequest()
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+
+def test_pause_model_deployment_monitoring_job_from_dict():
+    test_pause_model_deployment_monitoring_job(request_type=dict)
+
+
+def test_pause_model_deployment_monitoring_job_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
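+    # (Patching type(...).__call__ swaps out the gRPC multicallable itself,
+    # so no channel I/O happens and the request the client built can be
+    # inspected through call.mock_calls.)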
+ with mock.patch.object( + type(client.transport.pause_model_deployment_monitoring_job), + '__call__') as call: + client.pause_model_deployment_monitoring_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.PauseModelDeploymentMonitoringJobRequest() + + +@pytest.mark.asyncio +async def test_pause_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.PauseModelDeploymentMonitoringJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.pause_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.pause_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.PauseModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_pause_model_deployment_monitoring_job_async_from_dict(): + await test_pause_model_deployment_monitoring_job_async(request_type=dict) + + +def test_pause_model_deployment_monitoring_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.PauseModelDeploymentMonitoringJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.pause_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = None + client.pause_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_pause_model_deployment_monitoring_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.PauseModelDeploymentMonitoringJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.pause_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.pause_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_pause_model_deployment_monitoring_job_flattened():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.pause_model_deployment_monitoring_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.pause_model_deployment_monitoring_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_pause_model_deployment_monitoring_job_flattened_error():
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.pause_model_deployment_monitoring_job(
+            job_service.PauseModelDeploymentMonitoringJobRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_pause_model_deployment_monitoring_job_flattened_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.pause_model_deployment_monitoring_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.pause_model_deployment_monitoring_job(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_pause_model_deployment_monitoring_job_flattened_error_async():
+    client = JobServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.pause_model_deployment_monitoring_job(
+            job_service.PauseModelDeploymentMonitoringJobRequest(),
+            name='name_value',
+        )
+
+
+def test_resume_model_deployment_monitoring_job(transport: str = 'grpc', request_type=job_service.ResumeModelDeploymentMonitoringJobRequest):
+    client = JobServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.resume_model_deployment_monitoring_job),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
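+        # (Pause and Resume return google.protobuf.Empty on the wire; the
+        # generated client surfaces that as None, hence the bare None here.)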
+ call.return_value = None + response = client.resume_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ResumeModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_resume_model_deployment_monitoring_job_from_dict(): + test_resume_model_deployment_monitoring_job(request_type=dict) + + +def test_resume_model_deployment_monitoring_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resume_model_deployment_monitoring_job), + '__call__') as call: + client.resume_model_deployment_monitoring_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ResumeModelDeploymentMonitoringJobRequest() + + +@pytest.mark.asyncio +async def test_resume_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.ResumeModelDeploymentMonitoringJobRequest): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resume_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.resume_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == job_service.ResumeModelDeploymentMonitoringJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_resume_model_deployment_monitoring_job_async_from_dict(): + await test_resume_model_deployment_monitoring_job_async(request_type=dict) + + +def test_resume_model_deployment_monitoring_job_field_headers(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.ResumeModelDeploymentMonitoringJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resume_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = None + client.resume_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_resume_model_deployment_monitoring_job_field_headers_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = job_service.ResumeModelDeploymentMonitoringJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resume_model_deployment_monitoring_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.resume_model_deployment_monitoring_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_resume_model_deployment_monitoring_job_flattened(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resume_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.resume_model_deployment_monitoring_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_resume_model_deployment_monitoring_job_flattened_error(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.resume_model_deployment_monitoring_job( + job_service.ResumeModelDeploymentMonitoringJobRequest(), + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_resume_model_deployment_monitoring_job_flattened_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resume_model_deployment_monitoring_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.resume_model_deployment_monitoring_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
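+ # The flattened `name` kwarg should have been coerced into the request
+ # message, so inspect the captured request's fields directly.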
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_resume_model_deployment_monitoring_job_flattened_error_async(): + client = JobServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.resume_model_deployment_monitoring_job( + job_service.ResumeModelDeploymentMonitoringJobRequest(), + name='name_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.JobServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.JobServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = JobServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.JobServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = JobServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.JobServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = JobServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.JobServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.JobServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.JobServiceGrpcTransport, + transports.JobServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.JobServiceGrpcTransport, + ) + +def test_job_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.JobServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_job_service_base_transport(): + # Instantiate the base transport. 
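+ # The base transport is abstract; patching __init__ lets it be
+ # instantiated without real credentials so each stub can be probed.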
+ with mock.patch('google.cloud.aiplatform_v1beta1.services.job_service.transports.JobServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.JobServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'create_custom_job', + 'get_custom_job', + 'list_custom_jobs', + 'delete_custom_job', + 'cancel_custom_job', + 'create_data_labeling_job', + 'get_data_labeling_job', + 'list_data_labeling_jobs', + 'delete_data_labeling_job', + 'cancel_data_labeling_job', + 'create_hyperparameter_tuning_job', + 'get_hyperparameter_tuning_job', + 'list_hyperparameter_tuning_jobs', + 'delete_hyperparameter_tuning_job', + 'cancel_hyperparameter_tuning_job', + 'create_batch_prediction_job', + 'get_batch_prediction_job', + 'list_batch_prediction_jobs', + 'delete_batch_prediction_job', + 'cancel_batch_prediction_job', + 'create_model_deployment_monitoring_job', + 'search_model_deployment_monitoring_stats_anomalies', + 'get_model_deployment_monitoring_job', + 'list_model_deployment_monitoring_jobs', + 'update_model_deployment_monitoring_job', + 'delete_model_deployment_monitoring_job', + 'pause_model_deployment_monitoring_job', + 'resume_model_deployment_monitoring_job', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +@requires_google_auth_gte_1_25_0 +def test_job_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.JobServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_job_service_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.JobServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + ), + quota_project_id="octopus", + ) + + +def test_job_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
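+ # google.auth.default() is the ADC entry point; patching it verifies
+ # the transport falls back to ADC when no credentials are supplied.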
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.JobServiceTransport() + adc.assert_called_once() + + +@requires_google_auth_gte_1_25_0 +def test_job_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + JobServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_job_service_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + JobServiceClient() + adc.assert_called_once_with( + scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.JobServiceGrpcTransport, + transports.JobServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_job_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.JobServiceGrpcTransport, + transports.JobServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_job_service_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") + adc.assert_called_once_with(scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.JobServiceGrpcTransport, grpc_helpers), + (transports.JobServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_job_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
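+ # Parametrized over the sync and async grpc_helpers modules so both
+ # transports are checked for forwarding identical channel options.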
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport]) +def test_job_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +def test_job_service_host_no_port(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + ) + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +def test_job_service_host_with_port(): + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + ) + assert client.transport._host == 'aiplatform.googleapis.com:8000' + +def test_job_service_grpc_transport_channel(): + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.JobServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_job_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.JobServiceGrpcAsyncIOTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport])
+def test_job_service_transport_channel_mtls_with_client_cert_source(
+ transport_class
+):
+ with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
+ with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+ mock_ssl_cred = mock.Mock()
+ grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+
+ cred = ga_credentials.AnonymousCredentials()
+ with pytest.warns(DeprecationWarning):
+ with mock.patch.object(google.auth, 'default') as adc:
+ adc.return_value = (cred, None)
+ transport = transport_class(
+ host="squid.clam.whelk",
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=client_cert_source_callback,
+ )
+ adc.assert_called_once()
+
+ grpc_ssl_channel_cred.assert_called_once_with(
+ certificate_chain=b"cert bytes", private_key=b"key bytes"
+ )
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+ assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport])
+def test_job_service_transport_channel_mtls_with_adc(
+ transport_class
+):
+ mock_ssl_cred = mock.Mock()
+ with mock.patch.multiple(
+ "google.auth.transport.grpc.SslCredentials",
+ __init__=mock.Mock(return_value=None),
+ ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+ ):
+ with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+ mock_cred = mock.Mock()
+
+ with pytest.warns(DeprecationWarning):
+ transport = transport_class(
+ host="squid.clam.whelk",
+ credentials=mock_cred,
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=None,
+ )
+
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=mock_cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_job_service_grpc_lro_client():
+ client = JobServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+ transport = client.transport
+
+ # Ensure that we have an api-core operations client.
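+ # Methods that return long-running operations rely on this
+ # operations client to poll the resulting Operation.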
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.OperationsClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_job_service_grpc_lro_async_client():
+ client = JobServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc_asyncio',
+ )
+ transport = client.transport
+
+ # Ensure that we have an api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.OperationsAsyncClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_batch_prediction_job_path():
+ project = "squid"
+ location = "clam"
+ batch_prediction_job = "whelk"
+ expected = "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format(project=project, location=location, batch_prediction_job=batch_prediction_job, )
+ actual = JobServiceClient.batch_prediction_job_path(project, location, batch_prediction_job)
+ assert expected == actual
+
+
+def test_parse_batch_prediction_job_path():
+ expected = {
+ "project": "octopus",
+ "location": "oyster",
+ "batch_prediction_job": "nudibranch",
+ }
+ path = JobServiceClient.batch_prediction_job_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = JobServiceClient.parse_batch_prediction_job_path(path)
+ assert expected == actual
+
+def test_custom_job_path():
+ project = "cuttlefish"
+ location = "mussel"
+ custom_job = "winkle"
+ expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, )
+ actual = JobServiceClient.custom_job_path(project, location, custom_job)
+ assert expected == actual
+
+
+def test_parse_custom_job_path():
+ expected = {
+ "project": "nautilus",
+ "location": "scallop",
+ "custom_job": "abalone",
+ }
+ path = JobServiceClient.custom_job_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = JobServiceClient.parse_custom_job_path(path)
+ assert expected == actual
+
+def test_data_labeling_job_path():
+ project = "squid"
+ location = "clam"
+ data_labeling_job = "whelk"
+ expected = "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}".format(project=project, location=location, data_labeling_job=data_labeling_job, )
+ actual = JobServiceClient.data_labeling_job_path(project, location, data_labeling_job)
+ assert expected == actual
+
+
+def test_parse_data_labeling_job_path():
+ expected = {
+ "project": "octopus",
+ "location": "oyster",
+ "data_labeling_job": "nudibranch",
+ }
+ path = JobServiceClient.data_labeling_job_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = JobServiceClient.parse_data_labeling_job_path(path)
+ assert expected == actual
+
+def test_dataset_path():
+ project = "cuttlefish"
+ location = "mussel"
+ dataset = "winkle"
+ expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, )
+ actual = JobServiceClient.dataset_path(project, location, dataset)
+ assert expected == actual
+
+
+def test_parse_dataset_path():
+ expected = {
+ "project": "nautilus",
+ "location": "scallop",
+ "dataset": "abalone",
+ }
+ path = JobServiceClient.dataset_path(**expected)
+
+ # Check that the path construction is reversible.
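+ # Each parse_* helper inverts its *_path template, returning the
+ # captured path segments as a dict.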
+ actual = JobServiceClient.parse_dataset_path(path) + assert expected == actual + +def test_endpoint_path(): + project = "squid" + location = "clam" + endpoint = "whelk" + expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + actual = JobServiceClient.endpoint_path(project, location, endpoint) + assert expected == actual + + +def test_parse_endpoint_path(): + expected = { + "project": "octopus", + "location": "oyster", + "endpoint": "nudibranch", + } + path = JobServiceClient.endpoint_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_endpoint_path(path) + assert expected == actual + +def test_hyperparameter_tuning_job_path(): + project = "cuttlefish" + location = "mussel" + hyperparameter_tuning_job = "winkle" + expected = "projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}".format(project=project, location=location, hyperparameter_tuning_job=hyperparameter_tuning_job, ) + actual = JobServiceClient.hyperparameter_tuning_job_path(project, location, hyperparameter_tuning_job) + assert expected == actual + + +def test_parse_hyperparameter_tuning_job_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "hyperparameter_tuning_job": "abalone", + } + path = JobServiceClient.hyperparameter_tuning_job_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_hyperparameter_tuning_job_path(path) + assert expected == actual + +def test_model_path(): + project = "squid" + location = "clam" + model = "whelk" + expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + actual = JobServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "octopus", + "location": "oyster", + "model": "nudibranch", + } + path = JobServiceClient.model_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_model_path(path) + assert expected == actual + +def test_model_deployment_monitoring_job_path(): + project = "cuttlefish" + location = "mussel" + model_deployment_monitoring_job = "winkle" + expected = "projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format(project=project, location=location, model_deployment_monitoring_job=model_deployment_monitoring_job, ) + actual = JobServiceClient.model_deployment_monitoring_job_path(project, location, model_deployment_monitoring_job) + assert expected == actual + + +def test_parse_model_deployment_monitoring_job_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "model_deployment_monitoring_job": "abalone", + } + path = JobServiceClient.model_deployment_monitoring_job_path(**expected) + + # Check that the path construction is reversible. 
+ actual = JobServiceClient.parse_model_deployment_monitoring_job_path(path) + assert expected == actual + +def test_network_path(): + project = "squid" + network = "clam" + expected = "projects/{project}/global/networks/{network}".format(project=project, network=network, ) + actual = JobServiceClient.network_path(project, network) + assert expected == actual + + +def test_parse_network_path(): + expected = { + "project": "whelk", + "network": "octopus", + } + path = JobServiceClient.network_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_network_path(path) + assert expected == actual + +def test_tensorboard_path(): + project = "oyster" + location = "nudibranch" + tensorboard = "cuttlefish" + expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(project=project, location=location, tensorboard=tensorboard, ) + actual = JobServiceClient.tensorboard_path(project, location, tensorboard) + assert expected == actual + + +def test_parse_tensorboard_path(): + expected = { + "project": "mussel", + "location": "winkle", + "tensorboard": "nautilus", + } + path = JobServiceClient.tensorboard_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_tensorboard_path(path) + assert expected == actual + +def test_trial_path(): + project = "scallop" + location = "abalone" + study = "squid" + trial = "clam" + expected = "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, ) + actual = JobServiceClient.trial_path(project, location, study, trial) + assert expected == actual + + +def test_parse_trial_path(): + expected = { + "project": "whelk", + "location": "octopus", + "study": "oyster", + "trial": "nudibranch", + } + path = JobServiceClient.trial_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_trial_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "cuttlefish" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = JobServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "mussel", + } + path = JobServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "winkle" + expected = "folders/{folder}".format(folder=folder, ) + actual = JobServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nautilus", + } + path = JobServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "scallop" + expected = "organizations/{organization}".format(organization=organization, ) + actual = JobServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "abalone", + } + path = JobServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = JobServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "squid" + expected = "projects/{project}".format(project=project, ) + actual = JobServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "clam", + } + path = JobServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "whelk" + location = "octopus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = JobServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + } + path = JobServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.JobServiceTransport, '_prep_wrapped_messages') as prep: + client = JobServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.JobServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = JobServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py new file mode 100644 index 0000000000..cb3e2ae56d --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py @@ -0,0 +1,8381 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.metadata_service import MetadataServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.metadata_service import MetadataServiceClient +from google.cloud.aiplatform_v1beta1.services.metadata_service import pagers +from google.cloud.aiplatform_v1beta1.services.metadata_service import transports +from google.cloud.aiplatform_v1beta1.services.metadata_service.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.aiplatform_v1beta1.types import artifact +from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact +from google.cloud.aiplatform_v1beta1.types import context +from google.cloud.aiplatform_v1beta1.types import context as gca_context +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import event +from google.cloud.aiplatform_v1beta1.types import execution +from google.cloud.aiplatform_v1beta1.types import execution as gca_execution +from google.cloud.aiplatform_v1beta1.types import lineage_subgraph +from google.cloud.aiplatform_v1beta1.types import metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema +from google.cloud.aiplatform_v1beta1.types import metadata_service +from google.cloud.aiplatform_v1beta1.types import metadata_store +from google.cloud.aiplatform_v1beta1.types import metadata_store as gca_metadata_store +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client):
+ return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
+
+
+def test__get_default_mtls_endpoint():
+ api_endpoint = "example.googleapis.com"
+ api_mtls_endpoint = "example.mtls.googleapis.com"
+ sandbox_endpoint = "example.sandbox.googleapis.com"
+ sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
+ non_googleapi = "api.example.com"
+
+ assert MetadataServiceClient._get_default_mtls_endpoint(None) is None
+ assert MetadataServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
+ assert MetadataServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
+ assert MetadataServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
+ assert MetadataServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
+ assert MetadataServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
+
+
+@pytest.mark.parametrize("client_class", [
+ MetadataServiceClient,
+ MetadataServiceAsyncClient,
+])
+def test_metadata_service_client_from_service_account_info(client_class):
+ creds = ga_credentials.AnonymousCredentials()
+ with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
+ factory.return_value = creds
+ info = {"valid": True}
+ client = client_class.from_service_account_info(info)
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ assert client.transport._host == 'aiplatform.googleapis.com:443'
+
+
+@pytest.mark.parametrize("client_class", [
+ MetadataServiceClient,
+ MetadataServiceAsyncClient,
+])
+def test_metadata_service_client_service_account_always_use_jwt(client_class):
+ with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ client = client_class(credentials=creds)
+ use_jwt.assert_not_called()
+
+
+@pytest.mark.parametrize("transport_class,transport_name", [
+ (transports.MetadataServiceGrpcTransport, "grpc"),
+ (transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio"),
+])
+def test_metadata_service_client_service_account_always_use_jwt_true(transport_class, transport_name):
+ with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=True)
+ use_jwt.assert_called_once_with(True)
+
+
+@pytest.mark.parametrize("client_class", [
+ MetadataServiceClient,
+ MetadataServiceAsyncClient,
+])
+def test_metadata_service_client_from_service_account_file(client_class):
+ creds = ga_credentials.AnonymousCredentials()
+ with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
+ factory.return_value = creds
+ client = client_class.from_service_account_file("dummy/file/path.json")
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ client = client_class.from_service_account_json("dummy/file/path.json")
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ assert client.transport._host == 'aiplatform.googleapis.com:443'
+
+
+def test_metadata_service_client_get_transport_class():
+ transport = MetadataServiceClient.get_transport_class()
+ available_transports = [
+ transports.MetadataServiceGrpcTransport,
+ ]
+ assert transport in available_transports
+
+ transport = MetadataServiceClient.get_transport_class("grpc")
+ assert transport == transports.MetadataServiceGrpcTransport
+
+
+@pytest.mark.parametrize("client_class,transport_class,transport_name", [
+ (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc"),
+ (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio"),
+])
+@mock.patch.object(MetadataServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MetadataServiceClient))
+@mock.patch.object(MetadataServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MetadataServiceAsyncClient))
+def test_metadata_service_client_client_options(client_class, transport_class, transport_name):
+ # Check that if channel is provided we won't create a new one.
+ with mock.patch.object(MetadataServiceClient, 'get_transport_class') as gtc:
+ transport = transport_class(
+ credentials=ga_credentials.AnonymousCredentials()
+ )
+ client = client_class(transport=transport)
+ gtc.assert_not_called()
+
+ # Check that if channel is provided via str we will create a new one.
+ with mock.patch.object(MetadataServiceClient, 'get_transport_class') as gtc:
+ client = client_class(transport=transport_name)
+ gtc.assert_called()
+
+ # Check the case api_endpoint is provided.
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
+ with mock.patch.object(transport_class, '__init__') as patched:
+ patched.return_value = None
+ client = client_class(client_options=options)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host="squid.clam.whelk",
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "never".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ with mock.patch.object(transport_class, '__init__') as patched:
+ patched.return_value = None
+ client = client_class()
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ with mock.patch.object(transport_class, '__init__') as patched:
+ patched.return_value = None
+ client = client_class()
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client.DEFAULT_MTLS_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+ # unsupported value.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError):
+ client = client_class()
+
+ # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
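+ # Only "true" and "false" are accepted for this variable; anything
+ # else is expected to raise ValueError.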
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc", "true"), + (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc", "false"), + (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(MetadataServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MetadataServiceClient)) +@mock.patch.object(MetadataServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MetadataServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_metadata_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
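+ # Here the certificate comes from the ADC default client cert source
+ # rather than client_options, so both discovery hooks are mocked.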
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc"), + (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_metadata_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc"), + (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_metadata_service_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
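+ # The file path is forwarded to the transport untouched; actual
+ # credential loading is deferred to the transport.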
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_metadata_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1beta1.services.metadata_service.transports.MetadataServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = MetadataServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_metadata_store(transport: str = 'grpc', request_type=metadata_service.CreateMetadataStoreRequest): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateMetadataStoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_metadata_store_from_dict(): + test_create_metadata_store(request_type=dict) + + +def test_create_metadata_store_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), + '__call__') as call: + client.create_metadata_store() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateMetadataStoreRequest() + + +@pytest.mark.asyncio +async def test_create_metadata_store_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateMetadataStoreRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. 
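+ # CreateMetadataStore is long-running: the stub yields a
+ # google.longrunning Operation, wrapped in an awaitable fake call.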
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateMetadataStoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_metadata_store_async_from_dict(): + await test_create_metadata_store_async(request_type=dict) + + +def test_create_metadata_store_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateMetadataStoreRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_metadata_store_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateMetadataStoreRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_create_metadata_store_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_metadata_store( + parent='parent_value', + metadata_store=gca_metadata_store.MetadataStore(name='name_value'), + metadata_store_id='metadata_store_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
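+ # Each flattened kwarg (parent, metadata_store, metadata_store_id)
+ # should map onto the matching field of the assembled request.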
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].metadata_store == gca_metadata_store.MetadataStore(name='name_value')
+        assert args[0].metadata_store_id == 'metadata_store_id_value'
+
+
+def test_create_metadata_store_flattened_error():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_metadata_store(
+            metadata_service.CreateMetadataStoreRequest(),
+            parent='parent_value',
+            metadata_store=gca_metadata_store.MetadataStore(name='name_value'),
+            metadata_store_id='metadata_store_id_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_metadata_store_flattened_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_metadata_store),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_metadata_store(
+            parent='parent_value',
+            metadata_store=gca_metadata_store.MetadataStore(name='name_value'),
+            metadata_store_id='metadata_store_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].metadata_store == gca_metadata_store.MetadataStore(name='name_value')
+        assert args[0].metadata_store_id == 'metadata_store_id_value'
+
+
+@pytest.mark.asyncio
+async def test_create_metadata_store_flattened_error_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.create_metadata_store(
+            metadata_service.CreateMetadataStoreRequest(),
+            parent='parent_value',
+            metadata_store=gca_metadata_store.MetadataStore(name='name_value'),
+            metadata_store_id='metadata_store_id_value',
+        )
+
+
+def test_get_metadata_store(transport: str = 'grpc', request_type=metadata_service.GetMetadataStoreRequest):
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_metadata_store),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = metadata_store.MetadataStore(
+            name='name_value',
+            description='description_value',
+        )
+        response = client.get_metadata_store(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.GetMetadataStoreRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, metadata_store.MetadataStore)
+    assert response.name == 'name_value'
+    assert response.description == 'description_value'
+
+
+def test_get_metadata_store_from_dict():
+    test_get_metadata_store(request_type=dict)
+
+
+def test_get_metadata_store_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_metadata_store),
+            '__call__') as call:
+        client.get_metadata_store()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.GetMetadataStoreRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_metadata_store_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetMetadataStoreRequest):
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_metadata_store),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_store.MetadataStore(
+            name='name_value',
+            description='description_value',
+        ))
+        response = await client.get_metadata_store(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.GetMetadataStoreRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, metadata_store.MetadataStore)
+    assert response.name == 'name_value'
+    assert response.description == 'description_value'
+
+
+@pytest.mark.asyncio
+async def test_get_metadata_store_async_from_dict():
+    await test_get_metadata_store_async(request_type=dict)
+
+
+def test_get_metadata_store_field_headers():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = metadata_service.GetMetadataStoreRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_metadata_store),
+            '__call__') as call:
+        call.return_value = metadata_store.MetadataStore()
+        client.get_metadata_store(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
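+    # The GAPIC layer forwards URI-bound fields as the x-goog-request-params
+    # metadata header, so it should show up in the mocked call's kwargs.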
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_metadata_store_field_headers_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = metadata_service.GetMetadataStoreRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_metadata_store),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_store.MetadataStore())
+        await client.get_metadata_store(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_get_metadata_store_flattened():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_metadata_store),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = metadata_store.MetadataStore()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_metadata_store(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_get_metadata_store_flattened_error():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_metadata_store(
+            metadata_service.GetMetadataStoreRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_metadata_store_flattened_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_metadata_store),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_store.MetadataStore())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_metadata_store(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_metadata_store_flattened_error_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_metadata_store(
+            metadata_service.GetMetadataStoreRequest(),
+            name='name_value',
+        )
+
+
+def test_list_metadata_stores(transport: str = 'grpc', request_type=metadata_service.ListMetadataStoresRequest):
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_metadata_stores),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = metadata_service.ListMetadataStoresResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_metadata_stores(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.ListMetadataStoresRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListMetadataStoresPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_metadata_stores_from_dict():
+    test_list_metadata_stores(request_type=dict)
+
+
+def test_list_metadata_stores_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_metadata_stores),
+            '__call__') as call:
+        client.list_metadata_stores()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.ListMetadataStoresRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_metadata_stores_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListMetadataStoresRequest):
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_metadata_stores),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataStoresResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_metadata_stores(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.ListMetadataStoresRequest()
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListMetadataStoresAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_metadata_stores_async_from_dict(): + await test_list_metadata_stores_async(request_type=dict) + + +def test_list_metadata_stores_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.ListMetadataStoresRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), + '__call__') as call: + call.return_value = metadata_service.ListMetadataStoresResponse() + client.list_metadata_stores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_metadata_stores_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.ListMetadataStoresRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataStoresResponse()) + await client.list_metadata_stores(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_list_metadata_stores_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_metadata_stores), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.ListMetadataStoresResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_metadata_stores( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +def test_list_metadata_stores_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.list_metadata_stores(
+            metadata_service.ListMetadataStoresRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_metadata_stores_flattened_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_metadata_stores),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataStoresResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_metadata_stores(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_metadata_stores_flattened_error_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_metadata_stores(
+            metadata_service.ListMetadataStoresRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_metadata_stores_pager():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_metadata_stores),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            metadata_service.ListMetadataStoresResponse(
+                metadata_stores=[
+                    metadata_store.MetadataStore(),
+                    metadata_store.MetadataStore(),
+                    metadata_store.MetadataStore(),
+                ],
+                next_page_token='abc',
+            ),
+            metadata_service.ListMetadataStoresResponse(
+                metadata_stores=[],
+                next_page_token='def',
+            ),
+            metadata_service.ListMetadataStoresResponse(
+                metadata_stores=[
+                    metadata_store.MetadataStore(),
+                ],
+                next_page_token='ghi',
+            ),
+            metadata_service.ListMetadataStoresResponse(
+                metadata_stores=[
+                    metadata_store.MetadataStore(),
+                    metadata_store.MetadataStore(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_metadata_stores(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, metadata_store.MetadataStore)
+                   for i in results)
+
+def test_list_metadata_stores_pages():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_metadata_stores),
+            '__call__') as call:
+        # Set the response to a series of pages.
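+        # Four pages followed by a RuntimeError sentinel; the pager should
+        # stop on the page with an empty token and never reach the error.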
+        call.side_effect = (
+            metadata_service.ListMetadataStoresResponse(
+                metadata_stores=[
+                    metadata_store.MetadataStore(),
+                    metadata_store.MetadataStore(),
+                    metadata_store.MetadataStore(),
+                ],
+                next_page_token='abc',
+            ),
+            metadata_service.ListMetadataStoresResponse(
+                metadata_stores=[],
+                next_page_token='def',
+            ),
+            metadata_service.ListMetadataStoresResponse(
+                metadata_stores=[
+                    metadata_store.MetadataStore(),
+                ],
+                next_page_token='ghi',
+            ),
+            metadata_service.ListMetadataStoresResponse(
+                metadata_stores=[
+                    metadata_store.MetadataStore(),
+                    metadata_store.MetadataStore(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_metadata_stores(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_metadata_stores_async_pager():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_metadata_stores),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            metadata_service.ListMetadataStoresResponse(
+                metadata_stores=[
+                    metadata_store.MetadataStore(),
+                    metadata_store.MetadataStore(),
+                    metadata_store.MetadataStore(),
+                ],
+                next_page_token='abc',
+            ),
+            metadata_service.ListMetadataStoresResponse(
+                metadata_stores=[],
+                next_page_token='def',
+            ),
+            metadata_service.ListMetadataStoresResponse(
+                metadata_stores=[
+                    metadata_store.MetadataStore(),
+                ],
+                next_page_token='ghi',
+            ),
+            metadata_service.ListMetadataStoresResponse(
+                metadata_stores=[
+                    metadata_store.MetadataStore(),
+                    metadata_store.MetadataStore(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_metadata_stores(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, metadata_store.MetadataStore)
+                   for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_metadata_stores_async_pages():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_metadata_stores),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
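+        # Four pages and a trailing RuntimeError sentinel; async iteration
+        # should end at the empty page token without raising.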
+ call.side_effect = ( + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + ], + next_page_token='abc', + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[], + next_page_token='def', + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + ], + next_page_token='ghi', + ), + metadata_service.ListMetadataStoresResponse( + metadata_stores=[ + metadata_store.MetadataStore(), + metadata_store.MetadataStore(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_metadata_stores(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +def test_delete_metadata_store(transport: str = 'grpc', request_type=metadata_service.DeleteMetadataStoreRequest): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteMetadataStoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_metadata_store_from_dict(): + test_delete_metadata_store(request_type=dict) + + +def test_delete_metadata_store_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), + '__call__') as call: + client.delete_metadata_store() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteMetadataStoreRequest() + + +@pytest.mark.asyncio +async def test_delete_metadata_store_async(transport: str = 'grpc_asyncio', request_type=metadata_service.DeleteMetadataStoreRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteMetadataStoreRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_metadata_store_async_from_dict(): + await test_delete_metadata_store_async(request_type=dict) + + +def test_delete_metadata_store_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.DeleteMetadataStoreRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_metadata_store_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.DeleteMetadataStoreRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_metadata_store(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_delete_metadata_store_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_metadata_store), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_metadata_store( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_delete_metadata_store_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.delete_metadata_store(
+            metadata_service.DeleteMetadataStoreRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_metadata_store_flattened_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_metadata_store),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_metadata_store(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_metadata_store_flattened_error_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.delete_metadata_store(
+            metadata_service.DeleteMetadataStoreRequest(),
+            name='name_value',
+        )
+
+
+def test_create_artifact(transport: str = 'grpc', request_type=metadata_service.CreateArtifactRequest):
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_artifact),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gca_artifact.Artifact(
+            name='name_value',
+            display_name='display_name_value',
+            uri='uri_value',
+            etag='etag_value',
+            state=gca_artifact.Artifact.State.PENDING,
+            schema_title='schema_title_value',
+            schema_version='schema_version_value',
+            description='description_value',
+        )
+        response = client.create_artifact(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.CreateArtifactRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, gca_artifact.Artifact)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.uri == 'uri_value'
+    assert response.etag == 'etag_value'
+    assert response.state == gca_artifact.Artifact.State.PENDING
+    assert response.schema_title == 'schema_title_value'
+    assert response.schema_version == 'schema_version_value'
+    assert response.description == 'description_value'
+
+
+def test_create_artifact_from_dict():
+    test_create_artifact(request_type=dict)
+
+
+def test_create_artifact_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_artifact),
+            '__call__') as call:
+        client.create_artifact()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.CreateArtifactRequest()
+
+
+@pytest.mark.asyncio
+async def test_create_artifact_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateArtifactRequest):
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_artifact),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact(
+            name='name_value',
+            display_name='display_name_value',
+            uri='uri_value',
+            etag='etag_value',
+            state=gca_artifact.Artifact.State.PENDING,
+            schema_title='schema_title_value',
+            schema_version='schema_version_value',
+            description='description_value',
+        ))
+        response = await client.create_artifact(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.CreateArtifactRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, gca_artifact.Artifact)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.uri == 'uri_value'
+    assert response.etag == 'etag_value'
+    assert response.state == gca_artifact.Artifact.State.PENDING
+    assert response.schema_title == 'schema_title_value'
+    assert response.schema_version == 'schema_version_value'
+    assert response.description == 'description_value'
+
+
+@pytest.mark.asyncio
+async def test_create_artifact_async_from_dict():
+    await test_create_artifact_async(request_type=dict)
+
+
+def test_create_artifact_field_headers():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = metadata_service.CreateArtifactRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_artifact),
+            '__call__') as call:
+        call.return_value = gca_artifact.Artifact()
+        client.create_artifact(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_create_artifact_field_headers_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = metadata_service.CreateArtifactRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_artifact),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact())
+        await client.create_artifact(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_create_artifact_flattened():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_artifact),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gca_artifact.Artifact()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.create_artifact(
+            parent='parent_value',
+            artifact=gca_artifact.Artifact(name='name_value'),
+            artifact_id='artifact_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].artifact == gca_artifact.Artifact(name='name_value')
+        assert args[0].artifact_id == 'artifact_id_value'
+
+
+def test_create_artifact_flattened_error():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_artifact(
+            metadata_service.CreateArtifactRequest(),
+            parent='parent_value',
+            artifact=gca_artifact.Artifact(name='name_value'),
+            artifact_id='artifact_id_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_artifact_flattened_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_artifact),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_artifact(
+            parent='parent_value',
+            artifact=gca_artifact.Artifact(name='name_value'),
+            artifact_id='artifact_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].artifact == gca_artifact.Artifact(name='name_value')
+        assert args[0].artifact_id == 'artifact_id_value'
+
+
+@pytest.mark.asyncio
+async def test_create_artifact_flattened_error_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.create_artifact( + metadata_service.CreateArtifactRequest(), + parent='parent_value', + artifact=gca_artifact.Artifact(name='name_value'), + artifact_id='artifact_id_value', + ) + + +def test_get_artifact(transport: str = 'grpc', request_type=metadata_service.GetArtifactRequest): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = artifact.Artifact( + name='name_value', + display_name='display_name_value', + uri='uri_value', + etag='etag_value', + state=artifact.Artifact.State.PENDING, + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + ) + response = client.get_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetArtifactRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, artifact.Artifact) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.uri == 'uri_value' + assert response.etag == 'etag_value' + assert response.state == artifact.Artifact.State.PENDING + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +def test_get_artifact_from_dict(): + test_get_artifact(request_type=dict) + + +def test_get_artifact_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_artifact), + '__call__') as call: + client.get_artifact() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.GetArtifactRequest() + + +@pytest.mark.asyncio +async def test_get_artifact_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetArtifactRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. 
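+        # As in the other async tests, FakeUnaryUnaryCall makes the mocked
+        # response awaitable like a real gRPC asyncio call.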
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(artifact.Artifact(
+            name='name_value',
+            display_name='display_name_value',
+            uri='uri_value',
+            etag='etag_value',
+            state=artifact.Artifact.State.PENDING,
+            schema_title='schema_title_value',
+            schema_version='schema_version_value',
+            description='description_value',
+        ))
+        response = await client.get_artifact(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.GetArtifactRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, artifact.Artifact)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.uri == 'uri_value'
+    assert response.etag == 'etag_value'
+    assert response.state == artifact.Artifact.State.PENDING
+    assert response.schema_title == 'schema_title_value'
+    assert response.schema_version == 'schema_version_value'
+    assert response.description == 'description_value'
+
+
+@pytest.mark.asyncio
+async def test_get_artifact_async_from_dict():
+    await test_get_artifact_async(request_type=dict)
+
+
+def test_get_artifact_field_headers():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = metadata_service.GetArtifactRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_artifact),
+            '__call__') as call:
+        call.return_value = artifact.Artifact()
+        client.get_artifact(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_artifact_field_headers_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = metadata_service.GetArtifactRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_artifact),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(artifact.Artifact())
+        await client.get_artifact(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_get_artifact_flattened():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_artifact),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = artifact.Artifact()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_artifact(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_get_artifact_flattened_error():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_artifact(
+            metadata_service.GetArtifactRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_artifact_flattened_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_artifact),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(artifact.Artifact())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_artifact(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_artifact_flattened_error_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_artifact(
+            metadata_service.GetArtifactRequest(),
+            name='name_value',
+        )
+
+
+def test_list_artifacts(transport: str = 'grpc', request_type=metadata_service.ListArtifactsRequest):
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_artifacts),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = metadata_service.ListArtifactsResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_artifacts(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.ListArtifactsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListArtifactsPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_artifacts_from_dict():
+    test_list_artifacts(request_type=dict)
+
+
+def test_list_artifacts_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_artifacts),
+            '__call__') as call:
+        client.list_artifacts()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.ListArtifactsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_artifacts_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListArtifactsRequest):
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_artifacts),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListArtifactsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_artifacts(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.ListArtifactsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListArtifactsAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_artifacts_async_from_dict():
+    await test_list_artifacts_async(request_type=dict)
+
+
+def test_list_artifacts_field_headers():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = metadata_service.ListArtifactsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_artifacts),
+            '__call__') as call:
+        call.return_value = metadata_service.ListArtifactsResponse()
+        client.list_artifacts(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_artifacts_field_headers_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = metadata_service.ListArtifactsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_artifacts),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListArtifactsResponse())
+        await client.list_artifacts(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_list_artifacts_flattened():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_artifacts),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = metadata_service.ListArtifactsResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_artifacts(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+def test_list_artifacts_flattened_error():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_artifacts(
+            metadata_service.ListArtifactsRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_artifacts_flattened_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_artifacts),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListArtifactsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_artifacts(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_artifacts_flattened_error_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_artifacts(
+            metadata_service.ListArtifactsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_artifacts_pager():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_artifacts),
+            '__call__') as call:
+        # Set the response to a series of pages.
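+        # Four pages followed by a RuntimeError sentinel; the pager should
+        # stop on the empty page token before the error is reached.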
+        call.side_effect = (
+            metadata_service.ListArtifactsResponse(
+                artifacts=[
+                    artifact.Artifact(),
+                    artifact.Artifact(),
+                    artifact.Artifact(),
+                ],
+                next_page_token='abc',
+            ),
+            metadata_service.ListArtifactsResponse(
+                artifacts=[],
+                next_page_token='def',
+            ),
+            metadata_service.ListArtifactsResponse(
+                artifacts=[
+                    artifact.Artifact(),
+                ],
+                next_page_token='ghi',
+            ),
+            metadata_service.ListArtifactsResponse(
+                artifacts=[
+                    artifact.Artifact(),
+                    artifact.Artifact(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_artifacts(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, artifact.Artifact)
+                   for i in results)
+
+def test_list_artifacts_pages():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_artifacts),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            metadata_service.ListArtifactsResponse(
+                artifacts=[
+                    artifact.Artifact(),
+                    artifact.Artifact(),
+                    artifact.Artifact(),
+                ],
+                next_page_token='abc',
+            ),
+            metadata_service.ListArtifactsResponse(
+                artifacts=[],
+                next_page_token='def',
+            ),
+            metadata_service.ListArtifactsResponse(
+                artifacts=[
+                    artifact.Artifact(),
+                ],
+                next_page_token='ghi',
+            ),
+            metadata_service.ListArtifactsResponse(
+                artifacts=[
+                    artifact.Artifact(),
+                    artifact.Artifact(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_artifacts(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_artifacts_async_pager():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_artifacts),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            metadata_service.ListArtifactsResponse(
+                artifacts=[
+                    artifact.Artifact(),
+                    artifact.Artifact(),
+                    artifact.Artifact(),
+                ],
+                next_page_token='abc',
+            ),
+            metadata_service.ListArtifactsResponse(
+                artifacts=[],
+                next_page_token='def',
+            ),
+            metadata_service.ListArtifactsResponse(
+                artifacts=[
+                    artifact.Artifact(),
+                ],
+                next_page_token='ghi',
+            ),
+            metadata_service.ListArtifactsResponse(
+                artifacts=[
+                    artifact.Artifact(),
+                    artifact.Artifact(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_artifacts(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, artifact.Artifact)
+                   for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_artifacts_async_pages():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_artifacts),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
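+        # Four pages and a trailing RuntimeError sentinel; async iteration
+        # should end at the empty page token without raising.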
+ call.side_effect = ( + metadata_service.ListArtifactsResponse( + artifacts=[ + artifact.Artifact(), + artifact.Artifact(), + artifact.Artifact(), + ], + next_page_token='abc', + ), + metadata_service.ListArtifactsResponse( + artifacts=[], + next_page_token='def', + ), + metadata_service.ListArtifactsResponse( + artifacts=[ + artifact.Artifact(), + ], + next_page_token='ghi', + ), + metadata_service.ListArtifactsResponse( + artifacts=[ + artifact.Artifact(), + artifact.Artifact(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_artifacts(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +def test_update_artifact(transport: str = 'grpc', request_type=metadata_service.UpdateArtifactRequest): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_artifact), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_artifact.Artifact( + name='name_value', + display_name='display_name_value', + uri='uri_value', + etag='etag_value', + state=gca_artifact.Artifact.State.PENDING, + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + ) + response = client.update_artifact(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.UpdateArtifactRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_artifact.Artifact) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.uri == 'uri_value' + assert response.etag == 'etag_value' + assert response.state == gca_artifact.Artifact.State.PENDING + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +def test_update_artifact_from_dict(): + test_update_artifact(request_type=dict) + + +def test_update_artifact_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_artifact), + '__call__') as call: + client.update_artifact() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.UpdateArtifactRequest() + + +@pytest.mark.asyncio +async def test_update_artifact_async(transport: str = 'grpc_asyncio', request_type=metadata_service.UpdateArtifactRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
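+ # Note: the *_from_dict variants re-run this test with request_type=dict,
+ # so request_type() produces either an empty proto message or an empty
+ # dict; the client accepts both and coerces a dict into the request type.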
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_artifact),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact(
+ name='name_value',
+ display_name='display_name_value',
+ uri='uri_value',
+ etag='etag_value',
+ state=gca_artifact.Artifact.State.PENDING,
+ schema_title='schema_title_value',
+ schema_version='schema_version_value',
+ description='description_value',
+ ))
+ response = await client.update_artifact(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == metadata_service.UpdateArtifactRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_artifact.Artifact)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.uri == 'uri_value'
+ assert response.etag == 'etag_value'
+ assert response.state == gca_artifact.Artifact.State.PENDING
+ assert response.schema_title == 'schema_title_value'
+ assert response.schema_version == 'schema_version_value'
+ assert response.description == 'description_value'
+
+
+@pytest.mark.asyncio
+async def test_update_artifact_async_from_dict():
+ await test_update_artifact_async(request_type=dict)
+
+
+def test_update_artifact_field_headers():
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = metadata_service.UpdateArtifactRequest()
+
+ request.artifact.name = 'artifact.name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_artifact),
+ '__call__') as call:
+ call.return_value = gca_artifact.Artifact()
+ client.update_artifact(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'artifact.name=artifact.name/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_update_artifact_field_headers_async():
+ client = MetadataServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = metadata_service.UpdateArtifactRequest()
+
+ request.artifact.name = 'artifact.name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_artifact),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact())
+ await client.update_artifact(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
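+ # Note: the client mirrors URI-bound request fields into the
+ # 'x-goog-request-params' metadata entry so the backend can route the
+ # call; the assertion below checks for that header in the call metadata.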
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'artifact.name=artifact.name/value',
+ ) in kw['metadata']
+
+
+def test_update_artifact_flattened():
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_artifact),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_artifact.Artifact()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.update_artifact(
+ artifact=gca_artifact.Artifact(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].artifact == gca_artifact.Artifact(name='name_value')
+ assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+def test_update_artifact_flattened_error():
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.update_artifact(
+ metadata_service.UpdateArtifactRequest(),
+ artifact=gca_artifact.Artifact(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+
+@pytest.mark.asyncio
+async def test_update_artifact_flattened_async():
+ client = MetadataServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_artifact),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.update_artifact(
+ artifact=gca_artifact.Artifact(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].artifact == gca_artifact.Artifact(name='name_value')
+ assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+@pytest.mark.asyncio
+async def test_update_artifact_flattened_error_async():
+ client = MetadataServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.update_artifact(
+ metadata_service.UpdateArtifactRequest(),
+ artifact=gca_artifact.Artifact(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+
+def test_create_context(transport: str = 'grpc', request_type=metadata_service.CreateContextRequest):
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_context),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_context.Context(
+ name='name_value',
+ display_name='display_name_value',
+ etag='etag_value',
+ parent_contexts=['parent_contexts_value'],
+ schema_title='schema_title_value',
+ schema_version='schema_version_value',
+ description='description_value',
+ )
+ response = client.create_context(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == metadata_service.CreateContextRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_context.Context)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.etag == 'etag_value'
+ assert response.parent_contexts == ['parent_contexts_value']
+ assert response.schema_title == 'schema_title_value'
+ assert response.schema_version == 'schema_version_value'
+ assert response.description == 'description_value'
+
+
+def test_create_context_from_dict():
+ test_create_context(request_type=dict)
+
+
+def test_create_context_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_context),
+ '__call__') as call:
+ client.create_context()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == metadata_service.CreateContextRequest()
+
+
+@pytest.mark.asyncio
+async def test_create_context_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateContextRequest):
+ client = MetadataServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_context),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context(
+ name='name_value',
+ display_name='display_name_value',
+ etag='etag_value',
+ parent_contexts=['parent_contexts_value'],
+ schema_title='schema_title_value',
+ schema_version='schema_version_value',
+ description='description_value',
+ ))
+ response = await client.create_context(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == metadata_service.CreateContextRequest()
+
+ # Establish that the response is the type that we expect.
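+ # Note: awaiting the FakeUnaryUnaryCall above resolves to the wrapped
+ # message, so the async surface returns the same proto type as the sync
+ # client and the assertions below are identical to the sync variant.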
+ assert isinstance(response, gca_context.Context) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.etag == 'etag_value' + assert response.parent_contexts == ['parent_contexts_value'] + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +@pytest.mark.asyncio +async def test_create_context_async_from_dict(): + await test_create_context_async(request_type=dict) + + +def test_create_context_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateContextRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_context), + '__call__') as call: + call.return_value = gca_context.Context() + client.create_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_context_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateContextRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_context), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context()) + await client.create_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_create_context_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_context.Context() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_context( + parent='parent_value', + context=gca_context.Context(name='name_value'), + context_id='context_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
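+ # Note: the flattened keyword arguments are packed into a single request
+ # message by the client before it reaches the transport, so expectations
+ # are asserted against fields of the positional argument args[0].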
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+ assert args[0].context == gca_context.Context(name='name_value')
+ assert args[0].context_id == 'context_id_value'
+
+
+def test_create_context_flattened_error():
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_context(
+ metadata_service.CreateContextRequest(),
+ parent='parent_value',
+ context=gca_context.Context(name='name_value'),
+ context_id='context_id_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_create_context_flattened_async():
+ client = MetadataServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_context),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.create_context(
+ parent='parent_value',
+ context=gca_context.Context(name='name_value'),
+ context_id='context_id_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+ assert args[0].context == gca_context.Context(name='name_value')
+ assert args[0].context_id == 'context_id_value'
+
+
+@pytest.mark.asyncio
+async def test_create_context_flattened_error_async():
+ client = MetadataServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.create_context(
+ metadata_service.CreateContextRequest(),
+ parent='parent_value',
+ context=gca_context.Context(name='name_value'),
+ context_id='context_id_value',
+ )
+
+
+def test_get_context(transport: str = 'grpc', request_type=metadata_service.GetContextRequest):
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_context),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = context.Context(
+ name='name_value',
+ display_name='display_name_value',
+ etag='etag_value',
+ parent_contexts=['parent_contexts_value'],
+ schema_title='schema_title_value',
+ schema_version='schema_version_value',
+ description='description_value',
+ )
+ response = client.get_context(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == metadata_service.GetContextRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, context.Context)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.etag == 'etag_value'
+ assert response.parent_contexts == ['parent_contexts_value']
+ assert response.schema_title == 'schema_title_value'
+ assert response.schema_version == 'schema_version_value'
+ assert response.description == 'description_value'
+
+
+def test_get_context_from_dict():
+ test_get_context(request_type=dict)
+
+
+def test_get_context_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_context),
+ '__call__') as call:
+ client.get_context()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == metadata_service.GetContextRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_context_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetContextRequest):
+ client = MetadataServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_context),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(context.Context(
+ name='name_value',
+ display_name='display_name_value',
+ etag='etag_value',
+ parent_contexts=['parent_contexts_value'],
+ schema_title='schema_title_value',
+ schema_version='schema_version_value',
+ description='description_value',
+ ))
+ response = await client.get_context(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == metadata_service.GetContextRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, context.Context)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.etag == 'etag_value'
+ assert response.parent_contexts == ['parent_contexts_value']
+ assert response.schema_title == 'schema_title_value'
+ assert response.schema_version == 'schema_version_value'
+ assert response.description == 'description_value'
+
+
+@pytest.mark.asyncio
+async def test_get_context_async_from_dict():
+ await test_get_context_async(request_type=dict)
+
+
+def test_get_context_field_headers():
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = metadata_service.GetContextRequest()
+
+ request.name = 'name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_context),
+ '__call__') as call:
+ call.return_value = context.Context()
+ client.get_context(request)
+
+ # Establish that the underlying gRPC stub method was called.
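+ # Note: each mock_calls entry is a (name, args, kwargs) triple; args[0]
+ # is the request message, and kwargs['metadata'] carries the gRPC
+ # metadata inspected by the field-header assertions in these tests.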
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_context_field_headers_async():
+ client = MetadataServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = metadata_service.GetContextRequest()
+
+ request.name = 'name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_context),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(context.Context())
+ await client.get_context(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+def test_get_context_flattened():
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_context),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = context.Context()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_context(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+def test_get_context_flattened_error():
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_context(
+ metadata_service.GetContextRequest(),
+ name='name_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_context_flattened_async():
+ client = MetadataServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_context),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(context.Context())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_context(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_context_flattened_error_async():
+ client = MetadataServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_context(
+ metadata_service.GetContextRequest(),
+ name='name_value',
+ )
+
+
+def test_list_contexts(transport: str = 'grpc', request_type=metadata_service.ListContextsRequest):
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_contexts),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = metadata_service.ListContextsResponse(
+ next_page_token='next_page_token_value',
+ )
+ response = client.list_contexts(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == metadata_service.ListContextsRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListContextsPager)
+ assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_contexts_from_dict():
+ test_list_contexts(request_type=dict)
+
+
+def test_list_contexts_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_contexts),
+ '__call__') as call:
+ client.list_contexts()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == metadata_service.ListContextsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_contexts_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListContextsRequest):
+ client = MetadataServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_contexts),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListContextsResponse(
+ next_page_token='next_page_token_value',
+ ))
+ response = await client.list_contexts(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == metadata_service.ListContextsRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListContextsAsyncPager)
+ assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_contexts_async_from_dict():
+ await test_list_contexts_async(request_type=dict)
+
+
+def test_list_contexts_field_headers():
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = metadata_service.ListContextsRequest()
+
+ request.parent = 'parent/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_contexts),
+ '__call__') as call:
+ call.return_value = metadata_service.ListContextsResponse()
+ client.list_contexts(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'parent=parent/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_contexts_field_headers_async():
+ client = MetadataServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = metadata_service.ListContextsRequest()
+
+ request.parent = 'parent/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_contexts),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListContextsResponse())
+ await client.list_contexts(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'parent=parent/value',
+ ) in kw['metadata']
+
+
+def test_list_contexts_flattened():
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_contexts),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = metadata_service.ListContextsResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_contexts(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+
+
+def test_list_contexts_flattened_error():
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_contexts(
+ metadata_service.ListContextsRequest(),
+ parent='parent_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_contexts_flattened_async():
+ client = MetadataServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_contexts),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListContextsResponse())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_contexts(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_contexts_flattened_error_async():
+ client = MetadataServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_contexts(
+ metadata_service.ListContextsRequest(),
+ parent='parent_value',
+ )
+
+
+def test_list_contexts_pager():
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_contexts),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ metadata_service.ListContextsResponse(
+ contexts=[
+ context.Context(),
+ context.Context(),
+ context.Context(),
+ ],
+ next_page_token='abc',
+ ),
+ metadata_service.ListContextsResponse(
+ contexts=[],
+ next_page_token='def',
+ ),
+ metadata_service.ListContextsResponse(
+ contexts=[
+ context.Context(),
+ ],
+ next_page_token='ghi',
+ ),
+ metadata_service.ListContextsResponse(
+ contexts=[
+ context.Context(),
+ context.Context(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ('parent', ''),
+ )),
+ )
+ pager = client.list_contexts(request={})
+
+ assert pager._metadata == metadata
+
+ results = [i for i in pager]
+ assert len(results) == 6
+ assert all(isinstance(i, context.Context)
+ for i in results)
+
+def test_list_contexts_pages():
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_contexts),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ metadata_service.ListContextsResponse(
+ contexts=[
+ context.Context(),
+ context.Context(),
+ context.Context(),
+ ],
+ next_page_token='abc',
+ ),
+ metadata_service.ListContextsResponse(
+ contexts=[],
+ next_page_token='def',
+ ),
+ metadata_service.ListContextsResponse(
+ contexts=[
+ context.Context(),
+ ],
+ next_page_token='ghi',
+ ),
+ metadata_service.ListContextsResponse(
+ contexts=[
+ context.Context(),
+ context.Context(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_contexts(request={}).pages)
+ for page_, token in zip(pages, ['abc','def','ghi', '']):
+ assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_contexts_async_pager():
+ client = MetadataServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_contexts),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ metadata_service.ListContextsResponse(
+ contexts=[
+ context.Context(),
+ context.Context(),
+ context.Context(),
+ ],
+ next_page_token='abc',
+ ),
+ metadata_service.ListContextsResponse(
+ contexts=[],
+ next_page_token='def',
+ ),
+ metadata_service.ListContextsResponse(
+ contexts=[
+ context.Context(),
+ ],
+ next_page_token='ghi',
+ ),
+ metadata_service.ListContextsResponse(
+ contexts=[
+ context.Context(),
+ context.Context(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_contexts(request={},)
+ assert async_pager.next_page_token == 'abc'
+ responses = []
+ async for response in async_pager:
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, context.Context)
+ for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_contexts_async_pages():
+ client = MetadataServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_contexts),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ metadata_service.ListContextsResponse(
+ contexts=[
+ context.Context(),
+ context.Context(),
+ context.Context(),
+ ],
+ next_page_token='abc',
+ ),
+ metadata_service.ListContextsResponse(
+ contexts=[],
+ next_page_token='def',
+ ),
+ metadata_service.ListContextsResponse(
+ contexts=[
+ context.Context(),
+ ],
+ next_page_token='ghi',
+ ),
+ metadata_service.ListContextsResponse(
+ contexts=[
+ context.Context(),
+ context.Context(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = []
+ async for page_ in (await client.list_contexts(request={})).pages:
+ pages.append(page_)
+ for page_, token in zip(pages, ['abc','def','ghi', '']):
+ assert page_.raw_page.next_page_token == token
+
+def test_update_context(transport: str = 'grpc', request_type=metadata_service.UpdateContextRequest):
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_context),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_context.Context(
+ name='name_value',
+ display_name='display_name_value',
+ etag='etag_value',
+ parent_contexts=['parent_contexts_value'],
+ schema_title='schema_title_value',
+ schema_version='schema_version_value',
+ description='description_value',
+ )
+ response = client.update_context(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == metadata_service.UpdateContextRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_context.Context)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.etag == 'etag_value'
+ assert response.parent_contexts == ['parent_contexts_value']
+ assert response.schema_title == 'schema_title_value'
+ assert response.schema_version == 'schema_version_value'
+ assert response.description == 'description_value'
+
+
+def test_update_context_from_dict():
+ test_update_context(request_type=dict)
+
+
+def test_update_context_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_context),
+ '__call__') as call:
+ client.update_context()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == metadata_service.UpdateContextRequest()
+
+
+@pytest.mark.asyncio
+async def test_update_context_async(transport: str = 'grpc_asyncio', request_type=metadata_service.UpdateContextRequest):
+ client = MetadataServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_context),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context(
+ name='name_value',
+ display_name='display_name_value',
+ etag='etag_value',
+ parent_contexts=['parent_contexts_value'],
+ schema_title='schema_title_value',
+ schema_version='schema_version_value',
+ description='description_value',
+ ))
+ response = await client.update_context(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == metadata_service.UpdateContextRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_context.Context)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.etag == 'etag_value'
+ assert response.parent_contexts == ['parent_contexts_value']
+ assert response.schema_title == 'schema_title_value'
+ assert response.schema_version == 'schema_version_value'
+ assert response.description == 'description_value'
+
+
+@pytest.mark.asyncio
+async def test_update_context_async_from_dict():
+ await test_update_context_async(request_type=dict)
+
+
+def test_update_context_field_headers():
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = metadata_service.UpdateContextRequest()
+
+ request.context.name = 'context.name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
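+ # Note: patching '__call__' on the type of the transport's multicallable
+ # intercepts the RPC at the stub boundary, so these tests never open a
+ # real channel or send any traffic over the wire.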
+ with mock.patch.object( + type(client.transport.update_context), + '__call__') as call: + call.return_value = gca_context.Context() + client.update_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'context.name=context.name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_context_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.UpdateContextRequest() + + request.context.name = 'context.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_context), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context()) + await client.update_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'context.name=context.name/value', + ) in kw['metadata'] + + +def test_update_context_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_context.Context() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_context( + context=gca_context.Context(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].context == gca_context.Context(name='name_value') + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) + + +def test_update_context_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_context( + metadata_service.UpdateContextRequest(), + context=gca_context.Context(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.asyncio +async def test_update_context_flattened_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_context), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.update_context(
+ context=gca_context.Context(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].context == gca_context.Context(name='name_value')
+ assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+@pytest.mark.asyncio
+async def test_update_context_flattened_error_async():
+ client = MetadataServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.update_context(
+ metadata_service.UpdateContextRequest(),
+ context=gca_context.Context(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+
+def test_delete_context(transport: str = 'grpc', request_type=metadata_service.DeleteContextRequest):
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_context),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name='operations/spam')
+ response = client.delete_context(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == metadata_service.DeleteContextRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_delete_context_from_dict():
+ test_delete_context(request_type=dict)
+
+
+def test_delete_context_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_context),
+ '__call__') as call:
+ client.delete_context()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == metadata_service.DeleteContextRequest()
+
+
+@pytest.mark.asyncio
+async def test_delete_context_async(transport: str = 'grpc_asyncio', request_type=metadata_service.DeleteContextRequest):
+ client = MetadataServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
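+ # Note: DeleteContext is a long-running operation; the stub returns a raw
+ # operations_pb2.Operation, which the client wraps in an api_core future
+ # (hence the future.Future isinstance checks in these tests).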
+ with mock.patch.object( + type(client.transport.delete_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.DeleteContextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_context_async_from_dict(): + await test_delete_context_async(request_type=dict) + + +def test_delete_context_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.DeleteContextRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_context), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_context_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.DeleteContextRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_context), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_context(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_delete_context_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_context), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_context( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+def test_delete_context_flattened_error():
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_context(
+ metadata_service.DeleteContextRequest(),
+ name='name_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_delete_context_flattened_async():
+ client = MetadataServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_context),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name='operations/spam')
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_context(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_context_flattened_error_async():
+ client = MetadataServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.delete_context(
+ metadata_service.DeleteContextRequest(),
+ name='name_value',
+ )
+
+
+def test_add_context_artifacts_and_executions(transport: str = 'grpc', request_type=metadata_service.AddContextArtifactsAndExecutionsRequest):
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.add_context_artifacts_and_executions),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse()
+ response = client.add_context_artifacts_and_executions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, metadata_service.AddContextArtifactsAndExecutionsResponse)
+
+
+def test_add_context_artifacts_and_executions_from_dict():
+ test_add_context_artifacts_and_executions(request_type=dict)
+
+
+def test_add_context_artifacts_and_executions_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = MetadataServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.add_context_artifacts_and_executions),
+            '__call__') as call:
+        client.add_context_artifacts_and_executions()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest()
+
+
+@pytest.mark.asyncio
+async def test_add_context_artifacts_and_executions_async(transport: str = 'grpc_asyncio', request_type=metadata_service.AddContextArtifactsAndExecutionsRequest):
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.add_context_artifacts_and_executions),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextArtifactsAndExecutionsResponse(
+        ))
+        response = await client.add_context_artifacts_and_executions(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, metadata_service.AddContextArtifactsAndExecutionsResponse)
+
+
+@pytest.mark.asyncio
+async def test_add_context_artifacts_and_executions_async_from_dict():
+    await test_add_context_artifacts_and_executions_async(request_type=dict)
+
+
+def test_add_context_artifacts_and_executions_field_headers():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = metadata_service.AddContextArtifactsAndExecutionsRequest()
+
+    request.context = 'context/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.add_context_artifacts_and_executions),
+            '__call__') as call:
+        call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse()
+        client.add_context_artifacts_and_executions(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'context=context/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_add_context_artifacts_and_executions_field_headers_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = metadata_service.AddContextArtifactsAndExecutionsRequest()
+
+    request.context = 'context/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.add_context_artifacts_and_executions),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextArtifactsAndExecutionsResponse())
+        await client.add_context_artifacts_and_executions(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'context=context/value',
+    ) in kw['metadata']
+
+
+def test_add_context_artifacts_and_executions_flattened():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.add_context_artifacts_and_executions),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.add_context_artifacts_and_executions(
+            context='context_value',
+            artifacts=['artifacts_value'],
+            executions=['executions_value'],
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].context == 'context_value'
+        assert args[0].artifacts == ['artifacts_value']
+        assert args[0].executions == ['executions_value']
+
+
+def test_add_context_artifacts_and_executions_flattened_error():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.add_context_artifacts_and_executions(
+            metadata_service.AddContextArtifactsAndExecutionsRequest(),
+            context='context_value',
+            artifacts=['artifacts_value'],
+            executions=['executions_value'],
+        )
+
+
+@pytest.mark.asyncio
+async def test_add_context_artifacts_and_executions_flattened_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.add_context_artifacts_and_executions),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextArtifactsAndExecutionsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.add_context_artifacts_and_executions(
+            context='context_value',
+            artifacts=['artifacts_value'],
+            executions=['executions_value'],
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].context == 'context_value'
+        assert args[0].artifacts == ['artifacts_value']
+        assert args[0].executions == ['executions_value']
+
+
+@pytest.mark.asyncio
+async def test_add_context_artifacts_and_executions_flattened_error_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.add_context_artifacts_and_executions(
+            metadata_service.AddContextArtifactsAndExecutionsRequest(),
+            context='context_value',
+            artifacts=['artifacts_value'],
+            executions=['executions_value'],
+        )
+
+
+def test_add_context_children(transport: str = 'grpc', request_type=metadata_service.AddContextChildrenRequest):
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.add_context_children),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = metadata_service.AddContextChildrenResponse(
+        )
+        response = client.add_context_children(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.AddContextChildrenRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, metadata_service.AddContextChildrenResponse)
+
+
+def test_add_context_children_from_dict():
+    test_add_context_children(request_type=dict)
+
+
+def test_add_context_children_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.add_context_children),
+            '__call__') as call:
+        client.add_context_children()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.AddContextChildrenRequest()
+
+
+@pytest.mark.asyncio
+async def test_add_context_children_async(transport: str = 'grpc_asyncio', request_type=metadata_service.AddContextChildrenRequest):
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.add_context_children),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextChildrenResponse(
+        ))
+        response = await client.add_context_children(request)
+
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.AddContextChildrenRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, metadata_service.AddContextChildrenResponse) + + +@pytest.mark.asyncio +async def test_add_context_children_async_from_dict(): + await test_add_context_children_async(request_type=dict) + + +def test_add_context_children_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.AddContextChildrenRequest() + + request.context = 'context/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_children), + '__call__') as call: + call.return_value = metadata_service.AddContextChildrenResponse() + client.add_context_children(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'context=context/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_add_context_children_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.AddContextChildrenRequest() + + request.context = 'context/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_children), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextChildrenResponse()) + await client.add_context_children(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'context=context/value', + ) in kw['metadata'] + + +def test_add_context_children_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_context_children), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddContextChildrenResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.add_context_children( + context='context_value', + child_contexts=['child_contexts_value'], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].context == 'context_value'
+        assert args[0].child_contexts == ['child_contexts_value']
+
+
+def test_add_context_children_flattened_error():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.add_context_children(
+            metadata_service.AddContextChildrenRequest(),
+            context='context_value',
+            child_contexts=['child_contexts_value'],
+        )
+
+
+@pytest.mark.asyncio
+async def test_add_context_children_flattened_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.add_context_children),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextChildrenResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.add_context_children(
+            context='context_value',
+            child_contexts=['child_contexts_value'],
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].context == 'context_value'
+        assert args[0].child_contexts == ['child_contexts_value']
+
+
+@pytest.mark.asyncio
+async def test_add_context_children_flattened_error_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.add_context_children(
+            metadata_service.AddContextChildrenRequest(),
+            context='context_value',
+            child_contexts=['child_contexts_value'],
+        )
+
+
+def test_query_context_lineage_subgraph(transport: str = 'grpc', request_type=metadata_service.QueryContextLineageSubgraphRequest):
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.query_context_lineage_subgraph),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = lineage_subgraph.LineageSubgraph(
+        )
+        response = client.query_context_lineage_subgraph(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.QueryContextLineageSubgraphRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, lineage_subgraph.LineageSubgraph)
+
+
+def test_query_context_lineage_subgraph_from_dict():
+    test_query_context_lineage_subgraph(request_type=dict)
+
+
+def test_query_context_lineage_subgraph_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.query_context_lineage_subgraph),
+            '__call__') as call:
+        client.query_context_lineage_subgraph()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.QueryContextLineageSubgraphRequest()
+
+
+@pytest.mark.asyncio
+async def test_query_context_lineage_subgraph_async(transport: str = 'grpc_asyncio', request_type=metadata_service.QueryContextLineageSubgraphRequest):
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.query_context_lineage_subgraph),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph(
+        ))
+        response = await client.query_context_lineage_subgraph(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.QueryContextLineageSubgraphRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, lineage_subgraph.LineageSubgraph)
+
+
+@pytest.mark.asyncio
+async def test_query_context_lineage_subgraph_async_from_dict():
+    await test_query_context_lineage_subgraph_async(request_type=dict)
+
+
+def test_query_context_lineage_subgraph_field_headers():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = metadata_service.QueryContextLineageSubgraphRequest()
+
+    request.context = 'context/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.query_context_lineage_subgraph),
+            '__call__') as call:
+        call.return_value = lineage_subgraph.LineageSubgraph()
+        client.query_context_lineage_subgraph(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'context=context/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_query_context_lineage_subgraph_field_headers_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = metadata_service.QueryContextLineageSubgraphRequest()
+
+    request.context = 'context/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.query_context_lineage_subgraph),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph())
+        await client.query_context_lineage_subgraph(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'context=context/value',
+    ) in kw['metadata']
+
+
+def test_query_context_lineage_subgraph_flattened():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.query_context_lineage_subgraph),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = lineage_subgraph.LineageSubgraph()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.query_context_lineage_subgraph(
+            context='context_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].context == 'context_value'
+
+
+def test_query_context_lineage_subgraph_flattened_error():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.query_context_lineage_subgraph(
+            metadata_service.QueryContextLineageSubgraphRequest(),
+            context='context_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_query_context_lineage_subgraph_flattened_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.query_context_lineage_subgraph),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.query_context_lineage_subgraph(
+            context='context_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].context == 'context_value'
+
+
+@pytest.mark.asyncio
+async def test_query_context_lineage_subgraph_flattened_error_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.query_context_lineage_subgraph( + metadata_service.QueryContextLineageSubgraphRequest(), + context='context_value', + ) + + +def test_create_execution(transport: str = 'grpc', request_type=metadata_service.CreateExecutionRequest): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_execution.Execution( + name='name_value', + display_name='display_name_value', + state=gca_execution.Execution.State.NEW, + etag='etag_value', + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + ) + response = client.create_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateExecutionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_execution.Execution) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == gca_execution.Execution.State.NEW + assert response.etag == 'etag_value' + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +def test_create_execution_from_dict(): + test_create_execution(request_type=dict) + + +def test_create_execution_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_execution), + '__call__') as call: + client.create_execution() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.CreateExecutionRequest() + + +@pytest.mark.asyncio +async def test_create_execution_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateExecutionRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_execution), + '__call__') as call: + # Designate an appropriate return value for the call. 
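+        # grpc_helpers_async.FakeUnaryUnaryCall wraps the message in an
+        # awaitable call object, mirroring what a real async unary-unary
+        # stub returns.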
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution(
+            name='name_value',
+            display_name='display_name_value',
+            state=gca_execution.Execution.State.NEW,
+            etag='etag_value',
+            schema_title='schema_title_value',
+            schema_version='schema_version_value',
+            description='description_value',
+        ))
+        response = await client.create_execution(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.CreateExecutionRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, gca_execution.Execution)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.state == gca_execution.Execution.State.NEW
+    assert response.etag == 'etag_value'
+    assert response.schema_title == 'schema_title_value'
+    assert response.schema_version == 'schema_version_value'
+    assert response.description == 'description_value'
+
+
+@pytest.mark.asyncio
+async def test_create_execution_async_from_dict():
+    await test_create_execution_async(request_type=dict)
+
+
+def test_create_execution_field_headers():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = metadata_service.CreateExecutionRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_execution),
+            '__call__') as call:
+        call.return_value = gca_execution.Execution()
+        client.create_execution(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_create_execution_field_headers_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = metadata_service.CreateExecutionRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_execution),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution())
+        await client.create_execution(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_create_execution_flattened():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_execution),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gca_execution.Execution()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.create_execution(
+            parent='parent_value',
+            execution=gca_execution.Execution(name='name_value'),
+            execution_id='execution_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].execution == gca_execution.Execution(name='name_value')
+        assert args[0].execution_id == 'execution_id_value'
+
+
+def test_create_execution_flattened_error():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_execution(
+            metadata_service.CreateExecutionRequest(),
+            parent='parent_value',
+            execution=gca_execution.Execution(name='name_value'),
+            execution_id='execution_id_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_execution_flattened_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_execution),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_execution(
+            parent='parent_value',
+            execution=gca_execution.Execution(name='name_value'),
+            execution_id='execution_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].execution == gca_execution.Execution(name='name_value')
+        assert args[0].execution_id == 'execution_id_value'
+
+
+@pytest.mark.asyncio
+async def test_create_execution_flattened_error_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.create_execution(
+            metadata_service.CreateExecutionRequest(),
+            parent='parent_value',
+            execution=gca_execution.Execution(name='name_value'),
+            execution_id='execution_id_value',
+        )
+
+
+def test_get_execution(transport: str = 'grpc', request_type=metadata_service.GetExecutionRequest):
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_execution),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = execution.Execution(
+            name='name_value',
+            display_name='display_name_value',
+            state=execution.Execution.State.NEW,
+            etag='etag_value',
+            schema_title='schema_title_value',
+            schema_version='schema_version_value',
+            description='description_value',
+        )
+        response = client.get_execution(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.GetExecutionRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, execution.Execution)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.state == execution.Execution.State.NEW
+    assert response.etag == 'etag_value'
+    assert response.schema_title == 'schema_title_value'
+    assert response.schema_version == 'schema_version_value'
+    assert response.description == 'description_value'
+
+
+def test_get_execution_from_dict():
+    test_get_execution(request_type=dict)
+
+
+def test_get_execution_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_execution),
+            '__call__') as call:
+        client.get_execution()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.GetExecutionRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_execution_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetExecutionRequest):
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_execution),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(execution.Execution(
+            name='name_value',
+            display_name='display_name_value',
+            state=execution.Execution.State.NEW,
+            etag='etag_value',
+            schema_title='schema_title_value',
+            schema_version='schema_version_value',
+            description='description_value',
+        ))
+        response = await client.get_execution(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.GetExecutionRequest()
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, execution.Execution) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == execution.Execution.State.NEW + assert response.etag == 'etag_value' + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +@pytest.mark.asyncio +async def test_get_execution_async_from_dict(): + await test_get_execution_async(request_type=dict) + + +def test_get_execution_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetExecutionRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_execution), + '__call__') as call: + call.return_value = execution.Execution() + client.get_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_execution_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetExecutionRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_execution), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(execution.Execution()) + await client.get_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_get_execution_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = execution.Execution() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_execution( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_get_execution_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
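+    # (Supplying both is ambiguous, so the client raises ValueError before
+    # any RPC is attempted.)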
+    with pytest.raises(ValueError):
+        client.get_execution(
+            metadata_service.GetExecutionRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_execution_flattened_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_execution),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(execution.Execution())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_execution(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_execution_flattened_error_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_execution(
+            metadata_service.GetExecutionRequest(),
+            name='name_value',
+        )
+
+
+def test_list_executions(transport: str = 'grpc', request_type=metadata_service.ListExecutionsRequest):
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_executions),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = metadata_service.ListExecutionsResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_executions(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.ListExecutionsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListExecutionsPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_executions_from_dict():
+    test_list_executions(request_type=dict)
+
+
+def test_list_executions_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_executions),
+            '__call__') as call:
+        client.list_executions()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.ListExecutionsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_executions_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListExecutionsRequest):
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_executions),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListExecutionsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_executions(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.ListExecutionsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListExecutionsAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_executions_async_from_dict():
+    await test_list_executions_async(request_type=dict)
+
+
+def test_list_executions_field_headers():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = metadata_service.ListExecutionsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_executions),
+            '__call__') as call:
+        call.return_value = metadata_service.ListExecutionsResponse()
+        client.list_executions(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_executions_field_headers_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = metadata_service.ListExecutionsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_executions),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListExecutionsResponse())
+        await client.list_executions(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_list_executions_flattened():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_executions),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = metadata_service.ListExecutionsResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_executions(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+def test_list_executions_flattened_error():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_executions(
+            metadata_service.ListExecutionsRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_executions_flattened_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_executions),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListExecutionsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_executions(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_executions_flattened_error_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_executions(
+            metadata_service.ListExecutionsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_executions_pager():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_executions),
+            '__call__') as call:
+        # Set the response to a series of pages.
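+        # mock consumes one side_effect entry per invocation; the trailing
+        # RuntimeError fails fast if the pager requests more pages than
+        # were designated.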
+ call.side_effect = ( + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + execution.Execution(), + ], + next_page_token='abc', + ), + metadata_service.ListExecutionsResponse( + executions=[], + next_page_token='def', + ), + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + ], + next_page_token='ghi', + ), + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_executions(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, execution.Execution) + for i in results) + +def test_list_executions_pages(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_executions), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + execution.Execution(), + ], + next_page_token='abc', + ), + metadata_service.ListExecutionsResponse( + executions=[], + next_page_token='def', + ), + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + ], + next_page_token='ghi', + ), + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + ], + ), + RuntimeError, + ) + pages = list(client.list_executions(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_executions_async_pager(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_executions), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + execution.Execution(), + ], + next_page_token='abc', + ), + metadata_service.ListExecutionsResponse( + executions=[], + next_page_token='def', + ), + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + ], + next_page_token='ghi', + ), + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_executions(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, execution.Execution) + for i in responses) + +@pytest.mark.asyncio +async def test_list_executions_async_pages(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_executions), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + execution.Execution(), + ], + next_page_token='abc', + ), + metadata_service.ListExecutionsResponse( + executions=[], + next_page_token='def', + ), + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + ], + next_page_token='ghi', + ), + metadata_service.ListExecutionsResponse( + executions=[ + execution.Execution(), + execution.Execution(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_executions(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +def test_update_execution(transport: str = 'grpc', request_type=metadata_service.UpdateExecutionRequest): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_execution.Execution( + name='name_value', + display_name='display_name_value', + state=gca_execution.Execution.State.NEW, + etag='etag_value', + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + ) + response = client.update_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.UpdateExecutionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_execution.Execution) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == gca_execution.Execution.State.NEW + assert response.etag == 'etag_value' + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +def test_update_execution_from_dict(): + test_update_execution(request_type=dict) + + +def test_update_execution_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_execution), + '__call__') as call: + client.update_execution() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.UpdateExecutionRequest() + + +@pytest.mark.asyncio +async def test_update_execution_async(transport: str = 'grpc_asyncio', request_type=metadata_service.UpdateExecutionRequest): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_execution), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution( + name='name_value', + display_name='display_name_value', + state=gca_execution.Execution.State.NEW, + etag='etag_value', + schema_title='schema_title_value', + schema_version='schema_version_value', + description='description_value', + )) + response = await client.update_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == metadata_service.UpdateExecutionRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_execution.Execution) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == gca_execution.Execution.State.NEW + assert response.etag == 'etag_value' + assert response.schema_title == 'schema_title_value' + assert response.schema_version == 'schema_version_value' + assert response.description == 'description_value' + + +@pytest.mark.asyncio +async def test_update_execution_async_from_dict(): + await test_update_execution_async(request_type=dict) + + +def test_update_execution_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.UpdateExecutionRequest() + + request.execution.name = 'execution.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_execution), + '__call__') as call: + call.return_value = gca_execution.Execution() + client.update_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'execution.name=execution.name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_execution_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.UpdateExecutionRequest() + + request.execution.name = 'execution.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_execution), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution()) + await client.update_execution(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'execution.name=execution.name/value',
+    ) in kw['metadata']
+
+
+def test_update_execution_flattened():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_execution),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gca_execution.Execution()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.update_execution(
+            execution=gca_execution.Execution(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].execution == gca_execution.Execution(name='name_value')
+        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+def test_update_execution_flattened_error():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.update_execution(
+            metadata_service.UpdateExecutionRequest(),
+            execution=gca_execution.Execution(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+
+@pytest.mark.asyncio
+async def test_update_execution_flattened_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_execution),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.update_execution(
+            execution=gca_execution.Execution(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].execution == gca_execution.Execution(name='name_value')
+        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+@pytest.mark.asyncio
+async def test_update_execution_flattened_error_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
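+    # The conflict is detected client-side and raised before any RPC is
+    # attempted, so no transport mocking is needed here.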
+    with pytest.raises(ValueError):
+        await client.update_execution(
+            metadata_service.UpdateExecutionRequest(),
+            execution=gca_execution.Execution(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+
+def test_add_execution_events(transport: str = 'grpc', request_type=metadata_service.AddExecutionEventsRequest):
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.add_execution_events),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = metadata_service.AddExecutionEventsResponse()
+        response = client.add_execution_events(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.AddExecutionEventsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, metadata_service.AddExecutionEventsResponse)
+
+
+def test_add_execution_events_from_dict():
+    test_add_execution_events(request_type=dict)
+
+
+def test_add_execution_events_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.add_execution_events),
+            '__call__') as call:
+        client.add_execution_events()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.AddExecutionEventsRequest()
+
+
+@pytest.mark.asyncio
+async def test_add_execution_events_async(transport: str = 'grpc_asyncio', request_type=metadata_service.AddExecutionEventsRequest):
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.add_execution_events),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddExecutionEventsResponse())
+        response = await client.add_execution_events(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.AddExecutionEventsRequest()
+
+    # Establish that the response is the type that we expect.
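+    # Awaiting the call unwraps FakeUnaryUnaryCall back to the plain
+    # response message, so the same assertion as the sync test applies.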
+ assert isinstance(response, metadata_service.AddExecutionEventsResponse) + + +@pytest.mark.asyncio +async def test_add_execution_events_async_from_dict(): + await test_add_execution_events_async(request_type=dict) + + +def test_add_execution_events_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.AddExecutionEventsRequest() + + request.execution = 'execution/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_execution_events), + '__call__') as call: + call.return_value = metadata_service.AddExecutionEventsResponse() + client.add_execution_events(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'execution=execution/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_add_execution_events_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.AddExecutionEventsRequest() + + request.execution = 'execution/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_execution_events), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddExecutionEventsResponse()) + await client.add_execution_events(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'execution=execution/value', + ) in kw['metadata'] + + +def test_add_execution_events_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_execution_events), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_service.AddExecutionEventsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.add_execution_events( + execution='execution_value', + events=[event.Event(artifact='artifact_value')], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].execution == 'execution_value' + assert args[0].events == [event.Event(artifact='artifact_value')] + + +def test_add_execution_events_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.add_execution_events(
+            metadata_service.AddExecutionEventsRequest(),
+            execution='execution_value',
+            events=[event.Event(artifact='artifact_value')],
+        )
+
+
+@pytest.mark.asyncio
+async def test_add_execution_events_flattened_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.add_execution_events),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddExecutionEventsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.add_execution_events(
+            execution='execution_value',
+            events=[event.Event(artifact='artifact_value')],
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].execution == 'execution_value'
+        assert args[0].events == [event.Event(artifact='artifact_value')]
+
+
+@pytest.mark.asyncio
+async def test_add_execution_events_flattened_error_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.add_execution_events(
+            metadata_service.AddExecutionEventsRequest(),
+            execution='execution_value',
+            events=[event.Event(artifact='artifact_value')],
+        )
+
+
+def test_query_execution_inputs_and_outputs(transport: str = 'grpc', request_type=metadata_service.QueryExecutionInputsAndOutputsRequest):
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.query_execution_inputs_and_outputs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = lineage_subgraph.LineageSubgraph()
+        response = client.query_execution_inputs_and_outputs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, lineage_subgraph.LineageSubgraph)
+
+
+def test_query_execution_inputs_and_outputs_from_dict():
+    test_query_execution_inputs_and_outputs(request_type=dict)
+
+
+def test_query_execution_inputs_and_outputs_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
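+    # Patching __call__ on the transport method's type intercepts the
+    # gRPC stub invocation while leaving client-side plumbing intact.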
+    with mock.patch.object(
+            type(client.transport.query_execution_inputs_and_outputs),
+            '__call__') as call:
+        client.query_execution_inputs_and_outputs()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest()
+
+
+@pytest.mark.asyncio
+async def test_query_execution_inputs_and_outputs_async(transport: str = 'grpc_asyncio', request_type=metadata_service.QueryExecutionInputsAndOutputsRequest):
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.query_execution_inputs_and_outputs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph())
+        response = await client.query_execution_inputs_and_outputs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, lineage_subgraph.LineageSubgraph)
+
+
+@pytest.mark.asyncio
+async def test_query_execution_inputs_and_outputs_async_from_dict():
+    await test_query_execution_inputs_and_outputs_async(request_type=dict)
+
+
+def test_query_execution_inputs_and_outputs_field_headers():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = metadata_service.QueryExecutionInputsAndOutputsRequest()
+
+    request.execution = 'execution/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.query_execution_inputs_and_outputs),
+            '__call__') as call:
+        call.return_value = lineage_subgraph.LineageSubgraph()
+        client.query_execution_inputs_and_outputs(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'execution=execution/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_query_execution_inputs_and_outputs_field_headers_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = metadata_service.QueryExecutionInputsAndOutputsRequest()
+
+    request.execution = 'execution/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.query_execution_inputs_and_outputs),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph())
+        await client.query_execution_inputs_and_outputs(request)
+
+        # Establish that the underlying gRPC stub method was called.
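+        # The async variants assert only that at least one call was
+        # recorded, a weaker check than the exact count in the sync tests.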
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'execution=execution/value',
+    ) in kw['metadata']
+
+
+def test_query_execution_inputs_and_outputs_flattened():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.query_execution_inputs_and_outputs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = lineage_subgraph.LineageSubgraph()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.query_execution_inputs_and_outputs(
+            execution='execution_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].execution == 'execution_value'
+
+
+def test_query_execution_inputs_and_outputs_flattened_error():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.query_execution_inputs_and_outputs(
+            metadata_service.QueryExecutionInputsAndOutputsRequest(),
+            execution='execution_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_query_execution_inputs_and_outputs_flattened_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.query_execution_inputs_and_outputs),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.query_execution_inputs_and_outputs(
+            execution='execution_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].execution == 'execution_value'
+
+
+@pytest.mark.asyncio
+async def test_query_execution_inputs_and_outputs_flattened_error_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.query_execution_inputs_and_outputs(
+            metadata_service.QueryExecutionInputsAndOutputsRequest(),
+            execution='execution_value',
+        )
+
+
+def test_create_metadata_schema(transport: str = 'grpc', request_type=metadata_service.CreateMetadataSchemaRequest):
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
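+    # gca_metadata_schema is the generator's alias for the MetadataSchema
+    # proto module; mutation responses use it to avoid clashing with the
+    # plain metadata_schema import used by the read tests below.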
+    with mock.patch.object(
+            type(client.transport.create_metadata_schema),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gca_metadata_schema.MetadataSchema(
+            name='name_value',
+            schema_version='schema_version_value',
+            schema='schema_value',
+            schema_type=gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE,
+            description='description_value',
+        )
+        response = client.create_metadata_schema(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.CreateMetadataSchemaRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, gca_metadata_schema.MetadataSchema)
+    assert response.name == 'name_value'
+    assert response.schema_version == 'schema_version_value'
+    assert response.schema == 'schema_value'
+    assert response.schema_type == gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE
+    assert response.description == 'description_value'
+
+
+def test_create_metadata_schema_from_dict():
+    test_create_metadata_schema(request_type=dict)
+
+
+def test_create_metadata_schema_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_metadata_schema),
+            '__call__') as call:
+        client.create_metadata_schema()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.CreateMetadataSchemaRequest()
+
+
+@pytest.mark.asyncio
+async def test_create_metadata_schema_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateMetadataSchemaRequest):
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_metadata_schema),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_metadata_schema.MetadataSchema(
+            name='name_value',
+            schema_version='schema_version_value',
+            schema='schema_value',
+            schema_type=gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE,
+            description='description_value',
+        ))
+        response = await client.create_metadata_schema(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.CreateMetadataSchemaRequest()
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_metadata_schema.MetadataSchema) + assert response.name == 'name_value' + assert response.schema_version == 'schema_version_value' + assert response.schema == 'schema_value' + assert response.schema_type == gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE + assert response.description == 'description_value' + + +@pytest.mark.asyncio +async def test_create_metadata_schema_async_from_dict(): + await test_create_metadata_schema_async(request_type=dict) + + +def test_create_metadata_schema_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateMetadataSchemaRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_schema), + '__call__') as call: + call.return_value = gca_metadata_schema.MetadataSchema() + client.create_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_metadata_schema_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.CreateMetadataSchemaRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_schema), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_metadata_schema.MetadataSchema()) + await client.create_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_create_metadata_schema_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_metadata_schema), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_metadata_schema.MetadataSchema() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_metadata_schema( + parent='parent_value', + metadata_schema=gca_metadata_schema.MetadataSchema(name='name_value'), + metadata_schema_id='metadata_schema_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
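+        # Flattened keyword arguments are merged into a single request
+        # message, so the assertions inspect fields on the captured request.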
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].metadata_schema == gca_metadata_schema.MetadataSchema(name='name_value')
+        assert args[0].metadata_schema_id == 'metadata_schema_id_value'
+
+
+def test_create_metadata_schema_flattened_error():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_metadata_schema(
+            metadata_service.CreateMetadataSchemaRequest(),
+            parent='parent_value',
+            metadata_schema=gca_metadata_schema.MetadataSchema(name='name_value'),
+            metadata_schema_id='metadata_schema_id_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_metadata_schema_flattened_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_metadata_schema),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_metadata_schema.MetadataSchema())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_metadata_schema(
+            parent='parent_value',
+            metadata_schema=gca_metadata_schema.MetadataSchema(name='name_value'),
+            metadata_schema_id='metadata_schema_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].metadata_schema == gca_metadata_schema.MetadataSchema(name='name_value')
+        assert args[0].metadata_schema_id == 'metadata_schema_id_value'
+
+
+@pytest.mark.asyncio
+async def test_create_metadata_schema_flattened_error_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.create_metadata_schema(
+            metadata_service.CreateMetadataSchemaRequest(),
+            parent='parent_value',
+            metadata_schema=gca_metadata_schema.MetadataSchema(name='name_value'),
+            metadata_schema_id='metadata_schema_id_value',
+        )
+
+
+def test_get_metadata_schema(transport: str = 'grpc', request_type=metadata_service.GetMetadataSchemaRequest):
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_metadata_schema),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
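+        # Read responses use the plain metadata_schema module rather than
+        # the gca_-prefixed alias used by the create tests above.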
+        call.return_value = metadata_schema.MetadataSchema(
+            name='name_value',
+            schema_version='schema_version_value',
+            schema='schema_value',
+            schema_type=metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE,
+            description='description_value',
+        )
+        response = client.get_metadata_schema(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.GetMetadataSchemaRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, metadata_schema.MetadataSchema)
+    assert response.name == 'name_value'
+    assert response.schema_version == 'schema_version_value'
+    assert response.schema == 'schema_value'
+    assert response.schema_type == metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE
+    assert response.description == 'description_value'
+
+
+def test_get_metadata_schema_from_dict():
+    test_get_metadata_schema(request_type=dict)
+
+
+def test_get_metadata_schema_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_metadata_schema),
+            '__call__') as call:
+        client.get_metadata_schema()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.GetMetadataSchemaRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_metadata_schema_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetMetadataSchemaRequest):
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_metadata_schema),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_schema.MetadataSchema(
+            name='name_value',
+            schema_version='schema_version_value',
+            schema='schema_value',
+            schema_type=metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE,
+            description='description_value',
+        ))
+        response = await client.get_metadata_schema(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.GetMetadataSchemaRequest()
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, metadata_schema.MetadataSchema) + assert response.name == 'name_value' + assert response.schema_version == 'schema_version_value' + assert response.schema == 'schema_value' + assert response.schema_type == metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE + assert response.description == 'description_value' + + +@pytest.mark.asyncio +async def test_get_metadata_schema_async_from_dict(): + await test_get_metadata_schema_async(request_type=dict) + + +def test_get_metadata_schema_field_headers(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetMetadataSchemaRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), + '__call__') as call: + call.return_value = metadata_schema.MetadataSchema() + client.get_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_metadata_schema_field_headers_async(): + client = MetadataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = metadata_service.GetMetadataSchemaRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_schema.MetadataSchema()) + await client.get_metadata_schema(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_get_metadata_schema_flattened(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_metadata_schema), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = metadata_schema.MetadataSchema() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_metadata_schema( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_get_metadata_schema_flattened_error(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.get_metadata_schema(
+            metadata_service.GetMetadataSchemaRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_metadata_schema_flattened_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_metadata_schema),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_schema.MetadataSchema())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_metadata_schema(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_metadata_schema_flattened_error_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_metadata_schema(
+            metadata_service.GetMetadataSchemaRequest(),
+            name='name_value',
+        )
+
+
+def test_list_metadata_schemas(transport: str = 'grpc', request_type=metadata_service.ListMetadataSchemasRequest):
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_metadata_schemas),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = metadata_service.ListMetadataSchemasResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_metadata_schemas(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.ListMetadataSchemasRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListMetadataSchemasPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_metadata_schemas_from_dict():
+    test_list_metadata_schemas(request_type=dict)
+
+
+def test_list_metadata_schemas_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_metadata_schemas),
+            '__call__') as call:
+        client.list_metadata_schemas()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.ListMetadataSchemasRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_metadata_schemas_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListMetadataSchemasRequest):
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_metadata_schemas),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataSchemasResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_metadata_schemas(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.ListMetadataSchemasRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListMetadataSchemasAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_metadata_schemas_async_from_dict():
+    await test_list_metadata_schemas_async(request_type=dict)
+
+
+def test_list_metadata_schemas_field_headers():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = metadata_service.ListMetadataSchemasRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_metadata_schemas),
+            '__call__') as call:
+        call.return_value = metadata_service.ListMetadataSchemasResponse()
+        client.list_metadata_schemas(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_metadata_schemas_field_headers_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = metadata_service.ListMetadataSchemasRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_metadata_schemas),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataSchemasResponse())
+        await client.list_metadata_schemas(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_list_metadata_schemas_flattened():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_metadata_schemas),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = metadata_service.ListMetadataSchemasResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_metadata_schemas(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+def test_list_metadata_schemas_flattened_error():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_metadata_schemas(
+            metadata_service.ListMetadataSchemasRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_metadata_schemas_flattened_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_metadata_schemas),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataSchemasResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_metadata_schemas(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_metadata_schemas_flattened_error_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_metadata_schemas(
+            metadata_service.ListMetadataSchemasRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_metadata_schemas_pager():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_metadata_schemas),
+            '__call__') as call:
+        # Set the response to a series of pages.
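+        # Successive stub calls consume successive side_effect entries; the
+        # trailing RuntimeError fails fast if the pager over-fetches.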
+        call.side_effect = (
+            metadata_service.ListMetadataSchemasResponse(
+                metadata_schemas=[
+                    metadata_schema.MetadataSchema(),
+                    metadata_schema.MetadataSchema(),
+                    metadata_schema.MetadataSchema(),
+                ],
+                next_page_token='abc',
+            ),
+            metadata_service.ListMetadataSchemasResponse(
+                metadata_schemas=[],
+                next_page_token='def',
+            ),
+            metadata_service.ListMetadataSchemasResponse(
+                metadata_schemas=[
+                    metadata_schema.MetadataSchema(),
+                ],
+                next_page_token='ghi',
+            ),
+            metadata_service.ListMetadataSchemasResponse(
+                metadata_schemas=[
+                    metadata_schema.MetadataSchema(),
+                    metadata_schema.MetadataSchema(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_metadata_schemas(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, metadata_schema.MetadataSchema)
+                   for i in results)
+
+def test_list_metadata_schemas_pages():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_metadata_schemas),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            metadata_service.ListMetadataSchemasResponse(
+                metadata_schemas=[
+                    metadata_schema.MetadataSchema(),
+                    metadata_schema.MetadataSchema(),
+                    metadata_schema.MetadataSchema(),
+                ],
+                next_page_token='abc',
+            ),
+            metadata_service.ListMetadataSchemasResponse(
+                metadata_schemas=[],
+                next_page_token='def',
+            ),
+            metadata_service.ListMetadataSchemasResponse(
+                metadata_schemas=[
+                    metadata_schema.MetadataSchema(),
+                ],
+                next_page_token='ghi',
+            ),
+            metadata_service.ListMetadataSchemasResponse(
+                metadata_schemas=[
+                    metadata_schema.MetadataSchema(),
+                    metadata_schema.MetadataSchema(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_metadata_schemas(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_metadata_schemas_async_pager():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_metadata_schemas),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
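+        # Same four-page fixture as the sync pager; new_callable=mock.AsyncMock
+        # makes each mocked page awaitable for the async pager.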
+        call.side_effect = (
+            metadata_service.ListMetadataSchemasResponse(
+                metadata_schemas=[
+                    metadata_schema.MetadataSchema(),
+                    metadata_schema.MetadataSchema(),
+                    metadata_schema.MetadataSchema(),
+                ],
+                next_page_token='abc',
+            ),
+            metadata_service.ListMetadataSchemasResponse(
+                metadata_schemas=[],
+                next_page_token='def',
+            ),
+            metadata_service.ListMetadataSchemasResponse(
+                metadata_schemas=[
+                    metadata_schema.MetadataSchema(),
+                ],
+                next_page_token='ghi',
+            ),
+            metadata_service.ListMetadataSchemasResponse(
+                metadata_schemas=[
+                    metadata_schema.MetadataSchema(),
+                    metadata_schema.MetadataSchema(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_metadata_schemas(request={})
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, metadata_schema.MetadataSchema)
+                   for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_metadata_schemas_async_pages():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_metadata_schemas),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            metadata_service.ListMetadataSchemasResponse(
+                metadata_schemas=[
+                    metadata_schema.MetadataSchema(),
+                    metadata_schema.MetadataSchema(),
+                    metadata_schema.MetadataSchema(),
+                ],
+                next_page_token='abc',
+            ),
+            metadata_service.ListMetadataSchemasResponse(
+                metadata_schemas=[],
+                next_page_token='def',
+            ),
+            metadata_service.ListMetadataSchemasResponse(
+                metadata_schemas=[
+                    metadata_schema.MetadataSchema(),
+                ],
+                next_page_token='ghi',
+            ),
+            metadata_service.ListMetadataSchemasResponse(
+                metadata_schemas=[
+                    metadata_schema.MetadataSchema(),
+                    metadata_schema.MetadataSchema(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_metadata_schemas(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+def test_query_artifact_lineage_subgraph(transport: str = 'grpc', request_type=metadata_service.QueryArtifactLineageSubgraphRequest):
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.query_artifact_lineage_subgraph),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = lineage_subgraph.LineageSubgraph()
+        response = client.query_artifact_lineage_subgraph(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, lineage_subgraph.LineageSubgraph)
+
+
+def test_query_artifact_lineage_subgraph_from_dict():
+    test_query_artifact_lineage_subgraph(request_type=dict)
+
+
+def test_query_artifact_lineage_subgraph_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.query_artifact_lineage_subgraph),
+            '__call__') as call:
+        client.query_artifact_lineage_subgraph()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest()
+
+
+@pytest.mark.asyncio
+async def test_query_artifact_lineage_subgraph_async(transport: str = 'grpc_asyncio', request_type=metadata_service.QueryArtifactLineageSubgraphRequest):
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.query_artifact_lineage_subgraph),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph())
+        response = await client.query_artifact_lineage_subgraph(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, lineage_subgraph.LineageSubgraph)
+
+
+@pytest.mark.asyncio
+async def test_query_artifact_lineage_subgraph_async_from_dict():
+    await test_query_artifact_lineage_subgraph_async(request_type=dict)
+
+
+def test_query_artifact_lineage_subgraph_field_headers():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = metadata_service.QueryArtifactLineageSubgraphRequest()
+
+    request.artifact = 'artifact/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.query_artifact_lineage_subgraph),
+            '__call__') as call:
+        call.return_value = lineage_subgraph.LineageSubgraph()
+        client.query_artifact_lineage_subgraph(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'artifact=artifact/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_query_artifact_lineage_subgraph_field_headers_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
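+    # The artifact resource name is mirrored into x-goog-request-params so
+    # the backend can route the request without parsing the body.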
+    request = metadata_service.QueryArtifactLineageSubgraphRequest()
+
+    request.artifact = 'artifact/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.query_artifact_lineage_subgraph),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph())
+        await client.query_artifact_lineage_subgraph(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'artifact=artifact/value',
+    ) in kw['metadata']
+
+
+def test_query_artifact_lineage_subgraph_flattened():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.query_artifact_lineage_subgraph),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = lineage_subgraph.LineageSubgraph()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.query_artifact_lineage_subgraph(
+            artifact='artifact_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].artifact == 'artifact_value'
+
+
+def test_query_artifact_lineage_subgraph_flattened_error():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.query_artifact_lineage_subgraph(
+            metadata_service.QueryArtifactLineageSubgraphRequest(),
+            artifact='artifact_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_query_artifact_lineage_subgraph_flattened_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.query_artifact_lineage_subgraph),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.query_artifact_lineage_subgraph(
+            artifact='artifact_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].artifact == 'artifact_value'
+
+
+@pytest.mark.asyncio
+async def test_query_artifact_lineage_subgraph_flattened_error_async():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.query_artifact_lineage_subgraph( + metadata_service.QueryArtifactLineageSubgraphRequest(), + artifact='artifact_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.MetadataServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.MetadataServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MetadataServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.MetadataServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MetadataServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.MetadataServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = MetadataServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.MetadataServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.MetadataServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.MetadataServiceGrpcTransport, + transports.MetadataServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.MetadataServiceGrpcTransport, + ) + +def test_metadata_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.MetadataServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_metadata_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1beta1.services.metadata_service.transports.MetadataServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.MetadataServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
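+    # (The base transport only defines the service interface; the concrete
+    # gRPC and gRPC-asyncio transports are expected to override every method
+    # listed below.)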
+    methods = (
+        'create_metadata_store',
+        'get_metadata_store',
+        'list_metadata_stores',
+        'delete_metadata_store',
+        'create_artifact',
+        'get_artifact',
+        'list_artifacts',
+        'update_artifact',
+        'create_context',
+        'get_context',
+        'list_contexts',
+        'update_context',
+        'delete_context',
+        'add_context_artifacts_and_executions',
+        'add_context_children',
+        'query_context_lineage_subgraph',
+        'create_execution',
+        'get_execution',
+        'list_executions',
+        'update_execution',
+        'add_execution_events',
+        'query_execution_inputs_and_outputs',
+        'create_metadata_schema',
+        'get_metadata_schema',
+        'list_metadata_schemas',
+        'query_artifact_lineage_subgraph',
+    )
+    for method in methods:
+        with pytest.raises(NotImplementedError):
+            getattr(transport, method)(request=object())
+
+    # Additionally, the LRO client (a property) should
+    # also raise NotImplementedError
+    with pytest.raises(NotImplementedError):
+        transport.operations_client
+
+
+@requires_google_auth_gte_1_25_0
+def test_metadata_service_base_transport_with_credentials_file():
+    # Instantiate the base transport with a credentials file
+    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.metadata_service.transports.MetadataServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.MetadataServiceTransport(
+            credentials_file="credentials.json",
+            quota_project_id="octopus",
+        )
+        load_creds.assert_called_once_with(
+            "credentials.json",
+            scopes=None,
+            default_scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id="octopus",
+        )
+
+
+@requires_google_auth_lt_1_25_0
+def test_metadata_service_base_transport_with_credentials_file_old_google_auth():
+    # Instantiate the base transport with a credentials file
+    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.metadata_service.transports.MetadataServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.MetadataServiceTransport(
+            credentials_file="credentials.json",
+            quota_project_id="octopus",
+        )
+        load_creds.assert_called_once_with(
+            "credentials.json",
+            scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id="octopus",
+        )
+
+
+def test_metadata_service_base_transport_with_adc():
+    # Test the default credentials are used if credentials and credentials_file are None.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.metadata_service.transports.MetadataServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.MetadataServiceTransport()
+        adc.assert_called_once()
+
+
+@requires_google_auth_gte_1_25_0
+def test_metadata_service_auth_adc():
+    # If no credentials are provided, we should use ADC credentials.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        MetadataServiceClient()
+        adc.assert_called_once_with(
+            scopes=None,
+            default_scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id=None,
+        )
+
+
+@requires_google_auth_lt_1_25_0
+def test_metadata_service_auth_adc_old_google_auth():
+    # If no credentials are provided, we should use ADC credentials.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        MetadataServiceClient()
+        adc.assert_called_once_with(
+            scopes=('https://www.googleapis.com/auth/cloud-platform',),
+            quota_project_id=None,
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.MetadataServiceGrpcTransport,
+        transports.MetadataServiceGrpcAsyncIOTransport,
+    ],
+)
+@requires_google_auth_gte_1_25_0
+def test_metadata_service_transport_auth_adc(transport_class):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport_class(quota_project_id="octopus", scopes=["1", "2"])
+        adc.assert_called_once_with(
+            scopes=["1", "2"],
+            default_scopes=('https://www.googleapis.com/auth/cloud-platform',),
+            quota_project_id="octopus",
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.MetadataServiceGrpcTransport,
+        transports.MetadataServiceGrpcAsyncIOTransport,
+    ],
+)
+@requires_google_auth_lt_1_25_0
+def test_metadata_service_transport_auth_adc_old_google_auth(transport_class):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
+    with mock.patch.object(google.auth, "default", autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport_class(quota_project_id="octopus")
+        adc.assert_called_once_with(
+            scopes=('https://www.googleapis.com/auth/cloud-platform',),
+            quota_project_id="octopus",
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class,grpc_helpers",
+    [
+        (transports.MetadataServiceGrpcTransport, grpc_helpers),
+        (transports.MetadataServiceGrpcAsyncIOTransport, grpc_helpers_async)
+    ],
+)
+def test_metadata_service_transport_create_channel(transport_class, grpc_helpers):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
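+    # (create_channel is patched alongside ADC so the test can assert on the
+    # exact keyword arguments the transport forwards when building its channel.)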
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.MetadataServiceGrpcTransport, transports.MetadataServiceGrpcAsyncIOTransport]) +def test_metadata_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +def test_metadata_service_host_no_port(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + ) + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +def test_metadata_service_host_with_port(): + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + ) + assert client.transport._host == 'aiplatform.googleapis.com:8000' + +def test_metadata_service_grpc_transport_channel(): + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.MetadataServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_metadata_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+    transport = transports.MetadataServiceGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.MetadataServiceGrpcTransport, transports.MetadataServiceGrpcAsyncIOTransport])
+def test_metadata_service_transport_channel_mtls_with_client_cert_source(
+    transport_class
+):
+    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
+        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+            mock_ssl_cred = mock.Mock()
+            grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+
+            cred = ga_credentials.AnonymousCredentials()
+            with pytest.warns(DeprecationWarning):
+                with mock.patch.object(google.auth, 'default') as adc:
+                    adc.return_value = (cred, None)
+                    transport = transport_class(
+                        host="squid.clam.whelk",
+                        api_mtls_endpoint="mtls.squid.clam.whelk",
+                        client_cert_source=client_cert_source_callback,
+                    )
+                    adc.assert_called_once()
+
+            grpc_ssl_channel_cred.assert_called_once_with(
+                certificate_chain=b"cert bytes", private_key=b"key bytes"
+            )
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+            assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.MetadataServiceGrpcTransport, transports.MetadataServiceGrpcAsyncIOTransport])
+def test_metadata_service_transport_channel_mtls_with_adc(
+    transport_class
+):
+    mock_ssl_cred = mock.Mock()
+    with mock.patch.multiple(
+        "google.auth.transport.grpc.SslCredentials",
+        __init__=mock.Mock(return_value=None),
+        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+    ):
+        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+            mock_cred = mock.Mock()
+
+            with pytest.warns(DeprecationWarning):
+                transport = transport_class(
+                    host="squid.clam.whelk",
+                    credentials=mock_cred,
+                    api_mtls_endpoint="mtls.squid.clam.whelk",
+                    client_cert_source=None,
+                )
+
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=mock_cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_metadata_service_grpc_lro_client():
+    client = MetadataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(
+        transport.operations_client,
+        operations_v1.OperationsClient,
+    )
+
+    # Ensure that subsequent calls to the property send the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_metadata_service_grpc_lro_async_client():
+    client = MetadataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc_asyncio',
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(
+        transport.operations_client,
+        operations_v1.OperationsAsyncClient,
+    )
+
+    # Ensure that subsequent calls to the property send the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_artifact_path():
+    project = "squid"
+    location = "clam"
+    metadata_store = "whelk"
+    artifact = "octopus"
+    expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(project=project, location=location, metadata_store=metadata_store, artifact=artifact, )
+    actual = MetadataServiceClient.artifact_path(project, location, metadata_store, artifact)
+    assert expected == actual
+
+
+def test_parse_artifact_path():
+    expected = {
+        "project": "oyster",
+        "location": "nudibranch",
+        "metadata_store": "cuttlefish",
+        "artifact": "mussel",
+    }
+    path = MetadataServiceClient.artifact_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = MetadataServiceClient.parse_artifact_path(path)
+    assert expected == actual
+
+
+def test_context_path():
+    project = "winkle"
+    location = "nautilus"
+    metadata_store = "scallop"
+    context = "abalone"
+    expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(project=project, location=location, metadata_store=metadata_store, context=context, )
+    actual = MetadataServiceClient.context_path(project, location, metadata_store, context)
+    assert expected == actual
+
+
+def test_parse_context_path():
+    expected = {
+        "project": "squid",
+        "location": "clam",
+        "metadata_store": "whelk",
+        "context": "octopus",
+    }
+    path = MetadataServiceClient.context_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = MetadataServiceClient.parse_context_path(path)
+    assert expected == actual
+
+
+def test_execution_path():
+    project = "oyster"
+    location = "nudibranch"
+    metadata_store = "cuttlefish"
+    execution = "mussel"
+    expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(project=project, location=location, metadata_store=metadata_store, execution=execution, )
+    actual = MetadataServiceClient.execution_path(project, location, metadata_store, execution)
+    assert expected == actual
+
+
+def test_parse_execution_path():
+    expected = {
+        "project": "winkle",
+        "location": "nautilus",
+        "metadata_store": "scallop",
+        "execution": "abalone",
+    }
+    path = MetadataServiceClient.execution_path(**expected)
+
+    # Check that the path construction is reversible.
+ actual = MetadataServiceClient.parse_execution_path(path) + assert expected == actual + +def test_metadata_schema_path(): + project = "squid" + location = "clam" + metadata_store = "whelk" + metadata_schema = "octopus" + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}".format(project=project, location=location, metadata_store=metadata_store, metadata_schema=metadata_schema, ) + actual = MetadataServiceClient.metadata_schema_path(project, location, metadata_store, metadata_schema) + assert expected == actual + + +def test_parse_metadata_schema_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "metadata_store": "cuttlefish", + "metadata_schema": "mussel", + } + path = MetadataServiceClient.metadata_schema_path(**expected) + + # Check that the path construction is reversible. + actual = MetadataServiceClient.parse_metadata_schema_path(path) + assert expected == actual + +def test_metadata_store_path(): + project = "winkle" + location = "nautilus" + metadata_store = "scallop" + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}".format(project=project, location=location, metadata_store=metadata_store, ) + actual = MetadataServiceClient.metadata_store_path(project, location, metadata_store) + assert expected == actual + + +def test_parse_metadata_store_path(): + expected = { + "project": "abalone", + "location": "squid", + "metadata_store": "clam", + } + path = MetadataServiceClient.metadata_store_path(**expected) + + # Check that the path construction is reversible. + actual = MetadataServiceClient.parse_metadata_store_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "whelk" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = MetadataServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "octopus", + } + path = MetadataServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = MetadataServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "oyster" + expected = "folders/{folder}".format(folder=folder, ) + actual = MetadataServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nudibranch", + } + path = MetadataServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = MetadataServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "cuttlefish" + expected = "organizations/{organization}".format(organization=organization, ) + actual = MetadataServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "mussel", + } + path = MetadataServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MetadataServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "winkle" + expected = "projects/{project}".format(project=project, ) + actual = MetadataServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nautilus", + } + path = MetadataServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = MetadataServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "scallop" + location = "abalone" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = MetadataServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "squid", + "location": "clam", + } + path = MetadataServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = MetadataServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.MetadataServiceTransport, '_prep_wrapped_messages') as prep: + client = MetadataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.MetadataServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = MetadataServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py new file mode 100644 index 0000000000..c99db01922 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py @@ -0,0 +1,1753 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.migration_service import MigrationServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.migration_service import MigrationServiceClient +from google.cloud.aiplatform_v1beta1.services.migration_service import pagers +from google.cloud.aiplatform_v1beta1.services.migration_service import transports +from google.cloud.aiplatform_v1beta1.services.migration_service.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.aiplatform_v1beta1.types import migratable_resource +from google.cloud.aiplatform_v1beta1.types import migration_service +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
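+# (For example, "example.googleapis.com" maps to "example.mtls.googleapis.com";
+# test__get_default_mtls_endpoint below exercises this mapping.)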
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert MigrationServiceClient._get_default_mtls_endpoint(None) is None + assert MigrationServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert MigrationServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert MigrationServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert MigrationServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert MigrationServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + MigrationServiceClient, + MigrationServiceAsyncClient, +]) +def test_migration_service_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +@pytest.mark.parametrize("client_class", [ + MigrationServiceClient, + MigrationServiceAsyncClient, +]) +def test_migration_service_client_service_account_always_use_jwt(client_class): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.MigrationServiceGrpcTransport, "grpc"), + (transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_migration_service_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + MigrationServiceClient, + MigrationServiceAsyncClient, +]) +def test_migration_service_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +def test_migration_service_client_get_transport_class(): + transport = MigrationServiceClient.get_transport_class() + available_transports = [ + 
transports.MigrationServiceGrpcTransport, + ] + assert transport in available_transports + + transport = MigrationServiceClient.get_transport_class("grpc") + assert transport == transports.MigrationServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"), + (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(MigrationServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceClient)) +@mock.patch.object(MigrationServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceAsyncClient)) +def test_migration_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(MigrationServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(MigrationServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc", "true"), + (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc", "false"), + (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(MigrationServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceClient)) +@mock.patch.object(MigrationServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_migration_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
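+    # ("ADC client cert" means a certificate discovered via
+    # google.auth.transport.mtls.default_client_cert_source(); the block below
+    # patches that discovery to simulate one being present.)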
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"), + (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_migration_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"), + (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_migration_service_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_migration_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1beta1.services.migration_service.transports.MigrationServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = MigrationServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_search_migratable_resources(transport: str = 'grpc', request_type=migration_service.SearchMigratableResourcesRequest): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_migratable_resources), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = migration_service.SearchMigratableResourcesResponse( + next_page_token='next_page_token_value', + ) + response = client.search_migratable_resources(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == migration_service.SearchMigratableResourcesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.SearchMigratableResourcesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_search_migratable_resources_from_dict(): + test_search_migratable_resources(request_type=dict) + + +def test_search_migratable_resources_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.search_migratable_resources), + '__call__') as call: + client.search_migratable_resources() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == migration_service.SearchMigratableResourcesRequest() + + +@pytest.mark.asyncio +async def test_search_migratable_resources_async(transport: str = 'grpc_asyncio', request_type=migration_service.SearchMigratableResourcesRequest): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_migratable_resources),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_service.SearchMigratableResourcesResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.search_migratable_resources(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == migration_service.SearchMigratableResourcesRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.SearchMigratableResourcesAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_search_migratable_resources_async_from_dict():
+    await test_search_migratable_resources_async(request_type=dict)
+
+
+def test_search_migratable_resources_field_headers():
+    client = MigrationServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = migration_service.SearchMigratableResourcesRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_migratable_resources),
+            '__call__') as call:
+        call.return_value = migration_service.SearchMigratableResourcesResponse()
+        client.search_migratable_resources(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_search_migratable_resources_field_headers_async():
+    client = MigrationServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = migration_service.SearchMigratableResourcesRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_migratable_resources),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_service.SearchMigratableResourcesResponse())
+        await client.search_migratable_resources(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_search_migratable_resources_flattened():
+    client = MigrationServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_migratable_resources),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = migration_service.SearchMigratableResourcesResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.search_migratable_resources(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+def test_search_migratable_resources_flattened_error():
+    client = MigrationServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.search_migratable_resources(
+            migration_service.SearchMigratableResourcesRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_search_migratable_resources_flattened_async():
+    client = MigrationServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_migratable_resources),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_service.SearchMigratableResourcesResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.search_migratable_resources(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_search_migratable_resources_flattened_error_async():
+    client = MigrationServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.search_migratable_resources(
+            migration_service.SearchMigratableResourcesRequest(),
+            parent='parent_value',
+        )
+
+
+def test_search_migratable_resources_pager():
+    client = MigrationServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_migratable_resources),
+            '__call__') as call:
+        # Set the response to a series of pages.
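+        # Four pages are returned; the last carries no next_page_token, so
+        # iteration stops there. The trailing RuntimeError fails the test
+        # loudly if the pager ever requests a page past the final response.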
+        call.side_effect = (
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[
+                    migratable_resource.MigratableResource(),
+                    migratable_resource.MigratableResource(),
+                    migratable_resource.MigratableResource(),
+                ],
+                next_page_token='abc',
+            ),
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[],
+                next_page_token='def',
+            ),
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[
+                    migratable_resource.MigratableResource(),
+                ],
+                next_page_token='ghi',
+            ),
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[
+                    migratable_resource.MigratableResource(),
+                    migratable_resource.MigratableResource(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.search_migratable_resources(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, migratable_resource.MigratableResource)
+                   for i in results)
+
+
+def test_search_migratable_resources_pages():
+    client = MigrationServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_migratable_resources),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[
+                    migratable_resource.MigratableResource(),
+                    migratable_resource.MigratableResource(),
+                    migratable_resource.MigratableResource(),
+                ],
+                next_page_token='abc',
+            ),
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[],
+                next_page_token='def',
+            ),
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[
+                    migratable_resource.MigratableResource(),
+                ],
+                next_page_token='ghi',
+            ),
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[
+                    migratable_resource.MigratableResource(),
+                    migratable_resource.MigratableResource(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.search_migratable_resources(request={}).pages)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_search_migratable_resources_async_pager():
+    client = MigrationServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_migratable_resources),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
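+        # Same page fixtures as the synchronous pager test above.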
+        call.side_effect = (
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[
+                    migratable_resource.MigratableResource(),
+                    migratable_resource.MigratableResource(),
+                    migratable_resource.MigratableResource(),
+                ],
+                next_page_token='abc',
+            ),
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[],
+                next_page_token='def',
+            ),
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[
+                    migratable_resource.MigratableResource(),
+                ],
+                next_page_token='ghi',
+            ),
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[
+                    migratable_resource.MigratableResource(),
+                    migratable_resource.MigratableResource(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.search_migratable_resources(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, migratable_resource.MigratableResource)
+                   for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_search_migratable_resources_async_pages():
+    client = MigrationServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.search_migratable_resources),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[
+                    migratable_resource.MigratableResource(),
+                    migratable_resource.MigratableResource(),
+                    migratable_resource.MigratableResource(),
+                ],
+                next_page_token='abc',
+            ),
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[],
+                next_page_token='def',
+            ),
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[
+                    migratable_resource.MigratableResource(),
+                ],
+                next_page_token='ghi',
+            ),
+            migration_service.SearchMigratableResourcesResponse(
+                migratable_resources=[
+                    migratable_resource.MigratableResource(),
+                    migratable_resource.MigratableResource(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.search_migratable_resources(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+
+def test_batch_migrate_resources(transport: str = 'grpc', request_type=migration_service.BatchMigrateResourcesRequest):
+    client = MigrationServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.batch_migrate_resources),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.batch_migrate_resources(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == migration_service.BatchMigrateResourcesRequest()
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future) + + +def test_batch_migrate_resources_from_dict(): + test_batch_migrate_resources(request_type=dict) + + +def test_batch_migrate_resources_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_migrate_resources), + '__call__') as call: + client.batch_migrate_resources() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == migration_service.BatchMigrateResourcesRequest() + + +@pytest.mark.asyncio +async def test_batch_migrate_resources_async(transport: str = 'grpc_asyncio', request_type=migration_service.BatchMigrateResourcesRequest): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_migrate_resources), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.batch_migrate_resources(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == migration_service.BatchMigrateResourcesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_batch_migrate_resources_async_from_dict(): + await test_batch_migrate_resources_async(request_type=dict) + + +def test_batch_migrate_resources_field_headers(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = migration_service.BatchMigrateResourcesRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_migrate_resources), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.batch_migrate_resources(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_batch_migrate_resources_field_headers_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = migration_service.BatchMigrateResourcesRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.batch_migrate_resources), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.batch_migrate_resources(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_batch_migrate_resources_flattened(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_migrate_resources), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.batch_migrate_resources( + parent='parent_value', + migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].migrate_resource_requests == [migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))] + + +def test_batch_migrate_resources_flattened_error(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.batch_migrate_resources( + migration_service.BatchMigrateResourcesRequest(), + parent='parent_value', + migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))], + ) + + +@pytest.mark.asyncio +async def test_batch_migrate_resources_flattened_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.batch_migrate_resources), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.batch_migrate_resources( + parent='parent_value', + migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
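+        # (The flattened parent/migrate_resource_requests arguments are merged
+        # into a single BatchMigrateResourcesRequest before the stub is invoked.)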
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].migrate_resource_requests == [migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))] + + +@pytest.mark.asyncio +async def test_batch_migrate_resources_flattened_error_async(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.batch_migrate_resources( + migration_service.BatchMigrateResourcesRequest(), + parent='parent_value', + migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))], + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.MigrationServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.MigrationServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MigrationServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.MigrationServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MigrationServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.MigrationServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = MigrationServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.MigrationServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.MigrationServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.MigrationServiceGrpcTransport, + transports.MigrationServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. 
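+    # (No transport argument is passed, so get_transport_class falls back to gRPC.)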
+ client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.MigrationServiceGrpcTransport, + ) + +def test_migration_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.MigrationServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_migration_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1beta1.services.migration_service.transports.MigrationServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.MigrationServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'search_migratable_resources', + 'batch_migrate_resources', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +@requires_google_auth_gte_1_25_0 +def test_migration_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.migration_service.transports.MigrationServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.MigrationServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_migration_service_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.migration_service.transports.MigrationServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.MigrationServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + ), + quota_project_id="octopus", + ) + + +def test_migration_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
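+    # (google.auth.default is patched so the test never performs a real ADC
+    # lookup in the environment.)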
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.migration_service.transports.MigrationServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.MigrationServiceTransport() + adc.assert_called_once() + + +@requires_google_auth_gte_1_25_0 +def test_migration_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + MigrationServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_migration_service_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + MigrationServiceClient() + adc.assert_called_once_with( + scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.MigrationServiceGrpcTransport, + transports.MigrationServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_migration_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.MigrationServiceGrpcTransport, + transports.MigrationServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_migration_service_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") + adc.assert_called_once_with(scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.MigrationServiceGrpcTransport, grpc_helpers), + (transports.MigrationServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_migration_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
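+    # (Both google.auth.default and grpc_helpers.create_channel are patched so
+    # the channel target, scopes, and gRPC options can be asserted directly.)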
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.MigrationServiceGrpcTransport, transports.MigrationServiceGrpcAsyncIOTransport]) +def test_migration_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +def test_migration_service_host_no_port(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + ) + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +def test_migration_service_host_with_port(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + ) + assert client.transport._host == 'aiplatform.googleapis.com:8000' + +def test_migration_service_grpc_transport_channel(): + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.MigrationServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_migration_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.MigrationServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.MigrationServiceGrpcTransport, transports.MigrationServiceGrpcAsyncIOTransport]) +def test_migration_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.MigrationServiceGrpcTransport, transports.MigrationServiceGrpcAsyncIOTransport]) +def test_migration_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_migration_service_grpc_lro_client(): + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. 
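+    # (The transport lazily builds an OperationsClient over its own gRPC channel
+    # and caches it, hence the identity check further below.)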
+ assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_migration_service_grpc_lro_async_client(): + client = MigrationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_annotated_dataset_path(): + project = "squid" + dataset = "clam" + annotated_dataset = "whelk" + expected = "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}".format(project=project, dataset=dataset, annotated_dataset=annotated_dataset, ) + actual = MigrationServiceClient.annotated_dataset_path(project, dataset, annotated_dataset) + assert expected == actual + + +def test_parse_annotated_dataset_path(): + expected = { + "project": "octopus", + "dataset": "oyster", + "annotated_dataset": "nudibranch", + } + path = MigrationServiceClient.annotated_dataset_path(**expected) + + # Check that the path construction is reversible. + actual = MigrationServiceClient.parse_annotated_dataset_path(path) + assert expected == actual + +def test_dataset_path(): + project = "cuttlefish" + dataset = "mussel" + expected = "projects/{project}/datasets/{dataset}".format(project=project, dataset=dataset, ) + actual = MigrationServiceClient.dataset_path(project, dataset) + assert expected == actual + + +def test_parse_dataset_path(): + expected = { + "project": "winkle", + "dataset": "nautilus", + } + path = MigrationServiceClient.dataset_path(**expected) + + # Check that the path construction is reversible. + actual = MigrationServiceClient.parse_dataset_path(path) + assert expected == actual + +def test_dataset_path(): + project = "scallop" + location = "abalone" + dataset = "squid" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) + actual = MigrationServiceClient.dataset_path(project, location, dataset) + assert expected == actual + + +def test_parse_dataset_path(): + expected = { + "project": "clam", + "location": "whelk", + "dataset": "octopus", + } + path = MigrationServiceClient.dataset_path(**expected) + + # Check that the path construction is reversible. + actual = MigrationServiceClient.parse_dataset_path(path) + assert expected == actual + +def test_dataset_path(): + project = "oyster" + location = "nudibranch" + dataset = "cuttlefish" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) + actual = MigrationServiceClient.dataset_path(project, location, dataset) + assert expected == actual + + +def test_parse_dataset_path(): + expected = { + "project": "mussel", + "location": "winkle", + "dataset": "nautilus", + } + path = MigrationServiceClient.dataset_path(**expected) + + # Check that the path construction is reversible. 
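+    # (The parse_*_path helpers invert the *_path templates with a regular
+    # expression, so a round trip must preserve every component.)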
+ actual = MigrationServiceClient.parse_dataset_path(path) + assert expected == actual + +def test_model_path(): + project = "scallop" + location = "abalone" + model = "squid" + expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + actual = MigrationServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "clam", + "location": "whelk", + "model": "octopus", + } + path = MigrationServiceClient.model_path(**expected) + + # Check that the path construction is reversible. + actual = MigrationServiceClient.parse_model_path(path) + assert expected == actual + +def test_model_path(): + project = "oyster" + location = "nudibranch" + model = "cuttlefish" + expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + actual = MigrationServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "mussel", + "location": "winkle", + "model": "nautilus", + } + path = MigrationServiceClient.model_path(**expected) + + # Check that the path construction is reversible. + actual = MigrationServiceClient.parse_model_path(path) + assert expected == actual + +def test_version_path(): + project = "scallop" + model = "abalone" + version = "squid" + expected = "projects/{project}/models/{model}/versions/{version}".format(project=project, model=model, version=version, ) + actual = MigrationServiceClient.version_path(project, model, version) + assert expected == actual + + +def test_parse_version_path(): + expected = { + "project": "clam", + "model": "whelk", + "version": "octopus", + } + path = MigrationServiceClient.version_path(**expected) + + # Check that the path construction is reversible. + actual = MigrationServiceClient.parse_version_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "oyster" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = MigrationServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nudibranch", + } + path = MigrationServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = MigrationServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "cuttlefish" + expected = "folders/{folder}".format(folder=folder, ) + actual = MigrationServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "mussel", + } + path = MigrationServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = MigrationServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "winkle" + expected = "organizations/{organization}".format(organization=organization, ) + actual = MigrationServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nautilus", + } + path = MigrationServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MigrationServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "scallop" + expected = "projects/{project}".format(project=project, ) + actual = MigrationServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "abalone", + } + path = MigrationServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = MigrationServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "squid" + location = "clam" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = MigrationServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "whelk", + "location": "octopus", + } + path = MigrationServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = MigrationServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.MigrationServiceTransport, '_prep_wrapped_messages') as prep: + client = MigrationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.MigrationServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = MigrationServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py new file mode 100644 index 0000000000..532ecafb91 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py @@ -0,0 +1,4053 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.model_service import ModelServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.model_service import ModelServiceClient +from google.cloud.aiplatform_v1beta1.services.model_service import pagers +from google.cloud.aiplatform_v1beta1.services.model_service import transports +from google.cloud.aiplatform_v1beta1.services.model_service.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.aiplatform_v1beta1.types import deployed_model_ref +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import env_var +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import explanation_metadata +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import model +from google.cloud.aiplatform_v1beta1.types import model as gca_model +from google.cloud.aiplatform_v1beta1.types import model_evaluation +from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice +from google.cloud.aiplatform_v1beta1.types import model_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
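+# (The helper receives the client class so it can be applied to both
+# ModelServiceClient and ModelServiceAsyncClient via mock.patch.object.)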
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ModelServiceClient._get_default_mtls_endpoint(None) is None + assert ModelServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert ModelServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert ModelServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert ModelServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert ModelServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + ModelServiceClient, + ModelServiceAsyncClient, +]) +def test_model_service_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +@pytest.mark.parametrize("client_class", [ + ModelServiceClient, + ModelServiceAsyncClient, +]) +def test_model_service_client_service_account_always_use_jwt(client_class): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.ModelServiceGrpcTransport, "grpc"), + (transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_model_service_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + ModelServiceClient, + ModelServiceAsyncClient, +]) +def test_model_service_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +def test_model_service_client_get_transport_class(): + transport = ModelServiceClient.get_transport_class() + available_transports = [ + transports.ModelServiceGrpcTransport, + ] + assert transport in available_transports + + transport = 
ModelServiceClient.get_transport_class("grpc") + assert transport == transports.ModelServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc"), + (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(ModelServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceClient)) +@mock.patch.object(ModelServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceAsyncClient)) +def test_model_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(ModelServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(ModelServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
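+    # (Anything other than "true"/"false" is rejected with a MutualTLSChannelError
+    # or ValueError, as the cases below verify.)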
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc", "true"), + (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc", "false"), + (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(ModelServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceClient)) +@mock.patch.object(ModelServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_model_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc"), + (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_model_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc"), + (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_model_service_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_model_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1beta1.services.model_service.transports.ModelServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = ModelServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_upload_model(transport: str = 'grpc', request_type=model_service.UploadModelRequest): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upload_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.upload_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.UploadModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_upload_model_from_dict(): + test_upload_model(request_type=dict) + + +def test_upload_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upload_model), + '__call__') as call: + client.upload_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.UploadModelRequest() + + +@pytest.mark.asyncio +async def test_upload_model_async(transport: str = 'grpc_asyncio', request_type=model_service.UploadModelRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upload_model), + '__call__') as call: + # Designate an appropriate return value for the call. 
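+        # (FakeUnaryUnaryCall makes the mocked stub awaitable, mimicking a real
+        # async gRPC invocation.)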
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.upload_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.UploadModelRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_upload_model_async_from_dict(): + await test_upload_model_async(request_type=dict) + + +def test_upload_model_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.UploadModelRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upload_model), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.upload_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_upload_model_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.UploadModelRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upload_model), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.upload_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_upload_model_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upload_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.upload_model( + parent='parent_value', + model=gca_model.Model(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
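+        # (The flattened parent/model keywords are merged into a single
+        # UploadModelRequest by the client before the stub call.)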
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].model == gca_model.Model(name='name_value') + + +def test_upload_model_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.upload_model( + model_service.UploadModelRequest(), + parent='parent_value', + model=gca_model.Model(name='name_value'), + ) + + +@pytest.mark.asyncio +async def test_upload_model_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upload_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.upload_model( + parent='parent_value', + model=gca_model.Model(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].model == gca_model.Model(name='name_value') + + +@pytest.mark.asyncio +async def test_upload_model_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.upload_model( + model_service.UploadModelRequest(), + parent='parent_value', + model=gca_model.Model(name='name_value'), + ) + + +def test_get_model(transport: str = 'grpc', request_type=model_service.GetModelRequest): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model.Model( + name='name_value', + display_name='display_name_value', + description='description_value', + metadata_schema_uri='metadata_schema_uri_value', + training_pipeline='training_pipeline_value', + artifact_uri='artifact_uri_value', + supported_deployment_resources_types=[model.Model.DeploymentResourcesType.DEDICATED_RESOURCES], + supported_input_storage_formats=['supported_input_storage_formats_value'], + supported_output_storage_formats=['supported_output_storage_formats_value'], + etag='etag_value', + ) + response = client.get_model(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelRequest() + + # Establish that the response is the type that we expect. 
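+    # (GetModel is a plain unary RPC, so the Model resource is returned
+    # directly rather than wrapped in an operation future.)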
+    assert isinstance(response, model.Model)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.description == 'description_value'
+    assert response.metadata_schema_uri == 'metadata_schema_uri_value'
+    assert response.training_pipeline == 'training_pipeline_value'
+    assert response.artifact_uri == 'artifact_uri_value'
+    assert response.supported_deployment_resources_types == [model.Model.DeploymentResourcesType.DEDICATED_RESOURCES]
+    assert response.supported_input_storage_formats == ['supported_input_storage_formats_value']
+    assert response.supported_output_storage_formats == ['supported_output_storage_formats_value']
+    assert response.etag == 'etag_value'
+
+
+def test_get_model_from_dict():
+    test_get_model(request_type=dict)
+
+
+def test_get_model_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model),
+            '__call__') as call:
+        client.get_model()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.GetModelRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_model_async(transport: str = 'grpc_asyncio', request_type=model_service.GetModelRequest):
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model(
+            name='name_value',
+            display_name='display_name_value',
+            description='description_value',
+            metadata_schema_uri='metadata_schema_uri_value',
+            training_pipeline='training_pipeline_value',
+            artifact_uri='artifact_uri_value',
+            supported_deployment_resources_types=[model.Model.DeploymentResourcesType.DEDICATED_RESOURCES],
+            supported_input_storage_formats=['supported_input_storage_formats_value'],
+            supported_output_storage_formats=['supported_output_storage_formats_value'],
+            etag='etag_value',
+        ))
+        response = await client.get_model(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.GetModelRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, model.Model)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.description == 'description_value'
+    assert response.metadata_schema_uri == 'metadata_schema_uri_value'
+    assert response.training_pipeline == 'training_pipeline_value'
+    assert response.artifact_uri == 'artifact_uri_value'
+    assert response.supported_deployment_resources_types == [model.Model.DeploymentResourcesType.DEDICATED_RESOURCES]
+    assert response.supported_input_storage_formats == ['supported_input_storage_formats_value']
+    assert response.supported_output_storage_formats == ['supported_output_storage_formats_value']
+    assert response.etag == 'etag_value'
+
+
+@pytest.mark.asyncio
+async def test_get_model_async_from_dict():
+    await test_get_model_async(request_type=dict)
+
+
+def test_get_model_field_headers():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = model_service.GetModelRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model),
+            '__call__') as call:
+        call.return_value = model.Model()
+        client.get_model(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_model_field_headers_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = model_service.GetModelRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model())
+        await client.get_model(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_get_model_flattened():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = model.Model()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_model(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
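+        # (Flattened keyword arguments are folded into a GetModelRequest,
+        # so args[0] carries the values passed above.)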
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_get_model_flattened_error():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_model(
+            model_service.GetModelRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_model_flattened_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_model(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_model_flattened_error_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_model(
+            model_service.GetModelRequest(),
+            name='name_value',
+        )
+
+
+def test_list_models(transport: str = 'grpc', request_type=model_service.ListModelsRequest):
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_models),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = model_service.ListModelsResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_models(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.ListModelsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListModelsPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_models_from_dict():
+    test_list_models(request_type=dict)
+
+
+def test_list_models_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
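+    # (Patching __call__ on the stub's type intercepts the RPC at the
+    # transport layer, so no real gRPC channel is exercised.)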
+    with mock.patch.object(
+            type(client.transport.list_models),
+            '__call__') as call:
+        client.list_models()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.ListModelsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_models_async(transport: str = 'grpc_asyncio', request_type=model_service.ListModelsRequest):
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_models),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_models(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.ListModelsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListModelsAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_models_async_from_dict():
+    await test_list_models_async(request_type=dict)
+
+
+def test_list_models_field_headers():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = model_service.ListModelsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_models),
+            '__call__') as call:
+        call.return_value = model_service.ListModelsResponse()
+        client.list_models(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_models_field_headers_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = model_service.ListModelsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_models),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelsResponse())
+        await client.list_models(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
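+    # (The x-goog-request-params metadata entry mirrors URI fields such as
+    # `parent` so the backend can route the request.)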
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_list_models_flattened():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_models),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = model_service.ListModelsResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_models(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+def test_list_models_flattened_error():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_models(
+            model_service.ListModelsRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_models_flattened_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_models),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_models(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_models_flattened_error_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_models(
+            model_service.ListModelsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_models_pager():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_models),
+            '__call__') as call:
+        # Set the response to a series of pages.
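+        # (Each side_effect entry below is returned by one successive stub
+        # invocation; the trailing RuntimeError would fail the test if the
+        # pager fetched past the final page.)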
+        call.side_effect = (
+            model_service.ListModelsResponse(
+                models=[
+                    model.Model(),
+                    model.Model(),
+                    model.Model(),
+                ],
+                next_page_token='abc',
+            ),
+            model_service.ListModelsResponse(
+                models=[],
+                next_page_token='def',
+            ),
+            model_service.ListModelsResponse(
+                models=[
+                    model.Model(),
+                ],
+                next_page_token='ghi',
+            ),
+            model_service.ListModelsResponse(
+                models=[
+                    model.Model(),
+                    model.Model(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_models(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, model.Model)
+                   for i in results)
+
+def test_list_models_pages():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_models),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            model_service.ListModelsResponse(
+                models=[
+                    model.Model(),
+                    model.Model(),
+                    model.Model(),
+                ],
+                next_page_token='abc',
+            ),
+            model_service.ListModelsResponse(
+                models=[],
+                next_page_token='def',
+            ),
+            model_service.ListModelsResponse(
+                models=[
+                    model.Model(),
+                ],
+                next_page_token='ghi',
+            ),
+            model_service.ListModelsResponse(
+                models=[
+                    model.Model(),
+                    model.Model(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_models(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_models_async_pager():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_models),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            model_service.ListModelsResponse(
+                models=[
+                    model.Model(),
+                    model.Model(),
+                    model.Model(),
+                ],
+                next_page_token='abc',
+            ),
+            model_service.ListModelsResponse(
+                models=[],
+                next_page_token='def',
+            ),
+            model_service.ListModelsResponse(
+                models=[
+                    model.Model(),
+                ],
+                next_page_token='ghi',
+            ),
+            model_service.ListModelsResponse(
+                models=[
+                    model.Model(),
+                    model.Model(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_models(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, model.Model)
+                   for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_models_async_pages():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_models),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
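+        # (With an AsyncMock, each awaited stub call consumes the next
+        # side_effect entry in order.)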
+        call.side_effect = (
+            model_service.ListModelsResponse(
+                models=[
+                    model.Model(),
+                    model.Model(),
+                    model.Model(),
+                ],
+                next_page_token='abc',
+            ),
+            model_service.ListModelsResponse(
+                models=[],
+                next_page_token='def',
+            ),
+            model_service.ListModelsResponse(
+                models=[
+                    model.Model(),
+                ],
+                next_page_token='ghi',
+            ),
+            model_service.ListModelsResponse(
+                models=[
+                    model.Model(),
+                    model.Model(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_models(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+def test_update_model(transport: str = 'grpc', request_type=model_service.UpdateModelRequest):
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_model),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gca_model.Model(
+            name='name_value',
+            display_name='display_name_value',
+            description='description_value',
+            metadata_schema_uri='metadata_schema_uri_value',
+            training_pipeline='training_pipeline_value',
+            artifact_uri='artifact_uri_value',
+            supported_deployment_resources_types=[gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES],
+            supported_input_storage_formats=['supported_input_storage_formats_value'],
+            supported_output_storage_formats=['supported_output_storage_formats_value'],
+            etag='etag_value',
+        )
+        response = client.update_model(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.UpdateModelRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, gca_model.Model)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.description == 'description_value'
+    assert response.metadata_schema_uri == 'metadata_schema_uri_value'
+    assert response.training_pipeline == 'training_pipeline_value'
+    assert response.artifact_uri == 'artifact_uri_value'
+    assert response.supported_deployment_resources_types == [gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES]
+    assert response.supported_input_storage_formats == ['supported_input_storage_formats_value']
+    assert response.supported_output_storage_formats == ['supported_output_storage_formats_value']
+    assert response.etag == 'etag_value'
+
+
+def test_update_model_from_dict():
+    test_update_model(request_type=dict)
+
+
+def test_update_model_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_model),
+            '__call__') as call:
+        client.update_model()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.UpdateModelRequest()
+
+
+@pytest.mark.asyncio
+async def test_update_model_async(transport: str = 'grpc_asyncio', request_type=model_service.UpdateModelRequest):
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_model),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model(
+            name='name_value',
+            display_name='display_name_value',
+            description='description_value',
+            metadata_schema_uri='metadata_schema_uri_value',
+            training_pipeline='training_pipeline_value',
+            artifact_uri='artifact_uri_value',
+            supported_deployment_resources_types=[gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES],
+            supported_input_storage_formats=['supported_input_storage_formats_value'],
+            supported_output_storage_formats=['supported_output_storage_formats_value'],
+            etag='etag_value',
+        ))
+        response = await client.update_model(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.UpdateModelRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, gca_model.Model)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.description == 'description_value'
+    assert response.metadata_schema_uri == 'metadata_schema_uri_value'
+    assert response.training_pipeline == 'training_pipeline_value'
+    assert response.artifact_uri == 'artifact_uri_value'
+    assert response.supported_deployment_resources_types == [gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES]
+    assert response.supported_input_storage_formats == ['supported_input_storage_formats_value']
+    assert response.supported_output_storage_formats == ['supported_output_storage_formats_value']
+    assert response.etag == 'etag_value'
+
+
+@pytest.mark.asyncio
+async def test_update_model_async_from_dict():
+    await test_update_model_async(request_type=dict)
+
+
+def test_update_model_field_headers():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = model_service.UpdateModelRequest()
+
+    request.model.name = 'model.name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_model),
+            '__call__') as call:
+        call.return_value = gca_model.Model()
+        client.update_model(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
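+    # (Nested request fields surface in the routing header as a dotted
+    # path, here `model.name`.)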
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'model.name=model.name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_update_model_field_headers_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = model_service.UpdateModelRequest()
+
+    request.model.name = 'model.name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_model),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model())
+        await client.update_model(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'model.name=model.name/value',
+    ) in kw['metadata']
+
+
+def test_update_model_flattened():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_model),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gca_model.Model()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.update_model(
+            model=gca_model.Model(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].model == gca_model.Model(name='name_value')
+        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+def test_update_model_flattened_error():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.update_model(
+            model_service.UpdateModelRequest(),
+            model=gca_model.Model(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+
+@pytest.mark.asyncio
+async def test_update_model_flattened_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_model),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.update_model(
+            model=gca_model.Model(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].model == gca_model.Model(name='name_value')
+        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+@pytest.mark.asyncio
+async def test_update_model_flattened_error_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.update_model(
+            model_service.UpdateModelRequest(),
+            model=gca_model.Model(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+
+def test_delete_model(transport: str = 'grpc', request_type=model_service.DeleteModelRequest):
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_model),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.delete_model(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.DeleteModelRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+def test_delete_model_from_dict():
+    test_delete_model(request_type=dict)
+
+
+def test_delete_model_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_model),
+            '__call__') as call:
+        client.delete_model()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.DeleteModelRequest()
+
+
+@pytest.mark.asyncio
+async def test_delete_model_async(transport: str = 'grpc_asyncio', request_type=model_service.DeleteModelRequest):
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_model),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        response = await client.delete_model(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.DeleteModelRequest()
+
+    # Establish that the response is the type that we expect.
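+    # (Long-running methods wrap the returned Operation in an api_core
+    # operation future, hence the future.Future check.)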
+    assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_delete_model_async_from_dict():
+    await test_delete_model_async(request_type=dict)
+
+
+def test_delete_model_field_headers():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = model_service.DeleteModelRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_model),
+            '__call__') as call:
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        client.delete_model(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_delete_model_field_headers_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = model_service.DeleteModelRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_model),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
+        await client.delete_model(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_delete_model_flattened():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_model),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_model(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_delete_model_flattened_error():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_model(
+            model_service.DeleteModelRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_model_flattened_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_model),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_model(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_model_flattened_error_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.delete_model(
+            model_service.DeleteModelRequest(),
+            name='name_value',
+        )
+
+
+def test_export_model(transport: str = 'grpc', request_type=model_service.ExportModelRequest):
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.export_model),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.export_model(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.ExportModelRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+def test_export_model_from_dict():
+    test_export_model(request_type=dict)
+
+
+def test_export_model_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.export_model),
+            '__call__') as call:
+        client.export_model()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.ExportModelRequest()
+
+
+@pytest.mark.asyncio
+async def test_export_model_async(transport: str = 'grpc_asyncio', request_type=model_service.ExportModelRequest):
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.export_model),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
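+        # (FakeUnaryUnaryCall stands in for the awaitable a real async gRPC
+        # stub would return.)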
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        response = await client.export_model(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.ExportModelRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_export_model_async_from_dict():
+    await test_export_model_async(request_type=dict)
+
+
+def test_export_model_field_headers():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = model_service.ExportModelRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.export_model),
+            '__call__') as call:
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        client.export_model(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_export_model_field_headers_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = model_service.ExportModelRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.export_model),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
+        await client.export_model(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_export_model_flattened():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.export_model),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.export_model(
+            name='name_value',
+            output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+        assert args[0].output_config == model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value')
+
+
+def test_export_model_flattened_error():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.export_model(
+            model_service.ExportModelRequest(),
+            name='name_value',
+            output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'),
+        )
+
+
+@pytest.mark.asyncio
+async def test_export_model_flattened_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.export_model),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.export_model(
+            name='name_value',
+            output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+        assert args[0].output_config == model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value')
+
+
+@pytest.mark.asyncio
+async def test_export_model_flattened_error_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.export_model(
+            model_service.ExportModelRequest(),
+            name='name_value',
+            output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'),
+        )
+
+
+def test_get_model_evaluation(transport: str = 'grpc', request_type=model_service.GetModelEvaluationRequest):
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model_evaluation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = model_evaluation.ModelEvaluation(
+            name='name_value',
+            metrics_schema_uri='metrics_schema_uri_value',
+            slice_dimensions=['slice_dimensions_value'],
+        )
+        response = client.get_model_evaluation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.GetModelEvaluationRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, model_evaluation.ModelEvaluation)
+    assert response.name == 'name_value'
+    assert response.metrics_schema_uri == 'metrics_schema_uri_value'
+    assert response.slice_dimensions == ['slice_dimensions_value']
+
+
+def test_get_model_evaluation_from_dict():
+    test_get_model_evaluation(request_type=dict)
+
+
+def test_get_model_evaluation_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model_evaluation),
+            '__call__') as call:
+        client.get_model_evaluation()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.GetModelEvaluationRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_model_evaluation_async(transport: str = 'grpc_asyncio', request_type=model_service.GetModelEvaluationRequest):
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model_evaluation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation(
+            name='name_value',
+            metrics_schema_uri='metrics_schema_uri_value',
+            slice_dimensions=['slice_dimensions_value'],
+        ))
+        response = await client.get_model_evaluation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.GetModelEvaluationRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, model_evaluation.ModelEvaluation)
+    assert response.name == 'name_value'
+    assert response.metrics_schema_uri == 'metrics_schema_uri_value'
+    assert response.slice_dimensions == ['slice_dimensions_value']
+
+
+@pytest.mark.asyncio
+async def test_get_model_evaluation_async_from_dict():
+    await test_get_model_evaluation_async(request_type=dict)
+
+
+def test_get_model_evaluation_field_headers():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = model_service.GetModelEvaluationRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model_evaluation),
+            '__call__') as call:
+        call.return_value = model_evaluation.ModelEvaluation()
+        client.get_model_evaluation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_model_evaluation_field_headers_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = model_service.GetModelEvaluationRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model_evaluation),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation())
+        await client.get_model_evaluation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_get_model_evaluation_flattened():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model_evaluation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = model_evaluation.ModelEvaluation()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_model_evaluation(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_get_model_evaluation_flattened_error():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_model_evaluation(
+            model_service.GetModelEvaluationRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_model_evaluation_flattened_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_model_evaluation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_model_evaluation(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_model_evaluation_flattened_error_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
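+    # (The client refuses to merge the two calling styles rather than
+    # guessing which values should win.)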
+    with pytest.raises(ValueError):
+        await client.get_model_evaluation(
+            model_service.GetModelEvaluationRequest(),
+            name='name_value',
+        )
+
+
+def test_list_model_evaluations(transport: str = 'grpc', request_type=model_service.ListModelEvaluationsRequest):
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluations),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = model_service.ListModelEvaluationsResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_model_evaluations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.ListModelEvaluationsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListModelEvaluationsPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_model_evaluations_from_dict():
+    test_list_model_evaluations(request_type=dict)
+
+
+def test_list_model_evaluations_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluations),
+            '__call__') as call:
+        client.list_model_evaluations()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.ListModelEvaluationsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_model_evaluations_async(transport: str = 'grpc_asyncio', request_type=model_service.ListModelEvaluationsRequest):
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluations),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_model_evaluations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.ListModelEvaluationsRequest()
+
+    # Establish that the response is the type that we expect.
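+    # (The async client returns an AsyncPager immediately; subsequent pages
+    # are only fetched lazily during iteration.)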
+    assert isinstance(response, pagers.ListModelEvaluationsAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_model_evaluations_async_from_dict():
+    await test_list_model_evaluations_async(request_type=dict)
+
+
+def test_list_model_evaluations_field_headers():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = model_service.ListModelEvaluationsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluations),
+            '__call__') as call:
+        call.return_value = model_service.ListModelEvaluationsResponse()
+        client.list_model_evaluations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_model_evaluations_field_headers_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = model_service.ListModelEvaluationsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluations),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationsResponse())
+        await client.list_model_evaluations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_list_model_evaluations_flattened():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluations),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = model_service.ListModelEvaluationsResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_model_evaluations(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+def test_list_model_evaluations_flattened_error():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_model_evaluations(
+            model_service.ListModelEvaluationsRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_model_evaluations_flattened_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluations),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_model_evaluations(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_model_evaluations_flattened_error_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_model_evaluations(
+            model_service.ListModelEvaluationsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_model_evaluations_pager():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluations),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            model_service.ListModelEvaluationsResponse(
+                model_evaluations=[
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                ],
+                next_page_token='abc',
+            ),
+            model_service.ListModelEvaluationsResponse(
+                model_evaluations=[],
+                next_page_token='def',
+            ),
+            model_service.ListModelEvaluationsResponse(
+                model_evaluations=[
+                    model_evaluation.ModelEvaluation(),
+                ],
+                next_page_token='ghi',
+            ),
+            model_service.ListModelEvaluationsResponse(
+                model_evaluations=[
+                    model_evaluation.ModelEvaluation(),
+                    model_evaluation.ModelEvaluation(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_model_evaluations(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, model_evaluation.ModelEvaluation)
+                   for i in results)
+
+def test_list_model_evaluations_pages():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluations),
+            '__call__') as call:
+        # Set the response to a series of pages.
+ call.side_effect = ( + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + next_page_token='abc', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[], + next_page_token='def', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + ], + next_page_token='ghi', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + ), + RuntimeError, + ) + pages = list(client.list_model_evaluations(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_model_evaluations_async_pager(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + next_page_token='abc', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[], + next_page_token='def', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + ], + next_page_token='ghi', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_model_evaluations(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, model_evaluation.ModelEvaluation) + for i in responses) + +@pytest.mark.asyncio +async def test_list_model_evaluations_async_pages(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
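+        # With mock.AsyncMock, each response below is returned from an awaitable call, one per page.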
+ call.side_effect = ( + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + next_page_token='abc', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[], + next_page_token='def', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + ], + next_page_token='ghi', + ), + model_service.ListModelEvaluationsResponse( + model_evaluations=[ + model_evaluation.ModelEvaluation(), + model_evaluation.ModelEvaluation(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_model_evaluations(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +def test_get_model_evaluation_slice(transport: str = 'grpc', request_type=model_service.GetModelEvaluationSliceRequest): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation_slice), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_evaluation_slice.ModelEvaluationSlice( + name='name_value', + metrics_schema_uri='metrics_schema_uri_value', + ) + response = client.get_model_evaluation_slice(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelEvaluationSliceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, model_evaluation_slice.ModelEvaluationSlice) + assert response.name == 'name_value' + assert response.metrics_schema_uri == 'metrics_schema_uri_value' + + +def test_get_model_evaluation_slice_from_dict(): + test_get_model_evaluation_slice(request_type=dict) + + +def test_get_model_evaluation_slice_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation_slice), + '__call__') as call: + client.get_model_evaluation_slice() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelEvaluationSliceRequest() + + +@pytest.mark.asyncio +async def test_get_model_evaluation_slice_async(transport: str = 'grpc_asyncio', request_type=model_service.GetModelEvaluationSliceRequest): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
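+    # The async stub must yield an awaitable, hence the FakeUnaryUnaryCall wrapper below.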
+ with mock.patch.object( + type(client.transport.get_model_evaluation_slice), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation_slice.ModelEvaluationSlice( + name='name_value', + metrics_schema_uri='metrics_schema_uri_value', + )) + response = await client.get_model_evaluation_slice(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.GetModelEvaluationSliceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, model_evaluation_slice.ModelEvaluationSlice) + assert response.name == 'name_value' + assert response.metrics_schema_uri == 'metrics_schema_uri_value' + + +@pytest.mark.asyncio +async def test_get_model_evaluation_slice_async_from_dict(): + await test_get_model_evaluation_slice_async(request_type=dict) + + +def test_get_model_evaluation_slice_field_headers(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.GetModelEvaluationSliceRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation_slice), + '__call__') as call: + call.return_value = model_evaluation_slice.ModelEvaluationSlice() + client.get_model_evaluation_slice(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_model_evaluation_slice_field_headers_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = model_service.GetModelEvaluationSliceRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation_slice), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation_slice.ModelEvaluationSlice()) + await client.get_model_evaluation_slice(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_get_model_evaluation_slice_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation_slice), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_evaluation_slice.ModelEvaluationSlice() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
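+        # The flattened "name" argument should be folded into the request message.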
+ client.get_model_evaluation_slice( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_get_model_evaluation_slice_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_model_evaluation_slice( + model_service.GetModelEvaluationSliceRequest(), + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_get_model_evaluation_slice_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation_slice), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_evaluation_slice.ModelEvaluationSlice() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation_slice.ModelEvaluationSlice()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_model_evaluation_slice( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_get_model_evaluation_slice_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_model_evaluation_slice( + model_service.GetModelEvaluationSliceRequest(), + name='name_value', + ) + + +def test_list_model_evaluation_slices(transport: str = 'grpc', request_type=model_service.ListModelEvaluationSlicesRequest): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluation_slices), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelEvaluationSlicesResponse( + next_page_token='next_page_token_value', + ) + response = client.list_model_evaluation_slices(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == model_service.ListModelEvaluationSlicesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListModelEvaluationSlicesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_model_evaluation_slices_from_dict(): + test_list_model_evaluation_slices(request_type=dict) + + +def test_list_model_evaluation_slices_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
+    # request == None and no flattened fields passed, work.
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluation_slices),
+            '__call__') as call:
+        client.list_model_evaluation_slices()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.ListModelEvaluationSlicesRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_model_evaluation_slices_async(transport: str = 'grpc_asyncio', request_type=model_service.ListModelEvaluationSlicesRequest):
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluation_slices),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationSlicesResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_model_evaluation_slices(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == model_service.ListModelEvaluationSlicesRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListModelEvaluationSlicesAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_model_evaluation_slices_async_from_dict():
+    await test_list_model_evaluation_slices_async(request_type=dict)
+
+
+def test_list_model_evaluation_slices_field_headers():
+    client = ModelServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = model_service.ListModelEvaluationSlicesRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_model_evaluation_slices),
+            '__call__') as call:
+        call.return_value = model_service.ListModelEvaluationSlicesResponse()
+        client.list_model_evaluation_slices(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_model_evaluation_slices_field_headers_async():
+    client = ModelServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = model_service.ListModelEvaluationSlicesRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
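+    # The parent set on the request should surface in the x-goog-request-params metadata.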
+ with mock.patch.object( + type(client.transport.list_model_evaluation_slices), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationSlicesResponse()) + await client.list_model_evaluation_slices(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_list_model_evaluation_slices_flattened(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluation_slices), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelEvaluationSlicesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_model_evaluation_slices( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +def test_list_model_evaluation_slices_flattened_error(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_model_evaluation_slices( + model_service.ListModelEvaluationSlicesRequest(), + parent='parent_value', + ) + + +@pytest.mark.asyncio +async def test_list_model_evaluation_slices_flattened_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluation_slices), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = model_service.ListModelEvaluationSlicesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationSlicesResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_model_evaluation_slices( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +@pytest.mark.asyncio +async def test_list_model_evaluation_slices_flattened_error_async(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_model_evaluation_slices( + model_service.ListModelEvaluationSlicesRequest(), + parent='parent_value', + ) + + +def test_list_model_evaluation_slices_pager(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
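+    # The pager captures the routing metadata once and replays it on every page fetch.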
+ with mock.patch.object( + type(client.transport.list_model_evaluation_slices), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + ], + next_page_token='abc', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[], + next_page_token='def', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + ], + next_page_token='ghi', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_model_evaluation_slices(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, model_evaluation_slice.ModelEvaluationSlice) + for i in results) + +def test_list_model_evaluation_slices_pages(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluation_slices), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + ], + next_page_token='abc', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[], + next_page_token='def', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + ], + next_page_token='ghi', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + ], + ), + RuntimeError, + ) + pages = list(client.list_model_evaluation_slices(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_model_evaluation_slices_async_pager(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluation_slices), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + ], + next_page_token='abc', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[], + next_page_token='def', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + ], + next_page_token='ghi', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_model_evaluation_slices(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, model_evaluation_slice.ModelEvaluationSlice) + for i in responses) + +@pytest.mark.asyncio +async def test_list_model_evaluation_slices_async_pages(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluation_slices), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + ], + next_page_token='abc', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[], + next_page_token='def', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + ], + next_page_token='ghi', + ), + model_service.ListModelEvaluationSlicesResponse( + model_evaluation_slices=[ + model_evaluation_slice.ModelEvaluationSlice(), + model_evaluation_slice.ModelEvaluationSlice(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_model_evaluation_slices(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ModelServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. 
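+    # (A transport instance already carries its own credentials, credentials file, and scopes.)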
+ transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ModelServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = ModelServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.ModelServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.ModelServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.ModelServiceGrpcTransport, + transports.ModelServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.ModelServiceGrpcTransport, + ) + +def test_model_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.ModelServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_model_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1beta1.services.model_service.transports.ModelServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.ModelServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
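+    # The base class is only an interface; the concrete gRPC transports supply real stubs.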
+ methods = ( + 'upload_model', + 'get_model', + 'list_models', + 'update_model', + 'delete_model', + 'export_model', + 'get_model_evaluation', + 'list_model_evaluations', + 'get_model_evaluation_slice', + 'list_model_evaluation_slices', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +@requires_google_auth_gte_1_25_0 +def test_model_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.model_service.transports.ModelServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ModelServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_model_service_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.model_service.transports.ModelServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ModelServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + ), + quota_project_id="octopus", + ) + + +def test_model_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.model_service.transports.ModelServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ModelServiceTransport() + adc.assert_called_once() + + +@requires_google_auth_gte_1_25_0 +def test_model_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ModelServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_model_service_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. 
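+    # google.auth.default is patched so no real ADC lookup is attempted during the test.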
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ModelServiceClient() + adc.assert_called_once_with( + scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ModelServiceGrpcTransport, + transports.ModelServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_model_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ModelServiceGrpcTransport, + transports.ModelServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_model_service_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") + adc.assert_called_once_with(scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.ModelServiceGrpcTransport, grpc_helpers), + (transports.ModelServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_model_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport]) +def test_model_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
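+    # Explicit ssl_channel_credentials should win; client_cert_source_for_mtls is only the
+    # fallback (second check below).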
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +def test_model_service_host_no_port(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + ) + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +def test_model_service_host_with_port(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + ) + assert client.transport._host == 'aiplatform.googleapis.com:8000' + +def test_model_service_grpc_transport_channel(): + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.ModelServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_model_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.ModelServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize("transport_class", [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport]) +def test_model_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport]) +def test_model_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_model_service_grpc_lro_client(): + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_model_service_grpc_lro_async_client(): + client = ModelServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. 
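+    # Long-running methods such as upload_model and export_model are polled through it.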
+ assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_endpoint_path(): + project = "squid" + location = "clam" + endpoint = "whelk" + expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + actual = ModelServiceClient.endpoint_path(project, location, endpoint) + assert expected == actual + + +def test_parse_endpoint_path(): + expected = { + "project": "octopus", + "location": "oyster", + "endpoint": "nudibranch", + } + path = ModelServiceClient.endpoint_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_endpoint_path(path) + assert expected == actual + +def test_model_path(): + project = "cuttlefish" + location = "mussel" + model = "winkle" + expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + actual = ModelServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "model": "abalone", + } + path = ModelServiceClient.model_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_model_path(path) + assert expected == actual + +def test_model_evaluation_path(): + project = "squid" + location = "clam" + model = "whelk" + evaluation = "octopus" + expected = "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}".format(project=project, location=location, model=model, evaluation=evaluation, ) + actual = ModelServiceClient.model_evaluation_path(project, location, model, evaluation) + assert expected == actual + + +def test_parse_model_evaluation_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "model": "cuttlefish", + "evaluation": "mussel", + } + path = ModelServiceClient.model_evaluation_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_model_evaluation_path(path) + assert expected == actual + +def test_model_evaluation_slice_path(): + project = "winkle" + location = "nautilus" + model = "scallop" + evaluation = "abalone" + slice = "squid" + expected = "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}".format(project=project, location=location, model=model, evaluation=evaluation, slice=slice, ) + actual = ModelServiceClient.model_evaluation_slice_path(project, location, model, evaluation, slice) + assert expected == actual + + +def test_parse_model_evaluation_slice_path(): + expected = { + "project": "clam", + "location": "whelk", + "model": "octopus", + "evaluation": "oyster", + "slice": "nudibranch", + } + path = ModelServiceClient.model_evaluation_slice_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ModelServiceClient.parse_model_evaluation_slice_path(path) + assert expected == actual + +def test_training_pipeline_path(): + project = "cuttlefish" + location = "mussel" + training_pipeline = "winkle" + expected = "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, ) + actual = ModelServiceClient.training_pipeline_path(project, location, training_pipeline) + assert expected == actual + + +def test_parse_training_pipeline_path(): + expected = { + "project": "nautilus", + "location": "scallop", + "training_pipeline": "abalone", + } + path = ModelServiceClient.training_pipeline_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_training_pipeline_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = ModelServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = ModelServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = ModelServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = ModelServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = ModelServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = ModelServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = ModelServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = ModelServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = ModelServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = ModelServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = ModelServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ModelServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.ModelServiceTransport, '_prep_wrapped_messages') as prep: + client = ModelServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.ModelServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = ModelServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py new file mode 100644 index 0000000000..ea7e516df1 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py @@ -0,0 +1,3918 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.pipeline_service import PipelineServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.pipeline_service import PipelineServiceClient +from google.cloud.aiplatform_v1beta1.services.pipeline_service import pagers +from google.cloud.aiplatform_v1beta1.services.pipeline_service import transports +from google.cloud.aiplatform_v1beta1.services.pipeline_service.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.aiplatform_v1beta1.types import artifact +from google.cloud.aiplatform_v1beta1.types import context +from google.cloud.aiplatform_v1beta1.types import deployed_model_ref +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import env_var +from google.cloud.aiplatform_v1beta1.types import execution +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import explanation_metadata +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import model +from google.cloud.aiplatform_v1beta1.types import operation 
+from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
+from google.cloud.aiplatform_v1beta1.types import pipeline_job
+from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job
+from google.cloud.aiplatform_v1beta1.types import pipeline_service
+from google.cloud.aiplatform_v1beta1.types import pipeline_state
+from google.cloud.aiplatform_v1beta1.types import training_pipeline
+from google.cloud.aiplatform_v1beta1.types import training_pipeline as gca_training_pipeline
+from google.cloud.aiplatform_v1beta1.types import value
+from google.longrunning import operations_pb2
+from google.oauth2 import service_account
+from google.protobuf import any_pb2  # type: ignore
+from google.protobuf import field_mask_pb2  # type: ignore
+from google.protobuf import struct_pb2  # type: ignore
+from google.protobuf import timestamp_pb2  # type: ignore
+from google.rpc import status_pb2  # type: ignore
+import google.auth
+
+
+# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
+# through google-api-core:
+# - Delete the auth "less than" test cases
+# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
+requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
+    packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
+    reason="This test requires google-auth < 1.25.0",
+)
+requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
+    packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
+    reason="This test requires google-auth >= 1.25.0",
+)
+
+def client_cert_source_callback():
+    return b"cert bytes", b"key bytes"
+
+
+# If default endpoint is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
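+# (A localhost endpoint has no distinct mtls variant, so it is swapped for a realistic hostname.)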
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert PipelineServiceClient._get_default_mtls_endpoint(None) is None + assert PipelineServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert PipelineServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert PipelineServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert PipelineServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert PipelineServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + PipelineServiceClient, + PipelineServiceAsyncClient, +]) +def test_pipeline_service_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +@pytest.mark.parametrize("client_class", [ + PipelineServiceClient, + PipelineServiceAsyncClient, +]) +def test_pipeline_service_client_service_account_always_use_jwt(client_class): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.PipelineServiceGrpcTransport, "grpc"), + (transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_pipeline_service_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + PipelineServiceClient, + PipelineServiceAsyncClient, +]) +def test_pipeline_service_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +def test_pipeline_service_client_get_transport_class(): + transport = PipelineServiceClient.get_transport_class() + available_transports = [ + transports.PipelineServiceGrpcTransport, + ] + assert 
transport in available_transports + + transport = PipelineServiceClient.get_transport_class("grpc") + assert transport == transports.PipelineServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc"), + (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(PipelineServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceClient)) +@mock.patch.object(PipelineServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceAsyncClient)) +def test_pipeline_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(PipelineServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(PipelineServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
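+    # (google-api-core accepts only "true" or "false" for this variable, so
+    # any other value is expected to raise ValueError at client construction.)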
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc", "true"), + (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc", "false"), + (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(PipelineServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceClient)) +@mock.patch.object(PipelineServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_pipeline_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
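+    # (Note: `client` below still refers to the instance built in the previous
+    # block; DEFAULT_ENDPOINT and DEFAULT_MTLS_ENDPOINT are class attributes,
+    # so the expected values also hold for the client constructed afterwards.)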
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc"), + (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_pipeline_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc"), + (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_pipeline_service_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_pipeline_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1beta1.services.pipeline_service.transports.PipelineServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = PipelineServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_training_pipeline(transport: str = 'grpc', request_type=pipeline_service.CreateTrainingPipelineRequest): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_training_pipeline.TrainingPipeline( + name='name_value', + display_name='display_name_value', + training_task_definition='training_task_definition_value', + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + ) + response = client.create_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CreateTrainingPipelineRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_training_pipeline.TrainingPipeline) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.training_task_definition == 'training_task_definition_value' + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + + +def test_create_training_pipeline_from_dict(): + test_create_training_pipeline(request_type=dict) + + +def test_create_training_pipeline_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.create_training_pipeline),
+            '__call__') as call:
+        client.create_training_pipeline()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == pipeline_service.CreateTrainingPipelineRequest()
+
+
+@pytest.mark.asyncio
+async def test_create_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CreateTrainingPipelineRequest):
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_training_pipeline),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_training_pipeline.TrainingPipeline(
+            name='name_value',
+            display_name='display_name_value',
+            training_task_definition='training_task_definition_value',
+            state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+        ))
+        response = await client.create_training_pipeline(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == pipeline_service.CreateTrainingPipelineRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, gca_training_pipeline.TrainingPipeline)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.training_task_definition == 'training_task_definition_value'
+    assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
+
+
+@pytest.mark.asyncio
+async def test_create_training_pipeline_async_from_dict():
+    await test_create_training_pipeline_async(request_type=dict)
+
+
+def test_create_training_pipeline_field_headers():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = pipeline_service.CreateTrainingPipelineRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_training_pipeline),
+            '__call__') as call:
+        call.return_value = gca_training_pipeline.TrainingPipeline()
+        client.create_training_pipeline(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_create_training_pipeline_field_headers_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = pipeline_service.CreateTrainingPipelineRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_training_pipeline),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_training_pipeline.TrainingPipeline())
+        await client.create_training_pipeline(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_create_training_pipeline_flattened():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_training_pipeline),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gca_training_pipeline.TrainingPipeline()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.create_training_pipeline(
+            parent='parent_value',
+            training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].training_pipeline == gca_training_pipeline.TrainingPipeline(name='name_value')
+
+
+def test_create_training_pipeline_flattened_error():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_training_pipeline(
+            pipeline_service.CreateTrainingPipelineRequest(),
+            parent='parent_value',
+            training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'),
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_training_pipeline_flattened_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_training_pipeline),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_training_pipeline.TrainingPipeline())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_training_pipeline(
+            parent='parent_value',
+            training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].training_pipeline == gca_training_pipeline.TrainingPipeline(name='name_value')
+
+
+@pytest.mark.asyncio
+async def test_create_training_pipeline_flattened_error_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.create_training_pipeline(
+            pipeline_service.CreateTrainingPipelineRequest(),
+            parent='parent_value',
+            training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'),
+        )
+
+
+def test_get_training_pipeline(transport: str = 'grpc', request_type=pipeline_service.GetTrainingPipelineRequest):
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_training_pipeline),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = training_pipeline.TrainingPipeline(
+            name='name_value',
+            display_name='display_name_value',
+            training_task_definition='training_task_definition_value',
+            state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+        )
+        response = client.get_training_pipeline(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == pipeline_service.GetTrainingPipelineRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, training_pipeline.TrainingPipeline)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.training_task_definition == 'training_task_definition_value'
+    assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
+
+
+def test_get_training_pipeline_from_dict():
+    test_get_training_pipeline(request_type=dict)
+
+
+def test_get_training_pipeline_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_training_pipeline),
+            '__call__') as call:
+        client.get_training_pipeline()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == pipeline_service.GetTrainingPipelineRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.GetTrainingPipelineRequest):
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_training_pipeline),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(training_pipeline.TrainingPipeline(
+            name='name_value',
+            display_name='display_name_value',
+            training_task_definition='training_task_definition_value',
+            state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+        ))
+        response = await client.get_training_pipeline(request)
+
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.GetTrainingPipelineRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, training_pipeline.TrainingPipeline) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.training_task_definition == 'training_task_definition_value' + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + + +@pytest.mark.asyncio +async def test_get_training_pipeline_async_from_dict(): + await test_get_training_pipeline_async(request_type=dict) + + +def test_get_training_pipeline_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.GetTrainingPipelineRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_training_pipeline), + '__call__') as call: + call.return_value = training_pipeline.TrainingPipeline() + client.get_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_training_pipeline_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.GetTrainingPipelineRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_training_pipeline), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(training_pipeline.TrainingPipeline()) + await client.get_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_get_training_pipeline_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = training_pipeline.TrainingPipeline() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_training_pipeline( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_get_training_pipeline_flattened_error():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_training_pipeline(
+            pipeline_service.GetTrainingPipelineRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_training_pipeline_flattened_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_training_pipeline),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(training_pipeline.TrainingPipeline())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_training_pipeline(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_training_pipeline_flattened_error_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_training_pipeline(
+            pipeline_service.GetTrainingPipelineRequest(),
+            name='name_value',
+        )
+
+
+def test_list_training_pipelines(transport: str = 'grpc', request_type=pipeline_service.ListTrainingPipelinesRequest):
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_training_pipelines),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = pipeline_service.ListTrainingPipelinesResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_training_pipelines(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == pipeline_service.ListTrainingPipelinesRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListTrainingPipelinesPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_training_pipelines_from_dict():
+    test_list_training_pipelines(request_type=dict)
+
+
+def test_list_training_pipelines_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_training_pipelines),
+            '__call__') as call:
+        client.list_training_pipelines()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == pipeline_service.ListTrainingPipelinesRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_training_pipelines_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.ListTrainingPipelinesRequest):
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_training_pipelines),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListTrainingPipelinesResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_training_pipelines(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == pipeline_service.ListTrainingPipelinesRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListTrainingPipelinesAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_training_pipelines_async_from_dict():
+    await test_list_training_pipelines_async(request_type=dict)
+
+
+def test_list_training_pipelines_field_headers():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = pipeline_service.ListTrainingPipelinesRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_training_pipelines),
+            '__call__') as call:
+        call.return_value = pipeline_service.ListTrainingPipelinesResponse()
+        client.list_training_pipelines(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_training_pipelines_field_headers_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = pipeline_service.ListTrainingPipelinesRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_training_pipelines),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListTrainingPipelinesResponse())
+        await client.list_training_pipelines(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_list_training_pipelines_flattened():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_training_pipelines),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = pipeline_service.ListTrainingPipelinesResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_training_pipelines(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+def test_list_training_pipelines_flattened_error():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_training_pipelines(
+            pipeline_service.ListTrainingPipelinesRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_training_pipelines_flattened_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_training_pipelines),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListTrainingPipelinesResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_training_pipelines(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_training_pipelines_flattened_error_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_training_pipelines(
+            pipeline_service.ListTrainingPipelinesRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_training_pipelines_pager():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_training_pipelines),
+            '__call__') as call:
+        # Set the response to a series of pages.
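+        # (The fake pages below hold 3 + 0 + 1 + 2 pipelines; the trailing
+        # RuntimeError would surface if the pager ever requested a page
+        # beyond the final response.)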
+        call.side_effect = (
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[
+                    training_pipeline.TrainingPipeline(),
+                    training_pipeline.TrainingPipeline(),
+                    training_pipeline.TrainingPipeline(),
+                ],
+                next_page_token='abc',
+            ),
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[],
+                next_page_token='def',
+            ),
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[
+                    training_pipeline.TrainingPipeline(),
+                ],
+                next_page_token='ghi',
+            ),
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[
+                    training_pipeline.TrainingPipeline(),
+                    training_pipeline.TrainingPipeline(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_training_pipelines(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, training_pipeline.TrainingPipeline)
+                   for i in results)
+
+def test_list_training_pipelines_pages():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_training_pipelines),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[
+                    training_pipeline.TrainingPipeline(),
+                    training_pipeline.TrainingPipeline(),
+                    training_pipeline.TrainingPipeline(),
+                ],
+                next_page_token='abc',
+            ),
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[],
+                next_page_token='def',
+            ),
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[
+                    training_pipeline.TrainingPipeline(),
+                ],
+                next_page_token='ghi',
+            ),
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[
+                    training_pipeline.TrainingPipeline(),
+                    training_pipeline.TrainingPipeline(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_training_pipelines(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_training_pipelines_async_pager():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_training_pipelines),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
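+        # (With new_callable=mock.AsyncMock, each stub call is awaitable and
+        # side_effect supplies the result of each awaited call in turn.)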
+        call.side_effect = (
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[
+                    training_pipeline.TrainingPipeline(),
+                    training_pipeline.TrainingPipeline(),
+                    training_pipeline.TrainingPipeline(),
+                ],
+                next_page_token='abc',
+            ),
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[],
+                next_page_token='def',
+            ),
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[
+                    training_pipeline.TrainingPipeline(),
+                ],
+                next_page_token='ghi',
+            ),
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[
+                    training_pipeline.TrainingPipeline(),
+                    training_pipeline.TrainingPipeline(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_training_pipelines(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, training_pipeline.TrainingPipeline)
+                   for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_training_pipelines_async_pages():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_training_pipelines),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[
+                    training_pipeline.TrainingPipeline(),
+                    training_pipeline.TrainingPipeline(),
+                    training_pipeline.TrainingPipeline(),
+                ],
+                next_page_token='abc',
+            ),
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[],
+                next_page_token='def',
+            ),
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[
+                    training_pipeline.TrainingPipeline(),
+                ],
+                next_page_token='ghi',
+            ),
+            pipeline_service.ListTrainingPipelinesResponse(
+                training_pipelines=[
+                    training_pipeline.TrainingPipeline(),
+                    training_pipeline.TrainingPipeline(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_training_pipelines(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+def test_delete_training_pipeline(transport: str = 'grpc', request_type=pipeline_service.DeleteTrainingPipelineRequest):
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_training_pipeline),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.delete_training_pipeline(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == pipeline_service.DeleteTrainingPipelineRequest()
+
+    # Establish that the response is the type that we expect.
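+    # (Delete is a long-running operation; the client wraps the returned
+    # Operation proto in an api_core future, hence the assertion below.)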
+ assert isinstance(response, future.Future) + + +def test_delete_training_pipeline_from_dict(): + test_delete_training_pipeline(request_type=dict) + + +def test_delete_training_pipeline_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_training_pipeline), + '__call__') as call: + client.delete_training_pipeline() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeleteTrainingPipelineRequest() + + +@pytest.mark.asyncio +async def test_delete_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.DeleteTrainingPipelineRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeleteTrainingPipelineRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_training_pipeline_async_from_dict(): + await test_delete_training_pipeline_async(request_type=dict) + + +def test_delete_training_pipeline_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.DeleteTrainingPipelineRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_training_pipeline), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_training_pipeline_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.DeleteTrainingPipelineRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.delete_training_pipeline),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
+        await client.delete_training_pipeline(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_delete_training_pipeline_flattened():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_training_pipeline),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_training_pipeline(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_delete_training_pipeline_flattened_error():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_training_pipeline(
+            pipeline_service.DeleteTrainingPipelineRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_training_pipeline_flattened_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_training_pipeline),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_training_pipeline(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_training_pipeline_flattened_error_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.delete_training_pipeline( + pipeline_service.DeleteTrainingPipelineRequest(), + name='name_value', + ) + + +def test_cancel_training_pipeline(transport: str = 'grpc', request_type=pipeline_service.CancelTrainingPipelineRequest): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CancelTrainingPipelineRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_training_pipeline_from_dict(): + test_cancel_training_pipeline(request_type=dict) + + +def test_cancel_training_pipeline_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_training_pipeline), + '__call__') as call: + client.cancel_training_pipeline() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CancelTrainingPipelineRequest() + + +@pytest.mark.asyncio +async def test_cancel_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CancelTrainingPipelineRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_training_pipeline), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_training_pipeline(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CancelTrainingPipelineRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_training_pipeline_async_from_dict(): + await test_cancel_training_pipeline_async(request_type=dict) + + +def test_cancel_training_pipeline_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+    request = pipeline_service.CancelTrainingPipelineRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_training_pipeline),
+            '__call__') as call:
+        call.return_value = None
+        client.cancel_training_pipeline(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_cancel_training_pipeline_field_headers_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = pipeline_service.CancelTrainingPipelineRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_training_pipeline),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        await client.cancel_training_pipeline(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_cancel_training_pipeline_flattened():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_training_pipeline),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.cancel_training_pipeline(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_cancel_training_pipeline_flattened_error():
+    client = PipelineServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.cancel_training_pipeline(
+            pipeline_service.CancelTrainingPipelineRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_cancel_training_pipeline_flattened_async():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_training_pipeline),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+ response = await client.cancel_training_pipeline( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_cancel_training_pipeline_flattened_error_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.cancel_training_pipeline( + pipeline_service.CancelTrainingPipelineRequest(), + name='name_value', + ) + + +def test_create_pipeline_job(transport: str = 'grpc', request_type=pipeline_service.CreatePipelineJobRequest): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_pipeline_job.PipelineJob( + name='name_value', + display_name='display_name_value', + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + service_account='service_account_value', + network='network_value', + ) + response = client.create_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CreatePipelineJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_pipeline_job.PipelineJob) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + assert response.service_account == 'service_account_value' + assert response.network == 'network_value' + + +def test_create_pipeline_job_from_dict(): + test_create_pipeline_job(request_type=dict) + + +def test_create_pipeline_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), + '__call__') as call: + client.create_pipeline_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CreatePipelineJobRequest() + + +@pytest.mark.asyncio +async def test_create_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CreatePipelineJobRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(
+ type(client.transport.create_pipeline_job),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_pipeline_job.PipelineJob(
+ name='name_value',
+ display_name='display_name_value',
+ state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+ service_account='service_account_value',
+ network='network_value',
+ ))
+ response = await client.create_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == pipeline_service.CreatePipelineJobRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_pipeline_job.PipelineJob)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
+ assert response.service_account == 'service_account_value'
+ assert response.network == 'network_value'
+
+
+@pytest.mark.asyncio
+async def test_create_pipeline_job_async_from_dict():
+ await test_create_pipeline_job_async(request_type=dict)
+
+
+def test_create_pipeline_job_field_headers():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.CreatePipelineJobRequest()
+
+ request.parent = 'parent/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_pipeline_job),
+ '__call__') as call:
+ call.return_value = gca_pipeline_job.PipelineJob()
+ client.create_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'parent=parent/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_create_pipeline_job_field_headers_async():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.CreatePipelineJobRequest()
+
+ request.parent = 'parent/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_pipeline_job),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_pipeline_job.PipelineJob())
+ await client.create_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'parent=parent/value',
+ ) in kw['metadata']
+
+
+def test_create_pipeline_job_flattened():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_pipeline_job),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_pipeline_job.PipelineJob()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.create_pipeline_job(
+ parent='parent_value',
+ pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'),
+ pipeline_job_id='pipeline_job_id_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+ assert args[0].pipeline_job == gca_pipeline_job.PipelineJob(name='name_value')
+ assert args[0].pipeline_job_id == 'pipeline_job_id_value'
+
+
+def test_create_pipeline_job_flattened_error():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_pipeline_job(
+ pipeline_service.CreatePipelineJobRequest(),
+ parent='parent_value',
+ pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'),
+ pipeline_job_id='pipeline_job_id_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_create_pipeline_job_flattened_async():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_pipeline_job),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_pipeline_job.PipelineJob())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.create_pipeline_job(
+ parent='parent_value',
+ pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'),
+ pipeline_job_id='pipeline_job_id_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+ assert args[0].pipeline_job == gca_pipeline_job.PipelineJob(name='name_value')
+ assert args[0].pipeline_job_id == 'pipeline_job_id_value'
+
+
+@pytest.mark.asyncio
+async def test_create_pipeline_job_flattened_error_async():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.create_pipeline_job(
+ pipeline_service.CreatePipelineJobRequest(),
+ parent='parent_value',
+ pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'),
+ pipeline_job_id='pipeline_job_id_value',
+ )
+
+
+def test_get_pipeline_job(transport: str = 'grpc', request_type=pipeline_service.GetPipelineJobRequest):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_pipeline_job),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = pipeline_job.PipelineJob(
+ name='name_value',
+ display_name='display_name_value',
+ state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+ service_account='service_account_value',
+ network='network_value',
+ )
+ response = client.get_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == pipeline_service.GetPipelineJobRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pipeline_job.PipelineJob)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
+ assert response.service_account == 'service_account_value'
+ assert response.network == 'network_value'
+
+
+def test_get_pipeline_job_from_dict():
+ test_get_pipeline_job(request_type=dict)
+
+
+def test_get_pipeline_job_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_pipeline_job),
+ '__call__') as call:
+ client.get_pipeline_job()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == pipeline_service.GetPipelineJobRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.GetPipelineJobRequest):
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_pipeline_job),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_job.PipelineJob(
+ name='name_value',
+ display_name='display_name_value',
+ state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+ service_account='service_account_value',
+ network='network_value',
+ ))
+ response = await client.get_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == pipeline_service.GetPipelineJobRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pipeline_job.PipelineJob)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
+ assert response.service_account == 'service_account_value'
+ assert response.network == 'network_value'
+
+
+@pytest.mark.asyncio
+async def test_get_pipeline_job_async_from_dict():
+ await test_get_pipeline_job_async(request_type=dict)
+
+
+def test_get_pipeline_job_field_headers():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.GetPipelineJobRequest()
+
+ request.name = 'name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_pipeline_job),
+ '__call__') as call:
+ call.return_value = pipeline_job.PipelineJob()
+ client.get_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_pipeline_job_field_headers_async():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.GetPipelineJobRequest()
+
+ request.name = 'name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_pipeline_job),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_job.PipelineJob())
+ await client.get_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+def test_get_pipeline_job_flattened():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_pipeline_job),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = pipeline_job.PipelineJob()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_pipeline_job(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+def test_get_pipeline_job_flattened_error():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_pipeline_job(
+ pipeline_service.GetPipelineJobRequest(),
+ name='name_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_pipeline_job_flattened_async():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_pipeline_job),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_job.PipelineJob())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_pipeline_job(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_pipeline_job_flattened_error_async():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_pipeline_job(
+ pipeline_service.GetPipelineJobRequest(),
+ name='name_value',
+ )
+
+
+def test_list_pipeline_jobs(transport: str = 'grpc', request_type=pipeline_service.ListPipelineJobsRequest):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = pipeline_service.ListPipelineJobsResponse(
+ next_page_token='next_page_token_value',
+ )
+ response = client.list_pipeline_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == pipeline_service.ListPipelineJobsRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListPipelineJobsPager)
+ assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_pipeline_jobs_from_dict():
+ test_list_pipeline_jobs(request_type=dict)
+
+
+def test_list_pipeline_jobs_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs),
+ '__call__') as call:
+ client.list_pipeline_jobs()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == pipeline_service.ListPipelineJobsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.ListPipelineJobsRequest):
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListPipelineJobsResponse(
+ next_page_token='next_page_token_value',
+ ))
+ response = await client.list_pipeline_jobs(request)
+
+ # Establish that the underlying gRPC stub method was called.
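+ # Async variants assert only that at least one call was recorded,
+ # rather than an exact call count.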
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.ListPipelineJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPipelineJobsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_async_from_dict(): + await test_list_pipeline_jobs_async(request_type=dict) + + +def test_list_pipeline_jobs_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.ListPipelineJobsRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), + '__call__') as call: + call.return_value = pipeline_service.ListPipelineJobsResponse() + client.list_pipeline_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.ListPipelineJobsRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListPipelineJobsResponse()) + await client.list_pipeline_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_list_pipeline_jobs_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_service.ListPipelineJobsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_pipeline_jobs( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +def test_list_pipeline_jobs_flattened_error(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError):
+ client.list_pipeline_jobs(
+ pipeline_service.ListPipelineJobsRequest(),
+ parent='parent_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_flattened_async():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListPipelineJobsResponse())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_pipeline_jobs(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_flattened_error_async():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_pipeline_jobs(
+ pipeline_service.ListPipelineJobsRequest(),
+ parent='parent_value',
+ )
+
+
+def test_list_pipeline_jobs_pager():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ ],
+ next_page_token='abc',
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[],
+ next_page_token='def',
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ ],
+ next_page_token='ghi',
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ('parent', ''),
+ )),
+ )
+ pager = client.list_pipeline_jobs(request={})
+
+ assert pager._metadata == metadata
+
+ results = [i for i in pager]
+ assert len(results) == 6
+ assert all(isinstance(i, pipeline_job.PipelineJob)
+ for i in results)
+
+def test_list_pipeline_jobs_pages():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs),
+ '__call__') as call:
+ # Set the response to a series of pages.
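+ # side_effect yields one response per transport call; the trailing
+ # RuntimeError makes any fetch past the final page fail loudly.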
+ call.side_effect = (
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ ],
+ next_page_token='abc',
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[],
+ next_page_token='def',
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ ],
+ next_page_token='ghi',
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_pipeline_jobs(request={}).pages)
+ for page_, token in zip(pages, ['abc','def','ghi', '']):
+ assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_async_pager():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ ],
+ next_page_token='abc',
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[],
+ next_page_token='def',
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ ],
+ next_page_token='ghi',
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_pipeline_jobs(request={},)
+ assert async_pager.next_page_token == 'abc'
+ responses = []
+ async for response in async_pager:
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, pipeline_job.PipelineJob)
+ for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_async_pages():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
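+ # With new_callable=mock.AsyncMock, each awaited call consumes the
+ # next response from side_effect.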
+ call.side_effect = ( + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + ], + next_page_token='abc', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[], + next_page_token='def', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + ], + next_page_token='ghi', + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_pipeline_jobs(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +def test_delete_pipeline_job(transport: str = 'grpc', request_type=pipeline_service.DeletePipelineJobRequest): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeletePipelineJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_pipeline_job_from_dict(): + test_delete_pipeline_job(request_type=dict) + + +def test_delete_pipeline_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), + '__call__') as call: + client.delete_pipeline_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeletePipelineJobRequest() + + +@pytest.mark.asyncio +async def test_delete_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.DeletePipelineJobRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeletePipelineJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_pipeline_job_async_from_dict(): + await test_delete_pipeline_job_async(request_type=dict) + + +def test_delete_pipeline_job_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.DeletePipelineJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_pipeline_job_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.DeletePipelineJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_delete_pipeline_job_flattened(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_pipeline_job( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_delete_pipeline_job_flattened_error(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
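+ # The client rejects the combination outright rather than guessing
+ # which of the two should take precedence.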
+ with pytest.raises(ValueError):
+ client.delete_pipeline_job(
+ pipeline_service.DeletePipelineJobRequest(),
+ name='name_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_delete_pipeline_job_flattened_async():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_pipeline_job),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name='operations/spam')
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_pipeline_job(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_pipeline_job_flattened_error_async():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.delete_pipeline_job(
+ pipeline_service.DeletePipelineJobRequest(),
+ name='name_value',
+ )
+
+
+def test_cancel_pipeline_job(transport: str = 'grpc', request_type=pipeline_service.CancelPipelineJobRequest):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_pipeline_job),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.cancel_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == pipeline_service.CancelPipelineJobRequest()
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_cancel_pipeline_job_from_dict():
+ test_cancel_pipeline_job(request_type=dict)
+
+
+def test_cancel_pipeline_job_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.cancel_pipeline_job), + '__call__') as call: + client.cancel_pipeline_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CancelPipelineJobRequest() + + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CancelPipelineJobRequest): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CancelPipelineJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_async_from_dict(): + await test_cancel_pipeline_job_async(request_type=dict) + + +def test_cancel_pipeline_job_field_headers(): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CancelPipelineJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), + '__call__') as call: + call.return_value = None + client.cancel_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CancelPipelineJobRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
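+ # Each mock_calls entry unpacks to (name, args, kwargs); the routing
+ # header should appear as a (key, value) tuple in the metadata kwarg.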
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+def test_cancel_pipeline_job_flattened():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_pipeline_job),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.cancel_pipeline_job(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+def test_cancel_pipeline_job_flattened_error():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.cancel_pipeline_job(
+ pipeline_service.CancelPipelineJobRequest(),
+ name='name_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_cancel_pipeline_job_flattened_async():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_pipeline_job),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.cancel_pipeline_job(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_cancel_pipeline_job_flattened_error_async():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.cancel_pipeline_job(
+ pipeline_service.CancelPipelineJobRequest(),
+ name='name_value',
+ )
+
+
+def test_credentials_transport_error():
+ # It is an error to provide credentials and a transport instance.
+ transport = transports.PipelineServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # It is an error to provide a credentials file and a transport instance.
+ transport = transports.PipelineServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = PipelineServiceClient(
+ client_options={"credentials_file": "credentials.json"},
+ transport=transport,
+ )
+
+ # It is an error to provide scopes and a transport instance.
+ transport = transports.PipelineServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PipelineServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.PipelineServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = PipelineServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.PipelineServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.PipelineServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.PipelineServiceGrpcTransport, + transports.PipelineServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.PipelineServiceGrpcTransport, + ) + +def test_pipeline_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.PipelineServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_pipeline_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1beta1.services.pipeline_service.transports.PipelineServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.PipelineServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
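+ # getattr is used so the same loop covers every RPC listed below.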
+ methods = ( + 'create_training_pipeline', + 'get_training_pipeline', + 'list_training_pipelines', + 'delete_training_pipeline', + 'cancel_training_pipeline', + 'create_pipeline_job', + 'get_pipeline_job', + 'list_pipeline_jobs', + 'delete_pipeline_job', + 'cancel_pipeline_job', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +@requires_google_auth_gte_1_25_0 +def test_pipeline_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.pipeline_service.transports.PipelineServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.PipelineServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_pipeline_service_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.pipeline_service.transports.PipelineServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.PipelineServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + ), + quota_project_id="octopus", + ) + + +def test_pipeline_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.pipeline_service.transports.PipelineServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.PipelineServiceTransport() + adc.assert_called_once() + + +@requires_google_auth_gte_1_25_0 +def test_pipeline_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + PipelineServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_pipeline_service_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + PipelineServiceClient() + adc.assert_called_once_with( + scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.PipelineServiceGrpcTransport, + transports.PipelineServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_pipeline_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.PipelineServiceGrpcTransport, + transports.PipelineServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_pipeline_service_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") + adc.assert_called_once_with(scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.PipelineServiceGrpcTransport, grpc_helpers), + (transports.PipelineServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_pipeline_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.PipelineServiceGrpcTransport, transports.PipelineServiceGrpcAsyncIOTransport]) +def test_pipeline_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
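+ # create_channel is patched so the test can inspect the keyword arguments
+ # the transport forwards, without opening a real channel.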
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+ mock_ssl_channel_creds = mock.Mock()
+ transport_class(
+ host="squid.clam.whelk",
+ credentials=cred,
+ ssl_channel_credentials=mock_ssl_channel_creds
+ )
+ mock_create_channel.assert_called_once_with(
+ "squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_channel_creds,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+ # is used.
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+ with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+ transport_class(
+ credentials=cred,
+ client_cert_source_for_mtls=client_cert_source_callback
+ )
+ expected_cert, expected_key = client_cert_source_callback()
+ mock_ssl_cred.assert_called_once_with(
+ certificate_chain=expected_cert,
+ private_key=expected_key
+ )
+
+
+def test_pipeline_service_host_no_port():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
+ )
+ assert client.transport._host == 'aiplatform.googleapis.com:443'
+
+
+def test_pipeline_service_host_with_port():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
+ )
+ assert client.transport._host == 'aiplatform.googleapis.com:8000'
+
+def test_pipeline_service_grpc_transport_channel():
+ channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.PipelineServiceGrpcTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials is None
+
+
+def test_pipeline_service_grpc_asyncio_transport_channel():
+ channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.PipelineServiceGrpcAsyncIOTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.PipelineServiceGrpcTransport, transports.PipelineServiceGrpcAsyncIOTransport])
+def test_pipeline_service_transport_channel_mtls_with_client_cert_source(
+ transport_class
+):
+ with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
+ with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+ mock_ssl_cred = mock.Mock()
+ grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+
+ cred = ga_credentials.AnonymousCredentials()
+ with pytest.warns(DeprecationWarning):
+ with mock.patch.object(google.auth, 'default') as adc:
+ adc.return_value = (cred, None)
+ transport = transport_class(
+ host="squid.clam.whelk",
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=client_cert_source_callback,
+ )
+ adc.assert_called_once()
+
+ grpc_ssl_channel_cred.assert_called_once_with(
+ certificate_chain=b"cert bytes", private_key=b"key bytes"
+ )
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+ assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.PipelineServiceGrpcTransport, transports.PipelineServiceGrpcAsyncIOTransport])
+def test_pipeline_service_transport_channel_mtls_with_adc(
+ transport_class
+):
+ mock_ssl_cred = mock.Mock()
+ with mock.patch.multiple(
+ "google.auth.transport.grpc.SslCredentials",
+ __init__=mock.Mock(return_value=None),
+ ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+ ):
+ with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+ mock_cred = mock.Mock()
+
+ with pytest.warns(DeprecationWarning):
+ transport = transport_class(
+ host="squid.clam.whelk",
+ credentials=mock_cred,
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=None,
+ )
+
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=mock_cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_pipeline_service_grpc_lro_client():
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+ transport = client.transport
+
+ # Ensure that we have an api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.OperationsClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
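+ # An identity (is) check confirms the property returns a cached instance.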
+    assert transport.operations_client is transport.operations_client
+
+
+def test_pipeline_service_grpc_lro_async_client():
+    client = PipelineServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc_asyncio',
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(
+        transport.operations_client,
+        operations_v1.OperationsAsyncClient,
+    )
+
+    # Ensure that subsequent calls to the property send the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_artifact_path():
+    project = "squid"
+    location = "clam"
+    metadata_store = "whelk"
+    artifact = "octopus"
+    expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(project=project, location=location, metadata_store=metadata_store, artifact=artifact, )
+    actual = PipelineServiceClient.artifact_path(project, location, metadata_store, artifact)
+    assert expected == actual
+
+
+def test_parse_artifact_path():
+    expected = {
+        "project": "oyster",
+        "location": "nudibranch",
+        "metadata_store": "cuttlefish",
+        "artifact": "mussel",
+    }
+    path = PipelineServiceClient.artifact_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = PipelineServiceClient.parse_artifact_path(path)
+    assert expected == actual
+
+def test_context_path():
+    project = "winkle"
+    location = "nautilus"
+    metadata_store = "scallop"
+    context = "abalone"
+    expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(project=project, location=location, metadata_store=metadata_store, context=context, )
+    actual = PipelineServiceClient.context_path(project, location, metadata_store, context)
+    assert expected == actual
+
+
+def test_parse_context_path():
+    expected = {
+        "project": "squid",
+        "location": "clam",
+        "metadata_store": "whelk",
+        "context": "octopus",
+    }
+    path = PipelineServiceClient.context_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = PipelineServiceClient.parse_context_path(path)
+    assert expected == actual
+
+def test_custom_job_path():
+    project = "oyster"
+    location = "nudibranch"
+    custom_job = "cuttlefish"
+    expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, )
+    actual = PipelineServiceClient.custom_job_path(project, location, custom_job)
+    assert expected == actual
+
+
+def test_parse_custom_job_path():
+    expected = {
+        "project": "mussel",
+        "location": "winkle",
+        "custom_job": "nautilus",
+    }
+    path = PipelineServiceClient.custom_job_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = PipelineServiceClient.parse_custom_job_path(path)
+    assert expected == actual
+
+def test_endpoint_path():
+    project = "scallop"
+    location = "abalone"
+    endpoint = "squid"
+    expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, )
+    actual = PipelineServiceClient.endpoint_path(project, location, endpoint)
+    assert expected == actual
+
+
+def test_parse_endpoint_path():
+    expected = {
+        "project": "clam",
+        "location": "whelk",
+        "endpoint": "octopus",
+    }
+    path = PipelineServiceClient.endpoint_path(**expected)
+
+    # Check that the path construction is reversible.
+ actual = PipelineServiceClient.parse_endpoint_path(path) + assert expected == actual + +def test_execution_path(): + project = "oyster" + location = "nudibranch" + metadata_store = "cuttlefish" + execution = "mussel" + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(project=project, location=location, metadata_store=metadata_store, execution=execution, ) + actual = PipelineServiceClient.execution_path(project, location, metadata_store, execution) + assert expected == actual + + +def test_parse_execution_path(): + expected = { + "project": "winkle", + "location": "nautilus", + "metadata_store": "scallop", + "execution": "abalone", + } + path = PipelineServiceClient.execution_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_execution_path(path) + assert expected == actual + +def test_model_path(): + project = "squid" + location = "clam" + model = "whelk" + expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) + actual = PipelineServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "octopus", + "location": "oyster", + "model": "nudibranch", + } + path = PipelineServiceClient.model_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_model_path(path) + assert expected == actual + +def test_network_path(): + project = "cuttlefish" + network = "mussel" + expected = "projects/{project}/global/networks/{network}".format(project=project, network=network, ) + actual = PipelineServiceClient.network_path(project, network) + assert expected == actual + + +def test_parse_network_path(): + expected = { + "project": "winkle", + "network": "nautilus", + } + path = PipelineServiceClient.network_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_network_path(path) + assert expected == actual + +def test_pipeline_job_path(): + project = "scallop" + location = "abalone" + pipeline_job = "squid" + expected = "projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}".format(project=project, location=location, pipeline_job=pipeline_job, ) + actual = PipelineServiceClient.pipeline_job_path(project, location, pipeline_job) + assert expected == actual + + +def test_parse_pipeline_job_path(): + expected = { + "project": "clam", + "location": "whelk", + "pipeline_job": "octopus", + } + path = PipelineServiceClient.pipeline_job_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_pipeline_job_path(path) + assert expected == actual + +def test_training_pipeline_path(): + project = "oyster" + location = "nudibranch" + training_pipeline = "cuttlefish" + expected = "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, ) + actual = PipelineServiceClient.training_pipeline_path(project, location, training_pipeline) + assert expected == actual + + +def test_parse_training_pipeline_path(): + expected = { + "project": "mussel", + "location": "winkle", + "training_pipeline": "nautilus", + } + path = PipelineServiceClient.training_pipeline_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PipelineServiceClient.parse_training_pipeline_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "scallop" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = PipelineServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "abalone", + } + path = PipelineServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "squid" + expected = "folders/{folder}".format(folder=folder, ) + actual = PipelineServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "clam", + } + path = PipelineServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "whelk" + expected = "organizations/{organization}".format(organization=organization, ) + actual = PipelineServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "octopus", + } + path = PipelineServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "oyster" + expected = "projects/{project}".format(project=project, ) + actual = PipelineServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nudibranch", + } + path = PipelineServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "cuttlefish" + location = "mussel" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = PipelineServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "winkle", + "location": "nautilus", + } + path = PipelineServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PipelineServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.PipelineServiceTransport, '_prep_wrapped_messages') as prep: + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.PipelineServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = PipelineServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py new file mode 100644 index 0000000000..22e6da9f92 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py @@ -0,0 +1,1426 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.prediction_service import PredictionServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.prediction_service import PredictionServiceClient +from google.cloud.aiplatform_v1beta1.services.prediction_service import transports +from google.cloud.aiplatform_v1beta1.services.prediction_service.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.aiplatform_v1beta1.types import explanation +from google.cloud.aiplatform_v1beta1.types import prediction_service +from google.oauth2 import service_account +from google.protobuf import struct_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). 
+requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert PredictionServiceClient._get_default_mtls_endpoint(None) is None + assert PredictionServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert PredictionServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert PredictionServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert PredictionServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert PredictionServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + PredictionServiceClient, + PredictionServiceAsyncClient, +]) +def test_prediction_service_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +@pytest.mark.parametrize("client_class", [ + PredictionServiceClient, + PredictionServiceAsyncClient, +]) +def test_prediction_service_client_service_account_always_use_jwt(client_class): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.PredictionServiceGrpcTransport, "grpc"), + (transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_prediction_service_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + PredictionServiceClient, + PredictionServiceAsyncClient, +]) +def test_prediction_service_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with 
mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +def test_prediction_service_client_get_transport_class(): + transport = PredictionServiceClient.get_transport_class() + available_transports = [ + transports.PredictionServiceGrpcTransport, + ] + assert transport in available_transports + + transport = PredictionServiceClient.get_transport_class("grpc") + assert transport == transports.PredictionServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) +@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) +def test_prediction_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(PredictionServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(PredictionServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
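+    # (GOOGLE_API_USE_MTLS_ENDPOINT recognizes "never", "auto", and "always";
+    # any other value raises MutualTLSChannelError, as exercised further below.)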
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", "true"), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", "false"), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) +@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_prediction_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
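+    # ("true" lets the certificate through and switches to the mTLS endpoint;
+    # "false" ignores it and keeps the default endpoint.)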
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_prediction_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), + (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_prediction_service_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_prediction_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1beta1.services.prediction_service.transports.PredictionServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = PredictionServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_predict(transport: str = 'grpc', request_type=prediction_service.PredictRequest): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.predict), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = prediction_service.PredictResponse( + deployed_model_id='deployed_model_id_value', + ) + response = client.predict(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.PredictRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, prediction_service.PredictResponse) + assert response.deployed_model_id == 'deployed_model_id_value' + + +def test_predict_from_dict(): + test_predict(request_type=dict) + + +def test_predict_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
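+    # (client.predict() is invoked with no arguments at all; the client is
+    # expected to substitute an empty PredictRequest before hitting the stub.)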
+    with mock.patch.object(
+            type(client.transport.predict),
+            '__call__') as call:
+        client.predict()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == prediction_service.PredictRequest()
+
+
+@pytest.mark.asyncio
+async def test_predict_async(transport: str = 'grpc_asyncio', request_type=prediction_service.PredictRequest):
+    client = PredictionServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.predict),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse(
+            deployed_model_id='deployed_model_id_value',
+        ))
+        response = await client.predict(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == prediction_service.PredictRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, prediction_service.PredictResponse)
+    assert response.deployed_model_id == 'deployed_model_id_value'
+
+
+@pytest.mark.asyncio
+async def test_predict_async_from_dict():
+    await test_predict_async(request_type=dict)
+
+
+def test_predict_field_headers():
+    client = PredictionServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = prediction_service.PredictRequest()
+
+    request.endpoint = 'endpoint/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.predict),
+            '__call__') as call:
+        call.return_value = prediction_service.PredictResponse()
+        client.predict(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'endpoint=endpoint/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_predict_field_headers_async():
+    client = PredictionServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = prediction_service.PredictRequest()
+
+    request.endpoint = 'endpoint/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.predict),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse())
+        await client.predict(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
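+    # (The routing header travels as call metadata under "x-goog-request-params",
+    # not inside the request message itself.)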
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'endpoint=endpoint/value',
+    ) in kw['metadata']
+
+
+def test_predict_flattened():
+    client = PredictionServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.predict),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = prediction_service.PredictResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.predict(
+            endpoint='endpoint_value',
+            instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
+            parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].endpoint == 'endpoint_value'
+        assert args[0].instances == [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)]
+        assert args[0].parameters == struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)
+
+
+def test_predict_flattened_error():
+    client = PredictionServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.predict(
+            prediction_service.PredictRequest(),
+            endpoint='endpoint_value',
+            instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
+            parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE),
+        )
+
+
+@pytest.mark.asyncio
+async def test_predict_flattened_async():
+    client = PredictionServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.predict),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.predict(
+            endpoint='endpoint_value',
+            instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
+            parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].endpoint == 'endpoint_value'
+        assert args[0].instances == [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)]
+        assert args[0].parameters == struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)
+
+
+@pytest.mark.asyncio
+async def test_predict_flattened_error_async():
+    client = PredictionServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.predict(
+            prediction_service.PredictRequest(),
+            endpoint='endpoint_value',
+            instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
+            parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE),
+        )
+
+
+def test_explain(transport: str = 'grpc', request_type=prediction_service.ExplainRequest):
+    client = PredictionServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.explain),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = prediction_service.ExplainResponse(
+            deployed_model_id='deployed_model_id_value',
+        )
+        response = client.explain(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == prediction_service.ExplainRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, prediction_service.ExplainResponse)
+    assert response.deployed_model_id == 'deployed_model_id_value'
+
+
+def test_explain_from_dict():
+    test_explain(request_type=dict)
+
+
+def test_explain_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = PredictionServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.explain),
+            '__call__') as call:
+        client.explain()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == prediction_service.ExplainRequest()
+
+
+@pytest.mark.asyncio
+async def test_explain_async(transport: str = 'grpc_asyncio', request_type=prediction_service.ExplainRequest):
+    client = PredictionServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.explain),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.ExplainResponse(
+            deployed_model_id='deployed_model_id_value',
+        ))
+        response = await client.explain(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == prediction_service.ExplainRequest()
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, prediction_service.ExplainResponse) + assert response.deployed_model_id == 'deployed_model_id_value' + + +@pytest.mark.asyncio +async def test_explain_async_from_dict(): + await test_explain_async(request_type=dict) + + +def test_explain_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.ExplainRequest() + + request.endpoint = 'endpoint/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.explain), + '__call__') as call: + call.return_value = prediction_service.ExplainResponse() + client.explain(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_explain_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.ExplainRequest() + + request.endpoint = 'endpoint/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.explain), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.ExplainResponse()) + await client.explain(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'endpoint=endpoint/value', + ) in kw['metadata'] + + +def test_explain_flattened(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.explain), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = prediction_service.ExplainResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.explain( + endpoint='endpoint_value', + instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], + parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), + deployed_model_id='deployed_model_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
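+        # (The flattened endpoint/instances/parameters/deployed_model_id kwargs
+        # should have been packed into a single ExplainRequest.)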
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].endpoint == 'endpoint_value'
+        assert args[0].instances == [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)]
+        assert args[0].parameters == struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)
+        assert args[0].deployed_model_id == 'deployed_model_id_value'
+
+
+def test_explain_flattened_error():
+    client = PredictionServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.explain(
+            prediction_service.ExplainRequest(),
+            endpoint='endpoint_value',
+            instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
+            parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE),
+            deployed_model_id='deployed_model_id_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_explain_flattened_async():
+    client = PredictionServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.explain),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.ExplainResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.explain(
+            endpoint='endpoint_value',
+            instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
+            parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE),
+            deployed_model_id='deployed_model_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].endpoint == 'endpoint_value'
+        assert args[0].instances == [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)]
+        assert args[0].parameters == struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)
+        assert args[0].deployed_model_id == 'deployed_model_id_value'
+
+
+@pytest.mark.asyncio
+async def test_explain_flattened_error_async():
+    client = PredictionServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.explain(
+            prediction_service.ExplainRequest(),
+            endpoint='endpoint_value',
+            instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
+            parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE),
+            deployed_model_id='deployed_model_id_value',
+        )
+
+
+def test_credentials_transport_error():
+    # It is an error to provide credentials and a transport instance.
+    transport = transports.PredictionServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = PredictionServiceClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport,
+        )
+
+    # It is an error to provide a credentials file and a transport instance.
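+    # (A transport instance already encapsulates its own credentials, so any
+    # additional credential source would be ambiguous.)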
+ transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PredictionServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PredictionServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = PredictionServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.PredictionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.PredictionServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.PredictionServiceGrpcTransport, + transports.PredictionServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.PredictionServiceGrpcTransport, + ) + +def test_prediction_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.PredictionServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_prediction_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1beta1.services.prediction_service.transports.PredictionServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.PredictionServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
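+    # (The concrete gRPC transports are expected to override each of these
+    # with real stubs.)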
+ methods = ( + 'predict', + 'explain', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + +@requires_google_auth_gte_1_25_0 +def test_prediction_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.PredictionServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_prediction_service_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.PredictionServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + ), + quota_project_id="octopus", + ) + + +def test_prediction_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.PredictionServiceTransport() + adc.assert_called_once() + + +@requires_google_auth_gte_1_25_0 +def test_prediction_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + PredictionServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_prediction_service_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. 
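+    # (google-auth < 1.25.0 predates the default_scopes argument, so the
+    # cloud-platform scope is passed through the plain scopes argument instead.)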
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + PredictionServiceClient() + adc.assert_called_once_with( + scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.PredictionServiceGrpcTransport, + transports.PredictionServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_prediction_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.PredictionServiceGrpcTransport, + transports.PredictionServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_prediction_service_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") + adc.assert_called_once_with(scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.PredictionServiceGrpcTransport, grpc_helpers), + (transports.PredictionServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_prediction_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) +def test_prediction_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+        mock_ssl_channel_creds = mock.Mock()
+        transport_class(
+            host="squid.clam.whelk",
+            credentials=cred,
+            ssl_channel_credentials=mock_ssl_channel_creds
+        )
+        mock_create_channel.assert_called_once_with(
+            "squid.clam.whelk:443",
+            credentials=cred,
+            credentials_file=None,
+            scopes=None,
+            ssl_credentials=mock_ssl_channel_creds,
+            quota_project_id=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+    # is used.
+    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+            transport_class(
+                credentials=cred,
+                client_cert_source_for_mtls=client_cert_source_callback
+            )
+            expected_cert, expected_key = client_cert_source_callback()
+            mock_ssl_cred.assert_called_once_with(
+                certificate_chain=expected_cert,
+                private_key=expected_key
+            )
+
+
+def test_prediction_service_host_no_port():
+    client = PredictionServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
+    )
+    assert client.transport._host == 'aiplatform.googleapis.com:443'
+
+
+def test_prediction_service_host_with_port():
+    client = PredictionServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
+    )
+    assert client.transport._host == 'aiplatform.googleapis.com:8000'
+
+def test_prediction_service_grpc_transport_channel():
+    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.PredictionServiceGrpcTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+def test_prediction_service_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.PredictionServiceGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
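+# For reference, a non-mock ssl_channel_credentials object would come from
+# grpc.ssl_channel_credentials; a minimal sketch (the byte values here are
+# hypothetical placeholders):
+def _example_ssl_channel_credentials():
+    return grpc.ssl_channel_credentials(
+        certificate_chain=b"cert bytes", private_key=b"key bytes"
+    )
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.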
+@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) +def test_prediction_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) +def test_prediction_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_endpoint_path(): + project = "squid" + location = "clam" + endpoint = "whelk" + expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) + actual = PredictionServiceClient.endpoint_path(project, location, endpoint) + assert expected == actual + + +def test_parse_endpoint_path(): + expected = { + "project": "octopus", + "location": "oyster", + "endpoint": "nudibranch", + } + path = PredictionServiceClient.endpoint_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PredictionServiceClient.parse_endpoint_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "cuttlefish" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = PredictionServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "mussel", + } + path = PredictionServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = PredictionServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "winkle" + expected = "folders/{folder}".format(folder=folder, ) + actual = PredictionServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nautilus", + } + path = PredictionServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = PredictionServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "scallop" + expected = "organizations/{organization}".format(organization=organization, ) + actual = PredictionServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "abalone", + } + path = PredictionServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = PredictionServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "squid" + expected = "projects/{project}".format(project=project, ) + actual = PredictionServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "clam", + } + path = PredictionServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = PredictionServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "whelk" + location = "octopus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = PredictionServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + } + path = PredictionServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
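+    # As with endpoint_path above, each common_*_path helper is expected to
+    # round-trip through its parse_* counterpart, e.g.
+    #
+    #     PredictionServiceClient.common_location_path("oyster", "nudibranch")
+    #     # -> "projects/oyster/locations/nudibranch"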
+ actual = PredictionServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.PredictionServiceTransport, '_prep_wrapped_messages') as prep: + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.PredictionServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = PredictionServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py new file mode 100644 index 0000000000..8fcec601da --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py @@ -0,0 +1,2346 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import SpecialistPoolServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import SpecialistPoolServiceClient +from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import pagers +from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import transports +from google.cloud.aiplatform_v1beta1.services.specialist_pool_service.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import specialist_pool +from google.cloud.aiplatform_v1beta1.types import specialist_pool as gca_specialist_pool +from google.cloud.aiplatform_v1beta1.types import specialist_pool_service +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - 
Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert SpecialistPoolServiceClient._get_default_mtls_endpoint(None) is None + assert SpecialistPoolServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert SpecialistPoolServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert SpecialistPoolServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert SpecialistPoolServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert SpecialistPoolServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + SpecialistPoolServiceClient, + SpecialistPoolServiceAsyncClient, +]) +def test_specialist_pool_service_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +@pytest.mark.parametrize("client_class", [ + SpecialistPoolServiceClient, + SpecialistPoolServiceAsyncClient, +]) +def test_specialist_pool_service_client_service_account_always_use_jwt(client_class): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.SpecialistPoolServiceGrpcTransport, "grpc"), + (transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_specialist_pool_service_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + 
SpecialistPoolServiceClient, + SpecialistPoolServiceAsyncClient, +]) +def test_specialist_pool_service_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +def test_specialist_pool_service_client_get_transport_class(): + transport = SpecialistPoolServiceClient.get_transport_class() + available_transports = [ + transports.SpecialistPoolServiceGrpcTransport, + ] + assert transport in available_transports + + transport = SpecialistPoolServiceClient.get_transport_class("grpc") + assert transport == transports.SpecialistPoolServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc"), + (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(SpecialistPoolServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceClient)) +@mock.patch.object(SpecialistPoolServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceAsyncClient)) +def test_specialist_pool_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(SpecialistPoolServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(SpecialistPoolServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
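+    # (With "never" above, the client fell back to DEFAULT_ENDPOINT; with
+    # "always" it should pick DEFAULT_MTLS_ENDPOINT, which
+    # _get_default_mtls_endpoint derives by inserting ".mtls", e.g.
+    # "aiplatform.googleapis.com" -> "aiplatform.mtls.googleapis.com".)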
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc", "true"), + (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc", "false"), + (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(SpecialistPoolServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceClient)) +@mock.patch.object(SpecialistPoolServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_specialist_pool_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc"), + (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_specialist_pool_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc"), + (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_specialist_pool_service_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_specialist_pool_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1beta1.services.specialist_pool_service.transports.SpecialistPoolServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = SpecialistPoolServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_specialist_pool(transport: str = 'grpc', request_type=specialist_pool_service.CreateSpecialistPoolRequest): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.CreateSpecialistPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_specialist_pool_from_dict(): + test_create_specialist_pool(request_type=dict) + + +def test_create_specialist_pool_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
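+    # Calling client.create_specialist_pool() with no arguments should still
+    # reach the gRPC stub with a default CreateSpecialistPoolRequest(), which
+    # is what the assertion at the end of this test checks.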
+ client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_specialist_pool), + '__call__') as call: + client.create_specialist_pool() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.CreateSpecialistPoolRequest() + + +@pytest.mark.asyncio +async def test_create_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.CreateSpecialistPoolRequest): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.CreateSpecialistPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_specialist_pool_async_from_dict(): + await test_create_specialist_pool_async(request_type=dict) + + +def test_create_specialist_pool_field_headers(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = specialist_pool_service.CreateSpecialistPoolRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_specialist_pool), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_specialist_pool_field_headers_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = specialist_pool_service.CreateSpecialistPoolRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_specialist_pool), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_create_specialist_pool_flattened(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_specialist_pool( + parent='parent_value', + specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].specialist_pool == gca_specialist_pool.SpecialistPool(name='name_value') + + +def test_create_specialist_pool_flattened_error(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_specialist_pool( + specialist_pool_service.CreateSpecialistPoolRequest(), + parent='parent_value', + specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), + ) + + +@pytest.mark.asyncio +async def test_create_specialist_pool_flattened_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_specialist_pool( + parent='parent_value', + specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].specialist_pool == gca_specialist_pool.SpecialistPool(name='name_value') + + +@pytest.mark.asyncio +async def test_create_specialist_pool_flattened_error_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
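+    # That is, passing CreateSpecialistPoolRequest() positionally *and* the
+    # parent=/specialist_pool= keywords at the same time is ambiguous, so the
+    # client is expected to raise ValueError rather than guess which one wins.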
+ with pytest.raises(ValueError): + await client.create_specialist_pool( + specialist_pool_service.CreateSpecialistPoolRequest(), + parent='parent_value', + specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), + ) + + +def test_get_specialist_pool(transport: str = 'grpc', request_type=specialist_pool_service.GetSpecialistPoolRequest): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = specialist_pool.SpecialistPool( + name='name_value', + display_name='display_name_value', + specialist_managers_count=2662, + specialist_manager_emails=['specialist_manager_emails_value'], + pending_data_labeling_jobs=['pending_data_labeling_jobs_value'], + ) + response = client.get_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.GetSpecialistPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, specialist_pool.SpecialistPool) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.specialist_managers_count == 2662 + assert response.specialist_manager_emails == ['specialist_manager_emails_value'] + assert response.pending_data_labeling_jobs == ['pending_data_labeling_jobs_value'] + + +def test_get_specialist_pool_from_dict(): + test_get_specialist_pool(request_type=dict) + + +def test_get_specialist_pool_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_specialist_pool), + '__call__') as call: + client.get_specialist_pool() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.GetSpecialistPoolRequest() + + +@pytest.mark.asyncio +async def test_get_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.GetSpecialistPoolRequest): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. 
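+        # For the async surface the stub must return an awaitable;
+        # grpc_helpers_async.FakeUnaryUnaryCall wraps a plain response message
+        # so that "await client.get_specialist_pool(request)" resolves to it.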
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool.SpecialistPool(
+            name='name_value',
+            display_name='display_name_value',
+            specialist_managers_count=2662,
+            specialist_manager_emails=['specialist_manager_emails_value'],
+            pending_data_labeling_jobs=['pending_data_labeling_jobs_value'],
+        ))
+        response = await client.get_specialist_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == specialist_pool_service.GetSpecialistPoolRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, specialist_pool.SpecialistPool)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.specialist_managers_count == 2662
+    assert response.specialist_manager_emails == ['specialist_manager_emails_value']
+    assert response.pending_data_labeling_jobs == ['pending_data_labeling_jobs_value']
+
+
+@pytest.mark.asyncio
+async def test_get_specialist_pool_async_from_dict():
+    await test_get_specialist_pool_async(request_type=dict)
+
+
+def test_get_specialist_pool_field_headers():
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = specialist_pool_service.GetSpecialistPoolRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_specialist_pool),
+            '__call__') as call:
+        call.return_value = specialist_pool.SpecialistPool()
+        client.get_specialist_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_specialist_pool_field_headers_async():
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = specialist_pool_service.GetSpecialistPoolRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_specialist_pool),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool.SpecialistPool())
+        await client.get_specialist_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_get_specialist_pool_flattened():
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_specialist_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+ call.return_value = specialist_pool.SpecialistPool() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_specialist_pool( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_get_specialist_pool_flattened_error(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_specialist_pool( + specialist_pool_service.GetSpecialistPoolRequest(), + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_get_specialist_pool_flattened_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = specialist_pool.SpecialistPool() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool.SpecialistPool()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_specialist_pool( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_get_specialist_pool_flattened_error_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_specialist_pool( + specialist_pool_service.GetSpecialistPoolRequest(), + name='name_value', + ) + + +def test_list_specialist_pools(transport: str = 'grpc', request_type=specialist_pool_service.ListSpecialistPoolsRequest): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_specialist_pools), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = specialist_pool_service.ListSpecialistPoolsResponse( + next_page_token='next_page_token_value', + ) + response = client.list_specialist_pools(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.ListSpecialistPoolsRequest() + + # Establish that the response is the type that we expect. 
+    assert isinstance(response, pagers.ListSpecialistPoolsPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_specialist_pools_from_dict():
+    test_list_specialist_pools(request_type=dict)
+
+
+def test_list_specialist_pools_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_specialist_pools),
+            '__call__') as call:
+        client.list_specialist_pools()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == specialist_pool_service.ListSpecialistPoolsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_specialist_pools_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.ListSpecialistPoolsRequest):
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_specialist_pools),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool_service.ListSpecialistPoolsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_specialist_pools(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == specialist_pool_service.ListSpecialistPoolsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListSpecialistPoolsAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_specialist_pools_async_from_dict():
+    await test_list_specialist_pools_async(request_type=dict)
+
+
+def test_list_specialist_pools_field_headers():
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = specialist_pool_service.ListSpecialistPoolsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_specialist_pools),
+            '__call__') as call:
+        call.return_value = specialist_pool_service.ListSpecialistPoolsResponse()
+        client.list_specialist_pools(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
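+    # The routing header is a single metadata entry of the form
+    # ('x-goog-request-params', '<field>=<value>') -- here 'parent=parent/value' --
+    # which lets the backend route the request by resource name.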
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_specialist_pools_field_headers_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = specialist_pool_service.ListSpecialistPoolsRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_specialist_pools), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool_service.ListSpecialistPoolsResponse()) + await client.list_specialist_pools(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_list_specialist_pools_flattened(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_specialist_pools), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = specialist_pool_service.ListSpecialistPoolsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_specialist_pools( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +def test_list_specialist_pools_flattened_error(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_specialist_pools( + specialist_pool_service.ListSpecialistPoolsRequest(), + parent='parent_value', + ) + + +@pytest.mark.asyncio +async def test_list_specialist_pools_flattened_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_specialist_pools), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = specialist_pool_service.ListSpecialistPoolsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool_service.ListSpecialistPoolsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_specialist_pools( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_specialist_pools_flattened_error_async():
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_specialist_pools(
+            specialist_pool_service.ListSpecialistPoolsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_specialist_pools_pager():
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_specialist_pools),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[
+                    specialist_pool.SpecialistPool(),
+                    specialist_pool.SpecialistPool(),
+                    specialist_pool.SpecialistPool(),
+                ],
+                next_page_token='abc',
+            ),
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[],
+                next_page_token='def',
+            ),
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[
+                    specialist_pool.SpecialistPool(),
+                ],
+                next_page_token='ghi',
+            ),
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[
+                    specialist_pool.SpecialistPool(),
+                    specialist_pool.SpecialistPool(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_specialist_pools(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, specialist_pool.SpecialistPool)
+                   for i in results)
+
+
+def test_list_specialist_pools_pages():
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_specialist_pools),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[
+                    specialist_pool.SpecialistPool(),
+                    specialist_pool.SpecialistPool(),
+                    specialist_pool.SpecialistPool(),
+                ],
+                next_page_token='abc',
+            ),
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[],
+                next_page_token='def',
+            ),
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[
+                    specialist_pool.SpecialistPool(),
+                ],
+                next_page_token='ghi',
+            ),
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[
+                    specialist_pool.SpecialistPool(),
+                    specialist_pool.SpecialistPool(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_specialist_pools(request={}).pages)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_specialist_pools_async_pager():
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_specialist_pools),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
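+        # Each element of call.side_effect below is one fake page; the trailing
+        # RuntimeError is a tripwire that fails the test if the pager ever asks
+        # for a page beyond the last one (an empty next_page_token should stop
+        # iteration: 3 + 0 + 1 + 2 items -> 6 results in total).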
+        call.side_effect = (
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[
+                    specialist_pool.SpecialistPool(),
+                    specialist_pool.SpecialistPool(),
+                    specialist_pool.SpecialistPool(),
+                ],
+                next_page_token='abc',
+            ),
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[],
+                next_page_token='def',
+            ),
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[
+                    specialist_pool.SpecialistPool(),
+                ],
+                next_page_token='ghi',
+            ),
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[
+                    specialist_pool.SpecialistPool(),
+                    specialist_pool.SpecialistPool(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_specialist_pools(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, specialist_pool.SpecialistPool)
+                   for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_specialist_pools_async_pages():
+    client = SpecialistPoolServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_specialist_pools),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[
+                    specialist_pool.SpecialistPool(),
+                    specialist_pool.SpecialistPool(),
+                    specialist_pool.SpecialistPool(),
+                ],
+                next_page_token='abc',
+            ),
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[],
+                next_page_token='def',
+            ),
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[
+                    specialist_pool.SpecialistPool(),
+                ],
+                next_page_token='ghi',
+            ),
+            specialist_pool_service.ListSpecialistPoolsResponse(
+                specialist_pools=[
+                    specialist_pool.SpecialistPool(),
+                    specialist_pool.SpecialistPool(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_specialist_pools(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+
+def test_delete_specialist_pool(transport: str = 'grpc', request_type=specialist_pool_service.DeleteSpecialistPoolRequest):
+    client = SpecialistPoolServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_specialist_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.delete_specialist_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == specialist_pool_service.DeleteSpecialistPoolRequest()
+
+    # Establish that the response is the type that we expect.
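+    # Delete is a long-running operation: the transport returns an
+    # operations_pb2.Operation, which the client wraps so that callers get a
+    # future.Future and can block on (or poll for) completion.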
+ assert isinstance(response, future.Future) + + +def test_delete_specialist_pool_from_dict(): + test_delete_specialist_pool(request_type=dict) + + +def test_delete_specialist_pool_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_specialist_pool), + '__call__') as call: + client.delete_specialist_pool() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.DeleteSpecialistPoolRequest() + + +@pytest.mark.asyncio +async def test_delete_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.DeleteSpecialistPoolRequest): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.DeleteSpecialistPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_specialist_pool_async_from_dict(): + await test_delete_specialist_pool_async(request_type=dict) + + +def test_delete_specialist_pool_field_headers(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = specialist_pool_service.DeleteSpecialistPoolRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_specialist_pool), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_specialist_pool_field_headers_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = specialist_pool_service.DeleteSpecialistPoolRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_specialist_pool), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_delete_specialist_pool_flattened(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_specialist_pool( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_delete_specialist_pool_flattened_error(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_specialist_pool( + specialist_pool_service.DeleteSpecialistPoolRequest(), + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_delete_specialist_pool_flattened_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_specialist_pool( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_delete_specialist_pool_flattened_error_async(): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_specialist_pool( + specialist_pool_service.DeleteSpecialistPoolRequest(), + name='name_value', + ) + + +def test_update_specialist_pool(transport: str = 'grpc', request_type=specialist_pool_service.UpdateSpecialistPoolRequest): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.update_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.UpdateSpecialistPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_specialist_pool_from_dict(): + test_update_specialist_pool(request_type=dict) + + +def test_update_specialist_pool_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_specialist_pool), + '__call__') as call: + client.update_specialist_pool() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.UpdateSpecialistPoolRequest() + + +@pytest.mark.asyncio +async def test_update_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.UpdateSpecialistPoolRequest): + client = SpecialistPoolServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_specialist_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.update_specialist_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == specialist_pool_service.UpdateSpecialistPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_specialist_pool_async_from_dict(): + await test_update_specialist_pool_async(request_type=dict) + + +def test_update_specialist_pool_field_headers(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
+ # Set these to a non-empty value.
+ request = specialist_pool_service.UpdateSpecialistPoolRequest()
+
+ request.specialist_pool.name = 'specialist_pool.name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_specialist_pool),
+ '__call__') as call:
+ call.return_value = operations_pb2.Operation(name='operations/op')
+ client.update_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'specialist_pool.name=specialist_pool.name/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_update_specialist_pool_field_headers_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = specialist_pool_service.UpdateSpecialistPoolRequest()
+
+ request.specialist_pool.name = 'specialist_pool.name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_specialist_pool),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
+ await client.update_specialist_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'specialist_pool.name=specialist_pool.name/value',
+ ) in kw['metadata']
+
+
+def test_update_specialist_pool_flattened():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_specialist_pool),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name='operations/op')
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.update_specialist_pool(
+ specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].specialist_pool == gca_specialist_pool.SpecialistPool(name='name_value')
+ assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+def test_update_specialist_pool_flattened_error():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.update_specialist_pool(
+ specialist_pool_service.UpdateSpecialistPoolRequest(),
+ specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+
+@pytest.mark.asyncio
+async def test_update_specialist_pool_flattened_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_specialist_pool),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name='operations/spam')
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.update_specialist_pool(
+ specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].specialist_pool == gca_specialist_pool.SpecialistPool(name='name_value')
+ assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+@pytest.mark.asyncio
+async def test_update_specialist_pool_flattened_error_async():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.update_specialist_pool(
+ specialist_pool_service.UpdateSpecialistPoolRequest(),
+ specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+
+def test_credentials_transport_error():
+ # It is an error to provide credentials and a transport instance.
+ transport = transports.SpecialistPoolServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # It is an error to provide a credentials file and a transport instance.
+ transport = transports.SpecialistPoolServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = SpecialistPoolServiceClient(
+ client_options={"credentials_file": "credentials.json"},
+ transport=transport,
+ )
+
+ # It is an error to provide scopes and a transport instance.
+ transport = transports.SpecialistPoolServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = SpecialistPoolServiceClient(
+ client_options={"scopes": ["1", "2"]},
+ transport=transport,
+ )
+
+
+def test_transport_instance():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.SpecialistPoolServiceGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ client = SpecialistPoolServiceClient(transport=transport)
+ assert client.transport is transport
+
+def test_transport_get_channel():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.SpecialistPoolServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.SpecialistPoolServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.SpecialistPoolServiceGrpcTransport, + transports.SpecialistPoolServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.SpecialistPoolServiceGrpcTransport, + ) + +def test_specialist_pool_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.SpecialistPoolServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_specialist_pool_service_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.aiplatform_v1beta1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.SpecialistPoolServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
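+ # (Illustrative sketch, not generated code: each abstract method on the
+ # base transport is effectively a stub of the form
+ #
+ #   def delete_specialist_pool(self, request):
+ #       raise NotImplementedError()
+ #
+ # so the loop below only needs the method names to exercise every stub.)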
+ methods = ( + 'create_specialist_pool', + 'get_specialist_pool', + 'list_specialist_pools', + 'delete_specialist_pool', + 'update_specialist_pool', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +@requires_google_auth_gte_1_25_0 +def test_specialist_pool_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.SpecialistPoolServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_specialist_pool_service_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.SpecialistPoolServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + ), + quota_project_id="octopus", + ) + + +def test_specialist_pool_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.SpecialistPoolServiceTransport() + adc.assert_called_once() + + +@requires_google_auth_gte_1_25_0 +def test_specialist_pool_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + SpecialistPoolServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_specialist_pool_service_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + SpecialistPoolServiceClient() + adc.assert_called_once_with( + scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.SpecialistPoolServiceGrpcTransport, + transports.SpecialistPoolServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_specialist_pool_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.SpecialistPoolServiceGrpcTransport, + transports.SpecialistPoolServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_specialist_pool_service_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") + adc.assert_called_once_with(scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.SpecialistPoolServiceGrpcTransport, grpc_helpers), + (transports.SpecialistPoolServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_specialist_pool_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.SpecialistPoolServiceGrpcTransport, transports.SpecialistPoolServiceGrpcAsyncIOTransport]) +def test_specialist_pool_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
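+ # (Illustrative note, not generated code: an explicit ssl_channel_credentials
+ # object is expected to be forwarded verbatim to create_channel, whereas a
+ # client_cert_source_for_mtls callback should instead be expanded via
+ # grpc.ssl_channel_credentials(certificate_chain=..., private_key=...);
+ # the two blocks below assert each branch in turn.)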
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+ mock_ssl_channel_creds = mock.Mock()
+ transport_class(
+ host="squid.clam.whelk",
+ credentials=cred,
+ ssl_channel_credentials=mock_ssl_channel_creds
+ )
+ mock_create_channel.assert_called_once_with(
+ "squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_channel_creds,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+ # is used.
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+ with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+ transport_class(
+ credentials=cred,
+ client_cert_source_for_mtls=client_cert_source_callback
+ )
+ expected_cert, expected_key = client_cert_source_callback()
+ mock_ssl_cred.assert_called_once_with(
+ certificate_chain=expected_cert,
+ private_key=expected_key
+ )
+
+
+def test_specialist_pool_service_host_no_port():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
+ )
+ assert client.transport._host == 'aiplatform.googleapis.com:443'
+
+
+def test_specialist_pool_service_host_with_port():
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
+ )
+ assert client.transport._host == 'aiplatform.googleapis.com:8000'
+
+def test_specialist_pool_service_grpc_transport_channel():
+ channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.SpecialistPoolServiceGrpcTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials is None
+
+
+def test_specialist_pool_service_grpc_asyncio_transport_channel():
+ channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.SpecialistPoolServiceGrpcAsyncIOTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.SpecialistPoolServiceGrpcTransport, transports.SpecialistPoolServiceGrpcAsyncIOTransport]) +def test_specialist_pool_service_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.SpecialistPoolServiceGrpcTransport, transports.SpecialistPoolServiceGrpcAsyncIOTransport]) +def test_specialist_pool_service_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_specialist_pool_service_grpc_lro_client(): + client = SpecialistPoolServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. 
+ assert transport.operations_client is transport.operations_client
+
+
+def test_specialist_pool_service_grpc_lro_async_client():
+ client = SpecialistPoolServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc_asyncio',
+ )
+ transport = client.transport
+
+ # Ensure that we have an api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.OperationsAsyncClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_specialist_pool_path():
+ project = "squid"
+ location = "clam"
+ specialist_pool = "whelk"
+ expected = "projects/{project}/locations/{location}/specialistPools/{specialist_pool}".format(project=project, location=location, specialist_pool=specialist_pool, )
+ actual = SpecialistPoolServiceClient.specialist_pool_path(project, location, specialist_pool)
+ assert expected == actual
+
+
+def test_parse_specialist_pool_path():
+ expected = {
+ "project": "octopus",
+ "location": "oyster",
+ "specialist_pool": "nudibranch",
+ }
+ path = SpecialistPoolServiceClient.specialist_pool_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = SpecialistPoolServiceClient.parse_specialist_pool_path(path)
+ assert expected == actual
+
+def test_common_billing_account_path():
+ billing_account = "cuttlefish"
+ expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+ actual = SpecialistPoolServiceClient.common_billing_account_path(billing_account)
+ assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+ expected = {
+ "billing_account": "mussel",
+ }
+ path = SpecialistPoolServiceClient.common_billing_account_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = SpecialistPoolServiceClient.parse_common_billing_account_path(path)
+ assert expected == actual
+
+def test_common_folder_path():
+ folder = "winkle"
+ expected = "folders/{folder}".format(folder=folder, )
+ actual = SpecialistPoolServiceClient.common_folder_path(folder)
+ assert expected == actual
+
+
+def test_parse_common_folder_path():
+ expected = {
+ "folder": "nautilus",
+ }
+ path = SpecialistPoolServiceClient.common_folder_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = SpecialistPoolServiceClient.parse_common_folder_path(path)
+ assert expected == actual
+
+def test_common_organization_path():
+ organization = "scallop"
+ expected = "organizations/{organization}".format(organization=organization, )
+ actual = SpecialistPoolServiceClient.common_organization_path(organization)
+ assert expected == actual
+
+
+def test_parse_common_organization_path():
+ expected = {
+ "organization": "abalone",
+ }
+ path = SpecialistPoolServiceClient.common_organization_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = SpecialistPoolServiceClient.parse_common_organization_path(path)
+ assert expected == actual
+
+def test_common_project_path():
+ project = "squid"
+ expected = "projects/{project}".format(project=project, )
+ actual = SpecialistPoolServiceClient.common_project_path(project)
+ assert expected == actual
+
+
+def test_parse_common_project_path():
+ expected = {
+ "project": "clam",
+ }
+ path = SpecialistPoolServiceClient.common_project_path(**expected)
+
+ # Check that the path construction is reversible.
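+ # (Illustrative sketch, not generated code: the parse_* helpers invert the
+ # format string with a named-group regex, roughly
+ #
+ #   m = re.match(r"^projects/(?P<project>.+?)$", path)
+ #   return m.groupdict() if m else {}
+ #
+ # so formatting the dict and re-parsing the path must be lossless.)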
+ actual = SpecialistPoolServiceClient.parse_common_project_path(path)
+ assert expected == actual
+
+def test_common_location_path():
+ project = "whelk"
+ location = "octopus"
+ expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
+ actual = SpecialistPoolServiceClient.common_location_path(project, location)
+ assert expected == actual
+
+
+def test_parse_common_location_path():
+ expected = {
+ "project": "oyster",
+ "location": "nudibranch",
+ }
+ path = SpecialistPoolServiceClient.common_location_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = SpecialistPoolServiceClient.parse_common_location_path(path)
+ assert expected == actual
+
+
+def test_client_with_default_client_info():
+ client_info = gapic_v1.client_info.ClientInfo()
+
+ with mock.patch.object(transports.SpecialistPoolServiceTransport, '_prep_wrapped_messages') as prep:
+ client = SpecialistPoolServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
+
+ with mock.patch.object(transports.SpecialistPoolServiceTransport, '_prep_wrapped_messages') as prep:
+ transport_class = SpecialistPoolServiceClient.get_transport_class()
+ transport = transport_class(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py
new file mode 100644
index 0000000000..db7bfea262
--- /dev/null
+++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py
@@ -0,0 +1,7781 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.tensorboard_service import TensorboardServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.tensorboard_service import TensorboardServiceClient +from google.cloud.aiplatform_v1beta1.services.tensorboard_service import pagers +from google.cloud.aiplatform_v1beta1.services.tensorboard_service import transports +from google.cloud.aiplatform_v1beta1.services.tensorboard_service.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard as gca_tensorboard +from google.cloud.aiplatform_v1beta1.types import tensorboard_data +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment as gca_tensorboard_experiment +from google.cloud.aiplatform_v1beta1.types import tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run +from google.cloud.aiplatform_v1beta1.types import tensorboard_service +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series +from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series as gca_tensorboard_time_series +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
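+# (Illustrative note, not generated code: DEFAULT_MTLS_ENDPOINT is derived by
+# inserting ".mtls" into the default endpoint, e.g.
+#   "aiplatform.googleapis.com" -> "aiplatform.mtls.googleapis.com"
+# so a "localhost" default would map onto itself, hence the override below.)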
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert TensorboardServiceClient._get_default_mtls_endpoint(None) is None + assert TensorboardServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert TensorboardServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert TensorboardServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert TensorboardServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert TensorboardServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + TensorboardServiceClient, + TensorboardServiceAsyncClient, +]) +def test_tensorboard_service_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +@pytest.mark.parametrize("client_class", [ + TensorboardServiceClient, + TensorboardServiceAsyncClient, +]) +def test_tensorboard_service_client_service_account_always_use_jwt(client_class): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.TensorboardServiceGrpcTransport, "grpc"), + (transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_tensorboard_service_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + TensorboardServiceClient, + TensorboardServiceAsyncClient, +]) +def test_tensorboard_service_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +def test_tensorboard_service_client_get_transport_class(): + transport = TensorboardServiceClient.get_transport_class() + available_transports = [ 
+ transports.TensorboardServiceGrpcTransport, + ] + assert transport in available_transports + + transport = TensorboardServiceClient.get_transport_class("grpc") + assert transport == transports.TensorboardServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc"), + (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(TensorboardServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TensorboardServiceClient)) +@mock.patch.object(TensorboardServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TensorboardServiceAsyncClient)) +def test_tensorboard_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(TensorboardServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(TensorboardServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc", "true"), + (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc", "false"), + (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(TensorboardServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TensorboardServiceClient)) +@mock.patch.object(TensorboardServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TensorboardServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_tensorboard_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
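+ # (Illustrative summary, not generated code — the expected resolution is:
+ #   GOOGLE_API_USE_CLIENT_CERTIFICATE == "false" -> DEFAULT_ENDPOINT, no cert;
+ #   GOOGLE_API_USE_CLIENT_CERTIFICATE == "true" and a cert is available
+ #     -> DEFAULT_MTLS_ENDPOINT with that cert;
+ #   no cert available -> DEFAULT_ENDPOINT either way.)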
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc"), + (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_tensorboard_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc"), + (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_tensorboard_service_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
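+ # (Illustrative note, not generated code: the credentials_file path is
+ # forwarded to the transport unchanged; the base transport is expected to
+ # load it later via google.auth.load_credentials_from_file, as exercised
+ # by the specialist-pool base-transport tests earlier in this patch.)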
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_tensorboard_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1beta1.services.tensorboard_service.transports.TensorboardServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = TensorboardServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_tensorboard(transport: str = 'grpc', request_type=tensorboard_service.CreateTensorboardRequest): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.CreateTensorboardRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_tensorboard_from_dict(): + test_create_tensorboard(request_type=dict) + + +def test_create_tensorboard_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard), + '__call__') as call: + client.create_tensorboard() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.CreateTensorboardRequest() + + +@pytest.mark.asyncio +async def test_create_tensorboard_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.CreateTensorboardRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.CreateTensorboardRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_tensorboard_async_from_dict(): + await test_create_tensorboard_async(request_type=dict) + + +def test_create_tensorboard_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.CreateTensorboardRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_tensorboard_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.CreateTensorboardRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_create_tensorboard_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_tensorboard( + parent='parent_value', + tensorboard=gca_tensorboard.Tensorboard(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
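+ # (Illustrative note, not generated code: flattened keyword arguments are
+ # packed into a CreateTensorboardRequest before reaching the transport, so
+ # args[0] below is that request message and its fields mirror the
+ # keywords passed above.)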
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+ assert args[0].tensorboard == gca_tensorboard.Tensorboard(name='name_value')
+
+
+def test_create_tensorboard_flattened_error():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_tensorboard(
+ tensorboard_service.CreateTensorboardRequest(),
+ parent='parent_value',
+ tensorboard=gca_tensorboard.Tensorboard(name='name_value'),
+ )
+
+
+@pytest.mark.asyncio
+async def test_create_tensorboard_flattened_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_tensorboard),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name='operations/spam')
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.create_tensorboard(
+ parent='parent_value',
+ tensorboard=gca_tensorboard.Tensorboard(name='name_value'),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+ assert args[0].tensorboard == gca_tensorboard.Tensorboard(name='name_value')
+
+
+@pytest.mark.asyncio
+async def test_create_tensorboard_flattened_error_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.create_tensorboard(
+ tensorboard_service.CreateTensorboardRequest(),
+ parent='parent_value',
+ tensorboard=gca_tensorboard.Tensorboard(name='name_value'),
+ )
+
+
+def test_get_tensorboard(transport: str = 'grpc', request_type=tensorboard_service.GetTensorboardRequest):
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_tensorboard),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = tensorboard.Tensorboard(
+ name='name_value',
+ display_name='display_name_value',
+ description='description_value',
+ blob_storage_path_prefix='blob_storage_path_prefix_value',
+ run_count=989,
+ etag='etag_value',
+ )
+ response = client.get_tensorboard(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.GetTensorboardRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, tensorboard.Tensorboard)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.description == 'description_value'
+ assert response.blob_storage_path_prefix == 'blob_storage_path_prefix_value'
+ assert response.run_count == 989
+ assert response.etag == 'etag_value'
+
+
+def test_get_tensorboard_from_dict():
+ test_get_tensorboard(request_type=dict)
+
+
+def test_get_tensorboard_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_tensorboard),
+ '__call__') as call:
+ client.get_tensorboard()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.GetTensorboardRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_tensorboard_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.GetTensorboardRequest):
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_tensorboard),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard.Tensorboard(
+ name='name_value',
+ display_name='display_name_value',
+ description='description_value',
+ blob_storage_path_prefix='blob_storage_path_prefix_value',
+ run_count=989,
+ etag='etag_value',
+ ))
+ response = await client.get_tensorboard(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.GetTensorboardRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, tensorboard.Tensorboard)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.description == 'description_value'
+ assert response.blob_storage_path_prefix == 'blob_storage_path_prefix_value'
+ assert response.run_count == 989
+ assert response.etag == 'etag_value'
+
+
+@pytest.mark.asyncio
+async def test_get_tensorboard_async_from_dict():
+ await test_get_tensorboard_async(request_type=dict)
+
+
+def test_get_tensorboard_field_headers():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = tensorboard_service.GetTensorboardRequest()
+
+ request.name = 'name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_tensorboard),
+ '__call__') as call:
+ call.return_value = tensorboard.Tensorboard()
+ client.get_tensorboard(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_tensorboard_field_headers_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = tensorboard_service.GetTensorboardRequest()
+
+ request.name = 'name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_tensorboard),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard.Tensorboard())
+ await client.get_tensorboard(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+def test_get_tensorboard_flattened():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_tensorboard),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = tensorboard.Tensorboard()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_tensorboard(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+def test_get_tensorboard_flattened_error():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_tensorboard(
+ tensorboard_service.GetTensorboardRequest(),
+ name='name_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_tensorboard_flattened_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_tensorboard),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard.Tensorboard())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_tensorboard(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_get_tensorboard_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_tensorboard( + tensorboard_service.GetTensorboardRequest(), + name='name_value', + ) + + +def test_update_tensorboard(transport: str = 'grpc', request_type=tensorboard_service.UpdateTensorboardRequest): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.update_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_tensorboard_from_dict(): + test_update_tensorboard(request_type=dict) + + +def test_update_tensorboard_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard), + '__call__') as call: + client.update_tensorboard() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardRequest() + + +@pytest.mark.asyncio +async def test_update_tensorboard_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.UpdateTensorboardRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.update_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_tensorboard_async_from_dict(): + await test_update_tensorboard_async(request_type=dict) + + +def test_update_tensorboard_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.UpdateTensorboardRequest() + + request.tensorboard.name = 'tensorboard.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.update_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard.name=tensorboard.name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_tensorboard_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.UpdateTensorboardRequest() + + request.tensorboard.name = 'tensorboard.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.update_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard.name=tensorboard.name/value', + ) in kw['metadata'] + + +def test_update_tensorboard_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_tensorboard( + tensorboard=gca_tensorboard.Tensorboard(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].tensorboard == gca_tensorboard.Tensorboard(name='name_value') + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) + + +def test_update_tensorboard_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError):
+ client.update_tensorboard(
+ tensorboard_service.UpdateTensorboardRequest(),
+ tensorboard=gca_tensorboard.Tensorboard(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+
+@pytest.mark.asyncio
+async def test_update_tensorboard_flattened_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_tensorboard),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name='operations/spam')
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.update_tensorboard(
+ tensorboard=gca_tensorboard.Tensorboard(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].tensorboard == gca_tensorboard.Tensorboard(name='name_value')
+ assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+@pytest.mark.asyncio
+async def test_update_tensorboard_flattened_error_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.update_tensorboard(
+ tensorboard_service.UpdateTensorboardRequest(),
+ tensorboard=gca_tensorboard.Tensorboard(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+
+def test_list_tensorboards(transport: str = 'grpc', request_type=tensorboard_service.ListTensorboardsRequest):
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboards),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = tensorboard_service.ListTensorboardsResponse(
+ next_page_token='next_page_token_value',
+ )
+ response = client.list_tensorboards(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.ListTensorboardsRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListTensorboardsPager)
+ assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_tensorboards_from_dict():
+ test_list_tensorboards(request_type=dict)
+
+
+def test_list_tensorboards_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboards),
+ '__call__') as call:
+ client.list_tensorboards()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.ListTensorboardsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_tensorboards_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ListTensorboardsRequest):
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboards),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardsResponse(
+ next_page_token='next_page_token_value',
+ ))
+ response = await client.list_tensorboards(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.ListTensorboardsRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListTensorboardsAsyncPager)
+ assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_tensorboards_async_from_dict():
+ await test_list_tensorboards_async(request_type=dict)
+
+
+def test_list_tensorboards_field_headers():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = tensorboard_service.ListTensorboardsRequest()
+
+ request.parent = 'parent/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboards),
+ '__call__') as call:
+ call.return_value = tensorboard_service.ListTensorboardsResponse()
+ client.list_tensorboards(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'parent=parent/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_tensorboards_field_headers_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = tensorboard_service.ListTensorboardsRequest()
+
+ request.parent = 'parent/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboards),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardsResponse())
+ await client.list_tensorboards(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'parent=parent/value',
+ ) in kw['metadata']
+
+
+def test_list_tensorboards_flattened():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboards),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = tensorboard_service.ListTensorboardsResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_tensorboards(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+
+
+def test_list_tensorboards_flattened_error():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_tensorboards(
+ tensorboard_service.ListTensorboardsRequest(),
+ parent='parent_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_tensorboards_flattened_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboards),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardsResponse())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_tensorboards(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_tensorboards_flattened_error_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_tensorboards(
+ tensorboard_service.ListTensorboardsRequest(),
+ parent='parent_value',
+ )
+
+
+def test_list_tensorboards_pager():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboards),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ tensorboard_service.ListTensorboardsResponse(
+ tensorboards=[
+ tensorboard.Tensorboard(),
+ tensorboard.Tensorboard(),
+ tensorboard.Tensorboard(),
+ ],
+ next_page_token='abc',
+ ),
+ tensorboard_service.ListTensorboardsResponse(
+ tensorboards=[],
+ next_page_token='def',
+ ),
+ tensorboard_service.ListTensorboardsResponse(
+ tensorboards=[
+ tensorboard.Tensorboard(),
+ ],
+ next_page_token='ghi',
+ ),
+ tensorboard_service.ListTensorboardsResponse(
+ tensorboards=[
+ tensorboard.Tensorboard(),
+ tensorboard.Tensorboard(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ('parent', ''),
+ )),
+ )
+ pager = client.list_tensorboards(request={})
+
+ assert pager._metadata == metadata
+
+ results = [i for i in pager]
+ assert len(results) == 6
+ assert all(isinstance(i, tensorboard.Tensorboard)
+ for i in results)
+
+def test_list_tensorboards_pages():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboards),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ tensorboard_service.ListTensorboardsResponse(
+ tensorboards=[
+ tensorboard.Tensorboard(),
+ tensorboard.Tensorboard(),
+ tensorboard.Tensorboard(),
+ ],
+ next_page_token='abc',
+ ),
+ tensorboard_service.ListTensorboardsResponse(
+ tensorboards=[],
+ next_page_token='def',
+ ),
+ tensorboard_service.ListTensorboardsResponse(
+ tensorboards=[
+ tensorboard.Tensorboard(),
+ ],
+ next_page_token='ghi',
+ ),
+ tensorboard_service.ListTensorboardsResponse(
+ tensorboards=[
+ tensorboard.Tensorboard(),
+ tensorboard.Tensorboard(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_tensorboards(request={}).pages)
+ for page_, token in zip(pages, ['abc','def','ghi', '']):
+ assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_tensorboards_async_pager():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboards),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ tensorboard_service.ListTensorboardsResponse(
+ tensorboards=[
+ tensorboard.Tensorboard(),
+ tensorboard.Tensorboard(),
+ tensorboard.Tensorboard(),
+ ],
+ next_page_token='abc',
+ ),
+ tensorboard_service.ListTensorboardsResponse(
+ tensorboards=[],
+ next_page_token='def',
+ ),
+ tensorboard_service.ListTensorboardsResponse(
+ tensorboards=[
+ tensorboard.Tensorboard(),
+ ],
+ next_page_token='ghi',
+ ),
+ tensorboard_service.ListTensorboardsResponse(
+ tensorboards=[
+ tensorboard.Tensorboard(),
+ tensorboard.Tensorboard(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_tensorboards(request={},)
+ assert async_pager.next_page_token == 'abc'
+ responses = []
+ async for response in async_pager:
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, tensorboard.Tensorboard)
+ for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_tensorboards_async_pages():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.list_tensorboards), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + tensorboard_service.ListTensorboardsResponse( + tensorboards=[ + tensorboard.Tensorboard(), + tensorboard.Tensorboard(), + tensorboard.Tensorboard(), + ], + next_page_token='abc', + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[], + next_page_token='def', + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[ + tensorboard.Tensorboard(), + ], + next_page_token='ghi', + ), + tensorboard_service.ListTensorboardsResponse( + tensorboards=[ + tensorboard.Tensorboard(), + tensorboard.Tensorboard(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_tensorboards(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +def test_delete_tensorboard(transport: str = 'grpc', request_type=tensorboard_service.DeleteTensorboardRequest): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_tensorboard_from_dict(): + test_delete_tensorboard(request_type=dict) + + +def test_delete_tensorboard_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard), + '__call__') as call: + client.delete_tensorboard() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardRequest() + + +@pytest.mark.asyncio +async def test_delete_tensorboard_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.DeleteTensorboardRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_tensorboard_async_from_dict(): + await test_delete_tensorboard_async(request_type=dict) + + +def test_delete_tensorboard_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_tensorboard_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_tensorboard(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_delete_tensorboard_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_tensorboard( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+def test_delete_tensorboard_flattened_error():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_tensorboard(
+ tensorboard_service.DeleteTensorboardRequest(),
+ name='name_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_delete_tensorboard_flattened_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_tensorboard),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name='operations/spam')
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_tensorboard(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_tensorboard_flattened_error_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.delete_tensorboard(
+ tensorboard_service.DeleteTensorboardRequest(),
+ name='name_value',
+ )
+
+
+def test_create_tensorboard_experiment(transport: str = 'grpc', request_type=tensorboard_service.CreateTensorboardExperimentRequest):
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_tensorboard_experiment),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_tensorboard_experiment.TensorboardExperiment(
+ name='name_value',
+ display_name='display_name_value',
+ description='description_value',
+ etag='etag_value',
+ source='source_value',
+ )
+ response = client.create_tensorboard_experiment(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.CreateTensorboardExperimentRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.description == 'description_value'
+ assert response.etag == 'etag_value'
+ assert response.source == 'source_value'
+
+
+def test_create_tensorboard_experiment_from_dict():
+ test_create_tensorboard_experiment(request_type=dict)
+
+
+def test_create_tensorboard_experiment_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_tensorboard_experiment),
+ '__call__') as call:
+ client.create_tensorboard_experiment()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.CreateTensorboardExperimentRequest()
+
+
+@pytest.mark.asyncio
+async def test_create_tensorboard_experiment_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.CreateTensorboardExperimentRequest):
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_tensorboard_experiment),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment(
+ name='name_value',
+ display_name='display_name_value',
+ description='description_value',
+ etag='etag_value',
+ source='source_value',
+ ))
+ response = await client.create_tensorboard_experiment(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.CreateTensorboardExperimentRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.description == 'description_value'
+ assert response.etag == 'etag_value'
+ assert response.source == 'source_value'
+
+
+@pytest.mark.asyncio
+async def test_create_tensorboard_experiment_async_from_dict():
+ await test_create_tensorboard_experiment_async(request_type=dict)
+
+
+def test_create_tensorboard_experiment_field_headers():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = tensorboard_service.CreateTensorboardExperimentRequest()
+
+ request.parent = 'parent/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.create_tensorboard_experiment), + '__call__') as call: + call.return_value = gca_tensorboard_experiment.TensorboardExperiment() + client.create_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_tensorboard_experiment_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.CreateTensorboardExperimentRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_experiment), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment()) + await client.create_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_create_tensorboard_experiment_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_experiment.TensorboardExperiment() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_tensorboard_experiment( + parent='parent_value', + tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), + tensorboard_experiment_id='tensorboard_experiment_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].tensorboard_experiment == gca_tensorboard_experiment.TensorboardExperiment(name='name_value') + assert args[0].tensorboard_experiment_id == 'tensorboard_experiment_id_value' + + +def test_create_tensorboard_experiment_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError):
+ client.create_tensorboard_experiment(
+ tensorboard_service.CreateTensorboardExperimentRequest(),
+ parent='parent_value',
+ tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'),
+ tensorboard_experiment_id='tensorboard_experiment_id_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_create_tensorboard_experiment_flattened_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_tensorboard_experiment),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.create_tensorboard_experiment(
+ parent='parent_value',
+ tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'),
+ tensorboard_experiment_id='tensorboard_experiment_id_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+ assert args[0].tensorboard_experiment == gca_tensorboard_experiment.TensorboardExperiment(name='name_value')
+ assert args[0].tensorboard_experiment_id == 'tensorboard_experiment_id_value'
+
+
+@pytest.mark.asyncio
+async def test_create_tensorboard_experiment_flattened_error_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.create_tensorboard_experiment(
+ tensorboard_service.CreateTensorboardExperimentRequest(),
+ parent='parent_value',
+ tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'),
+ tensorboard_experiment_id='tensorboard_experiment_id_value',
+ )
+
+
+def test_get_tensorboard_experiment(transport: str = 'grpc', request_type=tensorboard_service.GetTensorboardExperimentRequest):
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_tensorboard_experiment),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = tensorboard_experiment.TensorboardExperiment(
+ name='name_value',
+ display_name='display_name_value',
+ description='description_value',
+ etag='etag_value',
+ source='source_value',
+ )
+ response = client.get_tensorboard_experiment(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.GetTensorboardExperimentRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, tensorboard_experiment.TensorboardExperiment)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.description == 'description_value'
+ assert response.etag == 'etag_value'
+ assert response.source == 'source_value'
+
+
+def test_get_tensorboard_experiment_from_dict():
+ test_get_tensorboard_experiment(request_type=dict)
+
+
+def test_get_tensorboard_experiment_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_tensorboard_experiment),
+ '__call__') as call:
+ client.get_tensorboard_experiment()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.GetTensorboardExperimentRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_tensorboard_experiment_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.GetTensorboardExperimentRequest):
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_tensorboard_experiment),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_experiment.TensorboardExperiment(
+ name='name_value',
+ display_name='display_name_value',
+ description='description_value',
+ etag='etag_value',
+ source='source_value',
+ ))
+ response = await client.get_tensorboard_experiment(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.GetTensorboardExperimentRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, tensorboard_experiment.TensorboardExperiment)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.description == 'description_value'
+ assert response.etag == 'etag_value'
+ assert response.source == 'source_value'
+
+
+@pytest.mark.asyncio
+async def test_get_tensorboard_experiment_async_from_dict():
+ await test_get_tensorboard_experiment_async(request_type=dict)
+
+
+def test_get_tensorboard_experiment_field_headers():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = tensorboard_service.GetTensorboardExperimentRequest()
+
+ request.name = 'name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_tensorboard_experiment),
+ '__call__') as call:
+ call.return_value = tensorboard_experiment.TensorboardExperiment()
+ client.get_tensorboard_experiment(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_tensorboard_experiment_field_headers_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = tensorboard_service.GetTensorboardExperimentRequest()
+
+ request.name = 'name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_tensorboard_experiment),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_experiment.TensorboardExperiment())
+ await client.get_tensorboard_experiment(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+def test_get_tensorboard_experiment_flattened():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_tensorboard_experiment),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = tensorboard_experiment.TensorboardExperiment()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_tensorboard_experiment(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+def test_get_tensorboard_experiment_flattened_error():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_tensorboard_experiment(
+ tensorboard_service.GetTensorboardExperimentRequest(),
+ name='name_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_tensorboard_experiment_flattened_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_tensorboard_experiment),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_experiment.TensorboardExperiment())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_tensorboard_experiment(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_get_tensorboard_experiment_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_tensorboard_experiment( + tensorboard_service.GetTensorboardExperimentRequest(), + name='name_value', + ) + + +def test_update_tensorboard_experiment(transport: str = 'grpc', request_type=tensorboard_service.UpdateTensorboardExperimentRequest): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_experiment.TensorboardExperiment( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + source='source_value', + ) + response = client.update_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardExperimentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + assert response.source == 'source_value' + + +def test_update_tensorboard_experiment_from_dict(): + test_update_tensorboard_experiment(request_type=dict) + + +def test_update_tensorboard_experiment_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_experiment), + '__call__') as call: + client.update_tensorboard_experiment() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.UpdateTensorboardExperimentRequest() + + +@pytest.mark.asyncio +async def test_update_tensorboard_experiment_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.UpdateTensorboardExperimentRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(
+ type(client.transport.update_tensorboard_experiment),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment(
+ name='name_value',
+ display_name='display_name_value',
+ description='description_value',
+ etag='etag_value',
+ source='source_value',
+ ))
+ response = await client.update_tensorboard_experiment(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.UpdateTensorboardExperimentRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.description == 'description_value'
+ assert response.etag == 'etag_value'
+ assert response.source == 'source_value'
+
+
+@pytest.mark.asyncio
+async def test_update_tensorboard_experiment_async_from_dict():
+ await test_update_tensorboard_experiment_async(request_type=dict)
+
+
+def test_update_tensorboard_experiment_field_headers():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = tensorboard_service.UpdateTensorboardExperimentRequest()
+
+ request.tensorboard_experiment.name = 'tensorboard_experiment.name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_tensorboard_experiment),
+ '__call__') as call:
+ call.return_value = gca_tensorboard_experiment.TensorboardExperiment()
+ client.update_tensorboard_experiment(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'tensorboard_experiment.name=tensorboard_experiment.name/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_update_tensorboard_experiment_field_headers_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = tensorboard_service.UpdateTensorboardExperimentRequest()
+
+ request.tensorboard_experiment.name = 'tensorboard_experiment.name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_tensorboard_experiment),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment())
+ await client.update_tensorboard_experiment(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'tensorboard_experiment.name=tensorboard_experiment.name/value',
+ ) in kw['metadata']
+
+
+def test_update_tensorboard_experiment_flattened():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_tensorboard_experiment),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_tensorboard_experiment.TensorboardExperiment()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.update_tensorboard_experiment(
+ tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].tensorboard_experiment == gca_tensorboard_experiment.TensorboardExperiment(name='name_value')
+ assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+def test_update_tensorboard_experiment_flattened_error():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.update_tensorboard_experiment(
+ tensorboard_service.UpdateTensorboardExperimentRequest(),
+ tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+
+@pytest.mark.asyncio
+async def test_update_tensorboard_experiment_flattened_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_tensorboard_experiment),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.update_tensorboard_experiment(
+ tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].tensorboard_experiment == gca_tensorboard_experiment.TensorboardExperiment(name='name_value')
+ assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+@pytest.mark.asyncio
+async def test_update_tensorboard_experiment_flattened_error_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
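+ # The flattened keyword arguments are used to build the request object
+ # internally, so supplying a pre-built request together with flattened
+ # fields is ambiguous; the client is expected to raise ValueError before
+ # any RPC is attempted.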
+ with pytest.raises(ValueError):
+ await client.update_tensorboard_experiment(
+ tensorboard_service.UpdateTensorboardExperimentRequest(),
+ tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+
+def test_list_tensorboard_experiments(transport: str = 'grpc', request_type=tensorboard_service.ListTensorboardExperimentsRequest):
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboard_experiments),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = tensorboard_service.ListTensorboardExperimentsResponse(
+ next_page_token='next_page_token_value',
+ )
+ response = client.list_tensorboard_experiments(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.ListTensorboardExperimentsRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListTensorboardExperimentsPager)
+ assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_tensorboard_experiments_from_dict():
+ test_list_tensorboard_experiments(request_type=dict)
+
+
+def test_list_tensorboard_experiments_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboard_experiments),
+ '__call__') as call:
+ client.list_tensorboard_experiments()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.ListTensorboardExperimentsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_experiments_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ListTensorboardExperimentsRequest):
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboard_experiments),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardExperimentsResponse(
+ next_page_token='next_page_token_value',
+ ))
+ response = await client.list_tensorboard_experiments(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.ListTensorboardExperimentsRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListTensorboardExperimentsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_tensorboard_experiments_async_from_dict(): + await test_list_tensorboard_experiments_async(request_type=dict) + + +def test_list_tensorboard_experiments_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ListTensorboardExperimentsRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_experiments), + '__call__') as call: + call.return_value = tensorboard_service.ListTensorboardExperimentsResponse() + client.list_tensorboard_experiments(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_tensorboard_experiments_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ListTensorboardExperimentsRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_experiments), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardExperimentsResponse()) + await client.list_tensorboard_experiments(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_list_tensorboard_experiments_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_experiments), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardExperimentsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_tensorboard_experiments( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +def test_list_tensorboard_experiments_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError):
+ client.list_tensorboard_experiments(
+ tensorboard_service.ListTensorboardExperimentsRequest(),
+ parent='parent_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_experiments_flattened_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboard_experiments),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardExperimentsResponse())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_tensorboard_experiments(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_experiments_flattened_error_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_tensorboard_experiments(
+ tensorboard_service.ListTensorboardExperimentsRequest(),
+ parent='parent_value',
+ )
+
+
+def test_list_tensorboard_experiments_pager():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboard_experiments),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ tensorboard_service.ListTensorboardExperimentsResponse(
+ tensorboard_experiments=[
+ tensorboard_experiment.TensorboardExperiment(),
+ tensorboard_experiment.TensorboardExperiment(),
+ tensorboard_experiment.TensorboardExperiment(),
+ ],
+ next_page_token='abc',
+ ),
+ tensorboard_service.ListTensorboardExperimentsResponse(
+ tensorboard_experiments=[],
+ next_page_token='def',
+ ),
+ tensorboard_service.ListTensorboardExperimentsResponse(
+ tensorboard_experiments=[
+ tensorboard_experiment.TensorboardExperiment(),
+ ],
+ next_page_token='ghi',
+ ),
+ tensorboard_service.ListTensorboardExperimentsResponse(
+ tensorboard_experiments=[
+ tensorboard_experiment.TensorboardExperiment(),
+ tensorboard_experiment.TensorboardExperiment(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ('parent', ''),
+ )),
+ )
+ pager = client.list_tensorboard_experiments(request={})
+
+ assert pager._metadata == metadata
+
+ results = [i for i in pager]
+ assert len(results) == 6
+ assert all(isinstance(i, tensorboard_experiment.TensorboardExperiment)
+ for i in results)
+
+def test_list_tensorboard_experiments_pages():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboard_experiments),
+ '__call__') as call:
+ # Set the response to a series of pages.
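+ # The fake paging sequence below is: a 3-item page (token 'abc'), an
+ # empty page (token 'def'), a 1-item page (token 'ghi'), and a final
+ # page with no token (6 items in all). The trailing RuntimeError is a
+ # guard: the pager must stop at the empty token instead of fetching again.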
+ call.side_effect = (
+ tensorboard_service.ListTensorboardExperimentsResponse(
+ tensorboard_experiments=[
+ tensorboard_experiment.TensorboardExperiment(),
+ tensorboard_experiment.TensorboardExperiment(),
+ tensorboard_experiment.TensorboardExperiment(),
+ ],
+ next_page_token='abc',
+ ),
+ tensorboard_service.ListTensorboardExperimentsResponse(
+ tensorboard_experiments=[],
+ next_page_token='def',
+ ),
+ tensorboard_service.ListTensorboardExperimentsResponse(
+ tensorboard_experiments=[
+ tensorboard_experiment.TensorboardExperiment(),
+ ],
+ next_page_token='ghi',
+ ),
+ tensorboard_service.ListTensorboardExperimentsResponse(
+ tensorboard_experiments=[
+ tensorboard_experiment.TensorboardExperiment(),
+ tensorboard_experiment.TensorboardExperiment(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_tensorboard_experiments(request={}).pages)
+ for page_, token in zip(pages, ['abc','def','ghi', '']):
+ assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_experiments_async_pager():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboard_experiments),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ tensorboard_service.ListTensorboardExperimentsResponse(
+ tensorboard_experiments=[
+ tensorboard_experiment.TensorboardExperiment(),
+ tensorboard_experiment.TensorboardExperiment(),
+ tensorboard_experiment.TensorboardExperiment(),
+ ],
+ next_page_token='abc',
+ ),
+ tensorboard_service.ListTensorboardExperimentsResponse(
+ tensorboard_experiments=[],
+ next_page_token='def',
+ ),
+ tensorboard_service.ListTensorboardExperimentsResponse(
+ tensorboard_experiments=[
+ tensorboard_experiment.TensorboardExperiment(),
+ ],
+ next_page_token='ghi',
+ ),
+ tensorboard_service.ListTensorboardExperimentsResponse(
+ tensorboard_experiments=[
+ tensorboard_experiment.TensorboardExperiment(),
+ tensorboard_experiment.TensorboardExperiment(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_tensorboard_experiments(request={},)
+ assert async_pager.next_page_token == 'abc'
+ responses = []
+ async for response in async_pager:
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, tensorboard_experiment.TensorboardExperiment)
+ for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_experiments_async_pages():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboard_experiments),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = ( + tensorboard_service.ListTensorboardExperimentsResponse( + tensorboard_experiments=[ + tensorboard_experiment.TensorboardExperiment(), + tensorboard_experiment.TensorboardExperiment(), + tensorboard_experiment.TensorboardExperiment(), + ], + next_page_token='abc', + ), + tensorboard_service.ListTensorboardExperimentsResponse( + tensorboard_experiments=[], + next_page_token='def', + ), + tensorboard_service.ListTensorboardExperimentsResponse( + tensorboard_experiments=[ + tensorboard_experiment.TensorboardExperiment(), + ], + next_page_token='ghi', + ), + tensorboard_service.ListTensorboardExperimentsResponse( + tensorboard_experiments=[ + tensorboard_experiment.TensorboardExperiment(), + tensorboard_experiment.TensorboardExperiment(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_tensorboard_experiments(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +def test_delete_tensorboard_experiment(transport: str = 'grpc', request_type=tensorboard_service.DeleteTensorboardExperimentRequest): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.delete_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardExperimentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_tensorboard_experiment_from_dict(): + test_delete_tensorboard_experiment(request_type=dict) + + +def test_delete_tensorboard_experiment_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_experiment), + '__call__') as call: + client.delete_tensorboard_experiment() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardExperimentRequest() + + +@pytest.mark.asyncio +async def test_delete_tensorboard_experiment_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.DeleteTensorboardExperimentRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
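+ # Delete is a long-running operation: the transport returns a raw
+ # operations_pb2.Operation, and the client is expected to wrap it in an
+ # operation future, which is why the test asserts isinstance(response,
+ # future.Future) rather than checking resource fields.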
+ with mock.patch.object( + type(client.transport.delete_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardExperimentRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_tensorboard_experiment_async_from_dict(): + await test_delete_tensorboard_experiment_async(request_type=dict) + + +def test_delete_tensorboard_experiment_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardExperimentRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_experiment), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_tensorboard_experiment_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardExperimentRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_experiment), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.delete_tensorboard_experiment(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_delete_tensorboard_experiment_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_experiment), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.delete_tensorboard_experiment(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+def test_delete_tensorboard_experiment_flattened_error():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_tensorboard_experiment(
+ tensorboard_service.DeleteTensorboardExperimentRequest(),
+ name='name_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_delete_tensorboard_experiment_flattened_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_tensorboard_experiment),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name='operations/spam')
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_tensorboard_experiment(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_tensorboard_experiment_flattened_error_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.delete_tensorboard_experiment(
+ tensorboard_service.DeleteTensorboardExperimentRequest(),
+ name='name_value',
+ )
+
+
+def test_create_tensorboard_run(transport: str = 'grpc', request_type=tensorboard_service.CreateTensorboardRunRequest):
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_tensorboard_run),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_tensorboard_run.TensorboardRun(
+ name='name_value',
+ display_name='display_name_value',
+ description='description_value',
+ etag='etag_value',
+ )
+ response = client.create_tensorboard_run(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.CreateTensorboardRunRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_tensorboard_run.TensorboardRun)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.description == 'description_value'
+ assert response.etag == 'etag_value'
+
+
+def test_create_tensorboard_run_from_dict():
+ test_create_tensorboard_run(request_type=dict)
+
+
+def test_create_tensorboard_run_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_tensorboard_run),
+ '__call__') as call:
+ client.create_tensorboard_run()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.CreateTensorboardRunRequest()
+
+
+@pytest.mark.asyncio
+async def test_create_tensorboard_run_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.CreateTensorboardRunRequest):
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_tensorboard_run),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun(
+ name='name_value',
+ display_name='display_name_value',
+ description='description_value',
+ etag='etag_value',
+ ))
+ response = await client.create_tensorboard_run(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.CreateTensorboardRunRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_tensorboard_run.TensorboardRun)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.description == 'description_value'
+ assert response.etag == 'etag_value'
+
+
+@pytest.mark.asyncio
+async def test_create_tensorboard_run_async_from_dict():
+ await test_create_tensorboard_run_async(request_type=dict)
+
+
+def test_create_tensorboard_run_field_headers():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = tensorboard_service.CreateTensorboardRunRequest()
+
+ request.parent = 'parent/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_tensorboard_run),
+ '__call__') as call:
+ call.return_value = gca_tensorboard_run.TensorboardRun()
+ client.create_tensorboard_run(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'parent=parent/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_create_tensorboard_run_field_headers_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = tensorboard_service.CreateTensorboardRunRequest()
+
+ request.parent = 'parent/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_tensorboard_run),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun())
+ await client.create_tensorboard_run(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'parent=parent/value',
+ ) in kw['metadata']
+
+
+def test_create_tensorboard_run_flattened():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_tensorboard_run),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_tensorboard_run.TensorboardRun()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.create_tensorboard_run(
+ parent='parent_value',
+ tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'),
+ tensorboard_run_id='tensorboard_run_id_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+ assert args[0].tensorboard_run == gca_tensorboard_run.TensorboardRun(name='name_value')
+ assert args[0].tensorboard_run_id == 'tensorboard_run_id_value'
+
+
+def test_create_tensorboard_run_flattened_error():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_tensorboard_run(
+ tensorboard_service.CreateTensorboardRunRequest(),
+ parent='parent_value',
+ tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'),
+ tensorboard_run_id='tensorboard_run_id_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_create_tensorboard_run_flattened_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_tensorboard_run),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.create_tensorboard_run( + parent='parent_value', + tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), + tensorboard_run_id='tensorboard_run_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].tensorboard_run == gca_tensorboard_run.TensorboardRun(name='name_value') + assert args[0].tensorboard_run_id == 'tensorboard_run_id_value' + + +@pytest.mark.asyncio +async def test_create_tensorboard_run_flattened_error_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_tensorboard_run( + tensorboard_service.CreateTensorboardRunRequest(), + parent='parent_value', + tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), + tensorboard_run_id='tensorboard_run_id_value', + ) + + +def test_get_tensorboard_run(transport: str = 'grpc', request_type=tensorboard_service.GetTensorboardRunRequest): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_run.TensorboardRun( + name='name_value', + display_name='display_name_value', + description='description_value', + etag='etag_value', + ) + response = client.get_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.GetTensorboardRunRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tensorboard_run.TensorboardRun) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.etag == 'etag_value' + + +def test_get_tensorboard_run_from_dict(): + test_get_tensorboard_run(request_type=dict) + + +def test_get_tensorboard_run_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
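+ # When the method is invoked with no request and no flattened fields,
+ # the client is expected to substitute a default-constructed request,
+ # which is what the args[0] assertion below verifies.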
+ with mock.patch.object(
+ type(client.transport.get_tensorboard_run),
+ '__call__') as call:
+ client.get_tensorboard_run()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.GetTensorboardRunRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_tensorboard_run_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.GetTensorboardRunRequest):
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_tensorboard_run),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_run.TensorboardRun(
+ name='name_value',
+ display_name='display_name_value',
+ description='description_value',
+ etag='etag_value',
+ ))
+ response = await client.get_tensorboard_run(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.GetTensorboardRunRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, tensorboard_run.TensorboardRun)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.description == 'description_value'
+ assert response.etag == 'etag_value'
+
+
+@pytest.mark.asyncio
+async def test_get_tensorboard_run_async_from_dict():
+ await test_get_tensorboard_run_async(request_type=dict)
+
+
+def test_get_tensorboard_run_field_headers():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = tensorboard_service.GetTensorboardRunRequest()
+
+ request.name = 'name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_tensorboard_run),
+ '__call__') as call:
+ call.return_value = tensorboard_run.TensorboardRun()
+ client.get_tensorboard_run(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_tensorboard_run_field_headers_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = tensorboard_service.GetTensorboardRunRequest()
+
+ request.name = 'name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_tensorboard_run),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_run.TensorboardRun())
+ await client.get_tensorboard_run(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+def test_get_tensorboard_run_flattened():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_tensorboard_run),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = tensorboard_run.TensorboardRun()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_tensorboard_run(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+def test_get_tensorboard_run_flattened_error():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_tensorboard_run(
+ tensorboard_service.GetTensorboardRunRequest(),
+ name='name_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_tensorboard_run_flattened_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_tensorboard_run),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_run.TensorboardRun())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_tensorboard_run(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_tensorboard_run_flattened_error_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_tensorboard_run(
+ tensorboard_service.GetTensorboardRunRequest(),
+ name='name_value',
+ )
+
+
+def test_update_tensorboard_run(transport: str = 'grpc', request_type=tensorboard_service.UpdateTensorboardRunRequest):
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_tensorboard_run),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_tensorboard_run.TensorboardRun(
+ name='name_value',
+ display_name='display_name_value',
+ description='description_value',
+ etag='etag_value',
+ )
+ response = client.update_tensorboard_run(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.UpdateTensorboardRunRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_tensorboard_run.TensorboardRun)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.description == 'description_value'
+ assert response.etag == 'etag_value'
+
+
+def test_update_tensorboard_run_from_dict():
+ test_update_tensorboard_run(request_type=dict)
+
+
+def test_update_tensorboard_run_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_tensorboard_run),
+ '__call__') as call:
+ client.update_tensorboard_run()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.UpdateTensorboardRunRequest()
+
+
+@pytest.mark.asyncio
+async def test_update_tensorboard_run_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.UpdateTensorboardRunRequest):
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_tensorboard_run),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun(
+ name='name_value',
+ display_name='display_name_value',
+ description='description_value',
+ etag='etag_value',
+ ))
+ response = await client.update_tensorboard_run(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.UpdateTensorboardRunRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_tensorboard_run.TensorboardRun)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.description == 'description_value'
+ assert response.etag == 'etag_value'
+
+
+@pytest.mark.asyncio
+async def test_update_tensorboard_run_async_from_dict():
+ await test_update_tensorboard_run_async(request_type=dict)
+
+
+def test_update_tensorboard_run_field_headers():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
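+ # For Update RPCs the routed value lives on the nested resource, so the
+ # routing key is the full field path ('tensorboard_run.name') rather
+ # than a top-level 'name' or 'parent'.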
+ request = tensorboard_service.UpdateTensorboardRunRequest() + + request.tensorboard_run.name = 'tensorboard_run.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_run), + '__call__') as call: + call.return_value = gca_tensorboard_run.TensorboardRun() + client.update_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard_run.name=tensorboard_run.name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_tensorboard_run_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.UpdateTensorboardRunRequest() + + request.tensorboard_run.name = 'tensorboard_run.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_run), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun()) + await client.update_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard_run.name=tensorboard_run.name/value', + ) in kw['metadata'] + + +def test_update_tensorboard_run_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_run.TensorboardRun() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_tensorboard_run( + tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].tensorboard_run == gca_tensorboard_run.TensorboardRun(name='name_value') + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) + + +def test_update_tensorboard_run_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError):
+ client.update_tensorboard_run(
+ tensorboard_service.UpdateTensorboardRunRequest(),
+ tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+
+@pytest.mark.asyncio
+async def test_update_tensorboard_run_flattened_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_tensorboard_run),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.update_tensorboard_run(
+ tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].tensorboard_run == gca_tensorboard_run.TensorboardRun(name='name_value')
+ assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+@pytest.mark.asyncio
+async def test_update_tensorboard_run_flattened_error_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.update_tensorboard_run(
+ tensorboard_service.UpdateTensorboardRunRequest(),
+ tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+
+def test_list_tensorboard_runs(transport: str = 'grpc', request_type=tensorboard_service.ListTensorboardRunsRequest):
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboard_runs),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = tensorboard_service.ListTensorboardRunsResponse(
+ next_page_token='next_page_token_value',
+ )
+ response = client.list_tensorboard_runs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.ListTensorboardRunsRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListTensorboardRunsPager)
+ assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_tensorboard_runs_from_dict():
+ test_list_tensorboard_runs(request_type=dict)
+
+
+def test_list_tensorboard_runs_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboard_runs),
+ '__call__') as call:
+ client.list_tensorboard_runs()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.ListTensorboardRunsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_runs_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ListTensorboardRunsRequest):
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboard_runs),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardRunsResponse(
+ next_page_token='next_page_token_value',
+ ))
+ response = await client.list_tensorboard_runs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.ListTensorboardRunsRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListTensorboardRunsAsyncPager)
+ assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_runs_async_from_dict():
+ await test_list_tensorboard_runs_async(request_type=dict)
+
+
+def test_list_tensorboard_runs_field_headers():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = tensorboard_service.ListTensorboardRunsRequest()
+
+ request.parent = 'parent/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboard_runs),
+ '__call__') as call:
+ call.return_value = tensorboard_service.ListTensorboardRunsResponse()
+ client.list_tensorboard_runs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'parent=parent/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_runs_field_headers_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = tensorboard_service.ListTensorboardRunsRequest()
+
+ request.parent = 'parent/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboard_runs),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardRunsResponse())
+ await client.list_tensorboard_runs(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'parent=parent/value',
+ ) in kw['metadata']
+
+
+def test_list_tensorboard_runs_flattened():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboard_runs),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = tensorboard_service.ListTensorboardRunsResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_tensorboard_runs(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+
+
+def test_list_tensorboard_runs_flattened_error():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_tensorboard_runs(
+ tensorboard_service.ListTensorboardRunsRequest(),
+ parent='parent_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_runs_flattened_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboard_runs),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardRunsResponse())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_tensorboard_runs(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_runs_flattened_error_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_tensorboard_runs(
+ tensorboard_service.ListTensorboardRunsRequest(),
+ parent='parent_value',
+ )
+
+
+def test_list_tensorboard_runs_pager():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
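+ # Note: the sync pager fetches lazily. Each exhausted page triggers one
+ # more stub call, and iteration stops at the page with no
+ # next_page_token, so the trailing RuntimeError sentinel below is never
+ # raised.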
+ with mock.patch.object(
+ type(client.transport.list_tensorboard_runs),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ tensorboard_service.ListTensorboardRunsResponse(
+ tensorboard_runs=[
+ tensorboard_run.TensorboardRun(),
+ tensorboard_run.TensorboardRun(),
+ tensorboard_run.TensorboardRun(),
+ ],
+ next_page_token='abc',
+ ),
+ tensorboard_service.ListTensorboardRunsResponse(
+ tensorboard_runs=[],
+ next_page_token='def',
+ ),
+ tensorboard_service.ListTensorboardRunsResponse(
+ tensorboard_runs=[
+ tensorboard_run.TensorboardRun(),
+ ],
+ next_page_token='ghi',
+ ),
+ tensorboard_service.ListTensorboardRunsResponse(
+ tensorboard_runs=[
+ tensorboard_run.TensorboardRun(),
+ tensorboard_run.TensorboardRun(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ('parent', ''),
+ )),
+ )
+ pager = client.list_tensorboard_runs(request={})
+
+ assert pager._metadata == metadata
+
+ results = [i for i in pager]
+ assert len(results) == 6
+ assert all(isinstance(i, tensorboard_run.TensorboardRun)
+ for i in results)
+
+def test_list_tensorboard_runs_pages():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboard_runs),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ tensorboard_service.ListTensorboardRunsResponse(
+ tensorboard_runs=[
+ tensorboard_run.TensorboardRun(),
+ tensorboard_run.TensorboardRun(),
+ tensorboard_run.TensorboardRun(),
+ ],
+ next_page_token='abc',
+ ),
+ tensorboard_service.ListTensorboardRunsResponse(
+ tensorboard_runs=[],
+ next_page_token='def',
+ ),
+ tensorboard_service.ListTensorboardRunsResponse(
+ tensorboard_runs=[
+ tensorboard_run.TensorboardRun(),
+ ],
+ next_page_token='ghi',
+ ),
+ tensorboard_service.ListTensorboardRunsResponse(
+ tensorboard_runs=[
+ tensorboard_run.TensorboardRun(),
+ tensorboard_run.TensorboardRun(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_tensorboard_runs(request={}).pages)
+ for page_, token in zip(pages, ['abc','def','ghi', '']):
+ assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_runs_async_pager():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboard_runs),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
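+ # Note: new_callable=mock.AsyncMock makes each page fetch awaitable;
+ # the side_effect tuple then supplies one response per awaited call.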
+ call.side_effect = (
+ tensorboard_service.ListTensorboardRunsResponse(
+ tensorboard_runs=[
+ tensorboard_run.TensorboardRun(),
+ tensorboard_run.TensorboardRun(),
+ tensorboard_run.TensorboardRun(),
+ ],
+ next_page_token='abc',
+ ),
+ tensorboard_service.ListTensorboardRunsResponse(
+ tensorboard_runs=[],
+ next_page_token='def',
+ ),
+ tensorboard_service.ListTensorboardRunsResponse(
+ tensorboard_runs=[
+ tensorboard_run.TensorboardRun(),
+ ],
+ next_page_token='ghi',
+ ),
+ tensorboard_service.ListTensorboardRunsResponse(
+ tensorboard_runs=[
+ tensorboard_run.TensorboardRun(),
+ tensorboard_run.TensorboardRun(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_tensorboard_runs(request={},)
+ assert async_pager.next_page_token == 'abc'
+ responses = []
+ async for response in async_pager:
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, tensorboard_run.TensorboardRun)
+ for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_runs_async_pages():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboard_runs),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ tensorboard_service.ListTensorboardRunsResponse(
+ tensorboard_runs=[
+ tensorboard_run.TensorboardRun(),
+ tensorboard_run.TensorboardRun(),
+ tensorboard_run.TensorboardRun(),
+ ],
+ next_page_token='abc',
+ ),
+ tensorboard_service.ListTensorboardRunsResponse(
+ tensorboard_runs=[],
+ next_page_token='def',
+ ),
+ tensorboard_service.ListTensorboardRunsResponse(
+ tensorboard_runs=[
+ tensorboard_run.TensorboardRun(),
+ ],
+ next_page_token='ghi',
+ ),
+ tensorboard_service.ListTensorboardRunsResponse(
+ tensorboard_runs=[
+ tensorboard_run.TensorboardRun(),
+ tensorboard_run.TensorboardRun(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = []
+ async for page_ in (await client.list_tensorboard_runs(request={})).pages:
+ pages.append(page_)
+ for page_, token in zip(pages, ['abc','def','ghi', '']):
+ assert page_.raw_page.next_page_token == token
+
+def test_delete_tensorboard_run(transport: str = 'grpc', request_type=tensorboard_service.DeleteTensorboardRunRequest):
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_tensorboard_run),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name='operations/spam')
+ response = client.delete_tensorboard_run(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.DeleteTensorboardRunRequest()
+
+ # Establish that the response is the type that we expect.
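+ # Note: delete is a long-running operation, so the client wraps the raw
+ # operations_pb2.Operation in a google.api_core future (asserted next).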
+ assert isinstance(response, future.Future) + + +def test_delete_tensorboard_run_from_dict(): + test_delete_tensorboard_run(request_type=dict) + + +def test_delete_tensorboard_run_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_run), + '__call__') as call: + client.delete_tensorboard_run() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardRunRequest() + + +@pytest.mark.asyncio +async def test_delete_tensorboard_run_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.DeleteTensorboardRunRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_run), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.delete_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.DeleteTensorboardRunRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_tensorboard_run_async_from_dict(): + await test_delete_tensorboard_run_async(request_type=dict) + + +def test_delete_tensorboard_run_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardRunRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_tensorboard_run), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.delete_tensorboard_run(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_tensorboard_run_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.DeleteTensorboardRunRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(
+ type(client.transport.delete_tensorboard_run),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
+ await client.delete_tensorboard_run(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+def test_delete_tensorboard_run_flattened():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_tensorboard_run),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name='operations/op')
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.delete_tensorboard_run(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+def test_delete_tensorboard_run_flattened_error():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_tensorboard_run(
+ tensorboard_service.DeleteTensorboardRunRequest(),
+ name='name_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_delete_tensorboard_run_flattened_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_tensorboard_run),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name='operations/spam')
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_tensorboard_run(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_tensorboard_run_flattened_error_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
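+ # Note: the request-vs-flattened check is enforced client-side before
+ # any RPC is attempted, so no transport mock is needed here.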
+ with pytest.raises(ValueError): + await client.delete_tensorboard_run( + tensorboard_service.DeleteTensorboardRunRequest(), + name='name_value', + ) + + +def test_create_tensorboard_time_series(transport: str = 'grpc', request_type=tensorboard_service.CreateTensorboardTimeSeriesRequest): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries( + name='name_value', + display_name='display_name_value', + description='description_value', + value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, + etag='etag_value', + plugin_name='plugin_name_value', + plugin_data=b'plugin_data_blob', + ) + response = client.create_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.CreateTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.value_type == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR + assert response.etag == 'etag_value' + assert response.plugin_name == 'plugin_name_value' + assert response.plugin_data == b'plugin_data_blob' + + +def test_create_tensorboard_time_series_from_dict(): + test_create_tensorboard_time_series(request_type=dict) + + +def test_create_tensorboard_time_series_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_time_series), + '__call__') as call: + client.create_tensorboard_time_series() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.CreateTensorboardTimeSeriesRequest() + + +@pytest.mark.asyncio +async def test_create_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.CreateTensorboardTimeSeriesRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. 
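+ # Note (an assumption about this file's imports):
+ # gca_tensorboard_time_series is a second alias of the same
+ # TensorboardTimeSeries proto module, so both names build identical
+ # messages.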
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries(
+ name='name_value',
+ display_name='display_name_value',
+ description='description_value',
+ value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
+ etag='etag_value',
+ plugin_name='plugin_name_value',
+ plugin_data=b'plugin_data_blob',
+ ))
+ response = await client.create_tensorboard_time_series(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.CreateTensorboardTimeSeriesRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.description == 'description_value'
+ assert response.value_type == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR
+ assert response.etag == 'etag_value'
+ assert response.plugin_name == 'plugin_name_value'
+ assert response.plugin_data == b'plugin_data_blob'
+
+
+@pytest.mark.asyncio
+async def test_create_tensorboard_time_series_async_from_dict():
+ await test_create_tensorboard_time_series_async(request_type=dict)
+
+
+def test_create_tensorboard_time_series_field_headers():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = tensorboard_service.CreateTensorboardTimeSeriesRequest()
+
+ request.parent = 'parent/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_tensorboard_time_series),
+ '__call__') as call:
+ call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries()
+ client.create_tensorboard_time_series(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'parent=parent/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_create_tensorboard_time_series_field_headers_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = tensorboard_service.CreateTensorboardTimeSeriesRequest()
+
+ request.parent = 'parent/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_tensorboard_time_series),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries())
+ await client.create_tensorboard_time_series(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
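+ # Note: routing parameters travel as 'x-goog-request-params' gRPC
+ # metadata, formatted as URL-style key=value pairs.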
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'parent=parent/value',
+ ) in kw['metadata']
+
+
+def test_create_tensorboard_time_series_flattened():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_tensorboard_time_series),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.create_tensorboard_time_series(
+ parent='parent_value',
+ tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+ assert args[0].tensorboard_time_series == gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value')
+
+
+def test_create_tensorboard_time_series_flattened_error():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_tensorboard_time_series(
+ tensorboard_service.CreateTensorboardTimeSeriesRequest(),
+ parent='parent_value',
+ tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'),
+ )
+
+
+@pytest.mark.asyncio
+async def test_create_tensorboard_time_series_flattened_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_tensorboard_time_series),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.create_tensorboard_time_series(
+ parent='parent_value',
+ tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+ assert args[0].tensorboard_time_series == gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value')
+
+
+@pytest.mark.asyncio
+async def test_create_tensorboard_time_series_flattened_error_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError): + await client.create_tensorboard_time_series( + tensorboard_service.CreateTensorboardTimeSeriesRequest(), + parent='parent_value', + tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), + ) + + +def test_get_tensorboard_time_series(transport: str = 'grpc', request_type=tensorboard_service.GetTensorboardTimeSeriesRequest): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_time_series.TensorboardTimeSeries( + name='name_value', + display_name='display_name_value', + description='description_value', + value_type=tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, + etag='etag_value', + plugin_name='plugin_name_value', + plugin_data=b'plugin_data_blob', + ) + response = client.get_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.GetTensorboardTimeSeriesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, tensorboard_time_series.TensorboardTimeSeries) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.value_type == tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR + assert response.etag == 'etag_value' + assert response.plugin_name == 'plugin_name_value' + assert response.plugin_data == b'plugin_data_blob' + + +def test_get_tensorboard_time_series_from_dict(): + test_get_tensorboard_time_series(request_type=dict) + + +def test_get_tensorboard_time_series_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_time_series), + '__call__') as call: + client.get_tensorboard_time_series() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == tensorboard_service.GetTensorboardTimeSeriesRequest() + + +@pytest.mark.asyncio +async def test_get_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.GetTensorboardTimeSeriesRequest): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. 
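+ # Note: every scalar field is populated below so the assertions cover
+ # one attribute of each value kind: str, enum, and bytes.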
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_time_series.TensorboardTimeSeries(
+ name='name_value',
+ display_name='display_name_value',
+ description='description_value',
+ value_type=tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
+ etag='etag_value',
+ plugin_name='plugin_name_value',
+ plugin_data=b'plugin_data_blob',
+ ))
+ response = await client.get_tensorboard_time_series(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.GetTensorboardTimeSeriesRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, tensorboard_time_series.TensorboardTimeSeries)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.description == 'description_value'
+ assert response.value_type == tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR
+ assert response.etag == 'etag_value'
+ assert response.plugin_name == 'plugin_name_value'
+ assert response.plugin_data == b'plugin_data_blob'
+
+
+@pytest.mark.asyncio
+async def test_get_tensorboard_time_series_async_from_dict():
+ await test_get_tensorboard_time_series_async(request_type=dict)
+
+
+def test_get_tensorboard_time_series_field_headers():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = tensorboard_service.GetTensorboardTimeSeriesRequest()
+
+ request.name = 'name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_tensorboard_time_series),
+ '__call__') as call:
+ call.return_value = tensorboard_time_series.TensorboardTimeSeries()
+ client.get_tensorboard_time_series(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_tensorboard_time_series_field_headers_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = tensorboard_service.GetTensorboardTimeSeriesRequest()
+
+ request.name = 'name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_tensorboard_time_series),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_time_series.TensorboardTimeSeries())
+ await client.get_tensorboard_time_series(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+def test_get_tensorboard_time_series_flattened():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_tensorboard_time_series),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = tensorboard_time_series.TensorboardTimeSeries()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_tensorboard_time_series(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+def test_get_tensorboard_time_series_flattened_error():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_tensorboard_time_series(
+ tensorboard_service.GetTensorboardTimeSeriesRequest(),
+ name='name_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_tensorboard_time_series_flattened_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_tensorboard_time_series),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_time_series.TensorboardTimeSeries())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_tensorboard_time_series(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_tensorboard_time_series_flattened_error_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_tensorboard_time_series(
+ tensorboard_service.GetTensorboardTimeSeriesRequest(),
+ name='name_value',
+ )
+
+
+def test_update_tensorboard_time_series(transport: str = 'grpc', request_type=tensorboard_service.UpdateTensorboardTimeSeriesRequest):
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_tensorboard_time_series),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries(
+ name='name_value',
+ display_name='display_name_value',
+ description='description_value',
+ value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
+ etag='etag_value',
+ plugin_name='plugin_name_value',
+ plugin_data=b'plugin_data_blob',
+ )
+ response = client.update_tensorboard_time_series(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.UpdateTensorboardTimeSeriesRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.description == 'description_value'
+ assert response.value_type == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR
+ assert response.etag == 'etag_value'
+ assert response.plugin_name == 'plugin_name_value'
+ assert response.plugin_data == b'plugin_data_blob'
+
+
+def test_update_tensorboard_time_series_from_dict():
+ test_update_tensorboard_time_series(request_type=dict)
+
+
+def test_update_tensorboard_time_series_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_tensorboard_time_series),
+ '__call__') as call:
+ client.update_tensorboard_time_series()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.UpdateTensorboardTimeSeriesRequest()
+
+
+@pytest.mark.asyncio
+async def test_update_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.UpdateTensorboardTimeSeriesRequest):
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_tensorboard_time_series),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries(
+ name='name_value',
+ display_name='display_name_value',
+ description='description_value',
+ value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
+ etag='etag_value',
+ plugin_name='plugin_name_value',
+ plugin_data=b'plugin_data_blob',
+ ))
+ response = await client.update_tensorboard_time_series(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.UpdateTensorboardTimeSeriesRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.description == 'description_value' + assert response.value_type == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR + assert response.etag == 'etag_value' + assert response.plugin_name == 'plugin_name_value' + assert response.plugin_data == b'plugin_data_blob' + + +@pytest.mark.asyncio +async def test_update_tensorboard_time_series_async_from_dict(): + await test_update_tensorboard_time_series_async(request_type=dict) + + +def test_update_tensorboard_time_series_field_headers(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.UpdateTensorboardTimeSeriesRequest() + + request.tensorboard_time_series.name = 'tensorboard_time_series.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_time_series), + '__call__') as call: + call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() + client.update_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard_time_series.name=tensorboard_time_series.name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_tensorboard_time_series_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.UpdateTensorboardTimeSeriesRequest() + + request.tensorboard_time_series.name = 'tensorboard_time_series.name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_time_series), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries()) + await client.update_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'tensorboard_time_series.name=tensorboard_time_series.name/value', + ) in kw['metadata'] + + +def test_update_tensorboard_time_series_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
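+ # Note: 'paths_value' is a placeholder; a real update_mask would name
+ # concrete proto fields, e.g. (hypothetically) paths=['display_name'].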
+ client.update_tensorboard_time_series(
+ tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].tensorboard_time_series == gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value')
+ assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+def test_update_tensorboard_time_series_flattened_error():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.update_tensorboard_time_series(
+ tensorboard_service.UpdateTensorboardTimeSeriesRequest(),
+ tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+
+@pytest.mark.asyncio
+async def test_update_tensorboard_time_series_flattened_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_tensorboard_time_series),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.update_tensorboard_time_series(
+ tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].tensorboard_time_series == gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value')
+ assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
+
+
+@pytest.mark.asyncio
+async def test_update_tensorboard_time_series_flattened_error_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.update_tensorboard_time_series(
+ tensorboard_service.UpdateTensorboardTimeSeriesRequest(),
+ tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'),
+ update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+ )
+
+
+def test_list_tensorboard_time_series(transport: str = 'grpc', request_type=tensorboard_service.ListTensorboardTimeSeriesRequest):
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
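+ # Note: the list RPC is paginated; the sync client wraps the response
+ # in a ListTensorboardTimeSeriesPager (asserted further down).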
+ with mock.patch.object(
+ type(client.transport.list_tensorboard_time_series),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse(
+ next_page_token='next_page_token_value',
+ )
+ response = client.list_tensorboard_time_series(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.ListTensorboardTimeSeriesRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListTensorboardTimeSeriesPager)
+ assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_tensorboard_time_series_from_dict():
+ test_list_tensorboard_time_series(request_type=dict)
+
+
+def test_list_tensorboard_time_series_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboard_time_series),
+ '__call__') as call:
+ client.list_tensorboard_time_series()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.ListTensorboardTimeSeriesRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ListTensorboardTimeSeriesRequest):
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboard_time_series),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardTimeSeriesResponse(
+ next_page_token='next_page_token_value',
+ ))
+ response = await client.list_tensorboard_time_series(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == tensorboard_service.ListTensorboardTimeSeriesRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListTensorboardTimeSeriesAsyncPager)
+ assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_time_series_async_from_dict():
+ await test_list_tensorboard_time_series_async(request_type=dict)
+
+
+def test_list_tensorboard_time_series_field_headers():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = tensorboard_service.ListTensorboardTimeSeriesRequest()
+
+ request.parent = 'parent/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.list_tensorboard_time_series), + '__call__') as call: + call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse() + client.list_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_tensorboard_time_series_field_headers_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = tensorboard_service.ListTensorboardTimeSeriesRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_time_series), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardTimeSeriesResponse()) + await client.list_tensorboard_time_series(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_list_tensorboard_time_series_flattened(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_tensorboard_time_series( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +def test_list_tensorboard_time_series_flattened_error(): + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_tensorboard_time_series( + tensorboard_service.ListTensorboardTimeSeriesRequest(), + parent='parent_value', + ) + + +@pytest.mark.asyncio +async def test_list_tensorboard_time_series_flattened_async(): + client = TensorboardServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tensorboard_time_series), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardTimeSeriesResponse())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_tensorboard_time_series(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_time_series_flattened_error_async():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_tensorboard_time_series(
+ tensorboard_service.ListTensorboardTimeSeriesRequest(),
+ parent='parent_value',
+ )
+
+
+def test_list_tensorboard_time_series_pager():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboard_time_series),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ tensorboard_service.ListTensorboardTimeSeriesResponse(
+ tensorboard_time_series=[
+ tensorboard_time_series.TensorboardTimeSeries(),
+ tensorboard_time_series.TensorboardTimeSeries(),
+ tensorboard_time_series.TensorboardTimeSeries(),
+ ],
+ next_page_token='abc',
+ ),
+ tensorboard_service.ListTensorboardTimeSeriesResponse(
+ tensorboard_time_series=[],
+ next_page_token='def',
+ ),
+ tensorboard_service.ListTensorboardTimeSeriesResponse(
+ tensorboard_time_series=[
+ tensorboard_time_series.TensorboardTimeSeries(),
+ ],
+ next_page_token='ghi',
+ ),
+ tensorboard_service.ListTensorboardTimeSeriesResponse(
+ tensorboard_time_series=[
+ tensorboard_time_series.TensorboardTimeSeries(),
+ tensorboard_time_series.TensorboardTimeSeries(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ('parent', ''),
+ )),
+ )
+ pager = client.list_tensorboard_time_series(request={})
+
+ assert pager._metadata == metadata
+
+ results = [i for i in pager]
+ assert len(results) == 6
+ assert all(isinstance(i, tensorboard_time_series.TensorboardTimeSeries)
+ for i in results)
+
+def test_list_tensorboard_time_series_pages():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboard_time_series),
+ '__call__') as call:
+ # Set the response to a series of pages.
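+ # Note: four pages of 3, 0, 1, and 2 items; tokens 'abc', 'def', 'ghi',
+ # and finally '' (no token) terminate the sequence.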
+ call.side_effect = (
+ tensorboard_service.ListTensorboardTimeSeriesResponse(
+ tensorboard_time_series=[
+ tensorboard_time_series.TensorboardTimeSeries(),
+ tensorboard_time_series.TensorboardTimeSeries(),
+ tensorboard_time_series.TensorboardTimeSeries(),
+ ],
+ next_page_token='abc',
+ ),
+ tensorboard_service.ListTensorboardTimeSeriesResponse(
+ tensorboard_time_series=[],
+ next_page_token='def',
+ ),
+ tensorboard_service.ListTensorboardTimeSeriesResponse(
+ tensorboard_time_series=[
+ tensorboard_time_series.TensorboardTimeSeries(),
+ ],
+ next_page_token='ghi',
+ ),
+ tensorboard_service.ListTensorboardTimeSeriesResponse(
+ tensorboard_time_series=[
+ tensorboard_time_series.TensorboardTimeSeries(),
+ tensorboard_time_series.TensorboardTimeSeries(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_tensorboard_time_series(request={}).pages)
+ for page_, token in zip(pages, ['abc','def','ghi', '']):
+ assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_time_series_async_pager():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboard_time_series),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ tensorboard_service.ListTensorboardTimeSeriesResponse(
+ tensorboard_time_series=[
+ tensorboard_time_series.TensorboardTimeSeries(),
+ tensorboard_time_series.TensorboardTimeSeries(),
+ tensorboard_time_series.TensorboardTimeSeries(),
+ ],
+ next_page_token='abc',
+ ),
+ tensorboard_service.ListTensorboardTimeSeriesResponse(
+ tensorboard_time_series=[],
+ next_page_token='def',
+ ),
+ tensorboard_service.ListTensorboardTimeSeriesResponse(
+ tensorboard_time_series=[
+ tensorboard_time_series.TensorboardTimeSeries(),
+ ],
+ next_page_token='ghi',
+ ),
+ tensorboard_service.ListTensorboardTimeSeriesResponse(
+ tensorboard_time_series=[
+ tensorboard_time_series.TensorboardTimeSeries(),
+ tensorboard_time_series.TensorboardTimeSeries(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_tensorboard_time_series(request={},)
+ assert async_pager.next_page_token == 'abc'
+ responses = []
+ async for response in async_pager:
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, tensorboard_time_series.TensorboardTimeSeries)
+ for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_tensorboard_time_series_async_pages():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_tensorboard_time_series),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+        call.side_effect = (
+            tensorboard_service.ListTensorboardTimeSeriesResponse(
+                tensorboard_time_series=[
+                    tensorboard_time_series.TensorboardTimeSeries(),
+                    tensorboard_time_series.TensorboardTimeSeries(),
+                    tensorboard_time_series.TensorboardTimeSeries(),
+                ],
+                next_page_token='abc',
+            ),
+            tensorboard_service.ListTensorboardTimeSeriesResponse(
+                tensorboard_time_series=[],
+                next_page_token='def',
+            ),
+            tensorboard_service.ListTensorboardTimeSeriesResponse(
+                tensorboard_time_series=[
+                    tensorboard_time_series.TensorboardTimeSeries(),
+                ],
+                next_page_token='ghi',
+            ),
+            tensorboard_service.ListTensorboardTimeSeriesResponse(
+                tensorboard_time_series=[
+                    tensorboard_time_series.TensorboardTimeSeries(),
+                    tensorboard_time_series.TensorboardTimeSeries(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_tensorboard_time_series(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+def test_delete_tensorboard_time_series(transport: str = 'grpc', request_type=tensorboard_service.DeleteTensorboardTimeSeriesRequest):
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_tensorboard_time_series),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.delete_tensorboard_time_series(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == tensorboard_service.DeleteTensorboardTimeSeriesRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+def test_delete_tensorboard_time_series_from_dict():
+    test_delete_tensorboard_time_series(request_type=dict)
+
+
+def test_delete_tensorboard_time_series_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_tensorboard_time_series),
+            '__call__') as call:
+        client.delete_tensorboard_time_series()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == tensorboard_service.DeleteTensorboardTimeSeriesRequest()
+
+
+@pytest.mark.asyncio
+async def test_delete_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.DeleteTensorboardTimeSeriesRequest):
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
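+    # The async transport must return an awaitable call object, so the mocked
+    # stub below wraps the Operation in grpc_helpers_async.FakeUnaryUnaryCall;
+    # the client is then expected to wrap that Operation in an operation future.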
+    with mock.patch.object(
+            type(client.transport.delete_tensorboard_time_series),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        response = await client.delete_tensorboard_time_series(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == tensorboard_service.DeleteTensorboardTimeSeriesRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_delete_tensorboard_time_series_async_from_dict():
+    await test_delete_tensorboard_time_series_async(request_type=dict)
+
+
+def test_delete_tensorboard_time_series_field_headers():
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = tensorboard_service.DeleteTensorboardTimeSeriesRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_tensorboard_time_series),
+            '__call__') as call:
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        client.delete_tensorboard_time_series(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_delete_tensorboard_time_series_field_headers_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = tensorboard_service.DeleteTensorboardTimeSeriesRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_tensorboard_time_series),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
+        await client.delete_tensorboard_time_series(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_delete_tensorboard_time_series_flattened():
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_tensorboard_time_series),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
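+        # The flattened keyword arguments below should be folded into a request
+        # message by the client; the assertions then inspect the request that
+        # actually reached the stub rather than the raw kwargs.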
+        client.delete_tensorboard_time_series(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_delete_tensorboard_time_series_flattened_error():
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_tensorboard_time_series(
+            tensorboard_service.DeleteTensorboardTimeSeriesRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_tensorboard_time_series_flattened_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_tensorboard_time_series),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_tensorboard_time_series(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_tensorboard_time_series_flattened_error_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.delete_tensorboard_time_series(
+            tensorboard_service.DeleteTensorboardTimeSeriesRequest(),
+            name='name_value',
+        )
+
+
+def test_read_tensorboard_time_series_data(transport: str = 'grpc', request_type=tensorboard_service.ReadTensorboardTimeSeriesDataRequest):
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_tensorboard_time_series_data),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse()
+        response = client.read_tensorboard_time_series_data(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == tensorboard_service.ReadTensorboardTimeSeriesDataRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, tensorboard_service.ReadTensorboardTimeSeriesDataResponse)
+
+
+def test_read_tensorboard_time_series_data_from_dict():
+    test_read_tensorboard_time_series_data(request_type=dict)
+
+
+def test_read_tensorboard_time_series_data_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_tensorboard_time_series_data),
+            '__call__') as call:
+        client.read_tensorboard_time_series_data()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == tensorboard_service.ReadTensorboardTimeSeriesDataRequest()
+
+
+@pytest.mark.asyncio
+async def test_read_tensorboard_time_series_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ReadTensorboardTimeSeriesDataRequest):
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_tensorboard_time_series_data),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ReadTensorboardTimeSeriesDataResponse())
+        response = await client.read_tensorboard_time_series_data(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == tensorboard_service.ReadTensorboardTimeSeriesDataRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, tensorboard_service.ReadTensorboardTimeSeriesDataResponse)
+
+
+@pytest.mark.asyncio
+async def test_read_tensorboard_time_series_data_async_from_dict():
+    await test_read_tensorboard_time_series_data_async(request_type=dict)
+
+
+def test_read_tensorboard_time_series_data_field_headers():
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest()
+
+    request.tensorboard_time_series = 'tensorboard_time_series/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_tensorboard_time_series_data),
+            '__call__') as call:
+        call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse()
+        client.read_tensorboard_time_series_data(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
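+    # kw holds the keyword arguments recorded by the mock; the routing header
+    # x-goog-request-params should carry the URI-bound field so the backend
+    # can route the request.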
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'tensorboard_time_series=tensorboard_time_series/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_read_tensorboard_time_series_data_field_headers_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest()
+
+    request.tensorboard_time_series = 'tensorboard_time_series/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_tensorboard_time_series_data),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ReadTensorboardTimeSeriesDataResponse())
+        await client.read_tensorboard_time_series_data(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'tensorboard_time_series=tensorboard_time_series/value',
+    ) in kw['metadata']
+
+
+def test_read_tensorboard_time_series_data_flattened():
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_tensorboard_time_series_data),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.read_tensorboard_time_series_data(
+            tensorboard_time_series='tensorboard_time_series_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].tensorboard_time_series == 'tensorboard_time_series_value'
+
+
+def test_read_tensorboard_time_series_data_flattened_error():
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.read_tensorboard_time_series_data(
+            tensorboard_service.ReadTensorboardTimeSeriesDataRequest(),
+            tensorboard_time_series='tensorboard_time_series_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_read_tensorboard_time_series_data_flattened_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_tensorboard_time_series_data),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ReadTensorboardTimeSeriesDataResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.read_tensorboard_time_series_data(
+            tensorboard_time_series='tensorboard_time_series_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].tensorboard_time_series == 'tensorboard_time_series_value'
+
+
+@pytest.mark.asyncio
+async def test_read_tensorboard_time_series_data_flattened_error_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.read_tensorboard_time_series_data(
+            tensorboard_service.ReadTensorboardTimeSeriesDataRequest(),
+            tensorboard_time_series='tensorboard_time_series_value',
+        )
+
+
+def test_read_tensorboard_blob_data(transport: str = 'grpc', request_type=tensorboard_service.ReadTensorboardBlobDataRequest):
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_tensorboard_blob_data),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = iter([tensorboard_service.ReadTensorboardBlobDataResponse()])
+        response = client.read_tensorboard_blob_data(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == tensorboard_service.ReadTensorboardBlobDataRequest()
+
+    # Establish that the response is the type that we expect.
+    for message in response:
+        assert isinstance(message, tensorboard_service.ReadTensorboardBlobDataResponse)
+
+
+def test_read_tensorboard_blob_data_from_dict():
+    test_read_tensorboard_blob_data(request_type=dict)
+
+
+def test_read_tensorboard_blob_data_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_tensorboard_blob_data),
+            '__call__') as call:
+        client.read_tensorboard_blob_data()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == tensorboard_service.ReadTensorboardBlobDataRequest()
+
+
+@pytest.mark.asyncio
+async def test_read_tensorboard_blob_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ReadTensorboardBlobDataRequest):
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_tensorboard_blob_data),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
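+        # ReadTensorboardBlobData is a server-streaming RPC: the async stub is
+        # expected to return a UnaryStreamCall whose read() coroutine yields
+        # one message at a time, which the spec'd mock below reproduces.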
+        call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+        call.return_value.read = mock.AsyncMock(side_effect=[tensorboard_service.ReadTensorboardBlobDataResponse()])
+        response = await client.read_tensorboard_blob_data(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == tensorboard_service.ReadTensorboardBlobDataRequest()
+
+    # Establish that the response is the type that we expect.
+    message = await response.read()
+    assert isinstance(message, tensorboard_service.ReadTensorboardBlobDataResponse)
+
+
+@pytest.mark.asyncio
+async def test_read_tensorboard_blob_data_async_from_dict():
+    await test_read_tensorboard_blob_data_async(request_type=dict)
+
+
+def test_read_tensorboard_blob_data_field_headers():
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = tensorboard_service.ReadTensorboardBlobDataRequest()
+
+    request.time_series = 'time_series/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_tensorboard_blob_data),
+            '__call__') as call:
+        call.return_value = iter([tensorboard_service.ReadTensorboardBlobDataResponse()])
+        client.read_tensorboard_blob_data(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'time_series=time_series/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_read_tensorboard_blob_data_field_headers_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = tensorboard_service.ReadTensorboardBlobDataRequest()
+
+    request.time_series = 'time_series/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_tensorboard_blob_data),
+            '__call__') as call:
+        call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+        call.return_value.read = mock.AsyncMock(side_effect=[tensorboard_service.ReadTensorboardBlobDataResponse()])
+        await client.read_tensorboard_blob_data(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'time_series=time_series/value',
+    ) in kw['metadata']
+
+
+def test_read_tensorboard_blob_data_flattened():
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_tensorboard_blob_data),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = iter([tensorboard_service.ReadTensorboardBlobDataResponse()])
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
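+        # For a server-streaming method the flattened call should return the
+        # response iterator itself; only request construction is asserted here.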
+        client.read_tensorboard_blob_data(
+            time_series='time_series_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].time_series == 'time_series_value'
+
+
+def test_read_tensorboard_blob_data_flattened_error():
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.read_tensorboard_blob_data(
+            tensorboard_service.ReadTensorboardBlobDataRequest(),
+            time_series='time_series_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_read_tensorboard_blob_data_flattened_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_tensorboard_blob_data),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.read_tensorboard_blob_data(
+            time_series='time_series_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].time_series == 'time_series_value'
+
+
+@pytest.mark.asyncio
+async def test_read_tensorboard_blob_data_flattened_error_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.read_tensorboard_blob_data(
+            tensorboard_service.ReadTensorboardBlobDataRequest(),
+            time_series='time_series_value',
+        )
+
+
+def test_write_tensorboard_run_data(transport: str = 'grpc', request_type=tensorboard_service.WriteTensorboardRunDataRequest):
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.write_tensorboard_run_data),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = tensorboard_service.WriteTensorboardRunDataResponse()
+        response = client.write_tensorboard_run_data(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == tensorboard_service.WriteTensorboardRunDataRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, tensorboard_service.WriteTensorboardRunDataResponse)
+
+
+def test_write_tensorboard_run_data_from_dict():
+    test_write_tensorboard_run_data(request_type=dict)
+
+
+def test_write_tensorboard_run_data_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.write_tensorboard_run_data),
+            '__call__') as call:
+        client.write_tensorboard_run_data()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == tensorboard_service.WriteTensorboardRunDataRequest()
+
+
+@pytest.mark.asyncio
+async def test_write_tensorboard_run_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.WriteTensorboardRunDataRequest):
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.write_tensorboard_run_data),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardRunDataResponse())
+        response = await client.write_tensorboard_run_data(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == tensorboard_service.WriteTensorboardRunDataRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, tensorboard_service.WriteTensorboardRunDataResponse)
+
+
+@pytest.mark.asyncio
+async def test_write_tensorboard_run_data_async_from_dict():
+    await test_write_tensorboard_run_data_async(request_type=dict)
+
+
+def test_write_tensorboard_run_data_field_headers():
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = tensorboard_service.WriteTensorboardRunDataRequest()
+
+    request.tensorboard_run = 'tensorboard_run/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.write_tensorboard_run_data),
+            '__call__') as call:
+        call.return_value = tensorboard_service.WriteTensorboardRunDataResponse()
+        client.write_tensorboard_run_data(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'tensorboard_run=tensorboard_run/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_write_tensorboard_run_data_field_headers_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = tensorboard_service.WriteTensorboardRunDataRequest()
+
+    request.tensorboard_run = 'tensorboard_run/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.write_tensorboard_run_data),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardRunDataResponse())
+        await client.write_tensorboard_run_data(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'tensorboard_run=tensorboard_run/value',
+    ) in kw['metadata']
+
+
+def test_write_tensorboard_run_data_flattened():
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.write_tensorboard_run_data),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = tensorboard_service.WriteTensorboardRunDataResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.write_tensorboard_run_data(
+            tensorboard_run='tensorboard_run_value',
+            time_series_data=[tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')],
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].tensorboard_run == 'tensorboard_run_value'
+        assert args[0].time_series_data == [tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')]
+
+
+def test_write_tensorboard_run_data_flattened_error():
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.write_tensorboard_run_data(
+            tensorboard_service.WriteTensorboardRunDataRequest(),
+            tensorboard_run='tensorboard_run_value',
+            time_series_data=[tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')],
+        )
+
+
+@pytest.mark.asyncio
+async def test_write_tensorboard_run_data_flattened_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.write_tensorboard_run_data),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardRunDataResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.write_tensorboard_run_data(
+            tensorboard_run='tensorboard_run_value',
+            time_series_data=[tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')],
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
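+        # Proto-plus messages compare by value, so the repeated time_series_data
+        # field can be checked below against a freshly built list of messages.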
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].tensorboard_run == 'tensorboard_run_value'
+        assert args[0].time_series_data == [tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')]
+
+
+@pytest.mark.asyncio
+async def test_write_tensorboard_run_data_flattened_error_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.write_tensorboard_run_data(
+            tensorboard_service.WriteTensorboardRunDataRequest(),
+            tensorboard_run='tensorboard_run_value',
+            time_series_data=[tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')],
+        )
+
+
+def test_export_tensorboard_time_series_data(transport: str = 'grpc', request_type=tensorboard_service.ExportTensorboardTimeSeriesDataRequest):
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.export_tensorboard_time_series_data),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.export_tensorboard_time_series_data(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == tensorboard_service.ExportTensorboardTimeSeriesDataRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ExportTensorboardTimeSeriesDataPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_export_tensorboard_time_series_data_from_dict():
+    test_export_tensorboard_time_series_data(request_type=dict)
+
+
+def test_export_tensorboard_time_series_data_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.export_tensorboard_time_series_data),
+            '__call__') as call:
+        client.export_tensorboard_time_series_data()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == tensorboard_service.ExportTensorboardTimeSeriesDataRequest()
+
+
+@pytest.mark.asyncio
+async def test_export_tensorboard_time_series_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ExportTensorboardTimeSeriesDataRequest):
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
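+    # Export returns a paged response; the async client is expected to wrap it
+    # in an ExportTensorboardTimeSeriesDataAsyncPager that still exposes
+    # next_page_token.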
+    with mock.patch.object(
+            type(client.transport.export_tensorboard_time_series_data),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.export_tensorboard_time_series_data(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == tensorboard_service.ExportTensorboardTimeSeriesDataRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ExportTensorboardTimeSeriesDataAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_export_tensorboard_time_series_data_async_from_dict():
+    await test_export_tensorboard_time_series_data_async(request_type=dict)
+
+
+def test_export_tensorboard_time_series_data_field_headers():
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest()
+
+    request.tensorboard_time_series = 'tensorboard_time_series/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.export_tensorboard_time_series_data),
+            '__call__') as call:
+        call.return_value = tensorboard_service.ExportTensorboardTimeSeriesDataResponse()
+        client.export_tensorboard_time_series_data(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'tensorboard_time_series=tensorboard_time_series/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_export_tensorboard_time_series_data_field_headers_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest()
+
+    request.tensorboard_time_series = 'tensorboard_time_series/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.export_tensorboard_time_series_data),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ExportTensorboardTimeSeriesDataResponse())
+        await client.export_tensorboard_time_series_data(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'tensorboard_time_series=tensorboard_time_series/value',
+    ) in kw['metadata']
+
+
+def test_export_tensorboard_time_series_data_flattened():
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.export_tensorboard_time_series_data),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = tensorboard_service.ExportTensorboardTimeSeriesDataResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.export_tensorboard_time_series_data(
+            tensorboard_time_series='tensorboard_time_series_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].tensorboard_time_series == 'tensorboard_time_series_value'
+
+
+def test_export_tensorboard_time_series_data_flattened_error():
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.export_tensorboard_time_series_data(
+            tensorboard_service.ExportTensorboardTimeSeriesDataRequest(),
+            tensorboard_time_series='tensorboard_time_series_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_export_tensorboard_time_series_data_flattened_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.export_tensorboard_time_series_data),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ExportTensorboardTimeSeriesDataResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.export_tensorboard_time_series_data(
+            tensorboard_time_series='tensorboard_time_series_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].tensorboard_time_series == 'tensorboard_time_series_value'
+
+
+@pytest.mark.asyncio
+async def test_export_tensorboard_time_series_data_flattened_error_async():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.export_tensorboard_time_series_data(
+            tensorboard_service.ExportTensorboardTimeSeriesDataRequest(),
+            tensorboard_time_series='tensorboard_time_series_value',
+        )
+
+
+def test_export_tensorboard_time_series_data_pager():
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.export_tensorboard_time_series_data),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
+                time_series_data_points=[
+                    tensorboard_data.TimeSeriesDataPoint(),
+                    tensorboard_data.TimeSeriesDataPoint(),
+                    tensorboard_data.TimeSeriesDataPoint(),
+                ],
+                next_page_token='abc',
+            ),
+            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
+                time_series_data_points=[],
+                next_page_token='def',
+            ),
+            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
+                time_series_data_points=[
+                    tensorboard_data.TimeSeriesDataPoint(),
+                ],
+                next_page_token='ghi',
+            ),
+            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
+                time_series_data_points=[
+                    tensorboard_data.TimeSeriesDataPoint(),
+                    tensorboard_data.TimeSeriesDataPoint(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('tensorboard_time_series', ''),
+            )),
+        )
+        pager = client.export_tensorboard_time_series_data(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, tensorboard_data.TimeSeriesDataPoint)
+                   for i in results)
+
+def test_export_tensorboard_time_series_data_pages():
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.export_tensorboard_time_series_data),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
+                time_series_data_points=[
+                    tensorboard_data.TimeSeriesDataPoint(),
+                    tensorboard_data.TimeSeriesDataPoint(),
+                    tensorboard_data.TimeSeriesDataPoint(),
+                ],
+                next_page_token='abc',
+            ),
+            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
+                time_series_data_points=[],
+                next_page_token='def',
+            ),
+            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
+                time_series_data_points=[
+                    tensorboard_data.TimeSeriesDataPoint(),
+                ],
+                next_page_token='ghi',
+            ),
+            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
+                time_series_data_points=[
+                    tensorboard_data.TimeSeriesDataPoint(),
+                    tensorboard_data.TimeSeriesDataPoint(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.export_tensorboard_time_series_data(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_export_tensorboard_time_series_data_async_pager():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.export_tensorboard_time_series_data),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
+                time_series_data_points=[
+                    tensorboard_data.TimeSeriesDataPoint(),
+                    tensorboard_data.TimeSeriesDataPoint(),
+                    tensorboard_data.TimeSeriesDataPoint(),
+                ],
+                next_page_token='abc',
+            ),
+            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
+                time_series_data_points=[],
+                next_page_token='def',
+            ),
+            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
+                time_series_data_points=[
+                    tensorboard_data.TimeSeriesDataPoint(),
+                ],
+                next_page_token='ghi',
+            ),
+            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
+                time_series_data_points=[
+                    tensorboard_data.TimeSeriesDataPoint(),
+                    tensorboard_data.TimeSeriesDataPoint(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.export_tensorboard_time_series_data(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, tensorboard_data.TimeSeriesDataPoint)
+                   for i in responses)
+
+@pytest.mark.asyncio
+async def test_export_tensorboard_time_series_data_async_pages():
+    client = TensorboardServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.export_tensorboard_time_series_data),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
+                time_series_data_points=[
+                    tensorboard_data.TimeSeriesDataPoint(),
+                    tensorboard_data.TimeSeriesDataPoint(),
+                    tensorboard_data.TimeSeriesDataPoint(),
+                ],
+                next_page_token='abc',
+            ),
+            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
+                time_series_data_points=[],
+                next_page_token='def',
+            ),
+            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
+                time_series_data_points=[
+                    tensorboard_data.TimeSeriesDataPoint(),
+                ],
+                next_page_token='ghi',
+            ),
+            tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
+                time_series_data_points=[
+                    tensorboard_data.TimeSeriesDataPoint(),
+                    tensorboard_data.TimeSeriesDataPoint(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.export_tensorboard_time_series_data(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+
+def test_credentials_transport_error():
+    # It is an error to provide credentials and a transport instance.
+    transport = transports.TensorboardServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = TensorboardServiceClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport,
+        )
+
+    # It is an error to provide a credentials file and a transport instance.
+    transport = transports.TensorboardServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = TensorboardServiceClient(
+            client_options={"credentials_file": "credentials.json"},
+            transport=transport,
+        )
+
+    # It is an error to provide scopes and a transport instance.
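+    # A ready-made transport already owns its credentials, so combining it with
+    # client-level credentials, a credentials file, or scopes should raise
+    # ValueError rather than silently preferring one source.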
+    transport = transports.TensorboardServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = TensorboardServiceClient(
+            client_options={"scopes": ["1", "2"]},
+            transport=transport,
+        )
+
+
+def test_transport_instance():
+    # A client may be instantiated with a custom transport instance.
+    transport = transports.TensorboardServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    client = TensorboardServiceClient(transport=transport)
+    assert client.transport is transport
+
+def test_transport_get_channel():
+    # A client may be instantiated with a custom transport instance.
+    transport = transports.TensorboardServiceGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    channel = transport.grpc_channel
+    assert channel
+
+    transport = transports.TensorboardServiceGrpcAsyncIOTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    channel = transport.grpc_channel
+    assert channel
+
+@pytest.mark.parametrize("transport_class", [
+    transports.TensorboardServiceGrpcTransport,
+    transports.TensorboardServiceGrpcAsyncIOTransport,
+])
+def test_transport_adc(transport_class):
+    # Test default credentials are used if not provided.
+    with mock.patch.object(google.auth, 'default') as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport_class()
+        adc.assert_called_once()
+
+def test_transport_grpc_default():
+    # A client should use the gRPC transport by default.
+    client = TensorboardServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    assert isinstance(
+        client.transport,
+        transports.TensorboardServiceGrpcTransport,
+    )
+
+def test_tensorboard_service_base_transport_error():
+    # Passing both a credentials object and credentials_file should raise an error
+    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
+        transport = transports.TensorboardServiceTransport(
+            credentials=ga_credentials.AnonymousCredentials(),
+            credentials_file="credentials.json"
+        )
+
+
+def test_tensorboard_service_base_transport():
+    # Instantiate the base transport.
+    with mock.patch('google.cloud.aiplatform_v1beta1.services.tensorboard_service.transports.TensorboardServiceTransport.__init__') as Transport:
+        Transport.return_value = None
+        transport = transports.TensorboardServiceTransport(
+            credentials=ga_credentials.AnonymousCredentials(),
+        )
+
+    # Every method on the transport should just blindly
+    # raise NotImplementedError.
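+    # The base transport is abstract: every RPC stub enumerated below should
+    # raise NotImplementedError until a concrete (grpc / grpc_asyncio)
+    # transport overrides it.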
+    methods = (
+        'create_tensorboard',
+        'get_tensorboard',
+        'update_tensorboard',
+        'list_tensorboards',
+        'delete_tensorboard',
+        'create_tensorboard_experiment',
+        'get_tensorboard_experiment',
+        'update_tensorboard_experiment',
+        'list_tensorboard_experiments',
+        'delete_tensorboard_experiment',
+        'create_tensorboard_run',
+        'get_tensorboard_run',
+        'update_tensorboard_run',
+        'list_tensorboard_runs',
+        'delete_tensorboard_run',
+        'create_tensorboard_time_series',
+        'get_tensorboard_time_series',
+        'update_tensorboard_time_series',
+        'list_tensorboard_time_series',
+        'delete_tensorboard_time_series',
+        'read_tensorboard_time_series_data',
+        'read_tensorboard_blob_data',
+        'write_tensorboard_run_data',
+        'export_tensorboard_time_series_data',
+    )
+    for method in methods:
+        with pytest.raises(NotImplementedError):
+            getattr(transport, method)(request=object())
+
+    # Additionally, the LRO client (a property) should
+    # also raise NotImplementedError
+    with pytest.raises(NotImplementedError):
+        transport.operations_client
+
+
+@requires_google_auth_gte_1_25_0
+def test_tensorboard_service_base_transport_with_credentials_file():
+    # Instantiate the base transport with a credentials file
+    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.tensorboard_service.transports.TensorboardServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.TensorboardServiceTransport(
+            credentials_file="credentials.json",
+            quota_project_id="octopus",
+        )
+        load_creds.assert_called_once_with("credentials.json",
+            scopes=None,
+            default_scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id="octopus",
+        )
+
+
+@requires_google_auth_lt_1_25_0
+def test_tensorboard_service_base_transport_with_credentials_file_old_google_auth():
+    # Instantiate the base transport with a credentials file
+    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.tensorboard_service.transports.TensorboardServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.TensorboardServiceTransport(
+            credentials_file="credentials.json",
+            quota_project_id="octopus",
+        )
+        load_creds.assert_called_once_with("credentials.json",
+            scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id="octopus",
+        )
+
+
+def test_tensorboard_service_base_transport_with_adc():
+    # Test the default credentials are used if credentials and credentials_file are None.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.tensorboard_service.transports.TensorboardServiceTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.TensorboardServiceTransport()
+        adc.assert_called_once()
+
+
+@requires_google_auth_gte_1_25_0
+def test_tensorboard_service_auth_adc():
+    # If no credentials are provided, we should use ADC credentials.
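+    # ADC here means google.auth.default(); on google-auth >= 1.25.0 the
+    # service's default scopes are expected to be passed separately via
+    # default_scopes (hence the version-gated variants of this test).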
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        TensorboardServiceClient()
+        adc.assert_called_once_with(
+            scopes=None,
+            default_scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id=None,
+        )
+
+
+@requires_google_auth_lt_1_25_0
+def test_tensorboard_service_auth_adc_old_google_auth():
+    # If no credentials are provided, we should use ADC credentials.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        TensorboardServiceClient()
+        adc.assert_called_once_with(
+            scopes=('https://www.googleapis.com/auth/cloud-platform',),
+            quota_project_id=None,
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.TensorboardServiceGrpcTransport,
+        transports.TensorboardServiceGrpcAsyncIOTransport,
+    ],
+)
+@requires_google_auth_gte_1_25_0
+def test_tensorboard_service_transport_auth_adc(transport_class):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport_class(quota_project_id="octopus", scopes=["1", "2"])
+        adc.assert_called_once_with(
+            scopes=["1", "2"],
+            default_scopes=('https://www.googleapis.com/auth/cloud-platform',),
+            quota_project_id="octopus",
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.TensorboardServiceGrpcTransport,
+        transports.TensorboardServiceGrpcAsyncIOTransport,
+    ],
+)
+@requires_google_auth_lt_1_25_0
+def test_tensorboard_service_transport_auth_adc_old_google_auth(transport_class):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
+    with mock.patch.object(google.auth, "default", autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport_class(quota_project_id="octopus")
+        adc.assert_called_once_with(
+            scopes=('https://www.googleapis.com/auth/cloud-platform',),
+            quota_project_id="octopus",
+        )
+
+
+@pytest.mark.parametrize(
+    "transport_class,grpc_helpers",
+    [
+        (transports.TensorboardServiceGrpcTransport, grpc_helpers),
+        (transports.TensorboardServiceGrpcAsyncIOTransport, grpc_helpers_async)
+    ],
+)
+def test_tensorboard_service_transport_create_channel(transport_class, grpc_helpers):
+    # If credentials and host are not provided, the transport class should use
+    # ADC credentials.
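+    # Both the sync and async helpers are expected to call create_channel with
+    # the service's default host and scopes plus unlimited gRPC message sizes,
+    # which the assertion below verifies.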
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
+ grpc_helpers, "create_channel", autospec=True
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ adc.return_value = (creds, None)
+ transport_class(
+ quota_project_id="octopus",
+ scopes=["1", "2"]
+ )
+
+ create_channel.assert_called_with(
+ "aiplatform.googleapis.com:443",
+ credentials=creds,
+ credentials_file=None,
+ quota_project_id="octopus",
+ default_scopes=(
+ 'https://www.googleapis.com/auth/cloud-platform',
+),
+ scopes=["1", "2"],
+ default_host="aiplatform.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+
+@pytest.mark.parametrize("transport_class", [transports.TensorboardServiceGrpcTransport, transports.TensorboardServiceGrpcAsyncIOTransport])
+def test_tensorboard_service_grpc_transport_client_cert_source_for_mtls(
+ transport_class
+):
+ cred = ga_credentials.AnonymousCredentials()
+
+ # Check ssl_channel_credentials is used if provided.
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+ mock_ssl_channel_creds = mock.Mock()
+ transport_class(
+ host="squid.clam.whelk",
+ credentials=cred,
+ ssl_channel_credentials=mock_ssl_channel_creds
+ )
+ mock_create_channel.assert_called_once_with(
+ "squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_channel_creds,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Check that if ssl_channel_credentials is not provided, then
+ # client_cert_source_for_mtls is used.
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+ with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+ transport_class(
+ credentials=cred,
+ client_cert_source_for_mtls=client_cert_source_callback
+ )
+ expected_cert, expected_key = client_cert_source_callback()
+ mock_ssl_cred.assert_called_once_with(
+ certificate_chain=expected_cert,
+ private_key=expected_key
+ )
+
+
+def test_tensorboard_service_host_no_port():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
+ )
+ assert client.transport._host == 'aiplatform.googleapis.com:443'
+
+
+def test_tensorboard_service_host_with_port():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
+ )
+ assert client.transport._host == 'aiplatform.googleapis.com:8000'
+
+def test_tensorboard_service_grpc_transport_channel():
+ channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.TensorboardServiceGrpcTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials is None
+
+
+def test_tensorboard_service_grpc_asyncio_transport_channel():
+ channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
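+ # Reviewer note (illustrative sketch): injecting a pre-built channel this
+ # way bypasses credential and TLS resolution entirely; application code
+ # could do the same, e.g.:
+ #
+ #     channel = aio.secure_channel('aiplatform.googleapis.com:443', creds)
+ #     transport = transports.TensorboardServiceGrpcAsyncIOTransport(channel=channel)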
+ transport = transports.TensorboardServiceGrpcAsyncIOTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.TensorboardServiceGrpcTransport, transports.TensorboardServiceGrpcAsyncIOTransport])
+def test_tensorboard_service_transport_channel_mtls_with_client_cert_source(
+ transport_class
+):
+ with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
+ with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+ mock_ssl_cred = mock.Mock()
+ grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+
+ cred = ga_credentials.AnonymousCredentials()
+ with pytest.warns(DeprecationWarning):
+ with mock.patch.object(google.auth, 'default') as adc:
+ adc.return_value = (cred, None)
+ transport = transport_class(
+ host="squid.clam.whelk",
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=client_cert_source_callback,
+ )
+ adc.assert_called_once()
+
+ grpc_ssl_channel_cred.assert_called_once_with(
+ certificate_chain=b"cert bytes", private_key=b"key bytes"
+ )
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+ assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.TensorboardServiceGrpcTransport, transports.TensorboardServiceGrpcAsyncIOTransport])
+def test_tensorboard_service_transport_channel_mtls_with_adc(
+ transport_class
+):
+ mock_ssl_cred = mock.Mock()
+ with mock.patch.multiple(
+ "google.auth.transport.grpc.SslCredentials",
+ __init__=mock.Mock(return_value=None),
+ ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+ ):
+ with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+ mock_cred = mock.Mock()
+
+ with pytest.warns(DeprecationWarning):
+ transport = transport_class(
+ host="squid.clam.whelk",
+ credentials=mock_cred,
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=None,
+ )
+
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=mock_cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_tensorboard_service_grpc_lro_client():
+ client = TensorboardServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+ transport = client.transport
+
+ # Ensure that we have an api-core operations client.
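+ # Reviewer note (hedged sketch): this operations client backs the
+ # long-running-operation methods of this service, e.g.:
+ #
+ #     lro = client.create_tensorboard(parent='...', tensorboard=tb)
+ #     result = lro.result()  # polls via transport.operations_client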
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.OperationsClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_tensorboard_service_grpc_lro_async_client():
+ client = TensorboardServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc_asyncio',
+ )
+ transport = client.transport
+
+ # Ensure that we have an api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.OperationsAsyncClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_tensorboard_path():
+ project = "squid"
+ location = "clam"
+ tensorboard = "whelk"
+ expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(project=project, location=location, tensorboard=tensorboard, )
+ actual = TensorboardServiceClient.tensorboard_path(project, location, tensorboard)
+ assert expected == actual
+
+
+def test_parse_tensorboard_path():
+ expected = {
+ "project": "octopus",
+ "location": "oyster",
+ "tensorboard": "nudibranch",
+ }
+ path = TensorboardServiceClient.tensorboard_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = TensorboardServiceClient.parse_tensorboard_path(path)
+ assert expected == actual
+
+def test_tensorboard_experiment_path():
+ project = "cuttlefish"
+ location = "mussel"
+ tensorboard = "winkle"
+ experiment = "nautilus"
+ expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, )
+ actual = TensorboardServiceClient.tensorboard_experiment_path(project, location, tensorboard, experiment)
+ assert expected == actual
+
+
+def test_parse_tensorboard_experiment_path():
+ expected = {
+ "project": "scallop",
+ "location": "abalone",
+ "tensorboard": "squid",
+ "experiment": "clam",
+ }
+ path = TensorboardServiceClient.tensorboard_experiment_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = TensorboardServiceClient.parse_tensorboard_experiment_path(path)
+ assert expected == actual
+
+def test_tensorboard_run_path():
+ project = "whelk"
+ location = "octopus"
+ tensorboard = "oyster"
+ experiment = "nudibranch"
+ run = "cuttlefish"
+ expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, run=run, )
+ actual = TensorboardServiceClient.tensorboard_run_path(project, location, tensorboard, experiment, run)
+ assert expected == actual
+
+
+def test_parse_tensorboard_run_path():
+ expected = {
+ "project": "mussel",
+ "location": "winkle",
+ "tensorboard": "nautilus",
+ "experiment": "scallop",
+ "run": "abalone",
+ }
+ path = TensorboardServiceClient.tensorboard_run_path(**expected)
+
+ # Check that the path construction is reversible.
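+ # Reviewer note: concretely, with the values above the helper builds
+ # "projects/mussel/locations/winkle/tensorboards/nautilus/experiments/scallop/runs/abalone",
+ # and parse_tensorboard_run_path() recovers the original dict from it.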
+ actual = TensorboardServiceClient.parse_tensorboard_run_path(path) + assert expected == actual + +def test_tensorboard_time_series_path(): + project = "squid" + location = "clam" + tensorboard = "whelk" + experiment = "octopus" + run = "oyster" + time_series = "nudibranch" + expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, run=run, time_series=time_series, ) + actual = TensorboardServiceClient.tensorboard_time_series_path(project, location, tensorboard, experiment, run, time_series) + assert expected == actual + + +def test_parse_tensorboard_time_series_path(): + expected = { + "project": "cuttlefish", + "location": "mussel", + "tensorboard": "winkle", + "experiment": "nautilus", + "run": "scallop", + "time_series": "abalone", + } + path = TensorboardServiceClient.tensorboard_time_series_path(**expected) + + # Check that the path construction is reversible. + actual = TensorboardServiceClient.parse_tensorboard_time_series_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = TensorboardServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = TensorboardServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = TensorboardServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = TensorboardServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = TensorboardServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = TensorboardServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = TensorboardServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = TensorboardServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = TensorboardServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = TensorboardServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = TensorboardServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TensorboardServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = TensorboardServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = TensorboardServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = TensorboardServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.TensorboardServiceTransport, '_prep_wrapped_messages') as prep: + client = TensorboardServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.TensorboardServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = TensorboardServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py new file mode 100644 index 0000000000..dc3164b9a5 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py @@ -0,0 +1,4599 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.vizier_service import VizierServiceAsyncClient +from google.cloud.aiplatform_v1beta1.services.vizier_service import VizierServiceClient +from google.cloud.aiplatform_v1beta1.services.vizier_service import pagers +from google.cloud.aiplatform_v1beta1.services.vizier_service import transports +from google.cloud.aiplatform_v1beta1.services.vizier_service.transports.base import _GOOGLE_AUTH_VERSION +from google.cloud.aiplatform_v1beta1.types import study +from google.cloud.aiplatform_v1beta1.types import study as gca_study +from google.cloud.aiplatform_v1beta1.types import vizier_service +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert VizierServiceClient._get_default_mtls_endpoint(None) is None + assert VizierServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert VizierServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert VizierServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert VizierServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert VizierServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + VizierServiceClient, + VizierServiceAsyncClient, +]) +def test_vizier_service_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +@pytest.mark.parametrize("client_class", [ + VizierServiceClient, + VizierServiceAsyncClient, +]) +def test_vizier_service_client_service_account_always_use_jwt(client_class): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.VizierServiceGrpcTransport, "grpc"), + (transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_vizier_service_client_service_account_always_use_jwt_true(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + +@pytest.mark.parametrize("client_class", [ + VizierServiceClient, + VizierServiceAsyncClient, +]) +def test_vizier_service_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +def test_vizier_service_client_get_transport_class(): + transport = VizierServiceClient.get_transport_class() + available_transports = [ + transports.VizierServiceGrpcTransport, + ] + assert transport in available_transports + + 
transport = VizierServiceClient.get_transport_class("grpc") + assert transport == transports.VizierServiceGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc"), + (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(VizierServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceClient)) +@mock.patch.object(VizierServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceAsyncClient)) +def test_vizier_service_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(VizierServiceClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(VizierServiceClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
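+ # Reviewer note: GOOGLE_API_USE_CLIENT_CERTIFICATE only accepts the strings
+ # "true" and "false", just as GOOGLE_API_USE_MTLS_ENDPOINT only accepts
+ # "never", "auto", and "always"; any other value raises, as checked here.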
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc", "true"), + (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc", "false"), + (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(VizierServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceClient)) +@mock.patch.object(VizierServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_vizier_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
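+ # Reviewer note: "ADC client cert" means a default client certificate
+ # discovered via google.auth.transport.mtls.default_client_cert_source();
+ # the patches below stand in for that discovery.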
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc"), + (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_vizier_service_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc"), + (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_vizier_service_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_vizier_service_client_client_options_from_dict(): + with mock.patch('google.cloud.aiplatform_v1beta1.services.vizier_service.transports.VizierServiceGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = VizierServiceClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_study(transport: str = 'grpc', request_type=vizier_service.CreateStudyRequest): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = gca_study.Study( + name='name_value', + display_name='display_name_value', + state=gca_study.Study.State.ACTIVE, + inactive_reason='inactive_reason_value', + ) + response = client.create_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.CreateStudyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_study.Study) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == gca_study.Study.State.ACTIVE + assert response.inactive_reason == 'inactive_reason_value' + + +def test_create_study_from_dict(): + test_create_study(request_type=dict) + + +def test_create_study_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_study), + '__call__') as call: + client.create_study() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.CreateStudyRequest() + + +@pytest.mark.asyncio +async def test_create_study_async(transport: str = 'grpc_asyncio', request_type=vizier_service.CreateStudyRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
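+ # Reviewer note (sketch): every proto3 field is optional at the wire
+ # level, so request_type() with no fields set is a legal message; a
+ # populated equivalent would look like:
+ #
+ #     vizier_service.CreateStudyRequest(parent='projects/p/locations/l')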
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_study),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_study.Study(
+ name='name_value',
+ display_name='display_name_value',
+ state=gca_study.Study.State.ACTIVE,
+ inactive_reason='inactive_reason_value',
+ ))
+ response = await client.create_study(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == vizier_service.CreateStudyRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, gca_study.Study)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.state == gca_study.Study.State.ACTIVE
+ assert response.inactive_reason == 'inactive_reason_value'
+
+
+@pytest.mark.asyncio
+async def test_create_study_async_from_dict():
+ await test_create_study_async(request_type=dict)
+
+
+def test_create_study_field_headers():
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vizier_service.CreateStudyRequest()
+
+ request.parent = 'parent/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_study),
+ '__call__') as call:
+ call.return_value = gca_study.Study()
+ client.create_study(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'parent=parent/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_create_study_field_headers_async():
+ client = VizierServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vizier_service.CreateStudyRequest()
+
+ request.parent = 'parent/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_study),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_study.Study())
+ await client.create_study(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'parent=parent/value',
+ ) in kw['metadata']
+
+
+def test_create_study_flattened():
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_study),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = gca_study.Study()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
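+ # Reviewer note: the flattened form below is sugar for an explicit request
+ # object; the two spellings are mutually exclusive (see the
+ # *_flattened_error tests), and the explicit form would be:
+ #
+ #     client.create_study(request=vizier_service.CreateStudyRequest(
+ #         parent='parent_value', study=gca_study.Study(name='name_value')))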
+ client.create_study(
+ parent='parent_value',
+ study=gca_study.Study(name='name_value'),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+ assert args[0].study == gca_study.Study(name='name_value')
+
+
+def test_create_study_flattened_error():
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_study(
+ vizier_service.CreateStudyRequest(),
+ parent='parent_value',
+ study=gca_study.Study(name='name_value'),
+ )
+
+
+@pytest.mark.asyncio
+async def test_create_study_flattened_async():
+ client = VizierServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_study),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_study.Study())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.create_study(
+ parent='parent_value',
+ study=gca_study.Study(name='name_value'),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+ assert args[0].study == gca_study.Study(name='name_value')
+
+
+@pytest.mark.asyncio
+async def test_create_study_flattened_error_async():
+ client = VizierServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.create_study(
+ vizier_service.CreateStudyRequest(),
+ parent='parent_value',
+ study=gca_study.Study(name='name_value'),
+ )
+
+
+def test_get_study(transport: str = 'grpc', request_type=vizier_service.GetStudyRequest):
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_study),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = study.Study(
+ name='name_value',
+ display_name='display_name_value',
+ state=study.Study.State.ACTIVE,
+ inactive_reason='inactive_reason_value',
+ )
+ response = client.get_study(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == vizier_service.GetStudyRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, study.Study)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.state == study.Study.State.ACTIVE
+ assert response.inactive_reason == 'inactive_reason_value'
+
+
+def test_get_study_from_dict():
+ test_get_study(request_type=dict)
+
+
+def test_get_study_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_study),
+ '__call__') as call:
+ client.get_study()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == vizier_service.GetStudyRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_study_async(transport: str = 'grpc_asyncio', request_type=vizier_service.GetStudyRequest):
+ client = VizierServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_study),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study(
+ name='name_value',
+ display_name='display_name_value',
+ state=study.Study.State.ACTIVE,
+ inactive_reason='inactive_reason_value',
+ ))
+ response = await client.get_study(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == vizier_service.GetStudyRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, study.Study)
+ assert response.name == 'name_value'
+ assert response.display_name == 'display_name_value'
+ assert response.state == study.Study.State.ACTIVE
+ assert response.inactive_reason == 'inactive_reason_value'
+
+
+@pytest.mark.asyncio
+async def test_get_study_async_from_dict():
+ await test_get_study_async(request_type=dict)
+
+
+def test_get_study_field_headers():
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vizier_service.GetStudyRequest()
+
+ request.name = 'name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_study),
+ '__call__') as call:
+ call.return_value = study.Study()
+ client.get_study(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_study_field_headers_async():
+ client = VizierServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vizier_service.GetStudyRequest()
+
+ request.name = 'name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_study),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study())
+ await client.get_study(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+def test_get_study_flattened():
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_study),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = study.Study()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_study(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+def test_get_study_flattened_error():
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_study(
+ vizier_service.GetStudyRequest(),
+ name='name_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_study_flattened_async():
+ client = VizierServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_study),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_study(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_study_flattened_error_async():
+ client = VizierServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_study(
+ vizier_service.GetStudyRequest(),
+ name='name_value',
+ )
+
+
+def test_list_studies(transport: str = 'grpc', request_type=vizier_service.ListStudiesRequest):
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_studies),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = vizier_service.ListStudiesResponse(
+ next_page_token='next_page_token_value',
+ )
+ response = client.list_studies(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == vizier_service.ListStudiesRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListStudiesPager)
+ assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_studies_from_dict():
+ test_list_studies(request_type=dict)
+
+
+def test_list_studies_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_studies),
+ '__call__') as call:
+ client.list_studies()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == vizier_service.ListStudiesRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_studies_async(transport: str = 'grpc_asyncio', request_type=vizier_service.ListStudiesRequest):
+ client = VizierServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_studies),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListStudiesResponse(
+ next_page_token='next_page_token_value',
+ ))
+ response = await client.list_studies(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == vizier_service.ListStudiesRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListStudiesAsyncPager)
+ assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_studies_async_from_dict():
+ await test_list_studies_async(request_type=dict)
+
+
+def test_list_studies_field_headers():
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vizier_service.ListStudiesRequest()
+
+ request.parent = 'parent/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_studies),
+ '__call__') as call:
+ call.return_value = vizier_service.ListStudiesResponse()
+ client.list_studies(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'parent=parent/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_studies_field_headers_async():
+ client = VizierServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vizier_service.ListStudiesRequest()
+
+ request.parent = 'parent/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_studies),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListStudiesResponse())
+ await client.list_studies(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'parent=parent/value',
+ ) in kw['metadata']
+
+
+def test_list_studies_flattened():
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_studies),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = vizier_service.ListStudiesResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_studies(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+
+
+def test_list_studies_flattened_error():
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_studies(
+ vizier_service.ListStudiesRequest(),
+ parent='parent_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_studies_flattened_async():
+ client = VizierServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_studies),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListStudiesResponse())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_studies(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_studies_flattened_error_async():
+ client = VizierServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_studies(
+ vizier_service.ListStudiesRequest(),
+ parent='parent_value',
+ )
+
+
+def test_list_studies_pager():
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_studies),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ vizier_service.ListStudiesResponse(
+ studies=[
+ study.Study(),
+ study.Study(),
+ study.Study(),
+ ],
+ next_page_token='abc',
+ ),
+ vizier_service.ListStudiesResponse(
+ studies=[],
+ next_page_token='def',
+ ),
+ vizier_service.ListStudiesResponse(
+ studies=[
+ study.Study(),
+ ],
+ next_page_token='ghi',
+ ),
+ vizier_service.ListStudiesResponse(
+ studies=[
+ study.Study(),
+ study.Study(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ('parent', ''),
+ )),
+ )
+ pager = client.list_studies(request={})
+
+ assert pager._metadata == metadata
+
+ results = [i for i in pager]
+ assert len(results) == 6
+ assert all(isinstance(i, study.Study)
+ for i in results)
+
+def test_list_studies_pages():
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_studies),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ vizier_service.ListStudiesResponse(
+ studies=[
+ study.Study(),
+ study.Study(),
+ study.Study(),
+ ],
+ next_page_token='abc',
+ ),
+ vizier_service.ListStudiesResponse(
+ studies=[],
+ next_page_token='def',
+ ),
+ vizier_service.ListStudiesResponse(
+ studies=[
+ study.Study(),
+ ],
+ next_page_token='ghi',
+ ),
+ vizier_service.ListStudiesResponse(
+ studies=[
+ study.Study(),
+ study.Study(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_studies(request={}).pages)
+ for page_, token in zip(pages, ['abc','def','ghi', '']):
+ assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_studies_async_pager():
+ client = VizierServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_studies),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ vizier_service.ListStudiesResponse(
+ studies=[
+ study.Study(),
+ study.Study(),
+ study.Study(),
+ ],
+ next_page_token='abc',
+ ),
+ vizier_service.ListStudiesResponse(
+ studies=[],
+ next_page_token='def',
+ ),
+ vizier_service.ListStudiesResponse(
+ studies=[
+ study.Study(),
+ ],
+ next_page_token='ghi',
+ ),
+ vizier_service.ListStudiesResponse(
+ studies=[
+ study.Study(),
+ study.Study(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_studies(request={},)
+ assert async_pager.next_page_token == 'abc'
+ responses = []
+ async for response in async_pager:
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, study.Study)
+ for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_studies_async_pages():
+ client = VizierServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
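+ # Reviewer note: on the async surface the RPC is awaited first, and the
+ # resulting pager's `.pages` is then consumed with `async for`, which is
+ # the pattern exercised below.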
+ with mock.patch.object( + type(client.transport.list_studies), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + vizier_service.ListStudiesResponse( + studies=[ + study.Study(), + study.Study(), + study.Study(), + ], + next_page_token='abc', + ), + vizier_service.ListStudiesResponse( + studies=[], + next_page_token='def', + ), + vizier_service.ListStudiesResponse( + studies=[ + study.Study(), + ], + next_page_token='ghi', + ), + vizier_service.ListStudiesResponse( + studies=[ + study.Study(), + study.Study(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_studies(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +def test_delete_study(transport: str = 'grpc', request_type=vizier_service.DeleteStudyRequest): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.DeleteStudyRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_study_from_dict(): + test_delete_study(request_type=dict) + + +def test_delete_study_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_study), + '__call__') as call: + client.delete_study() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.DeleteStudyRequest() + + +@pytest.mark.asyncio +async def test_delete_study_async(transport: str = 'grpc_asyncio', request_type=vizier_service.DeleteStudyRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.DeleteStudyRequest() + + # Establish that the response is the type that we expect. 
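+    # (DeleteStudy returns google.protobuf.Empty on the wire, which the
+    # generated client surfaces as a plain None.)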
+ assert response is None + + +@pytest.mark.asyncio +async def test_delete_study_async_from_dict(): + await test_delete_study_async(request_type=dict) + + +def test_delete_study_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.DeleteStudyRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_study), + '__call__') as call: + call.return_value = None + client.delete_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_study_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.DeleteStudyRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_study), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_study(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_delete_study_flattened(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_study), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_study( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == 'name_value' + + +def test_delete_study_flattened_error(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_study( + vizier_service.DeleteStudyRequest(), + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_delete_study_flattened_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_study), + '__call__') as call: + # Designate an appropriate return value for the call. 
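+        # (The async transport awaits the stub's response, so the fake value
+        # is wrapped in grpc_helpers_async.FakeUnaryUnaryCall rather than
+        # assigned directly as in the sync tests.)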
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_study(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_study_flattened_error_async():
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.delete_study(
+            vizier_service.DeleteStudyRequest(),
+            name='name_value',
+        )
+
+
+def test_lookup_study(transport: str = 'grpc', request_type=vizier_service.LookupStudyRequest):
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.lookup_study),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = study.Study(
+            name='name_value',
+            display_name='display_name_value',
+            state=study.Study.State.ACTIVE,
+            inactive_reason='inactive_reason_value',
+        )
+        response = client.lookup_study(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == vizier_service.LookupStudyRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, study.Study)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.state == study.Study.State.ACTIVE
+    assert response.inactive_reason == 'inactive_reason_value'
+
+
+def test_lookup_study_from_dict():
+    test_lookup_study(request_type=dict)
+
+
+def test_lookup_study_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.lookup_study),
+            '__call__') as call:
+        client.lookup_study()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == vizier_service.LookupStudyRequest()
+
+
+@pytest.mark.asyncio
+async def test_lookup_study_async(transport: str = 'grpc_asyncio', request_type=vizier_service.LookupStudyRequest):
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.lookup_study),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study(
+            name='name_value',
+            display_name='display_name_value',
+            state=study.Study.State.ACTIVE,
+            inactive_reason='inactive_reason_value',
+        ))
+        response = await client.lookup_study(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == vizier_service.LookupStudyRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, study.Study)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.state == study.Study.State.ACTIVE
+    assert response.inactive_reason == 'inactive_reason_value'
+
+
+@pytest.mark.asyncio
+async def test_lookup_study_async_from_dict():
+    await test_lookup_study_async(request_type=dict)
+
+
+def test_lookup_study_field_headers():
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = vizier_service.LookupStudyRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.lookup_study),
+            '__call__') as call:
+        call.return_value = study.Study()
+        client.lookup_study(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_lookup_study_field_headers_async():
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = vizier_service.LookupStudyRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.lookup_study),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study())
+        await client.lookup_study(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_lookup_study_flattened():
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.lookup_study),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = study.Study()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.lookup_study(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
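+        # (The flattened `parent` keyword is folded into a LookupStudyRequest
+        # by the client, so the assertions below inspect fields on the
+        # request object handed to the stub.)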
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+def test_lookup_study_flattened_error():
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.lookup_study(
+            vizier_service.LookupStudyRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_lookup_study_flattened_async():
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.lookup_study),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.lookup_study(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_lookup_study_flattened_error_async():
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.lookup_study(
+            vizier_service.LookupStudyRequest(),
+            parent='parent_value',
+        )
+
+
+def test_suggest_trials(transport: str = 'grpc', request_type=vizier_service.SuggestTrialsRequest):
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.suggest_trials),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.suggest_trials(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == vizier_service.SuggestTrialsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+def test_suggest_trials_from_dict():
+    test_suggest_trials(request_type=dict)
+
+
+def test_suggest_trials_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.suggest_trials), + '__call__') as call: + client.suggest_trials() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.SuggestTrialsRequest() + + +@pytest.mark.asyncio +async def test_suggest_trials_async(transport: str = 'grpc_asyncio', request_type=vizier_service.SuggestTrialsRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.suggest_trials), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.suggest_trials(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.SuggestTrialsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_suggest_trials_async_from_dict(): + await test_suggest_trials_async(request_type=dict) + + +def test_suggest_trials_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.SuggestTrialsRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.suggest_trials), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.suggest_trials(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_suggest_trials_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.SuggestTrialsRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.suggest_trials), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.suggest_trials(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
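+    # (GAPIC clients mirror URI path fields such as `parent` into the
+    # x-goog-request-params metadata entry so the backend can route the
+    # request; the assertion below inspects that metadata directly.)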
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_create_trial(transport: str = 'grpc', request_type=vizier_service.CreateTrialRequest):
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_trial),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = study.Trial(
+            name='name_value',
+            id='id_value',
+            state=study.Trial.State.REQUESTED,
+            client_id='client_id_value',
+            infeasible_reason='infeasible_reason_value',
+            custom_job='custom_job_value',
+        )
+        response = client.create_trial(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == vizier_service.CreateTrialRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, study.Trial)
+    assert response.name == 'name_value'
+    assert response.id == 'id_value'
+    assert response.state == study.Trial.State.REQUESTED
+    assert response.client_id == 'client_id_value'
+    assert response.infeasible_reason == 'infeasible_reason_value'
+    assert response.custom_job == 'custom_job_value'
+
+
+def test_create_trial_from_dict():
+    test_create_trial(request_type=dict)
+
+
+def test_create_trial_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_trial),
+            '__call__') as call:
+        client.create_trial()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == vizier_service.CreateTrialRequest()
+
+
+@pytest.mark.asyncio
+async def test_create_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.CreateTrialRequest):
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_trial),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial(
+            name='name_value',
+            id='id_value',
+            state=study.Trial.State.REQUESTED,
+            client_id='client_id_value',
+            infeasible_reason='infeasible_reason_value',
+            custom_job='custom_job_value',
+        ))
+        response = await client.create_trial(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == vizier_service.CreateTrialRequest()
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, study.Trial) + assert response.name == 'name_value' + assert response.id == 'id_value' + assert response.state == study.Trial.State.REQUESTED + assert response.client_id == 'client_id_value' + assert response.infeasible_reason == 'infeasible_reason_value' + assert response.custom_job == 'custom_job_value' + + +@pytest.mark.asyncio +async def test_create_trial_async_from_dict(): + await test_create_trial_async(request_type=dict) + + +def test_create_trial_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.CreateTrialRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_trial), + '__call__') as call: + call.return_value = study.Trial() + client.create_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_trial_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.CreateTrialRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_trial), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) + await client.create_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_create_trial_flattened(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = study.Trial() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_trial( + parent='parent_value', + trial=study.Trial(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + assert args[0].trial == study.Trial(name='name_value') + + +def test_create_trial_flattened_error(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.create_trial(
+            vizier_service.CreateTrialRequest(),
+            parent='parent_value',
+            trial=study.Trial(name='name_value'),
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_trial_flattened_async():
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_trial),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_trial(
+            parent='parent_value',
+            trial=study.Trial(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+        assert args[0].trial == study.Trial(name='name_value')
+
+
+@pytest.mark.asyncio
+async def test_create_trial_flattened_error_async():
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.create_trial(
+            vizier_service.CreateTrialRequest(),
+            parent='parent_value',
+            trial=study.Trial(name='name_value'),
+        )
+
+
+def test_get_trial(transport: str = 'grpc', request_type=vizier_service.GetTrialRequest):
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_trial),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = study.Trial(
+            name='name_value',
+            id='id_value',
+            state=study.Trial.State.REQUESTED,
+            client_id='client_id_value',
+            infeasible_reason='infeasible_reason_value',
+            custom_job='custom_job_value',
+        )
+        response = client.get_trial(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == vizier_service.GetTrialRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, study.Trial)
+    assert response.name == 'name_value'
+    assert response.id == 'id_value'
+    assert response.state == study.Trial.State.REQUESTED
+    assert response.client_id == 'client_id_value'
+    assert response.infeasible_reason == 'infeasible_reason_value'
+    assert response.custom_job == 'custom_job_value'
+
+
+def test_get_trial_from_dict():
+    test_get_trial(request_type=dict)
+
+
+def test_get_trial_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_trial),
+            '__call__') as call:
+        client.get_trial()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == vizier_service.GetTrialRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.GetTrialRequest):
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_trial),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial(
+            name='name_value',
+            id='id_value',
+            state=study.Trial.State.REQUESTED,
+            client_id='client_id_value',
+            infeasible_reason='infeasible_reason_value',
+            custom_job='custom_job_value',
+        ))
+        response = await client.get_trial(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == vizier_service.GetTrialRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, study.Trial)
+    assert response.name == 'name_value'
+    assert response.id == 'id_value'
+    assert response.state == study.Trial.State.REQUESTED
+    assert response.client_id == 'client_id_value'
+    assert response.infeasible_reason == 'infeasible_reason_value'
+    assert response.custom_job == 'custom_job_value'
+
+
+@pytest.mark.asyncio
+async def test_get_trial_async_from_dict():
+    await test_get_trial_async(request_type=dict)
+
+
+def test_get_trial_field_headers():
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = vizier_service.GetTrialRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_trial),
+            '__call__') as call:
+        call.return_value = study.Trial()
+        client.get_trial(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_trial_field_headers_async():
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = vizier_service.GetTrialRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_trial),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial())
+        await client.get_trial(request)
+
+        # Establish that the underlying gRPC stub method was called.
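+        # (The async variants assert only a truthy call count rather than the
+        # sync tests' exact `== 1`, presumably because the wrapped async mock
+        # may record additional bookkeeping calls.)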
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_get_trial_flattened():
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_trial),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = study.Trial()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_trial(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+def test_get_trial_flattened_error():
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_trial(
+            vizier_service.GetTrialRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_trial_flattened_async():
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_trial),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_trial(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_trial_flattened_error_async():
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_trial(
+            vizier_service.GetTrialRequest(),
+            name='name_value',
+        )
+
+
+def test_list_trials(transport: str = 'grpc', request_type=vizier_service.ListTrialsRequest):
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_trials),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = vizier_service.ListTrialsResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_trials(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == vizier_service.ListTrialsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListTrialsPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_trials_from_dict():
+    test_list_trials(request_type=dict)
+
+
+def test_list_trials_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_trials),
+            '__call__') as call:
+        client.list_trials()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == vizier_service.ListTrialsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_trials_async(transport: str = 'grpc_asyncio', request_type=vizier_service.ListTrialsRequest):
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_trials),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListTrialsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_trials(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == vizier_service.ListTrialsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListTrialsAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_trials_async_from_dict():
+    await test_list_trials_async(request_type=dict)
+
+
+def test_list_trials_field_headers():
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = vizier_service.ListTrialsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_trials),
+            '__call__') as call:
+        call.return_value = vizier_service.ListTrialsResponse()
+        client.list_trials(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_trials_field_headers_async():
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = vizier_service.ListTrialsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_trials),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListTrialsResponse())
+        await client.list_trials(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_list_trials_flattened():
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_trials),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = vizier_service.ListTrialsResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_trials(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+def test_list_trials_flattened_error():
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_trials(
+            vizier_service.ListTrialsRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_trials_flattened_async():
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_trials),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListTrialsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_trials(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_trials_flattened_error_async():
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_trials(
+            vizier_service.ListTrialsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_trials_pager():
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_trials),
+            '__call__') as call:
+        # Set the response to a series of pages.
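+        # (mock consumes side_effect entries one per stub invocation, so each
+        # page fetch returns the next response; the trailing RuntimeError
+        # guards against the pager requesting more pages than provided.)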
+        call.side_effect = (
+            vizier_service.ListTrialsResponse(
+                trials=[
+                    study.Trial(),
+                    study.Trial(),
+                    study.Trial(),
+                ],
+                next_page_token='abc',
+            ),
+            vizier_service.ListTrialsResponse(
+                trials=[],
+                next_page_token='def',
+            ),
+            vizier_service.ListTrialsResponse(
+                trials=[
+                    study.Trial(),
+                ],
+                next_page_token='ghi',
+            ),
+            vizier_service.ListTrialsResponse(
+                trials=[
+                    study.Trial(),
+                    study.Trial(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_trials(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, study.Trial)
+                   for i in results)
+
+def test_list_trials_pages():
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_trials),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            vizier_service.ListTrialsResponse(
+                trials=[
+                    study.Trial(),
+                    study.Trial(),
+                    study.Trial(),
+                ],
+                next_page_token='abc',
+            ),
+            vizier_service.ListTrialsResponse(
+                trials=[],
+                next_page_token='def',
+            ),
+            vizier_service.ListTrialsResponse(
+                trials=[
+                    study.Trial(),
+                ],
+                next_page_token='ghi',
+            ),
+            vizier_service.ListTrialsResponse(
+                trials=[
+                    study.Trial(),
+                    study.Trial(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_trials(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_trials_async_pager():
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_trials),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            vizier_service.ListTrialsResponse(
+                trials=[
+                    study.Trial(),
+                    study.Trial(),
+                    study.Trial(),
+                ],
+                next_page_token='abc',
+            ),
+            vizier_service.ListTrialsResponse(
+                trials=[],
+                next_page_token='def',
+            ),
+            vizier_service.ListTrialsResponse(
+                trials=[
+                    study.Trial(),
+                ],
+                next_page_token='ghi',
+            ),
+            vizier_service.ListTrialsResponse(
+                trials=[
+                    study.Trial(),
+                    study.Trial(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_trials(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, study.Trial)
+                   for i in responses)
+
+@pytest.mark.asyncio
+async def test_list_trials_async_pages():
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_trials),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+ call.side_effect = ( + vizier_service.ListTrialsResponse( + trials=[ + study.Trial(), + study.Trial(), + study.Trial(), + ], + next_page_token='abc', + ), + vizier_service.ListTrialsResponse( + trials=[], + next_page_token='def', + ), + vizier_service.ListTrialsResponse( + trials=[ + study.Trial(), + ], + next_page_token='ghi', + ), + vizier_service.ListTrialsResponse( + trials=[ + study.Trial(), + study.Trial(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_trials(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +def test_add_trial_measurement(transport: str = 'grpc', request_type=vizier_service.AddTrialMeasurementRequest): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_trial_measurement), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = study.Trial( + name='name_value', + id='id_value', + state=study.Trial.State.REQUESTED, + client_id='client_id_value', + infeasible_reason='infeasible_reason_value', + custom_job='custom_job_value', + ) + response = client.add_trial_measurement(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.AddTrialMeasurementRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, study.Trial) + assert response.name == 'name_value' + assert response.id == 'id_value' + assert response.state == study.Trial.State.REQUESTED + assert response.client_id == 'client_id_value' + assert response.infeasible_reason == 'infeasible_reason_value' + assert response.custom_job == 'custom_job_value' + + +def test_add_trial_measurement_from_dict(): + test_add_trial_measurement(request_type=dict) + + +def test_add_trial_measurement_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_trial_measurement), + '__call__') as call: + client.add_trial_measurement() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.AddTrialMeasurementRequest() + + +@pytest.mark.asyncio +async def test_add_trial_measurement_async(transport: str = 'grpc_asyncio', request_type=vizier_service.AddTrialMeasurementRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.add_trial_measurement), + '__call__') as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial(
+            name='name_value',
+            id='id_value',
+            state=study.Trial.State.REQUESTED,
+            client_id='client_id_value',
+            infeasible_reason='infeasible_reason_value',
+            custom_job='custom_job_value',
+        ))
+        response = await client.add_trial_measurement(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == vizier_service.AddTrialMeasurementRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, study.Trial)
+    assert response.name == 'name_value'
+    assert response.id == 'id_value'
+    assert response.state == study.Trial.State.REQUESTED
+    assert response.client_id == 'client_id_value'
+    assert response.infeasible_reason == 'infeasible_reason_value'
+    assert response.custom_job == 'custom_job_value'
+
+
+@pytest.mark.asyncio
+async def test_add_trial_measurement_async_from_dict():
+    await test_add_trial_measurement_async(request_type=dict)
+
+
+def test_add_trial_measurement_field_headers():
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = vizier_service.AddTrialMeasurementRequest()
+
+    request.trial_name = 'trial_name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.add_trial_measurement),
+            '__call__') as call:
+        call.return_value = study.Trial()
+        client.add_trial_measurement(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'trial_name=trial_name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_add_trial_measurement_field_headers_async():
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = vizier_service.AddTrialMeasurementRequest()
+
+    request.trial_name = 'trial_name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.add_trial_measurement),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial())
+        await client.add_trial_measurement(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'trial_name=trial_name/value',
+    ) in kw['metadata']
+
+
+def test_complete_trial(transport: str = 'grpc', request_type=vizier_service.CompleteTrialRequest):
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.complete_trial),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = study.Trial(
+            name='name_value',
+            id='id_value',
+            state=study.Trial.State.REQUESTED,
+            client_id='client_id_value',
+            infeasible_reason='infeasible_reason_value',
+            custom_job='custom_job_value',
+        )
+        response = client.complete_trial(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == vizier_service.CompleteTrialRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, study.Trial)
+    assert response.name == 'name_value'
+    assert response.id == 'id_value'
+    assert response.state == study.Trial.State.REQUESTED
+    assert response.client_id == 'client_id_value'
+    assert response.infeasible_reason == 'infeasible_reason_value'
+    assert response.custom_job == 'custom_job_value'
+
+
+def test_complete_trial_from_dict():
+    test_complete_trial(request_type=dict)
+
+
+def test_complete_trial_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = VizierServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.complete_trial),
+            '__call__') as call:
+        client.complete_trial()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == vizier_service.CompleteTrialRequest()
+
+
+@pytest.mark.asyncio
+async def test_complete_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.CompleteTrialRequest):
+    client = VizierServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.complete_trial),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial(
+            name='name_value',
+            id='id_value',
+            state=study.Trial.State.REQUESTED,
+            client_id='client_id_value',
+            infeasible_reason='infeasible_reason_value',
+            custom_job='custom_job_value',
+        ))
+        response = await client.complete_trial(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == vizier_service.CompleteTrialRequest()
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, study.Trial) + assert response.name == 'name_value' + assert response.id == 'id_value' + assert response.state == study.Trial.State.REQUESTED + assert response.client_id == 'client_id_value' + assert response.infeasible_reason == 'infeasible_reason_value' + assert response.custom_job == 'custom_job_value' + + +@pytest.mark.asyncio +async def test_complete_trial_async_from_dict(): + await test_complete_trial_async(request_type=dict) + + +def test_complete_trial_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.CompleteTrialRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.complete_trial), + '__call__') as call: + call.return_value = study.Trial() + client.complete_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_complete_trial_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.CompleteTrialRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.complete_trial), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) + await client.complete_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_delete_trial(transport: str = 'grpc', request_type=vizier_service.DeleteTrialRequest): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.DeleteTrialRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_trial_from_dict(): + test_delete_trial(request_type=dict) + + +def test_delete_trial_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
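+ # With no request object and no flattened fields, the client is expected
+ # to build a default DeleteTrialRequest() itself, as asserted below.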
+ client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_trial), + '__call__') as call: + client.delete_trial() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.DeleteTrialRequest() + + +@pytest.mark.asyncio +async def test_delete_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.DeleteTrialRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.DeleteTrialRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_trial_async_from_dict(): + await test_delete_trial_async(request_type=dict) + + +def test_delete_trial_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.DeleteTrialRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_trial), + '__call__') as call: + call.return_value = None + client.delete_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_trial_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.DeleteTrialRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_trial), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
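+ # The x-goog-request-params metadata entry carries the resource name so
+ # the backend can route the request to the right location.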
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+def test_delete_trial_flattened():
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_trial),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.delete_trial(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+def test_delete_trial_flattened_error():
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_trial(
+ vizier_service.DeleteTrialRequest(),
+ name='name_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_delete_trial_flattened_async():
+ client = VizierServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_trial),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_trial(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_trial_flattened_error_async():
+ client = VizierServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.delete_trial(
+ vizier_service.DeleteTrialRequest(),
+ name='name_value',
+ )
+
+
+def test_check_trial_early_stopping_state(transport: str = 'grpc', request_type=vizier_service.CheckTrialEarlyStoppingStateRequest):
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.check_trial_early_stopping_state),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name='operations/spam')
+ response = client.check_trial_early_stopping_state(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == vizier_service.CheckTrialEarlyStoppingStateRequest()
+
+ # Establish that the response is the type that we expect.
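+ # CheckTrialEarlyStoppingState is a long-running operation, so the client
+ # wraps the returned Operation proto in an api-core future instead of
+ # returning the message directly.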
+ assert isinstance(response, future.Future) + + +def test_check_trial_early_stopping_state_from_dict(): + test_check_trial_early_stopping_state(request_type=dict) + + +def test_check_trial_early_stopping_state_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_trial_early_stopping_state), + '__call__') as call: + client.check_trial_early_stopping_state() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.CheckTrialEarlyStoppingStateRequest() + + +@pytest.mark.asyncio +async def test_check_trial_early_stopping_state_async(transport: str = 'grpc_asyncio', request_type=vizier_service.CheckTrialEarlyStoppingStateRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_trial_early_stopping_state), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.check_trial_early_stopping_state(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.CheckTrialEarlyStoppingStateRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_check_trial_early_stopping_state_async_from_dict(): + await test_check_trial_early_stopping_state_async(request_type=dict) + + +def test_check_trial_early_stopping_state_field_headers(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vizier_service.CheckTrialEarlyStoppingStateRequest() + + request.trial_name = 'trial_name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_trial_early_stopping_state), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.check_trial_early_stopping_state(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'trial_name=trial_name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_check_trial_early_stopping_state_field_headers_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = vizier_service.CheckTrialEarlyStoppingStateRequest() + + request.trial_name = 'trial_name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_trial_early_stopping_state), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.check_trial_early_stopping_state(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'trial_name=trial_name/value', + ) in kw['metadata'] + + +def test_stop_trial(transport: str = 'grpc', request_type=vizier_service.StopTrialRequest): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stop_trial), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = study.Trial( + name='name_value', + id='id_value', + state=study.Trial.State.REQUESTED, + client_id='client_id_value', + infeasible_reason='infeasible_reason_value', + custom_job='custom_job_value', + ) + response = client.stop_trial(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.StopTrialRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, study.Trial) + assert response.name == 'name_value' + assert response.id == 'id_value' + assert response.state == study.Trial.State.REQUESTED + assert response.client_id == 'client_id_value' + assert response.infeasible_reason == 'infeasible_reason_value' + assert response.custom_job == 'custom_job_value' + + +def test_stop_trial_from_dict(): + test_stop_trial(request_type=dict) + + +def test_stop_trial_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stop_trial), + '__call__') as call: + client.stop_trial() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vizier_service.StopTrialRequest() + + +@pytest.mark.asyncio +async def test_stop_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.StopTrialRequest): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.stop_trial), + '__call__') as call: + # Designate an appropriate return value for the call. 
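+ # grpc_helpers_async.FakeUnaryUnaryCall wraps the message in an awaitable
+ # call object, mimicking what a real async gRPC stub would return.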
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial(
+ name='name_value',
+ id='id_value',
+ state=study.Trial.State.REQUESTED,
+ client_id='client_id_value',
+ infeasible_reason='infeasible_reason_value',
+ custom_job='custom_job_value',
+ ))
+ response = await client.stop_trial(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == vizier_service.StopTrialRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, study.Trial)
+ assert response.name == 'name_value'
+ assert response.id == 'id_value'
+ assert response.state == study.Trial.State.REQUESTED
+ assert response.client_id == 'client_id_value'
+ assert response.infeasible_reason == 'infeasible_reason_value'
+ assert response.custom_job == 'custom_job_value'
+
+
+@pytest.mark.asyncio
+async def test_stop_trial_async_from_dict():
+ await test_stop_trial_async(request_type=dict)
+
+
+def test_stop_trial_field_headers():
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vizier_service.StopTrialRequest()
+
+ request.name = 'name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stop_trial),
+ '__call__') as call:
+ call.return_value = study.Trial()
+ client.stop_trial(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_stop_trial_field_headers_async():
+ client = VizierServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vizier_service.StopTrialRequest()
+
+ request.name = 'name/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.stop_trial),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial())
+ await client.stop_trial(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'name=name/value',
+ ) in kw['metadata']
+
+
+def test_list_optimal_trials(transport: str = 'grpc', request_type=vizier_service.ListOptimalTrialsRequest):
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_optimal_trials),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
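+ # No scalar fields are designated on the response here; the assertions
+ # below only check the message type.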
+ call.return_value = vizier_service.ListOptimalTrialsResponse()
+ response = client.list_optimal_trials(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == vizier_service.ListOptimalTrialsRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, vizier_service.ListOptimalTrialsResponse)
+
+
+def test_list_optimal_trials_from_dict():
+ test_list_optimal_trials(request_type=dict)
+
+
+def test_list_optimal_trials_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_optimal_trials),
+ '__call__') as call:
+ client.list_optimal_trials()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == vizier_service.ListOptimalTrialsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_optimal_trials_async(transport: str = 'grpc_asyncio', request_type=vizier_service.ListOptimalTrialsRequest):
+ client = VizierServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_optimal_trials),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListOptimalTrialsResponse())
+ response = await client.list_optimal_trials(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == vizier_service.ListOptimalTrialsRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, vizier_service.ListOptimalTrialsResponse)
+
+
+@pytest.mark.asyncio
+async def test_list_optimal_trials_async_from_dict():
+ await test_list_optimal_trials_async(request_type=dict)
+
+
+def test_list_optimal_trials_field_headers():
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vizier_service.ListOptimalTrialsRequest()
+
+ request.parent = 'parent/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_optimal_trials),
+ '__call__') as call:
+ call.return_value = vizier_service.ListOptimalTrialsResponse()
+ client.list_optimal_trials(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'parent=parent/value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_optimal_trials_field_headers_async():
+ client = VizierServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = vizier_service.ListOptimalTrialsRequest()
+
+ request.parent = 'parent/value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_optimal_trials),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListOptimalTrialsResponse())
+ await client.list_optimal_trials(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'parent=parent/value',
+ ) in kw['metadata']
+
+
+def test_list_optimal_trials_flattened():
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_optimal_trials),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = vizier_service.ListOptimalTrialsResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_optimal_trials(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == 'parent_value'
+
+
+def test_list_optimal_trials_flattened_error():
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_optimal_trials(
+ vizier_service.ListOptimalTrialsRequest(),
+ parent='parent_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_optimal_trials_flattened_async():
+ client = VizierServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_optimal_trials),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListOptimalTrialsResponse())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_optimal_trials(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
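+ # Flattened keyword arguments are folded into a request object by the
+ # client, so the positional request argument is inspected for the value.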
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +@pytest.mark.asyncio +async def test_list_optimal_trials_flattened_error_async(): + client = VizierServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_optimal_trials( + vizier_service.ListOptimalTrialsRequest(), + parent='parent_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.VizierServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.VizierServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = VizierServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.VizierServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = VizierServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.VizierServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = VizierServiceClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.VizierServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.VizierServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.VizierServiceGrpcTransport, + transports.VizierServiceGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.VizierServiceGrpcTransport, + ) + +def test_vizier_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.VizierServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_vizier_service_base_transport(): + # Instantiate the base transport. 
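+ # The base transport is abstract: every RPC method should raise
+ # NotImplementedError until a concrete gRPC transport overrides it.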
+ with mock.patch('google.cloud.aiplatform_v1beta1.services.vizier_service.transports.VizierServiceTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.VizierServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'create_study', + 'get_study', + 'list_studies', + 'delete_study', + 'lookup_study', + 'suggest_trials', + 'create_trial', + 'get_trial', + 'list_trials', + 'add_trial_measurement', + 'complete_trial', + 'delete_trial', + 'check_trial_early_stopping_state', + 'stop_trial', + 'list_optimal_trials', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +@requires_google_auth_gte_1_25_0 +def test_vizier_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.vizier_service.transports.VizierServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.VizierServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_vizier_service_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.vizier_service.transports.VizierServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.VizierServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + ), + quota_project_id="octopus", + ) + + +def test_vizier_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.vizier_service.transports.VizierServiceTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.VizierServiceTransport() + adc.assert_called_once() + + +@requires_google_auth_gte_1_25_0 +def test_vizier_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
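+ # ADC (Application Default Credentials) are resolved via
+ # google.auth.default(), which is patched here so the test does not
+ # depend on the environment's real credentials.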
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + VizierServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_vizier_service_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + VizierServiceClient() + adc.assert_called_once_with( + scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.VizierServiceGrpcTransport, + transports.VizierServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_vizier_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.VizierServiceGrpcTransport, + transports.VizierServiceGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_vizier_service_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") + adc.assert_called_once_with(scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.VizierServiceGrpcTransport, grpc_helpers), + (transports.VizierServiceGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_vizier_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
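+ # Both google.auth.default and the helper's create_channel are patched so
+ # the test can assert the channel is created against the default endpoint
+ # with unlimited gRPC message sizes.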
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.VizierServiceGrpcTransport, transports.VizierServiceGrpcAsyncIOTransport]) +def test_vizier_service_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +def test_vizier_service_host_no_port(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), + ) + assert client.transport._host == 'aiplatform.googleapis.com:443' + + +def test_vizier_service_host_with_port(): + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), + ) + assert client.transport._host == 'aiplatform.googleapis.com:8000' + +def test_vizier_service_grpc_transport_channel(): + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.VizierServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_vizier_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.VizierServiceGrpcAsyncIOTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.VizierServiceGrpcTransport, transports.VizierServiceGrpcAsyncIOTransport])
+def test_vizier_service_transport_channel_mtls_with_client_cert_source(
+ transport_class
+):
+ with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
+ with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+ mock_ssl_cred = mock.Mock()
+ grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+
+ cred = ga_credentials.AnonymousCredentials()
+ with pytest.warns(DeprecationWarning):
+ with mock.patch.object(google.auth, 'default') as adc:
+ adc.return_value = (cred, None)
+ transport = transport_class(
+ host="squid.clam.whelk",
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=client_cert_source_callback,
+ )
+ adc.assert_called_once()
+
+ grpc_ssl_channel_cred.assert_called_once_with(
+ certificate_chain=b"cert bytes", private_key=b"key bytes"
+ )
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+ assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.VizierServiceGrpcTransport, transports.VizierServiceGrpcAsyncIOTransport])
+def test_vizier_service_transport_channel_mtls_with_adc(
+ transport_class
+):
+ mock_ssl_cred = mock.Mock()
+ with mock.patch.multiple(
+ "google.auth.transport.grpc.SslCredentials",
+ __init__=mock.Mock(return_value=None),
+ ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+ ):
+ with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+ mock_cred = mock.Mock()
+
+ with pytest.warns(DeprecationWarning):
+ transport = transport_class(
+ host="squid.clam.whelk",
+ credentials=mock_cred,
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=None,
+ )
+
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=mock_cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_vizier_service_grpc_lro_client():
+ client = VizierServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+ transport = client.transport
+
+ # Ensure that we have an api-core operations client.
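+ # The operations client is what LRO-returning methods (for example
+ # check_trial_early_stopping_state) use to poll operation status.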
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.OperationsClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_vizier_service_grpc_lro_async_client():
+ client = VizierServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc_asyncio',
+ )
+ transport = client.transport
+
+ # Ensure that we have an api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.OperationsAsyncClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_custom_job_path():
+ project = "squid"
+ location = "clam"
+ custom_job = "whelk"
+ expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, )
+ actual = VizierServiceClient.custom_job_path(project, location, custom_job)
+ assert expected == actual
+
+
+def test_parse_custom_job_path():
+ expected = {
+ "project": "octopus",
+ "location": "oyster",
+ "custom_job": "nudibranch",
+ }
+ path = VizierServiceClient.custom_job_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = VizierServiceClient.parse_custom_job_path(path)
+ assert expected == actual
+
+def test_study_path():
+ project = "cuttlefish"
+ location = "mussel"
+ study = "winkle"
+ expected = "projects/{project}/locations/{location}/studies/{study}".format(project=project, location=location, study=study, )
+ actual = VizierServiceClient.study_path(project, location, study)
+ assert expected == actual
+
+
+def test_parse_study_path():
+ expected = {
+ "project": "nautilus",
+ "location": "scallop",
+ "study": "abalone",
+ }
+ path = VizierServiceClient.study_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = VizierServiceClient.parse_study_path(path)
+ assert expected == actual
+
+def test_trial_path():
+ project = "squid"
+ location = "clam"
+ study = "whelk"
+ trial = "octopus"
+ expected = "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, )
+ actual = VizierServiceClient.trial_path(project, location, study, trial)
+ assert expected == actual
+
+
+def test_parse_trial_path():
+ expected = {
+ "project": "oyster",
+ "location": "nudibranch",
+ "study": "cuttlefish",
+ "trial": "mussel",
+ }
+ path = VizierServiceClient.trial_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = VizierServiceClient.parse_trial_path(path)
+ assert expected == actual
+
+def test_common_billing_account_path():
+ billing_account = "winkle"
+ expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+ actual = VizierServiceClient.common_billing_account_path(billing_account)
+ assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+ expected = {
+ "billing_account": "nautilus",
+ }
+ path = VizierServiceClient.common_billing_account_path(**expected)
+
+ # Check that the path construction is reversible.
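+ # parse_* helpers match the path against the template with a regular
+ # expression and return the captured components as a dict.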
+ actual = VizierServiceClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "scallop" + expected = "folders/{folder}".format(folder=folder, ) + actual = VizierServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "abalone", + } + path = VizierServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = VizierServiceClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "squid" + expected = "organizations/{organization}".format(organization=organization, ) + actual = VizierServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "clam", + } + path = VizierServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = VizierServiceClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "whelk" + expected = "projects/{project}".format(project=project, ) + actual = VizierServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "octopus", + } + path = VizierServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = VizierServiceClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "oyster" + location = "nudibranch" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = VizierServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "cuttlefish", + "location": "mussel", + } + path = VizierServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = VizierServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.VizierServiceTransport, '_prep_wrapped_messages') as prep: + client = VizierServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.VizierServiceTransport, '_prep_wrapped_messages') as prep: + transport_class = VizierServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/definition_v1beta1/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/definition_v1beta1/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/definition_v1beta1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/instance_v1beta1/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/instance_v1beta1/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/instance_v1beta1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/params_v1beta1/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/params_v1beta1/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/params_v1beta1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/prediction_v1beta1/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/prediction_v1beta1/__init__.py new file mode 100644 index 0000000000..b54a5fcc42 --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/prediction_v1beta1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# From 10c1f44963d0719d63a5f968b0ee6cee3c0df619 Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Wed, 14 Jul 2021 18:50:47 +0000 Subject: [PATCH 2/2] =?UTF-8?q?=F0=9F=A6=89=20Updates=20from=20OwlBot?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit See https://github.com/googleapis/repo-automation-bots/blob/master/packages/owl-bot/README.md --- .../types/image_classification.py | 4 +- .../types/image_object_detection.py | 4 +- .../instance_v1beta1/types/text_extraction.py | 2 +- .../types/video_classification.py | 23 +- .../types/classification.py | 3 +- .../definition_v1beta1/types/automl_tables.py | 4 + .../types/automl_time_series_forecasting.py | 4 + .../types/automl_video_action_recognition.py | 2 + .../export_evaluated_data_items_config.py | 1 - google/cloud/aiplatform_v1/__init__.py | 30 + .../cloud/aiplatform_v1/gapic_metadata.json | 50 + .../services/job_service/async_client.py | 2 + .../services/job_service/client.py | 15 + .../services/migration_service/client.py | 22 +- .../services/pipeline_service/async_client.py | 432 + .../services/pipeline_service/client.py | 528 + .../services/pipeline_service/pagers.py | 129 + .../pipeline_service/transports/base.py | 65 + .../pipeline_service/transports/grpc.py | 152 + .../transports/grpc_asyncio.py | 157 + .../prediction_service/async_client.py | 26 +- .../services/prediction_service/client.py | 30 +- google/cloud/aiplatform_v1/types/__init__.py | 32 + .../aiplatform_v1/types/accelerator_type.py | 3 +- .../cloud/aiplatform_v1/types/artifact.py | 46 +- .../types/batch_prediction_job.py | 12 +- .../cloud/aiplatform_v1/types/context.py | 39 +- .../cloud/aiplatform_v1/types/execution.py | 41 +- google/cloud/aiplatform_v1/types/io.py | 1 + google/cloud/aiplatform_v1/types/job_state.py | 1 + .../aiplatform_v1/types/machine_resources.py | 74 +- .../cloud/aiplatform_v1/types/pipeline_job.py | 213 +- .../aiplatform_v1/types/pipeline_service.py | 179 +- google/cloud/aiplatform_v1/types/study.py | 66 + .../cloud/aiplatform_v1/types/value.py | 25 +- .../services/migration_service/client.py | 22 +- owl-bot-staging/v1/.coveragerc | 17 - owl-bot-staging/v1/MANIFEST.in | 2 - owl-bot-staging/v1/README.rst | 49 - .../v1/docs/aiplatform_v1/dataset_service.rst | 10 - .../docs/aiplatform_v1/endpoint_service.rst | 10 - .../v1/docs/aiplatform_v1/job_service.rst | 10 - .../docs/aiplatform_v1/migration_service.rst | 10 - .../v1/docs/aiplatform_v1/model_service.rst | 10 - .../docs/aiplatform_v1/pipeline_service.rst | 10 - .../docs/aiplatform_v1/prediction_service.rst | 6 - .../v1/docs/aiplatform_v1/services.rst | 13 - .../aiplatform_v1/specialist_pool_service.rst | 10 - .../v1/docs/aiplatform_v1/types.rst | 7 - owl-bot-staging/v1/docs/conf.py | 376 - .../v1/docs/definition_v1/services.rst | 4 - .../v1/docs/definition_v1/types.rst | 7 - owl-bot-staging/v1/docs/index.rst | 7 - .../v1/docs/instance_v1/services.rst | 4 - owl-bot-staging/v1/docs/instance_v1/types.rst | 7 - .../v1/docs/params_v1/services.rst | 4 - owl-bot-staging/v1/docs/params_v1/types.rst | 7 - .../v1/docs/prediction_v1/services.rst | 4 - .../v1/docs/prediction_v1/types.rst | 7 - .../v1/google/cloud/aiplatform/__init__.py | 389 - .../v1/google/cloud/aiplatform/py.typed | 2 - .../v1/schema/predict/instance/__init__.py | 37 - .../v1/schema/predict/instance/py.typed | 2 - .../v1/schema/predict/instance_v1/__init__.py | 38 - .../predict/instance_v1/gapic_metadata.json | 7 - .../v1/schema/predict/instance_v1/py.typed | 2 - 
.../predict/instance_v1/services/__init__.py | 15 - .../predict/instance_v1/types/__init__.py | 54 - .../instance_v1/types/image_classification.py | 55 - .../types/image_object_detection.py | 55 - .../instance_v1/types/image_segmentation.py | 49 - .../instance_v1/types/text_classification.py | 48 - .../instance_v1/types/text_extraction.py | 61 - .../instance_v1/types/text_sentiment.py | 48 - .../types/video_action_recognition.py | 72 - .../instance_v1/types/video_classification.py | 72 - .../types/video_object_tracking.py | 72 - .../v1/schema/predict/params/__init__.py | 31 - .../v1/schema/predict/params/py.typed | 2 - .../v1/schema/predict/params_v1/__init__.py | 32 - .../predict/params_v1/gapic_metadata.json | 7 - .../v1/schema/predict/params_v1/py.typed | 2 - .../predict/params_v1/services/__init__.py | 15 - .../predict/params_v1/types/__init__.py | 42 - .../params_v1/types/image_classification.py | 51 - .../params_v1/types/image_object_detection.py | 52 - .../params_v1/types/image_segmentation.py | 44 - .../types/video_action_recognition.py | 52 - .../params_v1/types/video_classification.py | 94 - .../params_v1/types/video_object_tracking.py | 60 - .../v1/schema/predict/prediction/__init__.py | 39 - .../v1/schema/predict/prediction/py.typed | 2 - .../schema/predict/prediction_v1/__init__.py | 40 - .../predict/prediction_v1/gapic_metadata.json | 7 - .../v1/schema/predict/prediction_v1/py.typed | 2 - .../prediction_v1/services/__init__.py | 15 - .../predict/prediction_v1/types/__init__.py | 58 - .../prediction_v1/types/classification.py | 56 - .../types/image_object_detection.py | 72 - .../prediction_v1/types/image_segmentation.py | 61 - .../types/tabular_classification.py | 51 - .../prediction_v1/types/tabular_regression.py | 52 - .../prediction_v1/types/text_extraction.py | 77 - .../prediction_v1/types/text_sentiment.py | 47 - .../types/video_action_recognition.py | 84 - .../types/video_classification.py | 102 - .../types/video_object_tracking.py | 144 - .../schema/trainingjob/definition/__init__.py | 69 - .../v1/schema/trainingjob/definition/py.typed | 2 - .../trainingjob/definition_v1/__init__.py | 70 - .../definition_v1/gapic_metadata.json | 7 - .../schema/trainingjob/definition_v1/py.typed | 2 - .../definition_v1/services/__init__.py | 15 - .../definition_v1/types/__init__.py | 90 - .../types/automl_image_classification.py | 156 - .../types/automl_image_object_detection.py | 137 - .../types/automl_image_segmentation.py | 131 - .../definition_v1/types/automl_tables.py | 499 - .../types/automl_text_classification.py | 57 - .../types/automl_text_extraction.py | 48 - .../types/automl_text_sentiment.py | 66 - .../types/automl_video_action_recognition.py | 65 - .../types/automl_video_classification.py | 64 - .../types/automl_video_object_tracking.py | 67 - .../export_evaluated_data_items_config.py | 57 - .../v1/google/cloud/aiplatform_v1/__init__.py | 390 - .../cloud/aiplatform_v1/gapic_metadata.json | 771 -- .../v1/google/cloud/aiplatform_v1/py.typed | 2 - .../cloud/aiplatform_v1/services/__init__.py | 15 - .../services/dataset_service/__init__.py | 22 - .../services/dataset_service/async_client.py | 1074 -- .../services/dataset_service/client.py | 1288 --- .../services/dataset_service/pagers.py | 387 - .../dataset_service/transports/__init__.py | 33 - .../dataset_service/transports/base.py | 304 - .../dataset_service/transports/grpc.py | 506 - .../transports/grpc_asyncio.py | 510 - .../services/endpoint_service/__init__.py | 22 - .../services/endpoint_service/async_client.py | 860 -- 
.../services/endpoint_service/client.py | 1054 -- .../services/endpoint_service/pagers.py | 141 - .../endpoint_service/transports/__init__.py | 33 - .../endpoint_service/transports/base.py | 261 - .../endpoint_service/transports/grpc.py | 430 - .../transports/grpc_asyncio.py | 434 - .../services/job_service/__init__.py | 22 - .../services/job_service/async_client.py | 1913 ---- .../services/job_service/client.py | 2163 ---- .../services/job_service/pagers.py | 510 - .../job_service/transports/__init__.py | 33 - .../services/job_service/transports/base.py | 450 - .../services/job_service/transports/grpc.py | 818 -- .../job_service/transports/grpc_asyncio.py | 822 -- .../services/migration_service/__init__.py | 22 - .../migration_service/async_client.py | 376 - .../services/migration_service/client.py | 617 -- .../services/migration_service/pagers.py | 141 - .../migration_service/transports/__init__.py | 33 - .../migration_service/transports/base.py | 189 - .../migration_service/transports/grpc.py | 303 - .../transports/grpc_asyncio.py | 307 - .../services/model_service/__init__.py | 22 - .../services/model_service/async_client.py | 1059 -- .../services/model_service/client.py | 1282 --- .../services/model_service/pagers.py | 387 - .../model_service/transports/__init__.py | 33 - .../services/model_service/transports/base.py | 305 - .../services/model_service/transports/grpc.py | 514 - .../model_service/transports/grpc_asyncio.py | 518 - .../services/pipeline_service/__init__.py | 22 - .../services/pipeline_service/async_client.py | 1069 -- .../services/pipeline_service/client.py | 1328 --- .../services/pipeline_service/pagers.py | 264 - .../pipeline_service/transports/__init__.py | 33 - .../pipeline_service/transports/base.py | 306 - .../pipeline_service/transports/grpc.py | 539 - .../transports/grpc_asyncio.py | 543 - .../services/prediction_service/__init__.py | 22 - .../prediction_service/async_client.py | 272 - .../services/prediction_service/client.py | 459 - .../prediction_service/transports/__init__.py | 33 - .../prediction_service/transports/base.py | 168 - .../prediction_service/transports/grpc.py | 252 - .../transports/grpc_asyncio.py | 256 - .../specialist_pool_service/__init__.py | 22 - .../specialist_pool_service/async_client.py | 650 -- .../specialist_pool_service/client.py | 837 -- .../specialist_pool_service/pagers.py | 141 - .../transports/__init__.py | 33 - .../transports/base.py | 232 - .../transports/grpc.py | 382 - .../transports/grpc_asyncio.py | 386 - .../cloud/aiplatform_v1/types/__init__.py | 429 - .../aiplatform_v1/types/accelerator_type.py | 38 - .../cloud/aiplatform_v1/types/annotation.py | 129 - .../aiplatform_v1/types/annotation_spec.py | 78 - .../types/batch_prediction_job.py | 407 - .../aiplatform_v1/types/completion_stats.py | 63 - .../cloud/aiplatform_v1/types/custom_job.py | 391 - .../cloud/aiplatform_v1/types/data_item.py | 101 - .../aiplatform_v1/types/data_labeling_job.py | 332 - .../cloud/aiplatform_v1/types/dataset.py | 220 - .../aiplatform_v1/types/dataset_service.py | 542 - .../aiplatform_v1/types/deployed_model_ref.py | 47 - .../aiplatform_v1/types/encryption_spec.py | 47 - .../cloud/aiplatform_v1/types/endpoint.py | 238 - .../aiplatform_v1/types/endpoint_service.py | 394 - .../cloud/aiplatform_v1/types/env_var.py | 56 - .../types/hyperparameter_tuning_job.py | 182 - .../v1/google/cloud/aiplatform_v1/types/io.py | 130 - .../cloud/aiplatform_v1/types/job_service.py | 723 -- .../cloud/aiplatform_v1/types/job_state.py | 41 - 
.../aiplatform_v1/types/machine_resources.py | 307 - .../types/manual_batch_tuning_parameters.py | 49 - .../types/migratable_resource.py | 209 - .../aiplatform_v1/types/migration_service.py | 439 - .../google/cloud/aiplatform_v1/types/model.py | 707 -- .../aiplatform_v1/types/model_evaluation.py | 86 - .../types/model_evaluation_slice.py | 109 - .../aiplatform_v1/types/model_service.py | 583 -- .../cloud/aiplatform_v1/types/operation.py | 81 - .../aiplatform_v1/types/pipeline_service.py | 381 - .../aiplatform_v1/types/pipeline_state.py | 40 - .../aiplatform_v1/types/prediction_service.py | 103 - .../aiplatform_v1/types/specialist_pool.py | 80 - .../types/specialist_pool_service.py | 237 - .../google/cloud/aiplatform_v1/types/study.py | 609 -- .../aiplatform_v1/types/training_pipeline.py | 547 - .../types/user_action_reference.py | 64 - owl-bot-staging/v1/mypy.ini | 3 - owl-bot-staging/v1/noxfile.py | 132 - .../v1/schema/predict/instance/.coveragerc | 17 - .../v1/schema/predict/instance/MANIFEST.in | 2 - .../v1/schema/predict/instance/README.rst | 49 - .../v1/schema/predict/instance/docs/conf.py | 376 - .../v1/schema/predict/instance/docs/index.rst | 7 - .../instance/docs/instance_v1/services.rst | 4 - .../instance/docs/instance_v1/types.rst | 7 - .../v1/schema/predict/instance/__init__.py | 37 - .../v1/schema/predict/instance/py.typed | 2 - .../v1/schema/predict/instance_v1/__init__.py | 38 - .../predict/instance_v1/gapic_metadata.json | 7 - .../v1/schema/predict/instance_v1/py.typed | 2 - .../predict/instance_v1/services/__init__.py | 15 - .../predict/instance_v1/types/__init__.py | 54 - .../instance_v1/types/image_classification.py | 55 - .../types/image_object_detection.py | 55 - .../instance_v1/types/image_segmentation.py | 49 - .../instance_v1/types/text_classification.py | 48 - .../instance_v1/types/text_extraction.py | 61 - .../instance_v1/types/text_sentiment.py | 48 - .../types/video_action_recognition.py | 72 - .../instance_v1/types/video_classification.py | 72 - .../types/video_object_tracking.py | 72 - .../v1/schema/predict/instance/mypy.ini | 3 - .../v1/schema/predict/instance/noxfile.py | 132 - .../scripts/fixup_instance_v1_keywords.py | 175 - .../v1/schema/predict/instance/setup.py | 53 - .../schema/predict/instance/tests/__init__.py | 16 - .../predict/instance/tests/unit/__init__.py | 16 - .../instance/tests/unit/gapic/__init__.py | 16 - .../tests/unit/gapic/instance_v1/__init__.py | 16 - .../v1/schema/predict/params/.coveragerc | 17 - .../v1/schema/predict/params/MANIFEST.in | 2 - .../v1/schema/predict/params/README.rst | 49 - .../v1/schema/predict/params/docs/conf.py | 376 - .../v1/schema/predict/params/docs/index.rst | 7 - .../params/docs/params_v1/services.rst | 4 - .../predict/params/docs/params_v1/types.rst | 7 - .../v1/schema/predict/params/__init__.py | 31 - .../v1/schema/predict/params/py.typed | 2 - .../v1/schema/predict/params_v1/__init__.py | 32 - .../predict/params_v1/gapic_metadata.json | 7 - .../v1/schema/predict/params_v1/py.typed | 2 - .../predict/params_v1/services/__init__.py | 15 - .../predict/params_v1/types/__init__.py | 42 - .../params_v1/types/image_classification.py | 51 - .../params_v1/types/image_object_detection.py | 52 - .../params_v1/types/image_segmentation.py | 44 - .../types/video_action_recognition.py | 52 - .../params_v1/types/video_classification.py | 94 - .../params_v1/types/video_object_tracking.py | 60 - .../v1/schema/predict/params/mypy.ini | 3 - .../v1/schema/predict/params/noxfile.py | 132 - .../scripts/fixup_params_v1_keywords.py | 
175 - .../v1/schema/predict/params/setup.py | 53 - .../schema/predict/params/tests/__init__.py | 16 - .../predict/params/tests/unit/__init__.py | 16 - .../params/tests/unit/gapic/__init__.py | 16 - .../tests/unit/gapic/params_v1/__init__.py | 16 - .../v1/schema/predict/prediction/.coveragerc | 17 - .../v1/schema/predict/prediction/MANIFEST.in | 2 - .../v1/schema/predict/prediction/README.rst | 49 - .../v1/schema/predict/prediction/docs/conf.py | 376 - .../schema/predict/prediction/docs/index.rst | 7 - .../docs/prediction_v1/services.rst | 4 - .../prediction/docs/prediction_v1/types.rst | 7 - .../v1/schema/predict/prediction/__init__.py | 39 - .../v1/schema/predict/prediction/py.typed | 2 - .../schema/predict/prediction_v1/__init__.py | 40 - .../predict/prediction_v1/gapic_metadata.json | 7 - .../v1/schema/predict/prediction_v1/py.typed | 2 - .../prediction_v1/services/__init__.py | 15 - .../predict/prediction_v1/types/__init__.py | 58 - .../prediction_v1/types/classification.py | 56 - .../types/image_object_detection.py | 72 - .../prediction_v1/types/image_segmentation.py | 61 - .../types/tabular_classification.py | 51 - .../prediction_v1/types/tabular_regression.py | 52 - .../prediction_v1/types/text_extraction.py | 77 - .../prediction_v1/types/text_sentiment.py | 47 - .../types/video_action_recognition.py | 84 - .../types/video_classification.py | 102 - .../types/video_object_tracking.py | 144 - .../v1/schema/predict/prediction/mypy.ini | 3 - .../v1/schema/predict/prediction/noxfile.py | 132 - .../scripts/fixup_prediction_v1_keywords.py | 175 - .../v1/schema/predict/prediction/setup.py | 53 - .../predict/prediction/tests/__init__.py | 16 - .../predict/prediction/tests/unit/__init__.py | 16 - .../prediction/tests/unit/gapic/__init__.py | 16 - .../unit/gapic/prediction_v1/__init__.py | 16 - .../scripts/fixup_aiplatform_v1_keywords.py | 240 - .../scripts/fixup_definition_v1_keywords.py | 175 - .../v1/scripts/fixup_instance_v1_keywords.py | 175 - .../v1/scripts/fixup_params_v1_keywords.py | 175 - .../scripts/fixup_prediction_v1_keywords.py | 175 - owl-bot-staging/v1/setup.py | 53 - owl-bot-staging/v1/tests/__init__.py | 16 - owl-bot-staging/v1/tests/unit/__init__.py | 16 - .../v1/tests/unit/gapic/__init__.py | 16 - .../unit/gapic/aiplatform_v1/__init__.py | 16 - .../aiplatform_v1/test_dataset_service.py | 3983 -------- .../aiplatform_v1/test_endpoint_service.py | 2865 ------ .../gapic/aiplatform_v1/test_job_service.py | 6666 ------------ .../aiplatform_v1/test_migration_service.py | 1753 ---- .../gapic/aiplatform_v1/test_model_service.py | 4051 -------- .../aiplatform_v1/test_pipeline_service.py | 3916 ------- .../aiplatform_v1/test_prediction_service.py | 1175 --- .../test_specialist_pool_service.py | 2346 ----- .../unit/gapic/definition_v1/__init__.py | 16 - .../tests/unit/gapic/instance_v1/__init__.py | 16 - .../v1/tests/unit/gapic/params_v1/__init__.py | 16 - .../unit/gapic/prediction_v1/__init__.py | 16 - owl-bot-staging/v1beta1/.coveragerc | 17 - owl-bot-staging/v1beta1/MANIFEST.in | 2 - owl-bot-staging/v1beta1/README.rst | 49 - .../aiplatform_v1beta1/dataset_service.rst | 10 - .../aiplatform_v1beta1/endpoint_service.rst | 10 - .../featurestore_online_serving_service.rst | 6 - .../featurestore_service.rst | 10 - .../index_endpoint_service.rst | 10 - .../docs/aiplatform_v1beta1/index_service.rst | 10 - .../docs/aiplatform_v1beta1/job_service.rst | 10 - .../aiplatform_v1beta1/metadata_service.rst | 10 - .../aiplatform_v1beta1/migration_service.rst | 10 - 
.../docs/aiplatform_v1beta1/model_service.rst | 10 - .../aiplatform_v1beta1/pipeline_service.rst | 10 - .../aiplatform_v1beta1/prediction_service.rst | 6 - .../docs/aiplatform_v1beta1/services.rst | 20 - .../specialist_pool_service.rst | 10 - .../tensorboard_service.rst | 10 - .../v1beta1/docs/aiplatform_v1beta1/types.rst | 7 - .../aiplatform_v1beta1/vizier_service.rst | 10 - owl-bot-staging/v1beta1/docs/conf.py | 376 - .../docs/definition_v1beta1/services.rst | 4 - .../v1beta1/docs/definition_v1beta1/types.rst | 7 - owl-bot-staging/v1beta1/docs/index.rst | 7 - .../docs/instance_v1beta1/services.rst | 4 - .../v1beta1/docs/instance_v1beta1/types.rst | 7 - .../v1beta1/docs/params_v1beta1/services.rst | 4 - .../v1beta1/docs/params_v1beta1/types.rst | 7 - .../docs/prediction_v1beta1/services.rst | 4 - .../v1beta1/docs/prediction_v1beta1/types.rst | 7 - .../google/cloud/aiplatform/__init__.py | 875 -- .../v1beta1/google/cloud/aiplatform/py.typed | 2 - .../schema/predict/instance/__init__.py | 37 - .../v1beta1/schema/predict/instance/py.typed | 2 - .../predict/instance_v1beta1/__init__.py | 38 - .../instance_v1beta1/gapic_metadata.json | 7 - .../schema/predict/instance_v1beta1/py.typed | 2 - .../instance_v1beta1/services/__init__.py | 15 - .../instance_v1beta1/types/__init__.py | 54 - .../types/image_classification.py | 55 - .../types/image_object_detection.py | 55 - .../types/image_segmentation.py | 49 - .../types/text_classification.py | 48 - .../instance_v1beta1/types/text_extraction.py | 61 - .../instance_v1beta1/types/text_sentiment.py | 48 - .../types/video_action_recognition.py | 72 - .../types/video_classification.py | 72 - .../types/video_object_tracking.py | 72 - .../v1beta1/schema/predict/params/__init__.py | 31 - .../v1beta1/schema/predict/params/py.typed | 2 - .../schema/predict/params_v1beta1/__init__.py | 32 - .../params_v1beta1/gapic_metadata.json | 7 - .../schema/predict/params_v1beta1/py.typed | 2 - .../params_v1beta1/services/__init__.py | 15 - .../predict/params_v1beta1/types/__init__.py | 42 - .../types/image_classification.py | 51 - .../types/image_object_detection.py | 52 - .../types/image_segmentation.py | 44 - .../types/video_action_recognition.py | 52 - .../types/video_classification.py | 94 - .../types/video_object_tracking.py | 60 - .../schema/predict/prediction/__init__.py | 41 - .../schema/predict/prediction/py.typed | 2 - .../predict/prediction_v1beta1/__init__.py | 42 - .../prediction_v1beta1/gapic_metadata.json | 7 - .../predict/prediction_v1beta1/py.typed | 2 - .../prediction_v1beta1/services/__init__.py | 15 - .../prediction_v1beta1/types/__init__.py | 62 - .../types/classification.py | 56 - .../types/image_object_detection.py | 72 - .../types/image_segmentation.py | 61 - .../types/tabular_classification.py | 51 - .../types/tabular_regression.py | 52 - .../types/text_extraction.py | 77 - .../types/text_sentiment.py | 47 - .../types/time_series_forecasting.py | 40 - .../types/video_action_recognition.py | 84 - .../types/video_classification.py | 102 - .../types/video_object_tracking.py | 144 - .../schema/trainingjob/definition/__init__.py | 75 - .../schema/trainingjob/definition/py.typed | 2 - .../definition_v1beta1/__init__.py | 76 - .../definition_v1beta1/gapic_metadata.json | 7 - .../trainingjob/definition_v1beta1/py.typed | 2 - .../definition_v1beta1/services/__init__.py | 15 - .../definition_v1beta1/types/__init__.py | 98 - .../types/automl_image_classification.py | 156 - .../types/automl_image_object_detection.py | 137 - 
.../types/automl_image_segmentation.py | 131 - .../definition_v1beta1/types/automl_tables.py | 499 - .../types/automl_text_classification.py | 57 - .../types/automl_text_extraction.py | 48 - .../types/automl_text_sentiment.py | 66 - .../types/automl_time_series_forecasting.py | 477 - .../types/automl_video_action_recognition.py | 65 - .../types/automl_video_classification.py | 64 - .../types/automl_video_object_tracking.py | 67 - .../export_evaluated_data_items_config.py | 57 - .../cloud/aiplatform_v1beta1/__init__.py | 876 -- .../aiplatform_v1beta1/gapic_metadata.json | 1949 ---- .../google/cloud/aiplatform_v1beta1/py.typed | 2 - .../aiplatform_v1beta1/services/__init__.py | 15 - .../services/dataset_service/__init__.py | 22 - .../services/dataset_service/async_client.py | 1074 -- .../services/dataset_service/client.py | 1288 --- .../services/dataset_service/pagers.py | 387 - .../dataset_service/transports/__init__.py | 33 - .../dataset_service/transports/base.py | 304 - .../dataset_service/transports/grpc.py | 506 - .../transports/grpc_asyncio.py | 510 - .../services/endpoint_service/__init__.py | 22 - .../services/endpoint_service/async_client.py | 860 -- .../services/endpoint_service/client.py | 1054 -- .../services/endpoint_service/pagers.py | 141 - .../endpoint_service/transports/__init__.py | 33 - .../endpoint_service/transports/base.py | 261 - .../endpoint_service/transports/grpc.py | 430 - .../transports/grpc_asyncio.py | 434 - .../__init__.py | 22 - .../async_client.py | 325 - .../client.py | 512 - .../transports/__init__.py | 33 - .../transports/base.py | 182 - .../transports/grpc.py | 283 - .../transports/grpc_asyncio.py | 287 - .../services/featurestore_service/__init__.py | 22 - .../featurestore_service/async_client.py | 2060 ---- .../services/featurestore_service/client.py | 2265 ----- .../services/featurestore_service/pagers.py | 509 - .../transports/__init__.py | 33 - .../featurestore_service/transports/base.py | 446 - .../featurestore_service/transports/grpc.py | 803 -- .../transports/grpc_asyncio.py | 807 -- .../index_endpoint_service/__init__.py | 22 - .../index_endpoint_service/async_client.py | 817 -- .../services/index_endpoint_service/client.py | 1013 -- .../services/index_endpoint_service/pagers.py | 141 - .../transports/__init__.py | 33 - .../index_endpoint_service/transports/base.py | 261 - .../index_endpoint_service/transports/grpc.py | 433 - .../transports/grpc_asyncio.py | 437 - .../services/index_service/__init__.py | 22 - .../services/index_service/async_client.py | 633 -- .../services/index_service/client.py | 829 -- .../services/index_service/pagers.py | 141 - .../index_service/transports/__init__.py | 33 - .../services/index_service/transports/base.py | 232 - .../services/index_service/transports/grpc.py | 379 - .../index_service/transports/grpc_asyncio.py | 383 - .../services/job_service/__init__.py | 22 - .../services/job_service/async_client.py | 2615 ----- .../services/job_service/client.py | 2892 ------ .../services/job_service/pagers.py | 756 -- .../job_service/transports/__init__.py | 33 - .../services/job_service/transports/base.py | 564 -- .../services/job_service/transports/grpc.py | 1043 -- .../job_service/transports/grpc_asyncio.py | 1047 -- .../services/metadata_service/__init__.py | 22 - .../services/metadata_service/async_client.py | 2516 ----- .../services/metadata_service/client.py | 2739 ----- .../services/metadata_service/pagers.py | 633 -- .../metadata_service/transports/__init__.py | 33 - .../metadata_service/transports/base.py | 535 
- .../metadata_service/transports/grpc.py | 951 -- .../transports/grpc_asyncio.py | 955 -- .../services/migration_service/__init__.py | 22 - .../migration_service/async_client.py | 376 - .../services/migration_service/client.py | 617 -- .../services/migration_service/pagers.py | 141 - .../migration_service/transports/__init__.py | 33 - .../migration_service/transports/base.py | 189 - .../migration_service/transports/grpc.py | 303 - .../transports/grpc_asyncio.py | 307 - .../services/model_service/__init__.py | 22 - .../services/model_service/async_client.py | 1060 -- .../services/model_service/client.py | 1283 --- .../services/model_service/pagers.py | 387 - .../model_service/transports/__init__.py | 33 - .../services/model_service/transports/base.py | 305 - .../services/model_service/transports/grpc.py | 514 - .../model_service/transports/grpc_asyncio.py | 518 - .../services/pipeline_service/__init__.py | 22 - .../services/pipeline_service/async_client.py | 1069 -- .../services/pipeline_service/client.py | 1328 --- .../services/pipeline_service/pagers.py | 264 - .../pipeline_service/transports/__init__.py | 33 - .../pipeline_service/transports/base.py | 306 - .../pipeline_service/transports/grpc.py | 539 - .../transports/grpc_asyncio.py | 543 - .../services/prediction_service/__init__.py | 22 - .../prediction_service/async_client.py | 404 - .../services/prediction_service/client.py | 591 -- .../prediction_service/transports/__init__.py | 33 - .../prediction_service/transports/base.py | 182 - .../prediction_service/transports/grpc.py | 289 - .../transports/grpc_asyncio.py | 293 - .../specialist_pool_service/__init__.py | 22 - .../specialist_pool_service/async_client.py | 650 -- .../specialist_pool_service/client.py | 837 -- .../specialist_pool_service/pagers.py | 141 - .../transports/__init__.py | 33 - .../transports/base.py | 232 - .../transports/grpc.py | 382 - .../transports/grpc_asyncio.py | 386 - .../services/tensorboard_service/__init__.py | 22 - .../tensorboard_service/async_client.py | 2348 ----- .../services/tensorboard_service/client.py | 2562 ----- .../services/tensorboard_service/pagers.py | 633 -- .../transports/__init__.py | 33 - .../tensorboard_service/transports/base.py | 504 - .../tensorboard_service/transports/grpc.py | 889 -- .../transports/grpc_asyncio.py | 893 -- .../services/vizier_service/__init__.py | 22 - .../services/vizier_service/async_client.py | 1279 --- .../services/vizier_service/client.py | 1489 --- .../services/vizier_service/pagers.py | 263 - .../vizier_service/transports/__init__.py | 33 - .../vizier_service/transports/base.py | 374 - .../vizier_service/transports/grpc.py | 657 -- .../vizier_service/transports/grpc_asyncio.py | 661 -- .../aiplatform_v1beta1/types/__init__.py | 947 -- .../types/accelerator_type.py | 38 - .../aiplatform_v1beta1/types/annotation.py | 129 - .../types/annotation_spec.py | 78 - .../aiplatform_v1beta1/types/artifact.py | 151 - .../types/batch_prediction_job.py | 449 - .../types/completion_stats.py | 63 - .../cloud/aiplatform_v1beta1/types/context.py | 134 - .../aiplatform_v1beta1/types/custom_job.py | 384 - .../aiplatform_v1beta1/types/data_item.py | 101 - .../types/data_labeling_job.py | 332 - .../cloud/aiplatform_v1beta1/types/dataset.py | 220 - .../types/dataset_service.py | 542 - .../types/deployed_index_ref.py | 48 - .../types/deployed_model_ref.py | 47 - .../types/encryption_spec.py | 47 - .../aiplatform_v1beta1/types/endpoint.py | 259 - .../types/endpoint_service.py | 380 - .../aiplatform_v1beta1/types/entity_type.py | 
116 - .../cloud/aiplatform_v1beta1/types/env_var.py | 56 - .../cloud/aiplatform_v1beta1/types/event.py | 93 - .../aiplatform_v1beta1/types/execution.py | 145 - .../aiplatform_v1beta1/types/explanation.py | 644 -- .../types/explanation_metadata.py | 449 - .../cloud/aiplatform_v1beta1/types/feature.py | 150 - .../types/feature_monitoring_stats.py | 124 - .../types/feature_selector.py | 60 - .../aiplatform_v1beta1/types/featurestore.py | 125 - .../types/featurestore_monitoring.py | 79 - .../types/featurestore_online_service.py | 337 - .../types/featurestore_service.py | 1500 --- .../types/hyperparameter_tuning_job.py | 182 - .../cloud/aiplatform_v1beta1/types/index.py | 142 - .../types/index_endpoint.py | 308 - .../types/index_endpoint_service.py | 343 - .../aiplatform_v1beta1/types/index_service.py | 354 - .../cloud/aiplatform_v1beta1/types/io.py | 189 - .../aiplatform_v1beta1/types/job_service.py | 1068 -- .../aiplatform_v1beta1/types/job_state.py | 41 - .../types/lineage_subgraph.py | 62 - .../types/machine_resources.py | 308 - .../types/manual_batch_tuning_parameters.py | 49 - .../types/metadata_schema.py | 94 - .../types/metadata_service.py | 1197 --- .../types/metadata_store.py | 99 - .../types/migratable_resource.py | 209 - .../types/migration_service.py | 439 - .../cloud/aiplatform_v1beta1/types/model.py | 752 -- .../types/model_deployment_monitoring_job.py | 407 - .../types/model_evaluation.py | 133 - .../types/model_evaluation_slice.py | 109 - .../types/model_monitoring.py | 251 - .../aiplatform_v1beta1/types/model_service.py | 569 -- .../aiplatform_v1beta1/types/operation.py | 81 - .../aiplatform_v1beta1/types/pipeline_job.py | 436 - .../types/pipeline_service.py | 367 - .../types/pipeline_state.py | 40 - .../types/prediction_service.py | 214 - .../types/specialist_pool.py | 80 - .../types/specialist_pool_service.py | 237 - .../cloud/aiplatform_v1beta1/types/study.py | 748 -- .../aiplatform_v1beta1/types/tensorboard.py | 131 - .../types/tensorboard_data.py | 187 - .../types/tensorboard_experiment.py | 115 - .../types/tensorboard_run.py | 92 - .../types/tensorboard_service.py | 1048 -- .../types/tensorboard_time_series.py | 152 - .../types/training_pipeline.py | 548 - .../cloud/aiplatform_v1beta1/types/types.py | 82 - .../types/user_action_reference.py | 64 - .../cloud/aiplatform_v1beta1/types/value.py | 55 - .../types/vizier_service.py | 588 -- owl-bot-staging/v1beta1/mypy.ini | 3 - owl-bot-staging/v1beta1/noxfile.py | 132 - .../schema/predict/instance/.coveragerc | 17 - .../schema/predict/instance/MANIFEST.in | 2 - .../schema/predict/instance/README.rst | 49 - .../schema/predict/instance/docs/conf.py | 376 - .../schema/predict/instance/docs/index.rst | 7 - .../docs/instance_v1beta1/services.rst | 4 - .../instance/docs/instance_v1beta1/types.rst | 7 - .../schema/predict/instance/__init__.py | 37 - .../v1beta1/schema/predict/instance/py.typed | 2 - .../predict/instance_v1beta1/__init__.py | 38 - .../instance_v1beta1/gapic_metadata.json | 7 - .../schema/predict/instance_v1beta1/py.typed | 2 - .../instance_v1beta1/services/__init__.py | 15 - .../instance_v1beta1/types/__init__.py | 54 - .../types/image_classification.py | 55 - .../types/image_object_detection.py | 55 - .../types/image_segmentation.py | 49 - .../types/text_classification.py | 48 - .../instance_v1beta1/types/text_extraction.py | 61 - .../instance_v1beta1/types/text_sentiment.py | 48 - .../types/video_action_recognition.py | 72 - .../types/video_classification.py | 72 - .../types/video_object_tracking.py | 72 - 
.../v1beta1/schema/predict/instance/mypy.ini | 3 - .../schema/predict/instance/noxfile.py | 132 - .../fixup_instance_v1beta1_keywords.py | 175 - .../v1beta1/schema/predict/instance/setup.py | 53 - .../schema/predict/instance/tests/__init__.py | 16 - .../predict/instance/tests/unit/__init__.py | 16 - .../instance/tests/unit/gapic/__init__.py | 16 - .../unit/gapic/instance_v1beta1/__init__.py | 16 - .../v1beta1/schema/predict/params/.coveragerc | 17 - .../v1beta1/schema/predict/params/MANIFEST.in | 2 - .../v1beta1/schema/predict/params/README.rst | 49 - .../schema/predict/params/docs/conf.py | 376 - .../schema/predict/params/docs/index.rst | 7 - .../params/docs/params_v1beta1/services.rst | 4 - .../params/docs/params_v1beta1/types.rst | 7 - .../v1beta1/schema/predict/params/__init__.py | 31 - .../v1beta1/schema/predict/params/py.typed | 2 - .../schema/predict/params_v1beta1/__init__.py | 32 - .../params_v1beta1/gapic_metadata.json | 7 - .../schema/predict/params_v1beta1/py.typed | 2 - .../params_v1beta1/services/__init__.py | 15 - .../predict/params_v1beta1/types/__init__.py | 42 - .../types/image_classification.py | 51 - .../types/image_object_detection.py | 52 - .../types/image_segmentation.py | 44 - .../types/video_action_recognition.py | 52 - .../types/video_classification.py | 94 - .../types/video_object_tracking.py | 60 - .../v1beta1/schema/predict/params/mypy.ini | 3 - .../v1beta1/schema/predict/params/noxfile.py | 132 - .../scripts/fixup_params_v1beta1_keywords.py | 175 - .../v1beta1/schema/predict/params/setup.py | 53 - .../schema/predict/params/tests/__init__.py | 16 - .../predict/params/tests/unit/__init__.py | 16 - .../params/tests/unit/gapic/__init__.py | 16 - .../unit/gapic/params_v1beta1/__init__.py | 16 - .../schema/predict/prediction/.coveragerc | 17 - .../schema/predict/prediction/MANIFEST.in | 2 - .../schema/predict/prediction/README.rst | 49 - .../schema/predict/prediction/docs/conf.py | 376 - .../schema/predict/prediction/docs/index.rst | 7 - .../docs/prediction_v1beta1/services.rst | 4 - .../docs/prediction_v1beta1/types.rst | 7 - .../schema/predict/prediction/__init__.py | 41 - .../schema/predict/prediction/py.typed | 2 - .../predict/prediction_v1beta1/__init__.py | 42 - .../prediction_v1beta1/gapic_metadata.json | 7 - .../predict/prediction_v1beta1/py.typed | 2 - .../prediction_v1beta1/services/__init__.py | 15 - .../prediction_v1beta1/types/__init__.py | 62 - .../types/classification.py | 56 - .../types/image_object_detection.py | 72 - .../types/image_segmentation.py | 61 - .../types/tabular_classification.py | 51 - .../types/tabular_regression.py | 52 - .../types/text_extraction.py | 77 - .../types/text_sentiment.py | 47 - .../types/time_series_forecasting.py | 40 - .../types/video_action_recognition.py | 84 - .../types/video_classification.py | 102 - .../types/video_object_tracking.py | 144 - .../schema/predict/prediction/mypy.ini | 3 - .../schema/predict/prediction/noxfile.py | 132 - .../fixup_prediction_v1beta1_keywords.py | 175 - .../schema/predict/prediction/setup.py | 53 - .../predict/prediction/tests/__init__.py | 16 - .../predict/prediction/tests/unit/__init__.py | 16 - .../prediction/tests/unit/gapic/__init__.py | 16 - .../unit/gapic/prediction_v1beta1/__init__.py | 16 - .../fixup_aiplatform_v1beta1_keywords.py | 348 - .../fixup_definition_v1beta1_keywords.py | 175 - .../fixup_instance_v1beta1_keywords.py | 175 - .../scripts/fixup_params_v1beta1_keywords.py | 175 - .../fixup_prediction_v1beta1_keywords.py | 175 - owl-bot-staging/v1beta1/setup.py | 53 - 
 owl-bot-staging/v1beta1/tests/__init__.py | 16 -
 .../v1beta1/tests/unit/__init__.py | 16 -
 .../v1beta1/tests/unit/gapic/__init__.py | 16 -
 .../unit/gapic/aiplatform_v1beta1/__init__.py | 16 -
 .../test_dataset_service.py | 3983 --------
 .../test_endpoint_service.py | 2868 ------
 ...est_featurestore_online_serving_service.py | 1391 ---
 .../test_featurestore_service.py | 6489 ------------
 .../test_index_endpoint_service.py | 2859 ------
 .../aiplatform_v1beta1/test_index_service.py | 2370 -----
 .../aiplatform_v1beta1/test_job_service.py | 9019 -----------------
 .../test_metadata_service.py | 8381 ---------------
 .../test_migration_service.py | 1753 ----
 .../aiplatform_v1beta1/test_model_service.py | 4053 --------
 .../test_pipeline_service.py | 3918 -------
 .../test_prediction_service.py | 1426 ---
 .../test_specialist_pool_service.py | 2346 -----
 .../test_tensorboard_service.py | 7781 --------------
 .../aiplatform_v1beta1/test_vizier_service.py | 4599 ---------
 .../unit/gapic/definition_v1beta1/__init__.py | 16 -
 .../unit/gapic/instance_v1beta1/__init__.py | 16 -
 .../unit/gapic/params_v1beta1/__init__.py | 16 -
 .../unit/gapic/prediction_v1beta1/__init__.py | 16 -
 .../types/image_classification.py | 4 +-
 .../types/image_object_detection.py | 4 +-
 .../instance_v1beta1/types/text_extraction.py | 2 +-
 .../types/video_classification.py | 23 +-
 .../types/classification.py | 3 +-
 .../gapic/aiplatform_v1/test_job_service.py | 62 +-
 .../aiplatform_v1/test_migration_service.py | 28 +-
 .../aiplatform_v1/test_pipeline_service.py | 1512 ++-
 .../aiplatform_v1/test_prediction_service.py | 4 +-
 .../test_migration_service.py | 40 +-
 769 files changed, 3629 insertions(+), 242046 deletions(-)
 rename {owl-bot-staging/v1/google => google}/cloud/aiplatform_v1/types/artifact.py (79%)
 rename {owl-bot-staging/v1/google => google}/cloud/aiplatform_v1/types/context.py (78%)
 rename {owl-bot-staging/v1/google => google}/cloud/aiplatform_v1/types/execution.py (81%)
 rename {owl-bot-staging/v1/google => google}/cloud/aiplatform_v1/types/pipeline_job.py (73%)
 rename {owl-bot-staging/v1/google => google}/cloud/aiplatform_v1/types/value.py (69%)
 delete mode 100644 owl-bot-staging/v1/.coveragerc
 delete mode 100644 owl-bot-staging/v1/MANIFEST.in
 delete mode 100644 owl-bot-staging/v1/README.rst
 delete mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/dataset_service.rst
 delete mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/endpoint_service.rst
 delete mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/job_service.rst
 delete mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/migration_service.rst
 delete mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/model_service.rst
 delete mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/pipeline_service.rst
 delete mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/prediction_service.rst
 delete mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/services.rst
 delete mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/specialist_pool_service.rst
 delete mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/types.rst
 delete mode 100644 owl-bot-staging/v1/docs/conf.py
 delete mode 100644 owl-bot-staging/v1/docs/definition_v1/services.rst
 delete mode 100644 owl-bot-staging/v1/docs/definition_v1/types.rst
 delete mode 100644 owl-bot-staging/v1/docs/index.rst
 delete mode 100644 owl-bot-staging/v1/docs/instance_v1/services.rst
 delete mode 100644 owl-bot-staging/v1/docs/instance_v1/types.rst
 delete mode 100644 owl-bot-staging/v1/docs/params_v1/services.rst
 delete mode 100644
owl-bot-staging/v1/docs/params_v1/types.rst delete mode 100644 owl-bot-staging/v1/docs/prediction_v1/services.rst delete mode 100644 owl-bot-staging/v1/docs/prediction_v1/types.rst delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/py.typed delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/py.typed delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/py.typed delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/services/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/py.typed delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/py.typed delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/services/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py delete mode 100644 
owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/py.typed delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/py.typed delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/services/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/py.typed delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/py.typed delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/services/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_classification.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_segmentation.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_classification.py delete mode 100644 
owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_extraction.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_sentiment.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_action_recognition.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_classification.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_object_tracking.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/export_evaluated_data_items_config.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/py.typed delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/pagers.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/pagers.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/base.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py delete 
mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/pagers.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/base.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/pagers.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/base.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py delete mode 100644 
owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/pagers.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/accelerator_type.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation_spec.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/batch_prediction_job.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/completion_stats.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/custom_job.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_item.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_labeling_job.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset_service.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/deployed_model_ref.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/encryption_spec.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint_service.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/env_var.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/io.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_service.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_state.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/machine_resources.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/manual_batch_tuning_parameters.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/migratable_resource.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/migration_service.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation_slice.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_service.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/operation.py delete mode 100644 
owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_service.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_state.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/prediction_service.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool_service.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/study.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/training_pipeline.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/user_action_reference.py delete mode 100644 owl-bot-staging/v1/mypy.ini delete mode 100644 owl-bot-staging/v1/noxfile.py delete mode 100644 owl-bot-staging/v1/schema/predict/instance/.coveragerc delete mode 100644 owl-bot-staging/v1/schema/predict/instance/MANIFEST.in delete mode 100644 owl-bot-staging/v1/schema/predict/instance/README.rst delete mode 100644 owl-bot-staging/v1/schema/predict/instance/docs/conf.py delete mode 100644 owl-bot-staging/v1/schema/predict/instance/docs/index.rst delete mode 100644 owl-bot-staging/v1/schema/predict/instance/docs/instance_v1/services.rst delete mode 100644 owl-bot-staging/v1/schema/predict/instance/docs/instance_v1/types.rst delete mode 100644 owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py delete mode 100644 owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance/py.typed delete mode 100644 owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py delete mode 100644 owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/py.typed delete mode 100644 owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/services/__init__.py delete mode 100644 owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py delete mode 100644 owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py delete mode 100644 owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py delete mode 100644 owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py delete mode 100644 owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py delete mode 100644 owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py delete mode 100644 owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py delete mode 100644 owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py delete mode 100644 owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py delete mode 100644 
owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py delete mode 100644 owl-bot-staging/v1/schema/predict/instance/mypy.ini delete mode 100644 owl-bot-staging/v1/schema/predict/instance/noxfile.py delete mode 100644 owl-bot-staging/v1/schema/predict/instance/scripts/fixup_instance_v1_keywords.py delete mode 100644 owl-bot-staging/v1/schema/predict/instance/setup.py delete mode 100644 owl-bot-staging/v1/schema/predict/instance/tests/__init__.py delete mode 100644 owl-bot-staging/v1/schema/predict/instance/tests/unit/__init__.py delete mode 100644 owl-bot-staging/v1/schema/predict/instance/tests/unit/gapic/__init__.py delete mode 100644 owl-bot-staging/v1/schema/predict/instance/tests/unit/gapic/instance_v1/__init__.py delete mode 100644 owl-bot-staging/v1/schema/predict/params/.coveragerc delete mode 100644 owl-bot-staging/v1/schema/predict/params/MANIFEST.in delete mode 100644 owl-bot-staging/v1/schema/predict/params/README.rst delete mode 100644 owl-bot-staging/v1/schema/predict/params/docs/conf.py delete mode 100644 owl-bot-staging/v1/schema/predict/params/docs/index.rst delete mode 100644 owl-bot-staging/v1/schema/predict/params/docs/params_v1/services.rst delete mode 100644 owl-bot-staging/v1/schema/predict/params/docs/params_v1/types.rst delete mode 100644 owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params/__init__.py delete mode 100644 owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params/py.typed delete mode 100644 owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py delete mode 100644 owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/py.typed delete mode 100644 owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/services/__init__.py delete mode 100644 owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py delete mode 100644 owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py delete mode 100644 owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py delete mode 100644 owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py delete mode 100644 owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py delete mode 100644 owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py delete mode 100644 owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py delete mode 100644 owl-bot-staging/v1/schema/predict/params/mypy.ini delete mode 100644 owl-bot-staging/v1/schema/predict/params/noxfile.py delete mode 100644 owl-bot-staging/v1/schema/predict/params/scripts/fixup_params_v1_keywords.py delete mode 100644 owl-bot-staging/v1/schema/predict/params/setup.py delete mode 100644 owl-bot-staging/v1/schema/predict/params/tests/__init__.py delete mode 100644 owl-bot-staging/v1/schema/predict/params/tests/unit/__init__.py delete mode 
100644 owl-bot-staging/v1/schema/predict/params/tests/unit/gapic/__init__.py delete mode 100644 owl-bot-staging/v1/schema/predict/params/tests/unit/gapic/params_v1/__init__.py delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/.coveragerc delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/MANIFEST.in delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/README.rst delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/docs/conf.py delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/docs/index.rst delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/docs/prediction_v1/services.rst delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/docs/prediction_v1/types.rst delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction/py.typed delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/py.typed delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/services/__init__.py delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/mypy.ini delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/noxfile.py delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/scripts/fixup_prediction_v1_keywords.py delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/setup.py delete mode 100644 
owl-bot-staging/v1/schema/predict/prediction/tests/__init__.py delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/tests/unit/__init__.py delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/tests/unit/gapic/__init__.py delete mode 100644 owl-bot-staging/v1/schema/predict/prediction/tests/unit/gapic/prediction_v1/__init__.py delete mode 100644 owl-bot-staging/v1/scripts/fixup_aiplatform_v1_keywords.py delete mode 100644 owl-bot-staging/v1/scripts/fixup_definition_v1_keywords.py delete mode 100644 owl-bot-staging/v1/scripts/fixup_instance_v1_keywords.py delete mode 100644 owl-bot-staging/v1/scripts/fixup_params_v1_keywords.py delete mode 100644 owl-bot-staging/v1/scripts/fixup_prediction_v1_keywords.py delete mode 100644 owl-bot-staging/v1/setup.py delete mode 100644 owl-bot-staging/v1/tests/__init__.py delete mode 100644 owl-bot-staging/v1/tests/unit/__init__.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/__init__.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/__init__.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_dataset_service.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_job_service.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_migration_service.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_model_service.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_prediction_service.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/definition_v1/__init__.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/instance_v1/__init__.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/params_v1/__init__.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/prediction_v1/__init__.py delete mode 100644 owl-bot-staging/v1beta1/.coveragerc delete mode 100644 owl-bot-staging/v1beta1/MANIFEST.in delete mode 100644 owl-bot-staging/v1beta1/README.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/dataset_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/endpoint_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_online_serving_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_endpoint_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/job_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/metadata_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/migration_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/model_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/pipeline_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/prediction_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/services.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/specialist_pool_service.rst delete mode 100644 
owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/tensorboard_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/types.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/vizier_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/conf.py delete mode 100644 owl-bot-staging/v1beta1/docs/definition_v1beta1/services.rst delete mode 100644 owl-bot-staging/v1beta1/docs/definition_v1beta1/types.rst delete mode 100644 owl-bot-staging/v1beta1/docs/index.rst delete mode 100644 owl-bot-staging/v1beta1/docs/instance_v1beta1/services.rst delete mode 100644 owl-bot-staging/v1beta1/docs/instance_v1beta1/types.rst delete mode 100644 owl-bot-staging/v1beta1/docs/params_v1beta1/services.rst delete mode 100644 owl-bot-staging/v1beta1/docs/params_v1beta1/types.rst delete mode 100644 owl-bot-staging/v1beta1/docs/prediction_v1beta1/services.rst delete mode 100644 owl-bot-staging/v1beta1/docs/prediction_v1beta1/types.rst delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/py.typed delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/services/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py delete mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/services/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/services/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py delete mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/py.typed delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/py.typed delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/services/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/py.typed delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py delete mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/pagers.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/pagers.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/pagers.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py delete mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/pagers.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/pagers.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/pagers.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py delete mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/pagers.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/pagers.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/pagers.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/__init__.py delete mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/pagers.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/pagers.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/pagers.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/accelerator_type.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation.py delete mode 
100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation_spec.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/artifact.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/completion_stats.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/context.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/custom_job.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_item.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_index_ref.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_model_ref.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/encryption_spec.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/entity_type.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/env_var.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/event.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/execution.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_monitoring_stats.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_selector.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_monitoring.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/io.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_state.py delete mode 
100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/lineage_subgraph.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/machine_resources.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/manual_batch_tuning_parameters.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_schema.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_store.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migratable_resource.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migration_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_monitoring.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/operation.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_job.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_state.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/prediction_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/study.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_data.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_experiment.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_run.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_time_series.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/training_pipeline.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/types.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/user_action_reference.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/value.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/vizier_service.py delete mode 100644 owl-bot-staging/v1beta1/mypy.ini delete mode 100644 owl-bot-staging/v1beta1/noxfile.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/.coveragerc delete mode 100644 
owl-bot-staging/v1beta1/schema/predict/instance/MANIFEST.in delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/README.rst delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/docs/conf.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/docs/index.rst delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/docs/instance_v1beta1/services.rst delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/docs/instance_v1beta1/types.rst delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/services/__init__.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/mypy.ini delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/noxfile.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/scripts/fixup_instance_v1beta1_keywords.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/setup.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/tests/__init__.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/tests/unit/__init__.py delete mode 100644 
owl-bot-staging/v1beta1/schema/predict/instance/tests/unit/gapic/__init__.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/instance/tests/unit/gapic/instance_v1beta1/__init__.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/params/.coveragerc delete mode 100644 owl-bot-staging/v1beta1/schema/predict/params/MANIFEST.in delete mode 100644 owl-bot-staging/v1beta1/schema/predict/params/README.rst delete mode 100644 owl-bot-staging/v1beta1/schema/predict/params/docs/conf.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/params/docs/index.rst delete mode 100644 owl-bot-staging/v1beta1/schema/predict/params/docs/params_v1beta1/services.rst delete mode 100644 owl-bot-staging/v1beta1/schema/predict/params/docs/params_v1beta1/types.rst delete mode 100644 owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed delete mode 100644 owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed delete mode 100644 owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/services/__init__.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/params/mypy.ini delete mode 100644 owl-bot-staging/v1beta1/schema/predict/params/noxfile.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/params/scripts/fixup_params_v1beta1_keywords.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/params/setup.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/params/tests/__init__.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/params/tests/unit/__init__.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/params/tests/unit/gapic/__init__.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/params/tests/unit/gapic/params_v1beta1/__init__.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/.coveragerc delete mode 100644 
owl-bot-staging/v1beta1/schema/predict/prediction/MANIFEST.in delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/README.rst delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/docs/conf.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/docs/index.rst delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/docs/prediction_v1beta1/services.rst delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/docs/prediction_v1beta1/types.rst delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/services/__init__.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/mypy.ini delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/noxfile.py delete mode 100644 
owl-bot-staging/v1beta1/schema/predict/prediction/scripts/fixup_prediction_v1beta1_keywords.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/setup.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/tests/__init__.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/tests/unit/__init__.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/tests/unit/gapic/__init__.py delete mode 100644 owl-bot-staging/v1beta1/schema/predict/prediction/tests/unit/gapic/prediction_v1beta1/__init__.py delete mode 100644 owl-bot-staging/v1beta1/scripts/fixup_aiplatform_v1beta1_keywords.py delete mode 100644 owl-bot-staging/v1beta1/scripts/fixup_definition_v1beta1_keywords.py delete mode 100644 owl-bot-staging/v1beta1/scripts/fixup_instance_v1beta1_keywords.py delete mode 100644 owl-bot-staging/v1beta1/scripts/fixup_params_v1beta1_keywords.py delete mode 100644 owl-bot-staging/v1beta1/scripts/fixup_prediction_v1beta1_keywords.py delete mode 100644 owl-bot-staging/v1beta1/setup.py delete mode 100644 owl-bot-staging/v1beta1/tests/__init__.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/__init__.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/__init__.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/definition_v1beta1/__init__.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/instance_v1beta1/__init__.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/params_v1beta1/__init__.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/prediction_v1beta1/__init__.py
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py
index 4c2154dd90..fc5c3b587a 100644
--- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py
+++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py
@@ -26,8 +26,8 @@ class ImageClassificationPredictionInstance(proto.Message):
     r"""Prediction input format for Image Classification.
     Attributes:
         content (str):
-            The image bytes or GCS URI to make the
-            prediction on.
+            The image bytes or Cloud Storage URI to make
+            the prediction on.
         mime_type (str):
             The MIME type of the content of the image.
             Only the images in below listed MIME types are
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py
index d7b41623aa..e0293472e7 100644
--- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py
+++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py
@@ -26,8 +26,8 @@ class ImageObjectDetectionPredictionInstance(proto.Message):
     r"""Prediction input format for Image Object Detection.
     Attributes:
         content (str):
-            The image bytes or GCS URI to make the
-            prediction on.
+            The image bytes or Cloud Storage URI to make
+            the prediction on.
         mime_type (str):
             The MIME type of the content of the image.
             Only the images in below listed MIME types are
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py
index 9c393faa73..e3f7723171 100644
--- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py
+++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py
@@ -36,7 +36,7 @@ class TextExtractionPredictionInstance(proto.Message):
             If a key is provided, the batch prediction
             result will by mapped to this key. If omitted,
             then the batch prediction result will contain
-            the entire input instance. AI Platform will not
+            the entire input instance. Vertex AI will not
             check if keys in the request are duplicates, so
             it is up to the caller to ensure the keys are
             unique.
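The rename above is terminology only; the message shape is unchanged. For readers who have not used these generated schema types, a minimal sketch of constructing such an instance, assuming the module layout shown in this diff (the gs:// path is a hypothetical placeholder):

    from google.cloud.aiplatform.v1beta1.schema.predict import instance_v1beta1

    # Point the instance at a Cloud Storage object instead of inlining
    # the raw image bytes. Bucket and object names are made up.
    instance = instance_v1beta1.ImageClassificationPredictionInstance(
        content="gs://example-bucket/images/daisy.jpg",
        mime_type="image/jpeg",
    )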
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py
index 35ad2ca0ee..dd49c20661 100644
--- a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py
+++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py
@@ -37,16 +37,16 @@ class VideoClassificationPredictionParams(proto.Message):
             10,000.
         segment_classification (bool):
             Set to true to request segment-level
-            classification. AI Platform returns labels and
+            classification. Vertex AI returns labels and
             their confidence scores for the entire time
             segment of the video that user specified in the
             input instance. Default value is true
         shot_classification (bool):
             Set to true to request shot-level
-            classification. AI Platform determines the
+            classification. Vertex AI determines the
             boundaries for each camera shot in the entire
             time segment of the video that user specified in
-            the input instance. AI Platform then returns
+            the input instance. Vertex AI then returns
             labels and their confidence scores for each
             detected shot, along with the start and end time
             of the shot.
@@ -57,15 +57,14 @@ class VideoClassificationPredictionParams(proto.Message):
             Default value is false
         one_sec_interval_classification (bool):
             Set to true to request classification for a
-            video at one-second intervals. AI Platform
-            returns labels and their confidence scores for
-            each second of the entire time segment of the
-            video that user specified in the input WARNING:
-            Model evaluation is not done for this
-            classification type, the quality of it depends
-            on the training data, but there are no metrics
-            provided to describe that quality. Default value
-            is false
+            video at one-second intervals. Vertex AI returns
+            labels and their confidence scores for each
+            second of the entire time segment of the video
+            that user specified in the input WARNING: Model
+            evaluation is not done for this classification
+            type, the quality of it depends on the training
+            data, but there are no metrics provided to
+            describe that quality. Default value is false
     """

     confidence_threshold = proto.Field(proto.FLOAT, number=1,)
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py
index d37236a5cc..6f70df673f 100644
--- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py
+++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py
@@ -27,8 +27,7 @@ class ClassificationPredictionResult(proto.Message):
     Attributes:
         ids (Sequence[int]):
             The resource IDs of the AnnotationSpecs that
-            had been identified, ordered by the confidence
-            score descendingly.
+            had been identified.
         display_names (Sequence[str]):
             The display names of the AnnotationSpecs that
             had been identified, order matches the IDs.
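The three classification booleans above combine freely on a single params message. A minimal sketch under the same generated-module layout as this diff (the 0.5 threshold is illustrative only):

    from google.cloud.aiplatform.v1beta1.schema.predict import params_v1beta1

    # Request shot-level and one-second-interval labels in addition to
    # the default segment-level classification.
    params = params_v1beta1.VideoClassificationPredictionParams(
        confidence_threshold=0.5,
        segment_classification=True,
        shot_classification=True,
        one_sec_interval_classification=True,
    )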
""" class Transformation(proto.Message): @@ -401,6 +404,7 @@ class TextArrayTransformation(proto.Message): number=10, message=gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig, ) + additional_experiments = proto.RepeatedField(proto.STRING, number=11,) class AutoMlTablesMetadata(proto.Message): diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py index 1d3f8d0e3f..7c586c3fd5 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py @@ -167,6 +167,9 @@ class AutoMlForecastingInputs(proto.Message): - "ignore-validation" - ignore the results of the validation and continue + additional_experiments (Sequence[str]): + Additional experiment flags for the time + series forcasting training. """ class Transformation(proto.Message): @@ -376,6 +379,7 @@ class Granularity(proto.Message): ) quantiles = proto.RepeatedField(proto.DOUBLE, number=16,) validation_options = proto.Field(proto.STRING, number=17,) + additional_experiments = proto.RepeatedField(proto.STRING, number=25,) class AutoMlForecastingMetadata(proto.Message): diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py index 4132a92bdc..9404e18964 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py @@ -48,6 +48,8 @@ class ModelType(proto.Enum): MODEL_TYPE_UNSPECIFIED = 0 CLOUD = 1 MOBILE_VERSATILE_1 = 2 + MOBILE_JETSON_VERSATILE_1 = 3 + MOBILE_CORAL_VERSATILE_1 = 4 model_type = proto.Field(proto.ENUM, number=1, enum=ModelType,) diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py index 15046f72c1..861a4b3e8a 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py @@ -33,7 +33,6 @@ class ExportEvaluatedDataItemsConfig(proto.Message): If not specified, then results are exported to the following auto-created BigQuery table: - :export_evaluated_examples__.evaluated_examples override_existing_table (bool): If true and an export destination is diff --git a/google/cloud/aiplatform_v1/__init__.py b/google/cloud/aiplatform_v1/__init__.py index d765cc599d..f5207bb46d 100644 --- a/google/cloud/aiplatform_v1/__init__.py +++ b/google/cloud/aiplatform_v1/__init__.py @@ -34,8 +34,10 @@ from .types.accelerator_type import AcceleratorType from .types.annotation import Annotation from .types.annotation_spec import AnnotationSpec +from .types.artifact import Artifact from .types.batch_prediction_job import BatchPredictionJob from .types.completion_stats import CompletionStats +from .types.context import Context from .types.custom_job import 
ContainerSpec from .types.custom_job import CustomJob from .types.custom_job import CustomJobSpec @@ -86,6 +88,7 @@ from .types.endpoint_service import UndeployModelResponse from .types.endpoint_service import UpdateEndpointRequest from .types.env_var import EnvVar +from .types.execution import Execution from .types.hyperparameter_tuning_job import HyperparameterTuningJob from .types.io import BigQueryDestination from .types.io import BigQuerySource @@ -118,6 +121,7 @@ from .types.job_service import ListHyperparameterTuningJobsResponse from .types.job_state import JobState from .types.machine_resources import AutomaticResources +from .types.machine_resources import AutoscalingMetricSpec from .types.machine_resources import BatchDedicatedResources from .types.machine_resources import DedicatedResources from .types.machine_resources import DiskSpec @@ -157,10 +161,20 @@ from .types.model_service import UploadModelResponse from .types.operation import DeleteOperationMetadata from .types.operation import GenericOperationMetadata +from .types.pipeline_job import PipelineJob +from .types.pipeline_job import PipelineJobDetail +from .types.pipeline_job import PipelineTaskDetail +from .types.pipeline_job import PipelineTaskExecutorDetail +from .types.pipeline_service import CancelPipelineJobRequest from .types.pipeline_service import CancelTrainingPipelineRequest +from .types.pipeline_service import CreatePipelineJobRequest from .types.pipeline_service import CreateTrainingPipelineRequest +from .types.pipeline_service import DeletePipelineJobRequest from .types.pipeline_service import DeleteTrainingPipelineRequest +from .types.pipeline_service import GetPipelineJobRequest from .types.pipeline_service import GetTrainingPipelineRequest +from .types.pipeline_service import ListPipelineJobsRequest +from .types.pipeline_service import ListPipelineJobsResponse from .types.pipeline_service import ListTrainingPipelinesRequest from .types.pipeline_service import ListTrainingPipelinesResponse from .types.pipeline_state import PipelineState @@ -185,6 +199,7 @@ from .types.training_pipeline import TimestampSplit from .types.training_pipeline import TrainingPipeline from .types.user_action_reference import UserActionReference +from .types.value import Value __all__ = ( "DatasetServiceAsyncClient", @@ -199,7 +214,9 @@ "ActiveLearningConfig", "Annotation", "AnnotationSpec", + "Artifact", "AutomaticResources", + "AutoscalingMetricSpec", "BatchDedicatedResources", "BatchMigrateResourcesOperationMetadata", "BatchMigrateResourcesRequest", @@ -211,10 +228,12 @@ "CancelCustomJobRequest", "CancelDataLabelingJobRequest", "CancelHyperparameterTuningJobRequest", + "CancelPipelineJobRequest", "CancelTrainingPipelineRequest", "CompletionStats", "ContainerRegistryDestination", "ContainerSpec", + "Context", "CreateBatchPredictionJobRequest", "CreateCustomJobRequest", "CreateDataLabelingJobRequest", @@ -223,6 +242,7 @@ "CreateEndpointOperationMetadata", "CreateEndpointRequest", "CreateHyperparameterTuningJobRequest", + "CreatePipelineJobRequest", "CreateSpecialistPoolOperationMetadata", "CreateSpecialistPoolRequest", "CreateTrainingPipelineRequest", @@ -241,6 +261,7 @@ "DeleteHyperparameterTuningJobRequest", "DeleteModelRequest", "DeleteOperationMetadata", + "DeletePipelineJobRequest", "DeleteSpecialistPoolRequest", "DeleteTrainingPipelineRequest", "DeployModelOperationMetadata", @@ -253,6 +274,7 @@ "Endpoint", "EndpointServiceClient", "EnvVar", + "Execution", "ExportDataConfig", "ExportDataOperationMetadata", 
"ExportDataRequest", @@ -275,6 +297,7 @@ "GetModelEvaluationRequest", "GetModelEvaluationSliceRequest", "GetModelRequest", + "GetPipelineJobRequest", "GetSpecialistPoolRequest", "GetTrainingPipelineRequest", "HyperparameterTuningJob", @@ -307,6 +330,8 @@ "ListModelEvaluationsResponse", "ListModelsRequest", "ListModelsResponse", + "ListPipelineJobsRequest", + "ListPipelineJobsResponse", "ListSpecialistPoolsRequest", "ListSpecialistPoolsResponse", "ListTrainingPipelinesRequest", @@ -323,8 +348,12 @@ "ModelEvaluation", "ModelEvaluationSlice", "ModelServiceClient", + "PipelineJob", + "PipelineJobDetail", "PipelineServiceClient", "PipelineState", + "PipelineTaskDetail", + "PipelineTaskExecutorDetail", "Port", "PredefinedSplit", "PredictRequest", @@ -356,5 +385,6 @@ "UploadModelRequest", "UploadModelResponse", "UserActionReference", + "Value", "WorkerPoolSpec", ) diff --git a/google/cloud/aiplatform_v1/gapic_metadata.json b/google/cloud/aiplatform_v1/gapic_metadata.json index 0abed0fd70..4b22c95676 100644 --- a/google/cloud/aiplatform_v1/gapic_metadata.json +++ b/google/cloud/aiplatform_v1/gapic_metadata.json @@ -570,26 +570,51 @@ "grpc": { "libraryClient": "PipelineServiceClient", "rpcs": { + "CancelPipelineJob": { + "methods": [ + "cancel_pipeline_job" + ] + }, "CancelTrainingPipeline": { "methods": [ "cancel_training_pipeline" ] }, + "CreatePipelineJob": { + "methods": [ + "create_pipeline_job" + ] + }, "CreateTrainingPipeline": { "methods": [ "create_training_pipeline" ] }, + "DeletePipelineJob": { + "methods": [ + "delete_pipeline_job" + ] + }, "DeleteTrainingPipeline": { "methods": [ "delete_training_pipeline" ] }, + "GetPipelineJob": { + "methods": [ + "get_pipeline_job" + ] + }, "GetTrainingPipeline": { "methods": [ "get_training_pipeline" ] }, + "ListPipelineJobs": { + "methods": [ + "list_pipeline_jobs" + ] + }, "ListTrainingPipelines": { "methods": [ "list_training_pipelines" @@ -600,26 +625,51 @@ "grpc-async": { "libraryClient": "PipelineServiceAsyncClient", "rpcs": { + "CancelPipelineJob": { + "methods": [ + "cancel_pipeline_job" + ] + }, "CancelTrainingPipeline": { "methods": [ "cancel_training_pipeline" ] }, + "CreatePipelineJob": { + "methods": [ + "create_pipeline_job" + ] + }, "CreateTrainingPipeline": { "methods": [ "create_training_pipeline" ] }, + "DeletePipelineJob": { + "methods": [ + "delete_pipeline_job" + ] + }, "DeleteTrainingPipeline": { "methods": [ "delete_training_pipeline" ] }, + "GetPipelineJob": { + "methods": [ + "get_pipeline_job" + ] + }, "GetTrainingPipeline": { "methods": [ "get_training_pipeline" ] }, + "ListPipelineJobs": { + "methods": [ + "list_pipeline_jobs" + ] + }, "ListTrainingPipelines": { "methods": [ "list_training_pipelines" diff --git a/google/cloud/aiplatform_v1/services/job_service/async_client.py b/google/cloud/aiplatform_v1/services/job_service/async_client.py index c3a2e84cd1..1af807d346 100644 --- a/google/cloud/aiplatform_v1/services/job_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/job_service/async_client.py @@ -87,6 +87,8 @@ class JobServiceAsyncClient: ) model_path = staticmethod(JobServiceClient.model_path) parse_model_path = staticmethod(JobServiceClient.parse_model_path) + network_path = staticmethod(JobServiceClient.network_path) + parse_network_path = staticmethod(JobServiceClient.parse_network_path) trial_path = staticmethod(JobServiceClient.trial_path) parse_trial_path = staticmethod(JobServiceClient.parse_trial_path) common_billing_account_path = staticmethod( diff --git 
a/google/cloud/aiplatform_v1/services/job_service/client.py b/google/cloud/aiplatform_v1/services/job_service/client.py index 687a832212..a6273cd93e 100644 --- a/google/cloud/aiplatform_v1/services/job_service/client.py +++ b/google/cloud/aiplatform_v1/services/job_service/client.py @@ -285,6 +285,21 @@ def parse_model_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def network_path(project: str, network: str,) -> str: + """Returns a fully-qualified network string.""" + return "projects/{project}/global/networks/{network}".format( + project=project, network=network, + ) + + @staticmethod + def parse_network_path(path: str) -> Dict[str, str]: + """Parses a network path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/global/networks/(?P<network>.+?)$", path + ) + return m.groupdict() if m else {} + @staticmethod def trial_path(project: str, location: str, study: str, trial: str,) -> str: """Returns a fully-qualified trial string.""" diff --git a/google/cloud/aiplatform_v1/services/migration_service/client.py b/google/cloud/aiplatform_v1/services/migration_service/client.py index fb8d9a9a4d..162818246f 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1/services/migration_service/client.py @@ -195,32 +195,32 @@ def parse_dataset_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def dataset_path(project: str, dataset: str,) -> str: + def dataset_path(project: str, location: str, dataset: str,) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format( - project=project, dataset=dataset, + return "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path) + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", + path, + ) return m.groupdict() if m else {} @staticmethod - def dataset_path(project: str, location: str, dataset: str,) -> str: + def dataset_path(project: str, dataset: str,) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( - project=project, location=location, dataset=dataset, + return "projects/{project}/datasets/{dataset}".format( + project=project, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", - path, - ) + m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path) return m.groupdict() if m else {} @staticmethod diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py b/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py index 5da3ad8022..5608b0bbd6 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py @@ -32,6 +32,8 @@ from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import model from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_job as
gca_pipeline_job from google.cloud.aiplatform_v1.types import pipeline_service from google.cloud.aiplatform_v1.types import pipeline_state from google.cloud.aiplatform_v1.types import training_pipeline @@ -57,10 +59,24 @@ class PipelineServiceAsyncClient: DEFAULT_ENDPOINT = PipelineServiceClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = PipelineServiceClient.DEFAULT_MTLS_ENDPOINT + artifact_path = staticmethod(PipelineServiceClient.artifact_path) + parse_artifact_path = staticmethod(PipelineServiceClient.parse_artifact_path) + context_path = staticmethod(PipelineServiceClient.context_path) + parse_context_path = staticmethod(PipelineServiceClient.parse_context_path) + custom_job_path = staticmethod(PipelineServiceClient.custom_job_path) + parse_custom_job_path = staticmethod(PipelineServiceClient.parse_custom_job_path) endpoint_path = staticmethod(PipelineServiceClient.endpoint_path) parse_endpoint_path = staticmethod(PipelineServiceClient.parse_endpoint_path) + execution_path = staticmethod(PipelineServiceClient.execution_path) + parse_execution_path = staticmethod(PipelineServiceClient.parse_execution_path) model_path = staticmethod(PipelineServiceClient.model_path) parse_model_path = staticmethod(PipelineServiceClient.parse_model_path) + network_path = staticmethod(PipelineServiceClient.network_path) + parse_network_path = staticmethod(PipelineServiceClient.parse_network_path) + pipeline_job_path = staticmethod(PipelineServiceClient.pipeline_job_path) + parse_pipeline_job_path = staticmethod( + PipelineServiceClient.parse_pipeline_job_path + ) training_pipeline_path = staticmethod(PipelineServiceClient.training_pipeline_path) parse_training_pipeline_path = staticmethod( PipelineServiceClient.parse_training_pipeline_path @@ -597,6 +613,422 @@ async def cancel_training_pipeline( request, retry=retry, timeout=timeout, metadata=metadata, ) + async def create_pipeline_job( + self, + request: pipeline_service.CreatePipelineJobRequest = None, + *, + parent: str = None, + pipeline_job: gca_pipeline_job.PipelineJob = None, + pipeline_job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_pipeline_job.PipelineJob: + r"""Creates a PipelineJob. A PipelineJob will run + immediately when created. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.CreatePipelineJobRequest`): + The request object. Request message for + [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1.PipelineService.CreatePipelineJob]. + parent (:class:`str`): + Required. The resource name of the Location to create + the PipelineJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + pipeline_job (:class:`google.cloud.aiplatform_v1.types.PipelineJob`): + Required. The PipelineJob to create. + This corresponds to the ``pipeline_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + pipeline_job_id (:class:`str`): + The ID to use for the PipelineJob, which will become the + final component of the PipelineJob name. If not + provided, an ID will be automatically generated. + + This value should be less than 128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``pipeline_job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.PipelineJob: + An instance of a machine learning + PipelineJob. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, pipeline_job, pipeline_job_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = pipeline_service.CreatePipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if pipeline_job is not None: + request.pipeline_job = pipeline_job + if pipeline_job_id is not None: + request.pipeline_job_id = pipeline_job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_pipeline_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_pipeline_job( + self, + request: pipeline_service.GetPipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pipeline_job.PipelineJob: + r"""Gets a PipelineJob. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.GetPipelineJobRequest`): + The request object. Request message for + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob]. + name (:class:`str`): + Required. The name of the PipelineJob resource. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.PipelineJob: + An instance of a machine learning + PipelineJob. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = pipeline_service.GetPipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_pipeline_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_pipeline_jobs( + self, + request: pipeline_service.ListPipelineJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPipelineJobsAsyncPager: + r"""Lists PipelineJobs in a Location. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.ListPipelineJobsRequest`): + The request object. Request message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs]. + parent (:class:`str`): + Required. The resource name of the Location to list the + PipelineJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListPipelineJobsAsyncPager: + Response message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = pipeline_service.ListPipelineJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_pipeline_jobs, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListPipelineJobsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def delete_pipeline_job( + self, + request: pipeline_service.DeletePipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a PipelineJob. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.DeletePipelineJobRequest`): + The request object. Request message for + [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1.PipelineService.DeletePipelineJob]. + name (:class:`str`): + Required. The name of the PipelineJob resource to be + deleted. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = pipeline_service.DeletePipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_pipeline_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def cancel_pipeline_job( + self, + request: pipeline_service.CancelPipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a PipelineJob. Starts asynchronous cancellation on the + PipelineJob. The server makes a best effort to cancel the + pipeline, but success is not guaranteed. 
Clients can use + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the PipelineJob is not deleted; instead + it becomes a pipeline with a + [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] + is set to ``CANCELLED``. + + Args: + request (:class:`google.cloud.aiplatform_v1.types.CancelPipelineJobRequest`): + The request object. Request message for + [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1.PipelineService.CancelPipelineJob]. + name (:class:`str`): + Required. The name of the PipelineJob to cancel. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = pipeline_service.CancelPipelineJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_pipeline_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
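+        # (Editor's note: CancelPipelineJob returns google.protobuf.Empty, so
+        # this coroutine awaits the RPC purely for its side effect and the
+        # method itself returns None.)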
+ await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/client.py b/google/cloud/aiplatform_v1/services/pipeline_service/client.py index 9ea5595638..3266a00e97 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/client.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/client.py @@ -36,6 +36,8 @@ from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import model from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1.types import pipeline_service from google.cloud.aiplatform_v1.types import pipeline_state from google.cloud.aiplatform_v1.types import training_pipeline @@ -171,6 +173,64 @@ def transport(self) -> PipelineServiceTransport: """ return self._transport + @staticmethod + def artifact_path( + project: str, location: str, metadata_store: str, artifact: str, + ) -> str: + """Returns a fully-qualified artifact string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format( + project=project, + location=location, + metadata_store=metadata_store, + artifact=artifact, + ) + + @staticmethod + def parse_artifact_path(path: str) -> Dict[str, str]: + """Parses an artifact path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)/artifacts/(?P<artifact>.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def context_path( + project: str, location: str, metadata_store: str, context: str, + ) -> str: + """Returns a fully-qualified context string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format( + project=project, + location=location, + metadata_store=metadata_store, + context=context, + ) + + @staticmethod + def parse_context_path(path: str) -> Dict[str, str]: + """Parses a context path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)/contexts/(?P<context>.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def custom_job_path(project: str, location: str, custom_job: str,) -> str: + """Returns a fully-qualified custom_job string.""" + return "projects/{project}/locations/{location}/customJobs/{custom_job}".format( + project=project, location=location, custom_job=custom_job, + ) + + @staticmethod + def parse_custom_job_path(path: str) -> Dict[str, str]: + """Parses a custom_job path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/customJobs/(?P<custom_job>.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def endpoint_path(project: str, location: str, endpoint: str,) -> str: """Returns a fully-qualified endpoint string.""" @@ -187,6 +247,27 @@ def parse_endpoint_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def execution_path( + project: str, location: str, metadata_store: str, execution: str, + ) -> str: + """Returns a fully-qualified execution string.""" + return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format( + project=project, + location=location, + metadata_store=metadata_store, +
execution=execution, + ) + + @staticmethod + def parse_execution_path(path: str) -> Dict[str, str]: + """Parses an execution path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)/executions/(?P<execution>.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def model_path(project: str, location: str, model: str,) -> str: """Returns a fully-qualified model string.""" @@ -203,6 +284,37 @@ def parse_model_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def network_path(project: str, network: str,) -> str: + """Returns a fully-qualified network string.""" + return "projects/{project}/global/networks/{network}".format( + project=project, network=network, + ) + + @staticmethod + def parse_network_path(path: str) -> Dict[str, str]: + """Parses a network path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/global/networks/(?P<network>.+?)$", path + ) + return m.groupdict() if m else {} + + @staticmethod + def pipeline_job_path(project: str, location: str, pipeline_job: str,) -> str: + """Returns a fully-qualified pipeline_job string.""" + return "projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}".format( + project=project, location=location, pipeline_job=pipeline_job, + ) + + @staticmethod + def parse_pipeline_job_path(path: str) -> Dict[str, str]: + """Parses a pipeline_job path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/pipelineJobs/(?P<pipeline_job>.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def training_pipeline_path( project: str, location: str, training_pipeline: str, @@ -810,6 +922,422 @@ def cancel_training_pipeline( request, retry=retry, timeout=timeout, metadata=metadata, ) + def create_pipeline_job( + self, + request: pipeline_service.CreatePipelineJobRequest = None, + *, + parent: str = None, + pipeline_job: gca_pipeline_job.PipelineJob = None, + pipeline_job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_pipeline_job.PipelineJob: + r"""Creates a PipelineJob. A PipelineJob will run + immediately when created. + + Args: + request (google.cloud.aiplatform_v1.types.CreatePipelineJobRequest): + The request object. Request message for + [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1.PipelineService.CreatePipelineJob]. + parent (str): + Required. The resource name of the Location to create + the PipelineJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + pipeline_job (google.cloud.aiplatform_v1.types.PipelineJob): + Required. The PipelineJob to create. + This corresponds to the ``pipeline_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + pipeline_job_id (str): + The ID to use for the PipelineJob, which will become the + final component of the PipelineJob name. If not + provided, an ID will be automatically generated. + + This value should be less than 128 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``pipeline_job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.PipelineJob: + An instance of a machine learning + PipelineJob. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, pipeline_job, pipeline_job_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.CreatePipelineJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.CreatePipelineJobRequest): + request = pipeline_service.CreatePipelineJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if pipeline_job is not None: + request.pipeline_job = pipeline_job + if pipeline_job_id is not None: + request.pipeline_job_id = pipeline_job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_pipeline_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_pipeline_job( + self, + request: pipeline_service.GetPipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pipeline_job.PipelineJob: + r"""Gets a PipelineJob. + + Args: + request (google.cloud.aiplatform_v1.types.GetPipelineJobRequest): + The request object. Request message for + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob]. + name (str): + Required. The name of the PipelineJob resource. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.PipelineJob: + An instance of a machine learning + PipelineJob. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.GetPipelineJobRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.GetPipelineJobRequest): + request = pipeline_service.GetPipelineJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_pipeline_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_pipeline_jobs( + self, + request: pipeline_service.ListPipelineJobsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPipelineJobsPager: + r"""Lists PipelineJobs in a Location. + + Args: + request (google.cloud.aiplatform_v1.types.ListPipelineJobsRequest): + The request object. Request message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs]. + parent (str): + Required. The resource name of the Location to list the + PipelineJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListPipelineJobsPager: + Response message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.ListPipelineJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.ListPipelineJobsRequest): + request = pipeline_service.ListPipelineJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_pipeline_jobs] + + # Certain fields should be provided within the metadata header; + # add these here. 
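+        # (Editor's note: to_grpc_metadata renders these key/value pairs as the
+        # "x-goog-request-params" gRPC metadata entry, with values URL-encoded,
+        # which the Vertex AI frontend uses to route the request.)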
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPipelineJobsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_pipeline_job( + self, + request: pipeline_service.DeletePipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a PipelineJob. + + Args: + request (google.cloud.aiplatform_v1.types.DeletePipelineJobRequest): + The request object. Request message for + [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1.PipelineService.DeletePipelineJob]. + name (str): + Required. The name of the PipelineJob resource to be + deleted. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.DeletePipelineJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.DeletePipelineJobRequest): + request = pipeline_service.DeletePipelineJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_pipeline_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. 
+ response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def cancel_pipeline_job( + self, + request: pipeline_service.CancelPipelineJobRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a PipelineJob. Starts asynchronous cancellation on the + PipelineJob. The server makes a best effort to cancel the + pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the PipelineJob is not deleted; instead + it becomes a pipeline with a + [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] + is set to ``CANCELLED``. + + Args: + request (google.cloud.aiplatform_v1.types.CancelPipelineJobRequest): + The request object. Request message for + [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1.PipelineService.CancelPipelineJob]. + name (str): + Required. The name of the PipelineJob to cancel. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a pipeline_service.CancelPipelineJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, pipeline_service.CancelPipelineJobRequest): + request = pipeline_service.CancelPipelineJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_pipeline_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
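+        # (Editor's note: as in the async client above, the Empty response from
+        # the cancel RPC is discarded and the method returns None.)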
+ rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py b/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py index 280d676b4b..1dd7ce291c 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py @@ -24,6 +24,7 @@ Optional, ) +from google.cloud.aiplatform_v1.types import pipeline_job from google.cloud.aiplatform_v1.types import pipeline_service from google.cloud.aiplatform_v1.types import training_pipeline @@ -158,3 +159,131 @@ async def async_generator(): def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListPipelineJobsPager: + """A pager for iterating through ``list_pipeline_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListPipelineJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``pipeline_jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListPipelineJobs`` requests and continue to iterate + through the ``pipeline_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListPipelineJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., pipeline_service.ListPipelineJobsResponse], + request: pipeline_service.ListPipelineJobsRequest, + response: pipeline_service.ListPipelineJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListPipelineJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListPipelineJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = pipeline_service.ListPipelineJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[pipeline_service.ListPipelineJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[pipeline_job.PipelineJob]: + for page in self.pages: + yield from page.pipeline_jobs + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListPipelineJobsAsyncPager: + """A pager for iterating through ``list_pipeline_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListPipelineJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``pipeline_jobs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListPipelineJobs`` requests and continue to iterate + through the ``pipeline_jobs`` field on the + corresponding responses. 
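+
+    Example (editor's sketch; assumes an initialized
+    ``PipelineServiceAsyncClient`` named ``client`` and a valid ``parent``):
+
+        pager = await client.list_pipeline_jobs(parent=parent)
+        async for pipeline_job in pager:
+            print(pipeline_job.display_name)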
+ + All the usual :class:`google.cloud.aiplatform_v1.types.ListPipelineJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[pipeline_service.ListPipelineJobsResponse]], + request: pipeline_service.ListPipelineJobsRequest, + response: pipeline_service.ListPipelineJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListPipelineJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListPipelineJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = pipeline_service.ListPipelineJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[pipeline_service.ListPipelineJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[pipeline_job.PipelineJob]: + async def async_generator(): + async for page in self.pages: + for response in page.pipeline_jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py index 58b449654d..04cef84b9d 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py @@ -27,6 +27,8 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +from google.cloud.aiplatform_v1.types import pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1.types import pipeline_service from google.cloud.aiplatform_v1.types import training_pipeline from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline @@ -184,6 +186,21 @@ def _prep_wrapped_messages(self, client_info): default_timeout=5.0, client_info=client_info, ), + self.create_pipeline_job: gapic_v1.method.wrap_method( + self.create_pipeline_job, default_timeout=None, client_info=client_info, + ), + self.get_pipeline_job: gapic_v1.method.wrap_method( + self.get_pipeline_job, default_timeout=None, client_info=client_info, + ), + self.list_pipeline_jobs: gapic_v1.method.wrap_method( + self.list_pipeline_jobs, default_timeout=None, client_info=client_info, + ), + self.delete_pipeline_job: gapic_v1.method.wrap_method( + self.delete_pipeline_job, default_timeout=None, client_info=client_info, + ), + self.cancel_pipeline_job: gapic_v1.method.wrap_method( + self.cancel_pipeline_job, default_timeout=None, client_info=client_info, + ), } @property @@ -245,5 +262,53 @@ def cancel_training_pipeline( ]: raise 
NotImplementedError() + @property + def create_pipeline_job( + self, + ) -> Callable[ + [pipeline_service.CreatePipelineJobRequest], + Union[gca_pipeline_job.PipelineJob, Awaitable[gca_pipeline_job.PipelineJob]], + ]: + raise NotImplementedError() + + @property + def get_pipeline_job( + self, + ) -> Callable[ + [pipeline_service.GetPipelineJobRequest], + Union[pipeline_job.PipelineJob, Awaitable[pipeline_job.PipelineJob]], + ]: + raise NotImplementedError() + + @property + def list_pipeline_jobs( + self, + ) -> Callable[ + [pipeline_service.ListPipelineJobsRequest], + Union[ + pipeline_service.ListPipelineJobsResponse, + Awaitable[pipeline_service.ListPipelineJobsResponse], + ], + ]: + raise NotImplementedError() + + @property + def delete_pipeline_job( + self, + ) -> Callable[ + [pipeline_service.DeletePipelineJobRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_pipeline_job( + self, + ) -> Callable[ + [pipeline_service.CancelPipelineJobRequest], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], + ]: + raise NotImplementedError() + __all__ = ("PipelineServiceTransport",) diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py index 8018cf1e3d..71adcdd6ca 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py @@ -25,6 +25,8 @@ import grpc # type: ignore +from google.cloud.aiplatform_v1.types import pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1.types import pipeline_service from google.cloud.aiplatform_v1.types import training_pipeline from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline @@ -402,5 +404,155 @@ def cancel_training_pipeline( ) return self._stubs["cancel_training_pipeline"] + @property + def create_pipeline_job( + self, + ) -> Callable[ + [pipeline_service.CreatePipelineJobRequest], gca_pipeline_job.PipelineJob + ]: + r"""Return a callable for the create pipeline job method over gRPC. + + Creates a PipelineJob. A PipelineJob will run + immediately when created. + + Returns: + Callable[[~.CreatePipelineJobRequest], + ~.PipelineJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_pipeline_job" not in self._stubs: + self._stubs["create_pipeline_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PipelineService/CreatePipelineJob", + request_serializer=pipeline_service.CreatePipelineJobRequest.serialize, + response_deserializer=gca_pipeline_job.PipelineJob.deserialize, + ) + return self._stubs["create_pipeline_job"] + + @property + def get_pipeline_job( + self, + ) -> Callable[[pipeline_service.GetPipelineJobRequest], pipeline_job.PipelineJob]: + r"""Return a callable for the get pipeline job method over gRPC. + + Gets a PipelineJob. + + Returns: + Callable[[~.GetPipelineJobRequest], + ~.PipelineJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_pipeline_job" not in self._stubs: + self._stubs["get_pipeline_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PipelineService/GetPipelineJob", + request_serializer=pipeline_service.GetPipelineJobRequest.serialize, + response_deserializer=pipeline_job.PipelineJob.deserialize, + ) + return self._stubs["get_pipeline_job"] + + @property + def list_pipeline_jobs( + self, + ) -> Callable[ + [pipeline_service.ListPipelineJobsRequest], + pipeline_service.ListPipelineJobsResponse, + ]: + r"""Return a callable for the list pipeline jobs method over gRPC. + + Lists PipelineJobs in a Location. + + Returns: + Callable[[~.ListPipelineJobsRequest], + ~.ListPipelineJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_pipeline_jobs" not in self._stubs: + self._stubs["list_pipeline_jobs"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PipelineService/ListPipelineJobs", + request_serializer=pipeline_service.ListPipelineJobsRequest.serialize, + response_deserializer=pipeline_service.ListPipelineJobsResponse.deserialize, + ) + return self._stubs["list_pipeline_jobs"] + + @property + def delete_pipeline_job( + self, + ) -> Callable[ + [pipeline_service.DeletePipelineJobRequest], operations_pb2.Operation + ]: + r"""Return a callable for the delete pipeline job method over gRPC. + + Deletes a PipelineJob. + + Returns: + Callable[[~.DeletePipelineJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_pipeline_job" not in self._stubs: + self._stubs["delete_pipeline_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PipelineService/DeletePipelineJob", + request_serializer=pipeline_service.DeletePipelineJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_pipeline_job"] + + @property + def cancel_pipeline_job( + self, + ) -> Callable[[pipeline_service.CancelPipelineJobRequest], empty_pb2.Empty]: + r"""Return a callable for the cancel pipeline job method over gRPC. + + Cancels a PipelineJob. Starts asynchronous cancellation on the + PipelineJob. The server makes a best effort to cancel the + pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the PipelineJob is not deleted; instead + it becomes a pipeline with a + [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] + is set to ``CANCELLED``. + + Returns: + Callable[[~.CancelPipelineJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. 
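+
+        A sketch of direct use (illustrative only; ``transport`` is assumed
+        to be an already-constructed and authenticated
+        ``PipelineServiceGrpcTransport``)::
+
+            transport.cancel_pipeline_job(
+                pipeline_service.CancelPipelineJobRequest(
+                    name="projects/my-project/locations/us-central1/pipelineJobs/my-job",
+                )
+            )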
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_pipeline_job" not in self._stubs: + self._stubs["cancel_pipeline_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PipelineService/CancelPipelineJob", + request_serializer=pipeline_service.CancelPipelineJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["cancel_pipeline_job"] + __all__ = ("PipelineServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py index de09eb535f..a7712b19dd 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py @@ -26,6 +26,8 @@ import grpc # type: ignore from grpc.experimental import aio # type: ignore +from google.cloud.aiplatform_v1.types import pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1.types import pipeline_service from google.cloud.aiplatform_v1.types import training_pipeline from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline @@ -410,5 +412,160 @@ def cancel_training_pipeline( ) return self._stubs["cancel_training_pipeline"] + @property + def create_pipeline_job( + self, + ) -> Callable[ + [pipeline_service.CreatePipelineJobRequest], + Awaitable[gca_pipeline_job.PipelineJob], + ]: + r"""Return a callable for the create pipeline job method over gRPC. + + Creates a PipelineJob. A PipelineJob will run + immediately when created. + + Returns: + Callable[[~.CreatePipelineJobRequest], + Awaitable[~.PipelineJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_pipeline_job" not in self._stubs: + self._stubs["create_pipeline_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PipelineService/CreatePipelineJob", + request_serializer=pipeline_service.CreatePipelineJobRequest.serialize, + response_deserializer=gca_pipeline_job.PipelineJob.deserialize, + ) + return self._stubs["create_pipeline_job"] + + @property + def get_pipeline_job( + self, + ) -> Callable[ + [pipeline_service.GetPipelineJobRequest], Awaitable[pipeline_job.PipelineJob] + ]: + r"""Return a callable for the get pipeline job method over gRPC. + + Gets a PipelineJob. + + Returns: + Callable[[~.GetPipelineJobRequest], + Awaitable[~.PipelineJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_pipeline_job" not in self._stubs: + self._stubs["get_pipeline_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PipelineService/GetPipelineJob", + request_serializer=pipeline_service.GetPipelineJobRequest.serialize, + response_deserializer=pipeline_job.PipelineJob.deserialize, + ) + return self._stubs["get_pipeline_job"] + + @property + def list_pipeline_jobs( + self, + ) -> Callable[ + [pipeline_service.ListPipelineJobsRequest], + Awaitable[pipeline_service.ListPipelineJobsResponse], + ]: + r"""Return a callable for the list pipeline jobs method over gRPC. + + Lists PipelineJobs in a Location. + + Returns: + Callable[[~.ListPipelineJobsRequest], + Awaitable[~.ListPipelineJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_pipeline_jobs" not in self._stubs: + self._stubs["list_pipeline_jobs"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PipelineService/ListPipelineJobs", + request_serializer=pipeline_service.ListPipelineJobsRequest.serialize, + response_deserializer=pipeline_service.ListPipelineJobsResponse.deserialize, + ) + return self._stubs["list_pipeline_jobs"] + + @property + def delete_pipeline_job( + self, + ) -> Callable[ + [pipeline_service.DeletePipelineJobRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the delete pipeline job method over gRPC. + + Deletes a PipelineJob. + + Returns: + Callable[[~.DeletePipelineJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_pipeline_job" not in self._stubs: + self._stubs["delete_pipeline_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PipelineService/DeletePipelineJob", + request_serializer=pipeline_service.DeletePipelineJobRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_pipeline_job"] + + @property + def cancel_pipeline_job( + self, + ) -> Callable[ + [pipeline_service.CancelPipelineJobRequest], Awaitable[empty_pb2.Empty] + ]: + r"""Return a callable for the cancel pipeline job method over gRPC. + + Cancels a PipelineJob. Starts asynchronous cancellation on the + PipelineJob. The server makes a best effort to cancel the + pipeline, but success is not guaranteed. Clients can use + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob] + or other methods to check whether the cancellation succeeded or + whether the pipeline completed despite cancellation. On + successful cancellation, the PipelineJob is not deleted; instead + it becomes a pipeline with a + [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] + is set to ``CANCELLED``. + + Returns: + Callable[[~.CancelPipelineJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_pipeline_job" not in self._stubs: + self._stubs["cancel_pipeline_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PipelineService/CancelPipelineJob", + request_serializer=pipeline_service.CancelPipelineJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["cancel_pipeline_job"] + __all__ = ("PipelineServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1/services/prediction_service/async_client.py index fe69ba8c58..b5cde52017 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/async_client.py @@ -166,8 +166,8 @@ async def predict( request: prediction_service.PredictRequest = None, *, endpoint: str = None, - instances: Sequence[struct_pb2.Value] = None, parameters: struct_pb2.Value = None, + instances: Sequence[struct_pb2.Value] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -186,6 +186,17 @@ async def predict( This corresponds to the ``endpoint`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + parameters (:class:`google.protobuf.struct_pb2.Value`): + The parameters that govern the prediction. The schema of + the parameters may be specified via Endpoint's + DeployedModels' [Model's + ][google.cloud.aiplatform.v1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. + + This corresponds to the ``parameters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. instances (:class:`Sequence[google.protobuf.struct_pb2.Value]`): Required. The instances that are the input to the prediction call. A DeployedModel may have an upper limit @@ -202,17 +213,6 @@ async def predict( This corresponds to the ``instances`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - parameters (:class:`google.protobuf.struct_pb2.Value`): - The parameters that govern the prediction. The schema of - the parameters may be specified via Endpoint's - DeployedModels' [Model's - ][google.cloud.aiplatform.v1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. - - This corresponds to the ``parameters`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -228,7 +228,7 @@ async def predict( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([endpoint, instances, parameters]) + has_flattened_params = any([endpoint, parameters, instances]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " diff --git a/google/cloud/aiplatform_v1/services/prediction_service/client.py b/google/cloud/aiplatform_v1/services/prediction_service/client.py index 6caef31722..d1a1198435 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/client.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/client.py @@ -353,8 +353,8 @@ def predict( request: prediction_service.PredictRequest = None, *, endpoint: str = None, - instances: Sequence[struct_pb2.Value] = None, parameters: struct_pb2.Value = None, + instances: Sequence[struct_pb2.Value] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -373,6 +373,17 @@ def predict( This corresponds to the ``endpoint`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + parameters (google.protobuf.struct_pb2.Value): + The parameters that govern the prediction. The schema of + the parameters may be specified via Endpoint's + DeployedModels' [Model's + ][google.cloud.aiplatform.v1.DeployedModel.model] + [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] + [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. + + This corresponds to the ``parameters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. instances (Sequence[google.protobuf.struct_pb2.Value]): Required. The instances that are the input to the prediction call. A DeployedModel may have an upper limit @@ -389,17 +400,6 @@ def predict( This corresponds to the ``instances`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - parameters (google.protobuf.struct_pb2.Value): - The parameters that govern the prediction. The schema of - the parameters may be specified via Endpoint's - DeployedModels' [Model's - ][google.cloud.aiplatform.v1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. - - This corresponds to the ``parameters`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -415,7 +415,7 @@ def predict( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, instances, parameters]) + has_flattened_params = any([endpoint, parameters, instances]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -432,10 +432,10 @@ def predict( # request, apply these. if endpoint is not None: request.endpoint = endpoint - if instances is not None: - request.instances.extend(instances) if parameters is not None: request.parameters = parameters + if instances is not None: + request.instances.extend(instances) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
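For orientation, a minimal end-to-end sketch of the reordered flattened
signature follows; the project, region, endpoint ID, and payload are
hypothetical placeholders, and since ``endpoint``, ``parameters``, and
``instances`` are keyword-only, calls like this one are unaffected by the
reorder:

    from google.cloud import aiplatform_v1
    from google.protobuf import struct_pb2

    client = aiplatform_v1.PredictionServiceClient()

    # One prediction instance, expressed as a protobuf Value holding a Struct.
    instance = struct_pb2.Value()
    instance.struct_value.update({"feature": 1.0})

    response = client.predict(
        endpoint="projects/my-project/locations/us-central1/endpoints/1234567890",
        parameters=struct_pb2.Value(),  # placeholder; the schema is model-specific
        instances=[instance],
    )
    print(response.predictions)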
diff --git a/google/cloud/aiplatform_v1/types/__init__.py b/google/cloud/aiplatform_v1/types/__init__.py index 42bab0a05e..6d03e0c47c 100644 --- a/google/cloud/aiplatform_v1/types/__init__.py +++ b/google/cloud/aiplatform_v1/types/__init__.py @@ -15,8 +15,10 @@ # from .annotation import Annotation from .annotation_spec import AnnotationSpec +from .artifact import Artifact from .batch_prediction_job import BatchPredictionJob from .completion_stats import CompletionStats +from .context import Context from .custom_job import ( ContainerSpec, CustomJob, @@ -79,6 +81,7 @@ UpdateEndpointRequest, ) from .env_var import EnvVar +from .execution import Execution from .hyperparameter_tuning_job import HyperparameterTuningJob from .io import ( BigQueryDestination, @@ -115,6 +118,7 @@ ) from .machine_resources import ( AutomaticResources, + AutoscalingMetricSpec, BatchDedicatedResources, DedicatedResources, DiskSpec, @@ -163,11 +167,23 @@ DeleteOperationMetadata, GenericOperationMetadata, ) +from .pipeline_job import ( + PipelineJob, + PipelineJobDetail, + PipelineTaskDetail, + PipelineTaskExecutorDetail, +) from .pipeline_service import ( + CancelPipelineJobRequest, CancelTrainingPipelineRequest, + CreatePipelineJobRequest, CreateTrainingPipelineRequest, + DeletePipelineJobRequest, DeleteTrainingPipelineRequest, + GetPipelineJobRequest, GetTrainingPipelineRequest, + ListPipelineJobsRequest, + ListPipelineJobsResponse, ListTrainingPipelinesRequest, ListTrainingPipelinesResponse, ) @@ -200,13 +216,16 @@ TrainingPipeline, ) from .user_action_reference import UserActionReference +from .value import Value __all__ = ( "AcceleratorType", "Annotation", "AnnotationSpec", + "Artifact", "BatchPredictionJob", "CompletionStats", + "Context", "ContainerSpec", "CustomJob", "CustomJobSpec", @@ -257,6 +276,7 @@ "UndeployModelResponse", "UpdateEndpointRequest", "EnvVar", + "Execution", "HyperparameterTuningJob", "BigQueryDestination", "BigQuerySource", @@ -289,6 +309,7 @@ "ListHyperparameterTuningJobsResponse", "JobState", "AutomaticResources", + "AutoscalingMetricSpec", "BatchDedicatedResources", "DedicatedResources", "DiskSpec", @@ -328,10 +349,20 @@ "UploadModelResponse", "DeleteOperationMetadata", "GenericOperationMetadata", + "PipelineJob", + "PipelineJobDetail", + "PipelineTaskDetail", + "PipelineTaskExecutorDetail", + "CancelPipelineJobRequest", "CancelTrainingPipelineRequest", + "CreatePipelineJobRequest", "CreateTrainingPipelineRequest", + "DeletePipelineJobRequest", "DeleteTrainingPipelineRequest", + "GetPipelineJobRequest", "GetTrainingPipelineRequest", + "ListPipelineJobsRequest", + "ListPipelineJobsResponse", "ListTrainingPipelinesRequest", "ListTrainingPipelinesResponse", "PipelineState", @@ -356,4 +387,5 @@ "TimestampSplit", "TrainingPipeline", "UserActionReference", + "Value", ) diff --git a/google/cloud/aiplatform_v1/types/accelerator_type.py b/google/cloud/aiplatform_v1/types/accelerator_type.py index 6728739a23..168fe2474f 100644 --- a/google/cloud/aiplatform_v1/types/accelerator_type.py +++ b/google/cloud/aiplatform_v1/types/accelerator_type.py @@ -29,8 +29,7 @@ class AcceleratorType(proto.Enum): NVIDIA_TESLA_V100 = 3 NVIDIA_TESLA_P4 = 4 NVIDIA_TESLA_T4 = 5 - TPU_V2 = 6 - TPU_V3 = 7 + NVIDIA_TESLA_A100 = 8 __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/artifact.py b/google/cloud/aiplatform_v1/types/artifact.py similarity index 79% rename from owl-bot-staging/v1/google/cloud/aiplatform_v1/types/artifact.py rename to 
google/cloud/aiplatform_v1/types/artifact.py index 6bd0512962..68428d03e8 100644 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/artifact.py +++ b/google/cloud/aiplatform_v1/types/artifact.py @@ -19,10 +19,7 @@ __protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'Artifact', - }, + package="google.cloud.aiplatform.v1", manifest={"Artifact",}, ) @@ -67,48 +64,25 @@ class Artifact(proto.Message): and the system does not prescribe or check the validity of state transitions. """ + class State(proto.Enum): r"""Describes the state of the Artifact.""" STATE_UNSPECIFIED = 0 PENDING = 1 LIVE = 2 - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - uri = proto.Field( - proto.STRING, - number=6, - ) - etag = proto.Field( - proto.STRING, - number=9, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=10, - ) + name = proto.Field(proto.STRING, number=1,) + display_name = proto.Field(proto.STRING, number=2,) + uri = proto.Field(proto.STRING, number=6,) + etag = proto.Field(proto.STRING, number=9,) + labels = proto.MapField(proto.STRING, proto.STRING, number=10,) create_time = proto.Field( - proto.MESSAGE, - number=11, - message=timestamp_pb2.Timestamp, + proto.MESSAGE, number=11, message=timestamp_pb2.Timestamp, ) update_time = proto.Field( - proto.MESSAGE, - number=12, - message=timestamp_pb2.Timestamp, - ) - state = proto.Field( - proto.ENUM, - number=13, - enum=State, + proto.MESSAGE, number=12, message=timestamp_pb2.Timestamp, ) + state = proto.Field(proto.ENUM, number=13, enum=State,) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/batch_prediction_job.py b/google/cloud/aiplatform_v1/types/batch_prediction_job.py index 32194179ab..8b3e8fe60c 100644 --- a/google/cloud/aiplatform_v1/types/batch_prediction_job.py +++ b/google/cloud/aiplatform_v1/types/batch_prediction_job.py @@ -212,7 +212,7 @@ class OutputConfig(proto.Message): files are created (N depends on total number of failed predictions). These files contain the failed instances, as per their schema, followed by an additional ``error`` field - which as value has ```google.rpc.Status`` `__ + which as value has [google.rpc.Status][google.rpc.Status] containing only ``code`` and ``message`` fields. bigquery_destination (google.cloud.aiplatform_v1.types.BigQueryDestination): The BigQuery project or dataset location where the output is @@ -234,8 +234,8 @@ class OutputConfig(proto.Message): ``errors`` table contains rows for which the prediction has failed, it has instance columns, as per the instance schema, followed by a single "errors" column, which as values has - ```google.rpc.Status`` `__ represented as a STRUCT, - and containing only ``code`` and ``message``. + [google.rpc.Status][google.rpc.Status] represented as a + STRUCT, and containing only ``code`` and ``message``. predictions_format (str): Required. The format in which Vertex AI gives the predictions, must be one of the @@ -267,6 +267,11 @@ class OutputInfo(proto.Message): Output only. The path of the BigQuery dataset created, in ``bq://projectId.bqDatasetId`` format, into which the prediction output is written. + bigquery_output_table (str): + Output only. The name of the BigQuery table created, in + ``predictions_`` format, into which the + prediction output is written. Can be used by UI to generate + the BigQuery output path, for example. 
""" gcs_output_directory = proto.Field( @@ -275,6 +280,7 @@ class OutputInfo(proto.Message): bigquery_output_dataset = proto.Field( proto.STRING, number=2, oneof="output_location", ) + bigquery_output_table = proto.Field(proto.STRING, number=4,) name = proto.Field(proto.STRING, number=1,) display_name = proto.Field(proto.STRING, number=2,) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/context.py b/google/cloud/aiplatform_v1/types/context.py similarity index 78% rename from owl-bot-staging/v1/google/cloud/aiplatform_v1/types/context.py rename to google/cloud/aiplatform_v1/types/context.py index ab2830492b..fc06f9c002 100644 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/context.py +++ b/google/cloud/aiplatform_v1/types/context.py @@ -19,10 +19,7 @@ __protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'Context', - }, + package="google.cloud.aiplatform.v1", manifest={"Context",}, ) @@ -61,37 +58,17 @@ class Context(proto.Message): parent_contexts. """ - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - etag = proto.Field( - proto.STRING, - number=8, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=9, - ) + name = proto.Field(proto.STRING, number=1,) + display_name = proto.Field(proto.STRING, number=2,) + etag = proto.Field(proto.STRING, number=8,) + labels = proto.MapField(proto.STRING, proto.STRING, number=9,) create_time = proto.Field( - proto.MESSAGE, - number=10, - message=timestamp_pb2.Timestamp, + proto.MESSAGE, number=10, message=timestamp_pb2.Timestamp, ) update_time = proto.Field( - proto.MESSAGE, - number=11, - message=timestamp_pb2.Timestamp, - ) - parent_contexts = proto.RepeatedField( - proto.STRING, - number=12, + proto.MESSAGE, number=11, message=timestamp_pb2.Timestamp, ) + parent_contexts = proto.RepeatedField(proto.STRING, number=12,) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/execution.py b/google/cloud/aiplatform_v1/types/execution.py similarity index 81% rename from owl-bot-staging/v1/google/cloud/aiplatform_v1/types/execution.py rename to google/cloud/aiplatform_v1/types/execution.py index 528814fe52..3178936646 100644 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/execution.py +++ b/google/cloud/aiplatform_v1/types/execution.py @@ -19,10 +19,7 @@ __protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'Execution', - }, + package="google.cloud.aiplatform.v1", manifest={"Execution",}, ) @@ -63,6 +60,7 @@ class Execution(proto.Message): Output only. Timestamp when this Execution was last updated. 
""" + class State(proto.Enum): r"""Describes the state of the Execution.""" STATE_UNSPECIFIED = 0 @@ -73,37 +71,16 @@ class State(proto.Enum): CACHED = 5 CANCELLED = 6 - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - state = proto.Field( - proto.ENUM, - number=6, - enum=State, - ) - etag = proto.Field( - proto.STRING, - number=9, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=10, - ) + name = proto.Field(proto.STRING, number=1,) + display_name = proto.Field(proto.STRING, number=2,) + state = proto.Field(proto.ENUM, number=6, enum=State,) + etag = proto.Field(proto.STRING, number=9,) + labels = proto.MapField(proto.STRING, proto.STRING, number=10,) create_time = proto.Field( - proto.MESSAGE, - number=11, - message=timestamp_pb2.Timestamp, + proto.MESSAGE, number=11, message=timestamp_pb2.Timestamp, ) update_time = proto.Field( - proto.MESSAGE, - number=12, - message=timestamp_pb2.Timestamp, + proto.MESSAGE, number=12, message=timestamp_pb2.Timestamp, ) diff --git a/google/cloud/aiplatform_v1/types/io.py b/google/cloud/aiplatform_v1/types/io.py index d23f3d91db..3b99270f71 100644 --- a/google/cloud/aiplatform_v1/types/io.py +++ b/google/cloud/aiplatform_v1/types/io.py @@ -84,6 +84,7 @@ class BigQueryDestination(proto.Message): Accepted forms: - BigQuery path. For example: ``bq://projectId`` or + ``bq://projectId.bqDatasetId`` or ``bq://projectId.bqDatasetId.bqTableId``. """ diff --git a/google/cloud/aiplatform_v1/types/job_state.py b/google/cloud/aiplatform_v1/types/job_state.py index 59c0949844..f5c7ebba63 100644 --- a/google/cloud/aiplatform_v1/types/job_state.py +++ b/google/cloud/aiplatform_v1/types/job_state.py @@ -32,6 +32,7 @@ class JobState(proto.Enum): JOB_STATE_CANCELLING = 6 JOB_STATE_CANCELLED = 7 JOB_STATE_PAUSED = 8 + JOB_STATE_EXPIRED = 9 __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/machine_resources.py b/google/cloud/aiplatform_v1/types/machine_resources.py index fb926e987d..d42e58ab0c 100644 --- a/google/cloud/aiplatform_v1/types/machine_resources.py +++ b/google/cloud/aiplatform_v1/types/machine_resources.py @@ -27,6 +27,7 @@ "BatchDedicatedResources", "ResourcesConsumed", "DiskSpec", + "AutoscalingMetricSpec", }, ) @@ -77,15 +78,14 @@ class DedicatedResources(proto.Message): Required. Immutable. The specification of a single machine used by the prediction. min_replica_count (int): - Required. Immutable. The minimum number of machine replicas - this DeployedModel will be always deployed on. If traffic - against it increases, it may dynamically be deployed onto - more replicas, and as traffic decreases, some of these extra - replicas may be freed. Note: if - [machine_spec.accelerator_count][google.cloud.aiplatform.v1.MachineSpec.accelerator_count] - is above 0, currently the model will be always deployed - precisely on - [min_replica_count][google.cloud.aiplatform.v1.DedicatedResources.min_replica_count]. + Required. Immutable. The minimum number of + machine replicas this DeployedModel will be + always deployed on. This value must be greater + than or equal to 1. + If traffic against the DeployedModel increases, + it may dynamically be deployed onto more + replicas, and as traffic decreases, some of + these extra replicas may be freed. max_replica_count (int): Immutable. The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. 
If
@@ -98,11 +98,42 @@ class DedicatedResources(proto.Message):
             will use
             [min_replica_count][google.cloud.aiplatform.v1.DedicatedResources.min_replica_count]
             as the default value.
+        autoscaling_metric_specs (Sequence[google.cloud.aiplatform_v1.types.AutoscalingMetricSpec]):
+            Immutable. The metric specifications that override a
+            resource utilization metric's target value (CPU
+            utilization, accelerator's duty cycle, and so on; the
+            target defaults to 60 if not set). At most one entry is
+            allowed per metric.
+
+            If
+            [machine_spec.accelerator_count][google.cloud.aiplatform.v1.MachineSpec.accelerator_count]
+            is above 0, the autoscaling will be based on both CPU
+            utilization and accelerator's duty cycle metrics, scaling
+            up when either metric exceeds its target value and scaling
+            down when both metrics are under their target values. The
+            default target value is 60 for both metrics.
+
+            If
+            [machine_spec.accelerator_count][google.cloud.aiplatform.v1.MachineSpec.accelerator_count]
+            is 0, the autoscaling will be based on the CPU utilization
+            metric only, with a default target value of 60 if not
+            explicitly set.
+
+            For example, in the case of Online Prediction, if you want
+            to override target CPU utilization to 80, you should set
+            [autoscaling_metric_specs.metric_name][google.cloud.aiplatform.v1.AutoscalingMetricSpec.metric_name]
+            to
+            ``aiplatform.googleapis.com/prediction/online/cpu/utilization``
+            and
+            [autoscaling_metric_specs.target][google.cloud.aiplatform.v1.AutoscalingMetricSpec.target]
+            to ``80``.
     """

     machine_spec = proto.Field(proto.MESSAGE, number=1, message="MachineSpec",)
     min_replica_count = proto.Field(proto.INT32, number=2,)
     max_replica_count = proto.Field(proto.INT32, number=3,)
+    autoscaling_metric_specs = proto.RepeatedField(
+        proto.MESSAGE, number=4, message="AutoscalingMetricSpec",
+    )


 class AutomaticResources(proto.Message):
@@ -197,4 +228,29 @@ class DiskSpec(proto.Message):
     boot_disk_size_gb = proto.Field(proto.INT32, number=2,)


+class AutoscalingMetricSpec(proto.Message):
+    r"""The metric specification that defines the target resource
+    utilization (CPU utilization, accelerator's duty cycle, and so
+    on) for calculating the desired replica count.
+
+    Attributes:
+        metric_name (str):
+            Required. The resource metric name. Supported metrics:
+
+            -  For Online Prediction:
+            -  ``aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle``
+            -  ``aiplatform.googleapis.com/prediction/online/cpu/utilization``
+        target (int):
+            The target resource utilization in percentage
+            (1% - 100%) for the given metric; once the real
+            usage deviates from the target by a certain
+            percentage, the machine replicas change. The
+            default value is 60 (representing 60%) if not
+            provided.
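+
+        A minimal construction matching the CPU-utilization example in
+        [DedicatedResources][google.cloud.aiplatform.v1.DedicatedResources]
+        above (field values are illustrative)::
+
+            spec = AutoscalingMetricSpec(
+                metric_name="aiplatform.googleapis.com/prediction/online/cpu/utilization",
+                target=80,
+            )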
+ """ + + metric_name = proto.Field(proto.STRING, number=1,) + target = proto.Field(proto.INT32, number=2,) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_job.py b/google/cloud/aiplatform_v1/types/pipeline_job.py similarity index 73% rename from owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_job.py rename to google/cloud/aiplatform_v1/types/pipeline_job.py index 280d02bc17..d2f48fa8a7 100644 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_job.py +++ b/google/cloud/aiplatform_v1/types/pipeline_job.py @@ -27,12 +27,12 @@ __protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', + package="google.cloud.aiplatform.v1", manifest={ - 'PipelineJob', - 'PipelineJobDetail', - 'PipelineTaskDetail', - 'PipelineTaskExecutorDetail', + "PipelineJob", + "PipelineJobDetail", + "PipelineTaskDetail", + "PipelineTaskExecutorDetail", }, ) @@ -130,87 +130,27 @@ class RuntimeConfig(proto.Message): """ parameters = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=1, - message=gca_value.Value, - ) - gcs_output_directory = proto.Field( - proto.STRING, - number=2, + proto.STRING, proto.MESSAGE, number=1, message=gca_value.Value, ) + gcs_output_directory = proto.Field(proto.STRING, number=2,) - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - create_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - start_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - pipeline_spec = proto.Field( - proto.MESSAGE, - number=7, - message=struct_pb2.Struct, - ) - state = proto.Field( - proto.ENUM, - number=8, - enum=pipeline_state.PipelineState, - ) - job_detail = proto.Field( - proto.MESSAGE, - number=9, - message='PipelineJobDetail', - ) - error = proto.Field( - proto.MESSAGE, - number=10, - message=status_pb2.Status, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=11, - ) - runtime_config = proto.Field( - proto.MESSAGE, - number=12, - message=RuntimeConfig, - ) + name = proto.Field(proto.STRING, number=1,) + display_name = proto.Field(proto.STRING, number=2,) + create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) + start_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,) + end_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,) + update_time = proto.Field(proto.MESSAGE, number=6, message=timestamp_pb2.Timestamp,) + pipeline_spec = proto.Field(proto.MESSAGE, number=7, message=struct_pb2.Struct,) + state = proto.Field(proto.ENUM, number=8, enum=pipeline_state.PipelineState,) + job_detail = proto.Field(proto.MESSAGE, number=9, message="PipelineJobDetail",) + error = proto.Field(proto.MESSAGE, number=10, message=status_pb2.Status,) + labels = proto.MapField(proto.STRING, proto.STRING, number=11,) + runtime_config = proto.Field(proto.MESSAGE, number=12, message=RuntimeConfig,) encryption_spec = proto.Field( - proto.MESSAGE, - number=16, - message=gca_encryption_spec.EncryptionSpec, - ) - service_account = proto.Field( - proto.STRING, - number=17, - ) - network = proto.Field( - proto.STRING, - number=18, + proto.MESSAGE, number=16, 
message=gca_encryption_spec.EncryptionSpec, ) + service_account = proto.Field(proto.STRING, number=17,) + network = proto.Field(proto.STRING, number=18,) class PipelineJobDetail(proto.Message): @@ -226,20 +166,12 @@ class PipelineJobDetail(proto.Message): under the pipeline. """ - pipeline_context = proto.Field( - proto.MESSAGE, - number=1, - message=context.Context, - ) + pipeline_context = proto.Field(proto.MESSAGE, number=1, message=context.Context,) pipeline_run_context = proto.Field( - proto.MESSAGE, - number=2, - message=context.Context, + proto.MESSAGE, number=2, message=context.Context, ) task_details = proto.RepeatedField( - proto.MESSAGE, - number=3, - message='PipelineTaskDetail', + proto.MESSAGE, number=3, message="PipelineTaskDetail", ) @@ -280,6 +212,7 @@ class PipelineTaskDetail(proto.Message): Output only. The runtime output artifacts of the task. """ + class State(proto.Enum): r"""Specifies state of TaskExecution""" STATE_UNSPECIFIED = 0 @@ -301,69 +234,26 @@ class ArtifactList(proto.Message): """ artifacts = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=artifact.Artifact, + proto.MESSAGE, number=1, message=artifact.Artifact, ) - task_id = proto.Field( - proto.INT64, - number=1, - ) - parent_task_id = proto.Field( - proto.INT64, - number=12, - ) - task_name = proto.Field( - proto.STRING, - number=2, - ) - create_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - start_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) + task_id = proto.Field(proto.INT64, number=1,) + parent_task_id = proto.Field(proto.INT64, number=12,) + task_name = proto.Field(proto.STRING, number=2,) + create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) + start_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,) + end_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,) executor_detail = proto.Field( - proto.MESSAGE, - number=6, - message='PipelineTaskExecutorDetail', - ) - state = proto.Field( - proto.ENUM, - number=7, - enum=State, - ) - execution = proto.Field( - proto.MESSAGE, - number=8, - message=gca_execution.Execution, - ) - error = proto.Field( - proto.MESSAGE, - number=9, - message=status_pb2.Status, + proto.MESSAGE, number=6, message="PipelineTaskExecutorDetail", ) + state = proto.Field(proto.ENUM, number=7, enum=State,) + execution = proto.Field(proto.MESSAGE, number=8, message=gca_execution.Execution,) + error = proto.Field(proto.MESSAGE, number=9, message=status_pb2.Status,) inputs = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=10, - message=ArtifactList, + proto.STRING, proto.MESSAGE, number=10, message=ArtifactList, ) outputs = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=11, - message=ArtifactList, + proto.STRING, proto.MESSAGE, number=11, message=ArtifactList, ) @@ -397,14 +287,8 @@ class ContainerDetail(proto.Message): events. """ - main_job = proto.Field( - proto.STRING, - number=1, - ) - pre_caching_check_job = proto.Field( - proto.STRING, - number=2, - ) + main_job = proto.Field(proto.STRING, number=1,) + pre_caching_check_job = proto.Field(proto.STRING, number=2,) class CustomJobDetail(proto.Message): r"""The detailed info for a custom job executor. @@ -414,22 +298,13 @@ class CustomJobDetail(proto.Message): [CustomJob][google.cloud.aiplatform.v1.CustomJob]. 
""" - job = proto.Field( - proto.STRING, - number=1, - ) + job = proto.Field(proto.STRING, number=1,) container_detail = proto.Field( - proto.MESSAGE, - number=1, - oneof='details', - message=ContainerDetail, + proto.MESSAGE, number=1, oneof="details", message=ContainerDetail, ) custom_job_detail = proto.Field( - proto.MESSAGE, - number=2, - oneof='details', - message=CustomJobDetail, + proto.MESSAGE, number=2, oneof="details", message=CustomJobDetail, ) diff --git a/google/cloud/aiplatform_v1/types/pipeline_service.py b/google/cloud/aiplatform_v1/types/pipeline_service.py index 87681a47e6..565952c931 100644 --- a/google/cloud/aiplatform_v1/types/pipeline_service.py +++ b/google/cloud/aiplatform_v1/types/pipeline_service.py @@ -15,6 +15,7 @@ # import proto # type: ignore +from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline from google.protobuf import field_mask_pb2 # type: ignore @@ -28,6 +29,12 @@ "ListTrainingPipelinesResponse", "DeleteTrainingPipelineRequest", "CancelTrainingPipelineRequest", + "CreatePipelineJobRequest", + "GetPipelineJobRequest", + "ListPipelineJobsRequest", + "ListPipelineJobsResponse", + "DeletePipelineJobRequest", + "CancelPipelineJobRequest", }, ) @@ -74,21 +81,35 @@ class ListTrainingPipelinesRequest(proto.Message): TrainingPipelines from. Format: ``projects/{project}/locations/{location}`` filter (str): - The standard list filter. Supported fields: - - - ``display_name`` supports = and !=. - - - ``state`` supports = and !=. - - Some examples of using the filter are: - - - ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"`` - - - ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"`` - - - ``NOT display_name="my_pipeline"`` - - - ``state="PIPELINE_STATE_FAILED"`` + Lists the PipelineJobs that match the filter expression. The + following fields are supported: + + - ``pipeline_name``: Supports ``=`` and ``!=`` comparisons. + - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC + 3339 format. + - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC + 3339 format. + - ``end_time``: Supports ``=``, ``!=``, ``<``, ``>``, + ``<=``, and ``>=`` comparisons. Values must be in RFC + 3339 format. + - ``labels``: Supports key-value equality and key presence. + + Filter expressions can be combined together using logical + operators (``AND`` & ``OR``). For example: + ``pipeline_name="test" AND create_time>"2020-05-18T13:30:00Z"``. + + The syntax to define filter expression is based on + https://google.aip.dev/160. + + Examples: + + - ``create_time>"2021-05-18T00:00:00Z" OR update_time>"2020-05-18T00:00:00Z"`` + PipelineJobs created or updated after 2020-05-18 00:00:00 + UTC. + - ``labels.env = "prod"`` PipelineJobs with label "env" set + to "prod". page_size (int): The standard list page size. page_token (str): @@ -160,4 +181,132 @@ class CancelTrainingPipelineRequest(proto.Message): name = proto.Field(proto.STRING, number=1,) +class CreatePipelineJobRequest(proto.Message): + r"""Request message for + [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1.PipelineService.CreatePipelineJob]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + PipelineJob in. 
Format: + ``projects/{project}/locations/{location}`` + pipeline_job (google.cloud.aiplatform_v1.types.PipelineJob): + Required. The PipelineJob to create. + pipeline_job_id (str): + The ID to use for the PipelineJob, which will become the + final component of the PipelineJob name. If not provided, an + ID will be automatically generated. + + This value should be less than 128 characters, and valid + characters are /[a-z][0-9]-/. + """ + + parent = proto.Field(proto.STRING, number=1,) + pipeline_job = proto.Field( + proto.MESSAGE, number=2, message=gca_pipeline_job.PipelineJob, + ) + pipeline_job_id = proto.Field(proto.STRING, number=3,) + + +class GetPipelineJobRequest(proto.Message): + r"""Request message for + [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob]. + + Attributes: + name (str): + Required. The name of the PipelineJob resource. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + """ + + name = proto.Field(proto.STRING, number=1,) + + +class ListPipelineJobsRequest(proto.Message): + r"""Request message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + PipelineJobs from. Format: + ``projects/{project}/locations/{location}`` + filter (str): + The standard list filter. Supported fields: + + - ``display_name`` supports ``=`` and ``!=``. + - ``state`` supports ``=`` and ``!=``. + + The following examples demonstrate how to filter the list of + PipelineJobs: + + - ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"`` + - ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"`` + - ``NOT display_name="my_pipeline"`` + - ``state="PIPELINE_STATE_FAILED"`` + page_size (int): + The standard list page size. + page_token (str): + The standard list page token. Typically obtained via + [ListPipelineJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListPipelineJobsResponse.next_page_token] + of the previous + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs] + call. + """ + + parent = proto.Field(proto.STRING, number=1,) + filter = proto.Field(proto.STRING, number=2,) + page_size = proto.Field(proto.INT32, number=3,) + page_token = proto.Field(proto.STRING, number=4,) + + +class ListPipelineJobsResponse(proto.Message): + r"""Response message for + [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs] + + Attributes: + pipeline_jobs (Sequence[google.cloud.aiplatform_v1.types.PipelineJob]): + List of PipelineJobs in the requested page. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListPipelineJobsRequest.page_token][google.cloud.aiplatform.v1.ListPipelineJobsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + pipeline_jobs = proto.RepeatedField( + proto.MESSAGE, number=1, message=gca_pipeline_job.PipelineJob, + ) + next_page_token = proto.Field(proto.STRING, number=2,) + + +class DeletePipelineJobRequest(proto.Message): + r"""Request message for + [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1.PipelineService.DeletePipelineJob]. + + Attributes: + name (str): + Required. The name of the PipelineJob resource to be + deleted. 
Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + """ + + name = proto.Field(proto.STRING, number=1,) + + +class CancelPipelineJobRequest(proto.Message): + r"""Request message for + [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1.PipelineService.CancelPipelineJob]. + + Attributes: + name (str): + Required. The name of the PipelineJob to cancel. Format: + ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` + """ + + name = proto.Field(proto.STRING, number=1,) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/study.py b/google/cloud/aiplatform_v1/types/study.py index 40a6c84ce5..3f196b3963 100644 --- a/google/cloud/aiplatform_v1/types/study.py +++ b/google/cloud/aiplatform_v1/types/study.py @@ -15,6 +15,7 @@ # import proto # type: ignore +from google.protobuf import duration_pb2 # type: ignore from google.protobuf import struct_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore @@ -31,6 +32,9 @@ class Trial(proto.Message): objective metrics got by running the Trial. Attributes: + name (str): + Output only. Resource name of the Trial + assigned by the service. id (str): Output only. The identifier of the Trial assigned by the service. @@ -41,11 +45,30 @@ class Trial(proto.Message): final_measurement (google.cloud.aiplatform_v1.types.Measurement): Output only. The final measurement containing the objective value. + measurements (Sequence[google.cloud.aiplatform_v1.types.Measurement]): + Output only. A list of measurements that are strictly + lexicographically ordered by their induced tuples (steps, + elapsed_duration). These are used for early stopping + computations. start_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Time when the Trial was started. end_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Time when the Trial's status changed to ``SUCCEEDED`` or ``INFEASIBLE``. + client_id (str): + Output only. The identifier of the client that originally + requested this Trial. Each client is identified by a unique + client_id. When a client asks for a suggestion, Vizier will + assign it a Trial. The client should evaluate the Trial, + complete it, and report back to Vizier. If suggestion is + asked again by same client_id before the Trial is completed, + the same Trial will be returned. Multiple clients with + different client_ids can ask for suggestions simultaneously, + each of them will get their own Trial. + infeasible_reason (str): + Output only. A human readable string describing why the + Trial is infeasible. This is set only if Trial state is + ``INFEASIBLE``. custom_job (str): Output only. The CustomJob name linked to the Trial. 
It's set for a HyperparameterTuningJob's
@@ -79,12 +102,16 @@ class Parameter(proto.Message):
         parameter_id = proto.Field(proto.STRING, number=1,)
         value = proto.Field(proto.MESSAGE, number=2, message=struct_pb2.Value,)

+    name = proto.Field(proto.STRING, number=1,)
     id = proto.Field(proto.STRING, number=2,)
     state = proto.Field(proto.ENUM, number=3, enum=State,)
     parameters = proto.RepeatedField(proto.MESSAGE, number=4, message=Parameter,)
     final_measurement = proto.Field(proto.MESSAGE, number=5, message="Measurement",)
+    measurements = proto.RepeatedField(proto.MESSAGE, number=6, message="Measurement",)
     start_time = proto.Field(proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,)
     end_time = proto.Field(proto.MESSAGE, number=8, message=timestamp_pb2.Timestamp,)
+    client_id = proto.Field(proto.STRING, number=9,)
+    infeasible_reason = proto.Field(proto.STRING, number=10,)
     custom_job = proto.Field(proto.STRING, number=11,)
@@ -203,10 +230,18 @@ class DoubleValueSpec(proto.Message):
             max_value (float):
                 Required. Inclusive maximum value of the
                 parameter.
+            default_value (float):
+                A default value for a ``DOUBLE`` parameter that is assumed
+                to be a relatively good starting point. Unset value signals
+                that there is no offered starting point.
+
+                Currently only supported by the Vizier service. Not
+                supported by HyperparameterTuningJob or TrainingPipeline.
         """

         min_value = proto.Field(proto.DOUBLE, number=1,)
         max_value = proto.Field(proto.DOUBLE, number=2,)
+        default_value = proto.Field(proto.DOUBLE, number=4, optional=True,)

     class IntegerValueSpec(proto.Message):
         r"""Value specification for a parameter in ``INTEGER`` type.
@@ -217,19 +252,35 @@ class IntegerValueSpec(proto.Message):
             max_value (int):
                 Required. Inclusive maximum value of the
                 parameter.
+            default_value (int):
+                A default value for an ``INTEGER`` parameter that is assumed
+                to be a relatively good starting point. Unset value signals
+                that there is no offered starting point.
+
+                Currently only supported by the Vizier service. Not
+                supported by HyperparameterTuningJob or TrainingPipeline.
         """

         min_value = proto.Field(proto.INT64, number=1,)
         max_value = proto.Field(proto.INT64, number=2,)
+        default_value = proto.Field(proto.INT64, number=4, optional=True,)

     class CategoricalValueSpec(proto.Message):
         r"""Value specification for a parameter in ``CATEGORICAL`` type.
         Attributes:
             values (Sequence[str]):
                 Required. The list of possible categories.
+            default_value (str):
+                A default value for a ``CATEGORICAL`` parameter that is
+                assumed to be a relatively good starting point. Unset value
+                signals that there is no offered starting point.
+
+                Currently only supported by the Vizier service. Not
+                supported by HyperparameterTuningJob or TrainingPipeline.
         """

         values = proto.RepeatedField(proto.STRING, number=1,)
+        default_value = proto.Field(proto.STRING, number=3, optional=True,)

     class DiscreteValueSpec(proto.Message):
         r"""Value specification for a parameter in ``DISCRETE`` type.
@@ -241,9 +292,18 @@ class DiscreteValueSpec(proto.Message):
                 might have possible settings of 1.5, 2.5, and 4.0. This list
                 should not contain more than 1,000 values.
+            default_value (float):
+                A default value for a ``DISCRETE`` parameter that is assumed
+                to be a relatively good starting point. Unset value signals
+                that there is no offered starting point. It automatically
+                rounds to the nearest feasible discrete point.
+
+                Currently only supported by the Vizier service. Not
+                supported by HyperparameterTuningJob or TrainingPipeline.
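+
+            A sketch of a fully specified ``DISCRETE`` parameter, reusing
+            the example values above (assuming the
+            ``StudySpec.ParameterSpec`` nesting used in this module)::
+
+                spec = StudySpec.ParameterSpec.DiscreteValueSpec(
+                    values=[1.5, 2.5, 4.0], default_value=2.5,
+                )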
""" values = proto.RepeatedField(proto.DOUBLE, number=1,) + default_value = proto.Field(proto.DOUBLE, number=3, optional=True,) class ConditionalParameterSpec(proto.Message): r"""Represents a parameter spec with condition from its parent @@ -376,6 +436,9 @@ class Measurement(proto.Message): suggested hyperparameter values. Attributes: + elapsed_duration (google.protobuf.duration_pb2.Duration): + Output only. Time that the Trial has been + running at the point of this Measurement. step_count (int): Output only. The number of steps the machine learning model has been trained for. Must be @@ -400,6 +463,9 @@ class Metric(proto.Message): metric_id = proto.Field(proto.STRING, number=1,) value = proto.Field(proto.DOUBLE, number=2,) + elapsed_duration = proto.Field( + proto.MESSAGE, number=1, message=duration_pb2.Duration, + ) step_count = proto.Field(proto.INT64, number=2,) metrics = proto.RepeatedField(proto.MESSAGE, number=3, message=Metric,) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/value.py b/google/cloud/aiplatform_v1/types/value.py similarity index 69% rename from owl-bot-staging/v1/google/cloud/aiplatform_v1/types/value.py rename to google/cloud/aiplatform_v1/types/value.py index 48c1092c1e..acc5f50517 100644 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/value.py +++ b/google/cloud/aiplatform_v1/types/value.py @@ -16,12 +16,7 @@ import proto # type: ignore -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'Value', - }, -) +__protobuf__ = proto.module(package="google.cloud.aiplatform.v1", manifest={"Value",},) class Value(proto.Message): @@ -35,21 +30,9 @@ class Value(proto.Message): A string value. """ - int_value = proto.Field( - proto.INT64, - number=1, - oneof='value', - ) - double_value = proto.Field( - proto.DOUBLE, - number=2, - oneof='value', - ) - string_value = proto.Field( - proto.STRING, - number=3, - oneof='value', - ) + int_value = proto.Field(proto.INT64, number=1, oneof="value",) + double_value = proto.Field(proto.DOUBLE, number=2, oneof="value",) + string_value = proto.Field(proto.STRING, number=3, oneof="value",) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py index 4736076c68..47d85e73f2 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/client.py @@ -179,19 +179,16 @@ def parse_annotated_dataset_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def dataset_path(project: str, location: str, dataset: str,) -> str: + def dataset_path(project: str, dataset: str,) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( - project=project, location=location, dataset=dataset, + return "projects/{project}/datasets/{dataset}".format( + project=project, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", - path, - ) + m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) return m.groupdict() if m else {} @staticmethod @@ -211,16 +208,19 @@ def parse_dataset_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def dataset_path(project: str, dataset: str,) -> 
+    def dataset_path(project: str, location: str, dataset: str,) -> str:
         """Returns a fully-qualified dataset string."""
-        return "projects/{project}/datasets/{dataset}".format(
-            project=project, dataset=dataset,
+        return "projects/{project}/locations/{location}/datasets/{dataset}".format(
+            project=project, location=location, dataset=dataset,
         )

     @staticmethod
     def parse_dataset_path(path: str) -> Dict[str, str]:
         """Parses a dataset path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path)
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$",
+            path,
+        )
         return m.groupdict() if m else {}

     @staticmethod
diff --git a/owl-bot-staging/v1/.coveragerc b/owl-bot-staging/v1/.coveragerc
deleted file mode 100644
index 01d28d4b2c..0000000000
--- a/owl-bot-staging/v1/.coveragerc
+++ /dev/null
@@ -1,17 +0,0 @@
-[run]
-branch = True
-
-[report]
-show_missing = True
-omit =
-    google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py
-exclude_lines =
-    # Re-enable the standard pragma
-    pragma: NO COVER
-    # Ignore debug-only repr
-    def __repr__
-    # Ignore pkg_resources exceptions.
-    # This is added at the module level as a safeguard for if someone
-    # generates the code and tries to run it without pip installing. This
-    # makes it virtually impossible to test properly.
-    except pkg_resources.DistributionNotFound
diff --git a/owl-bot-staging/v1/MANIFEST.in b/owl-bot-staging/v1/MANIFEST.in
deleted file mode 100644
index d55f1f202e..0000000000
--- a/owl-bot-staging/v1/MANIFEST.in
+++ /dev/null
@@ -1,2 +0,0 @@
-recursive-include google/cloud/aiplatform/v1/schema/trainingjob/definition *.py
-recursive-include google/cloud/aiplatform/v1/schema/trainingjob/definition_v1 *.py
diff --git a/owl-bot-staging/v1/README.rst b/owl-bot-staging/v1/README.rst
deleted file mode 100644
index ad49c55e02..0000000000
--- a/owl-bot-staging/v1/README.rst
+++ /dev/null
@@ -1,49 +0,0 @@
-Python Client for Google Cloud Aiplatform V1 Schema Trainingjob Definition API
-=================================================
-
-Quick Start
------------
-
-In order to use this library, you first need to go through the following steps:
-
-1. `Select or create a Cloud Platform project.`_
-2. `Enable billing for your project.`_
-3. Enable the Google Cloud Aiplatform V1 Schema Trainingjob Definition API.
-4. `Setup Authentication.`_
-
-.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project
-.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project
-.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html
-
-Installation
-~~~~~~~~~~~~
-
-Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to
-create isolated Python environments. The basic problem it addresses is one of
-dependencies and versions, and indirectly permissions.
-
-With `virtualenv`_, it's possible to install this library without needing system
-install permissions, and without clashing with the installed system
-dependencies.
-
-.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/
-
-
-Mac/Linux
-^^^^^^^^^
-
-.. code-block:: console
-
-    python3 -m venv <your-env>
-    source <your-env>/bin/activate
-    <your-env>/bin/pip install /path/to/library
-
-
-Windows
-^^^^^^^
-
-.. code-block:: console
code-block:: console - - python3 -m venv <your-env> - <your-env>\Scripts\activate - <your-env>\Scripts\pip.exe install \path\to\library diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/dataset_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/dataset_service.rst deleted file mode 100644 index 79ddc4623f..0000000000 --- a/owl-bot-staging/v1/docs/aiplatform_v1/dataset_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -DatasetService --------------------------------- - -.. automodule:: google.cloud.aiplatform_v1.services.dataset_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1.services.dataset_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/endpoint_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/endpoint_service.rst deleted file mode 100644 index 3b900f851e..0000000000 --- a/owl-bot-staging/v1/docs/aiplatform_v1/endpoint_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -EndpointService ---------------------------------- - -.. automodule:: google.cloud.aiplatform_v1.services.endpoint_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1.services.endpoint_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/job_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/job_service.rst deleted file mode 100644 index 6afcbbb4d0..0000000000 --- a/owl-bot-staging/v1/docs/aiplatform_v1/job_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -JobService ----------------------------- - -.. automodule:: google.cloud.aiplatform_v1.services.job_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1.services.job_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/migration_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/migration_service.rst deleted file mode 100644 index ac0a5fb3aa..0000000000 --- a/owl-bot-staging/v1/docs/aiplatform_v1/migration_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -MigrationService ----------------------------------- - -.. automodule:: google.cloud.aiplatform_v1.services.migration_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1.services.migration_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/model_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/model_service.rst deleted file mode 100644 index 8baab43cbc..0000000000 --- a/owl-bot-staging/v1/docs/aiplatform_v1/model_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -ModelService ------------------------------- - -.. automodule:: google.cloud.aiplatform_v1.services.model_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1.services.model_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/pipeline_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/pipeline_service.rst deleted file mode 100644 index bbf6b32092..0000000000 --- a/owl-bot-staging/v1/docs/aiplatform_v1/pipeline_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -PipelineService ---------------------------------- - -.. automodule:: google.cloud.aiplatform_v1.services.pipeline_service - :members: - :inherited-members: - -.. 
automodule:: google.cloud.aiplatform_v1.services.pipeline_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/prediction_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/prediction_service.rst deleted file mode 100644 index fdda504879..0000000000 --- a/owl-bot-staging/v1/docs/aiplatform_v1/prediction_service.rst +++ /dev/null @@ -1,6 +0,0 @@ -PredictionService ------------------------------------ - -.. automodule:: google.cloud.aiplatform_v1.services.prediction_service - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/services.rst b/owl-bot-staging/v1/docs/aiplatform_v1/services.rst deleted file mode 100644 index fd5a8c9aa7..0000000000 --- a/owl-bot-staging/v1/docs/aiplatform_v1/services.rst +++ /dev/null @@ -1,13 +0,0 @@ -Services for Google Cloud Aiplatform v1 API -=========================================== -.. toctree:: - :maxdepth: 2 - - dataset_service - endpoint_service - job_service - migration_service - model_service - pipeline_service - prediction_service - specialist_pool_service diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/specialist_pool_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/specialist_pool_service.rst deleted file mode 100644 index 4a6f288894..0000000000 --- a/owl-bot-staging/v1/docs/aiplatform_v1/specialist_pool_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -SpecialistPoolService ---------------------------------------- - -.. automodule:: google.cloud.aiplatform_v1.services.specialist_pool_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1.services.specialist_pool_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/types.rst b/owl-bot-staging/v1/docs/aiplatform_v1/types.rst deleted file mode 100644 index ad4454843f..0000000000 --- a/owl-bot-staging/v1/docs/aiplatform_v1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Cloud Aiplatform v1 API -======================================== - -.. automodule:: google.cloud.aiplatform_v1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1/docs/conf.py b/owl-bot-staging/v1/docs/conf.py deleted file mode 100644 index 4e03a7096d..0000000000 --- a/owl-bot-staging/v1/docs/conf.py +++ /dev/null @@ -1,376 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# -# google-cloud-aiplatform-v1-schema-trainingjob-definition documentation build configuration file -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys -import os -import shlex - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. 
If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath("..")) - -__version__ = "0.1.0" - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "1.6.3" - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.autosummary", - "sphinx.ext.intersphinx", - "sphinx.ext.coverage", - "sphinx.ext.napoleon", - "sphinx.ext.todo", - "sphinx.ext.viewcode", -] - -# autodoc/autosummary flags -autoclass_content = "both" -autodoc_default_flags = ["members"] -autosummary_generate = True - - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# Allow markdown includes (so releases.md can include CHANGELOG.md) -# http://www.sphinx-doc.org/en/master/markdown.html -source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -source_suffix = [".rst", ".md"] - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = "index" - -# General information about the project. -project = u"google-cloud-aiplatform-v1-schema-trainingjob-definition" -copyright = u"2020, Google, LLC" -author = u"Google APIs" # TODO: autogenerate this bit - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The full version, including alpha/beta/rc tags. -release = __version__ -# The short X.Y version. -version = ".".join(release.split(".")[0:2]) - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ["_build"] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. 
-todo_include_todos = True - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = "alabaster" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = { - "description": "Google Cloud Aiplatform V1 Schema Trainingjob Client Libraries for Python", - "github_user": "googleapis", - "github_repo": "google-cloud-python", - "github_banner": True, - "font_family": "'Roboto', Georgia, sans", - "head_font_family": "'Roboto', Georgia, serif", - "code_font_family": "'Roboto Mono', 'Consolas', monospace", -} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# "<project> v<release> documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a <link> tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. 
-# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -# html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# Now only 'ja' uses this config value -# html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -# html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = "google-cloud-aiplatform-v1-schema-trainingjob-definition-doc" - -# -- Options for warnings ------------------------------------------------------ - - -suppress_warnings = [ - # Temporarily suppress this to avoid "more than one target found for - # cross-reference" warning, which are intractable for us to avoid while in - # a mono-repo. - # See https://github.com/sphinx-doc/sphinx/blob - # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 - "ref.python" -] - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - # 'preamble': '', - # Latex figure (float) alignment - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ( - master_doc, - "google-cloud-aiplatform-v1-schema-trainingjob-definition.tex", - u"google-cloud-aiplatform-v1-schema-trainingjob-definition Documentation", - author, - "manual", - ) -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ( - master_doc, - "google-cloud-aiplatform-v1-schema-trainingjob-definition", - u"Google Cloud Aiplatform V1 Schema Trainingjob Definition Documentation", - [author], - 1, - ) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - master_doc, - "google-cloud-aiplatform-v1-schema-trainingjob-definition", - u"google-cloud-aiplatform-v1-schema-trainingjob-definition Documentation", - author, - "google-cloud-aiplatform-v1-schema-trainingjob-definition", - "GAPIC library for Google Cloud Aiplatform V1 Schema Trainingjob Definition API", - "APIs", - ) -] - -# Documents to append as an appendix to all manuals. 
-# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = { - "python": ("http://python.readthedocs.org/en/latest/", None), - "gax": ("https://gax-python.readthedocs.org/en/latest/", None), - "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), - "grpc": ("https://grpc.io/grpc/python/", None), - "requests": ("http://requests.kennethreitz.org/en/stable/", None), - "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), - "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), -} - - -# Napoleon settings -napoleon_google_docstring = True -napoleon_numpy_docstring = True -napoleon_include_private_with_doc = False -napoleon_include_special_with_doc = True -napoleon_use_admonition_for_examples = False -napoleon_use_admonition_for_notes = False -napoleon_use_admonition_for_references = False -napoleon_use_ivar = False -napoleon_use_param = True -napoleon_use_rtype = True diff --git a/owl-bot-staging/v1/docs/definition_v1/services.rst b/owl-bot-staging/v1/docs/definition_v1/services.rst deleted file mode 100644 index ba6b1940e8..0000000000 --- a/owl-bot-staging/v1/docs/definition_v1/services.rst +++ /dev/null @@ -1,4 +0,0 @@ -Services for Google Cloud Aiplatform V1 Schema Trainingjob Definition v1 API -============================================================================ -.. toctree:: - :maxdepth: 2 diff --git a/owl-bot-staging/v1/docs/definition_v1/types.rst b/owl-bot-staging/v1/docs/definition_v1/types.rst deleted file mode 100644 index a1df2bce25..0000000000 --- a/owl-bot-staging/v1/docs/definition_v1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Cloud Aiplatform V1 Schema Trainingjob Definition v1 API -========================================================================= - -.. automodule:: google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1/docs/index.rst b/owl-bot-staging/v1/docs/index.rst deleted file mode 100644 index ad6ae57609..0000000000 --- a/owl-bot-staging/v1/docs/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -API Reference -------------- -.. toctree:: - :maxdepth: 2 - - definition_v1/services - definition_v1/types diff --git a/owl-bot-staging/v1/docs/instance_v1/services.rst b/owl-bot-staging/v1/docs/instance_v1/services.rst deleted file mode 100644 index 50c011c69a..0000000000 --- a/owl-bot-staging/v1/docs/instance_v1/services.rst +++ /dev/null @@ -1,4 +0,0 @@ -Services for Google Cloud Aiplatform V1 Schema Predict Instance v1 API -====================================================================== -.. toctree:: - :maxdepth: 2 diff --git a/owl-bot-staging/v1/docs/instance_v1/types.rst b/owl-bot-staging/v1/docs/instance_v1/types.rst deleted file mode 100644 index 564ab013ee..0000000000 --- a/owl-bot-staging/v1/docs/instance_v1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Cloud Aiplatform V1 Schema Predict Instance v1 API -=================================================================== - -.. 
automodule:: google.cloud.aiplatform.v1.schema.predict.instance_v1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1/docs/params_v1/services.rst b/owl-bot-staging/v1/docs/params_v1/services.rst deleted file mode 100644 index bf08ea6e98..0000000000 --- a/owl-bot-staging/v1/docs/params_v1/services.rst +++ /dev/null @@ -1,4 +0,0 @@ -Services for Google Cloud Aiplatform V1 Schema Predict Params v1 API -==================================================================== -.. toctree:: - :maxdepth: 2 diff --git a/owl-bot-staging/v1/docs/params_v1/types.rst b/owl-bot-staging/v1/docs/params_v1/types.rst deleted file mode 100644 index 956ef5224d..0000000000 --- a/owl-bot-staging/v1/docs/params_v1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Cloud Aiplatform V1 Schema Predict Params v1 API -================================================================= - -.. automodule:: google.cloud.aiplatform.v1.schema.predict.params_v1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1/docs/prediction_v1/services.rst b/owl-bot-staging/v1/docs/prediction_v1/services.rst deleted file mode 100644 index ad6f034387..0000000000 --- a/owl-bot-staging/v1/docs/prediction_v1/services.rst +++ /dev/null @@ -1,4 +0,0 @@ -Services for Google Cloud Aiplatform V1 Schema Predict Prediction v1 API -======================================================================== -.. toctree:: - :maxdepth: 2 diff --git a/owl-bot-staging/v1/docs/prediction_v1/types.rst b/owl-bot-staging/v1/docs/prediction_v1/types.rst deleted file mode 100644 index a97faf34de..0000000000 --- a/owl-bot-staging/v1/docs/prediction_v1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Cloud Aiplatform V1 Schema Predict Prediction v1 API -===================================================================== - -.. automodule:: google.cloud.aiplatform.v1.schema.predict.prediction_v1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/__init__.py deleted file mode 100644 index 8099ef5dfc..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/__init__.py +++ /dev/null @@ -1,389 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from google.cloud.aiplatform_v1.services.dataset_service.client import DatasetServiceClient -from google.cloud.aiplatform_v1.services.dataset_service.async_client import DatasetServiceAsyncClient -from google.cloud.aiplatform_v1.services.endpoint_service.client import EndpointServiceClient -from google.cloud.aiplatform_v1.services.endpoint_service.async_client import EndpointServiceAsyncClient -from google.cloud.aiplatform_v1.services.job_service.client import JobServiceClient -from google.cloud.aiplatform_v1.services.job_service.async_client import JobServiceAsyncClient -from google.cloud.aiplatform_v1.services.migration_service.client import MigrationServiceClient -from google.cloud.aiplatform_v1.services.migration_service.async_client import MigrationServiceAsyncClient -from google.cloud.aiplatform_v1.services.model_service.client import ModelServiceClient -from google.cloud.aiplatform_v1.services.model_service.async_client import ModelServiceAsyncClient -from google.cloud.aiplatform_v1.services.pipeline_service.client import PipelineServiceClient -from google.cloud.aiplatform_v1.services.pipeline_service.async_client import PipelineServiceAsyncClient -from google.cloud.aiplatform_v1.services.prediction_service.client import PredictionServiceClient -from google.cloud.aiplatform_v1.services.prediction_service.async_client import PredictionServiceAsyncClient -from google.cloud.aiplatform_v1.services.specialist_pool_service.client import SpecialistPoolServiceClient -from google.cloud.aiplatform_v1.services.specialist_pool_service.async_client import SpecialistPoolServiceAsyncClient - -from google.cloud.aiplatform_v1.types.accelerator_type import AcceleratorType -from google.cloud.aiplatform_v1.types.annotation import Annotation -from google.cloud.aiplatform_v1.types.annotation_spec import AnnotationSpec -from google.cloud.aiplatform_v1.types.artifact import Artifact -from google.cloud.aiplatform_v1.types.batch_prediction_job import BatchPredictionJob -from google.cloud.aiplatform_v1.types.completion_stats import CompletionStats -from google.cloud.aiplatform_v1.types.context import Context -from google.cloud.aiplatform_v1.types.custom_job import ContainerSpec -from google.cloud.aiplatform_v1.types.custom_job import CustomJob -from google.cloud.aiplatform_v1.types.custom_job import CustomJobSpec -from google.cloud.aiplatform_v1.types.custom_job import PythonPackageSpec -from google.cloud.aiplatform_v1.types.custom_job import Scheduling -from google.cloud.aiplatform_v1.types.custom_job import WorkerPoolSpec -from google.cloud.aiplatform_v1.types.data_item import DataItem -from google.cloud.aiplatform_v1.types.data_labeling_job import ActiveLearningConfig -from google.cloud.aiplatform_v1.types.data_labeling_job import DataLabelingJob -from google.cloud.aiplatform_v1.types.data_labeling_job import SampleConfig -from google.cloud.aiplatform_v1.types.data_labeling_job import TrainingConfig -from google.cloud.aiplatform_v1.types.dataset import Dataset -from google.cloud.aiplatform_v1.types.dataset import ExportDataConfig -from google.cloud.aiplatform_v1.types.dataset import ImportDataConfig -from google.cloud.aiplatform_v1.types.dataset_service import CreateDatasetOperationMetadata -from google.cloud.aiplatform_v1.types.dataset_service import CreateDatasetRequest -from google.cloud.aiplatform_v1.types.dataset_service import DeleteDatasetRequest -from google.cloud.aiplatform_v1.types.dataset_service import ExportDataOperationMetadata -from google.cloud.aiplatform_v1.types.dataset_service 
import ExportDataRequest -from google.cloud.aiplatform_v1.types.dataset_service import ExportDataResponse -from google.cloud.aiplatform_v1.types.dataset_service import GetAnnotationSpecRequest -from google.cloud.aiplatform_v1.types.dataset_service import GetDatasetRequest -from google.cloud.aiplatform_v1.types.dataset_service import ImportDataOperationMetadata -from google.cloud.aiplatform_v1.types.dataset_service import ImportDataRequest -from google.cloud.aiplatform_v1.types.dataset_service import ImportDataResponse -from google.cloud.aiplatform_v1.types.dataset_service import ListAnnotationsRequest -from google.cloud.aiplatform_v1.types.dataset_service import ListAnnotationsResponse -from google.cloud.aiplatform_v1.types.dataset_service import ListDataItemsRequest -from google.cloud.aiplatform_v1.types.dataset_service import ListDataItemsResponse -from google.cloud.aiplatform_v1.types.dataset_service import ListDatasetsRequest -from google.cloud.aiplatform_v1.types.dataset_service import ListDatasetsResponse -from google.cloud.aiplatform_v1.types.dataset_service import UpdateDatasetRequest -from google.cloud.aiplatform_v1.types.deployed_model_ref import DeployedModelRef -from google.cloud.aiplatform_v1.types.encryption_spec import EncryptionSpec -from google.cloud.aiplatform_v1.types.endpoint import DeployedModel -from google.cloud.aiplatform_v1.types.endpoint import Endpoint -from google.cloud.aiplatform_v1.types.endpoint_service import CreateEndpointOperationMetadata -from google.cloud.aiplatform_v1.types.endpoint_service import CreateEndpointRequest -from google.cloud.aiplatform_v1.types.endpoint_service import DeleteEndpointRequest -from google.cloud.aiplatform_v1.types.endpoint_service import DeployModelOperationMetadata -from google.cloud.aiplatform_v1.types.endpoint_service import DeployModelRequest -from google.cloud.aiplatform_v1.types.endpoint_service import DeployModelResponse -from google.cloud.aiplatform_v1.types.endpoint_service import GetEndpointRequest -from google.cloud.aiplatform_v1.types.endpoint_service import ListEndpointsRequest -from google.cloud.aiplatform_v1.types.endpoint_service import ListEndpointsResponse -from google.cloud.aiplatform_v1.types.endpoint_service import UndeployModelOperationMetadata -from google.cloud.aiplatform_v1.types.endpoint_service import UndeployModelRequest -from google.cloud.aiplatform_v1.types.endpoint_service import UndeployModelResponse -from google.cloud.aiplatform_v1.types.endpoint_service import UpdateEndpointRequest -from google.cloud.aiplatform_v1.types.env_var import EnvVar -from google.cloud.aiplatform_v1.types.execution import Execution -from google.cloud.aiplatform_v1.types.hyperparameter_tuning_job import HyperparameterTuningJob -from google.cloud.aiplatform_v1.types.io import BigQueryDestination -from google.cloud.aiplatform_v1.types.io import BigQuerySource -from google.cloud.aiplatform_v1.types.io import ContainerRegistryDestination -from google.cloud.aiplatform_v1.types.io import GcsDestination -from google.cloud.aiplatform_v1.types.io import GcsSource -from google.cloud.aiplatform_v1.types.job_service import CancelBatchPredictionJobRequest -from google.cloud.aiplatform_v1.types.job_service import CancelCustomJobRequest -from google.cloud.aiplatform_v1.types.job_service import CancelDataLabelingJobRequest -from google.cloud.aiplatform_v1.types.job_service import CancelHyperparameterTuningJobRequest -from google.cloud.aiplatform_v1.types.job_service import CreateBatchPredictionJobRequest -from 
google.cloud.aiplatform_v1.types.job_service import CreateCustomJobRequest -from google.cloud.aiplatform_v1.types.job_service import CreateDataLabelingJobRequest -from google.cloud.aiplatform_v1.types.job_service import CreateHyperparameterTuningJobRequest -from google.cloud.aiplatform_v1.types.job_service import DeleteBatchPredictionJobRequest -from google.cloud.aiplatform_v1.types.job_service import DeleteCustomJobRequest -from google.cloud.aiplatform_v1.types.job_service import DeleteDataLabelingJobRequest -from google.cloud.aiplatform_v1.types.job_service import DeleteHyperparameterTuningJobRequest -from google.cloud.aiplatform_v1.types.job_service import GetBatchPredictionJobRequest -from google.cloud.aiplatform_v1.types.job_service import GetCustomJobRequest -from google.cloud.aiplatform_v1.types.job_service import GetDataLabelingJobRequest -from google.cloud.aiplatform_v1.types.job_service import GetHyperparameterTuningJobRequest -from google.cloud.aiplatform_v1.types.job_service import ListBatchPredictionJobsRequest -from google.cloud.aiplatform_v1.types.job_service import ListBatchPredictionJobsResponse -from google.cloud.aiplatform_v1.types.job_service import ListCustomJobsRequest -from google.cloud.aiplatform_v1.types.job_service import ListCustomJobsResponse -from google.cloud.aiplatform_v1.types.job_service import ListDataLabelingJobsRequest -from google.cloud.aiplatform_v1.types.job_service import ListDataLabelingJobsResponse -from google.cloud.aiplatform_v1.types.job_service import ListHyperparameterTuningJobsRequest -from google.cloud.aiplatform_v1.types.job_service import ListHyperparameterTuningJobsResponse -from google.cloud.aiplatform_v1.types.job_state import JobState -from google.cloud.aiplatform_v1.types.machine_resources import AutomaticResources -from google.cloud.aiplatform_v1.types.machine_resources import AutoscalingMetricSpec -from google.cloud.aiplatform_v1.types.machine_resources import BatchDedicatedResources -from google.cloud.aiplatform_v1.types.machine_resources import DedicatedResources -from google.cloud.aiplatform_v1.types.machine_resources import DiskSpec -from google.cloud.aiplatform_v1.types.machine_resources import MachineSpec -from google.cloud.aiplatform_v1.types.machine_resources import ResourcesConsumed -from google.cloud.aiplatform_v1.types.manual_batch_tuning_parameters import ManualBatchTuningParameters -from google.cloud.aiplatform_v1.types.migratable_resource import MigratableResource -from google.cloud.aiplatform_v1.types.migration_service import BatchMigrateResourcesOperationMetadata -from google.cloud.aiplatform_v1.types.migration_service import BatchMigrateResourcesRequest -from google.cloud.aiplatform_v1.types.migration_service import BatchMigrateResourcesResponse -from google.cloud.aiplatform_v1.types.migration_service import MigrateResourceRequest -from google.cloud.aiplatform_v1.types.migration_service import MigrateResourceResponse -from google.cloud.aiplatform_v1.types.migration_service import SearchMigratableResourcesRequest -from google.cloud.aiplatform_v1.types.migration_service import SearchMigratableResourcesResponse -from google.cloud.aiplatform_v1.types.model import Model -from google.cloud.aiplatform_v1.types.model import ModelContainerSpec -from google.cloud.aiplatform_v1.types.model import Port -from google.cloud.aiplatform_v1.types.model import PredictSchemata -from google.cloud.aiplatform_v1.types.model_evaluation import ModelEvaluation -from google.cloud.aiplatform_v1.types.model_evaluation_slice import 
ModelEvaluationSlice -from google.cloud.aiplatform_v1.types.model_service import DeleteModelRequest -from google.cloud.aiplatform_v1.types.model_service import ExportModelOperationMetadata -from google.cloud.aiplatform_v1.types.model_service import ExportModelRequest -from google.cloud.aiplatform_v1.types.model_service import ExportModelResponse -from google.cloud.aiplatform_v1.types.model_service import GetModelEvaluationRequest -from google.cloud.aiplatform_v1.types.model_service import GetModelEvaluationSliceRequest -from google.cloud.aiplatform_v1.types.model_service import GetModelRequest -from google.cloud.aiplatform_v1.types.model_service import ListModelEvaluationSlicesRequest -from google.cloud.aiplatform_v1.types.model_service import ListModelEvaluationSlicesResponse -from google.cloud.aiplatform_v1.types.model_service import ListModelEvaluationsRequest -from google.cloud.aiplatform_v1.types.model_service import ListModelEvaluationsResponse -from google.cloud.aiplatform_v1.types.model_service import ListModelsRequest -from google.cloud.aiplatform_v1.types.model_service import ListModelsResponse -from google.cloud.aiplatform_v1.types.model_service import UpdateModelRequest -from google.cloud.aiplatform_v1.types.model_service import UploadModelOperationMetadata -from google.cloud.aiplatform_v1.types.model_service import UploadModelRequest -from google.cloud.aiplatform_v1.types.model_service import UploadModelResponse -from google.cloud.aiplatform_v1.types.operation import DeleteOperationMetadata -from google.cloud.aiplatform_v1.types.operation import GenericOperationMetadata -from google.cloud.aiplatform_v1.types.pipeline_job import PipelineJob -from google.cloud.aiplatform_v1.types.pipeline_job import PipelineJobDetail -from google.cloud.aiplatform_v1.types.pipeline_job import PipelineTaskDetail -from google.cloud.aiplatform_v1.types.pipeline_job import PipelineTaskExecutorDetail -from google.cloud.aiplatform_v1.types.pipeline_service import CancelPipelineJobRequest -from google.cloud.aiplatform_v1.types.pipeline_service import CancelTrainingPipelineRequest -from google.cloud.aiplatform_v1.types.pipeline_service import CreatePipelineJobRequest -from google.cloud.aiplatform_v1.types.pipeline_service import CreateTrainingPipelineRequest -from google.cloud.aiplatform_v1.types.pipeline_service import DeletePipelineJobRequest -from google.cloud.aiplatform_v1.types.pipeline_service import DeleteTrainingPipelineRequest -from google.cloud.aiplatform_v1.types.pipeline_service import GetPipelineJobRequest -from google.cloud.aiplatform_v1.types.pipeline_service import GetTrainingPipelineRequest -from google.cloud.aiplatform_v1.types.pipeline_service import ListPipelineJobsRequest -from google.cloud.aiplatform_v1.types.pipeline_service import ListPipelineJobsResponse -from google.cloud.aiplatform_v1.types.pipeline_service import ListTrainingPipelinesRequest -from google.cloud.aiplatform_v1.types.pipeline_service import ListTrainingPipelinesResponse -from google.cloud.aiplatform_v1.types.pipeline_state import PipelineState -from google.cloud.aiplatform_v1.types.prediction_service import PredictRequest -from google.cloud.aiplatform_v1.types.prediction_service import PredictResponse -from google.cloud.aiplatform_v1.types.specialist_pool import SpecialistPool -from google.cloud.aiplatform_v1.types.specialist_pool_service import CreateSpecialistPoolOperationMetadata -from google.cloud.aiplatform_v1.types.specialist_pool_service import CreateSpecialistPoolRequest -from 
google.cloud.aiplatform_v1.types.specialist_pool_service import DeleteSpecialistPoolRequest -from google.cloud.aiplatform_v1.types.specialist_pool_service import GetSpecialistPoolRequest -from google.cloud.aiplatform_v1.types.specialist_pool_service import ListSpecialistPoolsRequest -from google.cloud.aiplatform_v1.types.specialist_pool_service import ListSpecialistPoolsResponse -from google.cloud.aiplatform_v1.types.specialist_pool_service import UpdateSpecialistPoolOperationMetadata -from google.cloud.aiplatform_v1.types.specialist_pool_service import UpdateSpecialistPoolRequest -from google.cloud.aiplatform_v1.types.study import Measurement -from google.cloud.aiplatform_v1.types.study import StudySpec -from google.cloud.aiplatform_v1.types.study import Trial -from google.cloud.aiplatform_v1.types.training_pipeline import FilterSplit -from google.cloud.aiplatform_v1.types.training_pipeline import FractionSplit -from google.cloud.aiplatform_v1.types.training_pipeline import InputDataConfig -from google.cloud.aiplatform_v1.types.training_pipeline import PredefinedSplit -from google.cloud.aiplatform_v1.types.training_pipeline import TimestampSplit -from google.cloud.aiplatform_v1.types.training_pipeline import TrainingPipeline -from google.cloud.aiplatform_v1.types.user_action_reference import UserActionReference -from google.cloud.aiplatform_v1.types.value import Value - -__all__ = ('DatasetServiceClient', - 'DatasetServiceAsyncClient', - 'EndpointServiceClient', - 'EndpointServiceAsyncClient', - 'JobServiceClient', - 'JobServiceAsyncClient', - 'MigrationServiceClient', - 'MigrationServiceAsyncClient', - 'ModelServiceClient', - 'ModelServiceAsyncClient', - 'PipelineServiceClient', - 'PipelineServiceAsyncClient', - 'PredictionServiceClient', - 'PredictionServiceAsyncClient', - 'SpecialistPoolServiceClient', - 'SpecialistPoolServiceAsyncClient', - 'AcceleratorType', - 'Annotation', - 'AnnotationSpec', - 'Artifact', - 'BatchPredictionJob', - 'CompletionStats', - 'Context', - 'ContainerSpec', - 'CustomJob', - 'CustomJobSpec', - 'PythonPackageSpec', - 'Scheduling', - 'WorkerPoolSpec', - 'DataItem', - 'ActiveLearningConfig', - 'DataLabelingJob', - 'SampleConfig', - 'TrainingConfig', - 'Dataset', - 'ExportDataConfig', - 'ImportDataConfig', - 'CreateDatasetOperationMetadata', - 'CreateDatasetRequest', - 'DeleteDatasetRequest', - 'ExportDataOperationMetadata', - 'ExportDataRequest', - 'ExportDataResponse', - 'GetAnnotationSpecRequest', - 'GetDatasetRequest', - 'ImportDataOperationMetadata', - 'ImportDataRequest', - 'ImportDataResponse', - 'ListAnnotationsRequest', - 'ListAnnotationsResponse', - 'ListDataItemsRequest', - 'ListDataItemsResponse', - 'ListDatasetsRequest', - 'ListDatasetsResponse', - 'UpdateDatasetRequest', - 'DeployedModelRef', - 'EncryptionSpec', - 'DeployedModel', - 'Endpoint', - 'CreateEndpointOperationMetadata', - 'CreateEndpointRequest', - 'DeleteEndpointRequest', - 'DeployModelOperationMetadata', - 'DeployModelRequest', - 'DeployModelResponse', - 'GetEndpointRequest', - 'ListEndpointsRequest', - 'ListEndpointsResponse', - 'UndeployModelOperationMetadata', - 'UndeployModelRequest', - 'UndeployModelResponse', - 'UpdateEndpointRequest', - 'EnvVar', - 'Execution', - 'HyperparameterTuningJob', - 'BigQueryDestination', - 'BigQuerySource', - 'ContainerRegistryDestination', - 'GcsDestination', - 'GcsSource', - 'CancelBatchPredictionJobRequest', - 'CancelCustomJobRequest', - 'CancelDataLabelingJobRequest', - 'CancelHyperparameterTuningJobRequest', - 'CreateBatchPredictionJobRequest', - 
'CreateCustomJobRequest', - 'CreateDataLabelingJobRequest', - 'CreateHyperparameterTuningJobRequest', - 'DeleteBatchPredictionJobRequest', - 'DeleteCustomJobRequest', - 'DeleteDataLabelingJobRequest', - 'DeleteHyperparameterTuningJobRequest', - 'GetBatchPredictionJobRequest', - 'GetCustomJobRequest', - 'GetDataLabelingJobRequest', - 'GetHyperparameterTuningJobRequest', - 'ListBatchPredictionJobsRequest', - 'ListBatchPredictionJobsResponse', - 'ListCustomJobsRequest', - 'ListCustomJobsResponse', - 'ListDataLabelingJobsRequest', - 'ListDataLabelingJobsResponse', - 'ListHyperparameterTuningJobsRequest', - 'ListHyperparameterTuningJobsResponse', - 'JobState', - 'AutomaticResources', - 'AutoscalingMetricSpec', - 'BatchDedicatedResources', - 'DedicatedResources', - 'DiskSpec', - 'MachineSpec', - 'ResourcesConsumed', - 'ManualBatchTuningParameters', - 'MigratableResource', - 'BatchMigrateResourcesOperationMetadata', - 'BatchMigrateResourcesRequest', - 'BatchMigrateResourcesResponse', - 'MigrateResourceRequest', - 'MigrateResourceResponse', - 'SearchMigratableResourcesRequest', - 'SearchMigratableResourcesResponse', - 'Model', - 'ModelContainerSpec', - 'Port', - 'PredictSchemata', - 'ModelEvaluation', - 'ModelEvaluationSlice', - 'DeleteModelRequest', - 'ExportModelOperationMetadata', - 'ExportModelRequest', - 'ExportModelResponse', - 'GetModelEvaluationRequest', - 'GetModelEvaluationSliceRequest', - 'GetModelRequest', - 'ListModelEvaluationSlicesRequest', - 'ListModelEvaluationSlicesResponse', - 'ListModelEvaluationsRequest', - 'ListModelEvaluationsResponse', - 'ListModelsRequest', - 'ListModelsResponse', - 'UpdateModelRequest', - 'UploadModelOperationMetadata', - 'UploadModelRequest', - 'UploadModelResponse', - 'DeleteOperationMetadata', - 'GenericOperationMetadata', - 'PipelineJob', - 'PipelineJobDetail', - 'PipelineTaskDetail', - 'PipelineTaskExecutorDetail', - 'CancelPipelineJobRequest', - 'CancelTrainingPipelineRequest', - 'CreatePipelineJobRequest', - 'CreateTrainingPipelineRequest', - 'DeletePipelineJobRequest', - 'DeleteTrainingPipelineRequest', - 'GetPipelineJobRequest', - 'GetTrainingPipelineRequest', - 'ListPipelineJobsRequest', - 'ListPipelineJobsResponse', - 'ListTrainingPipelinesRequest', - 'ListTrainingPipelinesResponse', - 'PipelineState', - 'PredictRequest', - 'PredictResponse', - 'SpecialistPool', - 'CreateSpecialistPoolOperationMetadata', - 'CreateSpecialistPoolRequest', - 'DeleteSpecialistPoolRequest', - 'GetSpecialistPoolRequest', - 'ListSpecialistPoolsRequest', - 'ListSpecialistPoolsResponse', - 'UpdateSpecialistPoolOperationMetadata', - 'UpdateSpecialistPoolRequest', - 'Measurement', - 'StudySpec', - 'Trial', - 'FilterSplit', - 'FractionSplit', - 'InputDataConfig', - 'PredefinedSplit', - 'TimestampSplit', - 'TrainingPipeline', - 'UserActionReference', - 'Value', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/py.typed deleted file mode 100644 index 228f1c51c6..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform package uses inline types. 
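Note for reviewers: the staging `google/cloud/aiplatform/__init__.py` deleted above is a pure re-export shim; every client and type it names is also exported by the canonical `google.cloud.aiplatform_v1` package, so the public import surface is unchanged. A minimal sketch of the equivalent usage against the canonical package, assuming this patch is applied (the field values are illustrative, and the timedelta marshaling reflects proto-plus's standard handling of `google.protobuf.Duration` fields):

    # Sketch only: the same symbols, imported from the canonical package.
    import datetime

    from google.cloud.aiplatform_v1 import DatasetServiceClient  # clients still resolve
    from google.cloud.aiplatform_v1.types.study import Measurement

    # Measurement gains the elapsed_duration field earlier in this patch
    # (proto.MESSAGE, number=1, message=duration_pb2.Duration); proto-plus
    # surfaces Duration-typed fields as datetime.timedelta in Python.
    measurement = Measurement(step_count=1000)
    measurement.elapsed_duration = datetime.timedelta(seconds=90)
    assert measurement.elapsed_duration == datetime.timedelta(seconds=90)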
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py deleted file mode 100644 index 41d6704c1f..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.image_classification import ImageClassificationPredictionInstance -from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.image_object_detection import ImageObjectDetectionPredictionInstance -from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.image_segmentation import ImageSegmentationPredictionInstance -from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.text_classification import TextClassificationPredictionInstance -from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.text_extraction import TextExtractionPredictionInstance -from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.text_sentiment import TextSentimentPredictionInstance -from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.video_action_recognition import VideoActionRecognitionPredictionInstance -from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.video_classification import VideoClassificationPredictionInstance -from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.video_object_tracking import VideoObjectTrackingPredictionInstance - -__all__ = ('ImageClassificationPredictionInstance', - 'ImageObjectDetectionPredictionInstance', - 'ImageSegmentationPredictionInstance', - 'TextClassificationPredictionInstance', - 'TextExtractionPredictionInstance', - 'TextSentimentPredictionInstance', - 'VideoActionRecognitionPredictionInstance', - 'VideoClassificationPredictionInstance', - 'VideoObjectTrackingPredictionInstance', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/py.typed deleted file mode 100644 index f70e7f605a..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1-schema-predict-instance package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py deleted file mode 100644 index 41ab5407a7..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -from .types.image_classification import ImageClassificationPredictionInstance -from .types.image_object_detection import ImageObjectDetectionPredictionInstance -from .types.image_segmentation import ImageSegmentationPredictionInstance -from .types.text_classification import TextClassificationPredictionInstance -from .types.text_extraction import TextExtractionPredictionInstance -from .types.text_sentiment import TextSentimentPredictionInstance -from .types.video_action_recognition import VideoActionRecognitionPredictionInstance -from .types.video_classification import VideoClassificationPredictionInstance -from .types.video_object_tracking import VideoObjectTrackingPredictionInstance - -__all__ = ( -'ImageClassificationPredictionInstance', -'ImageObjectDetectionPredictionInstance', -'ImageSegmentationPredictionInstance', -'TextClassificationPredictionInstance', -'TextExtractionPredictionInstance', -'TextSentimentPredictionInstance', -'VideoActionRecognitionPredictionInstance', -'VideoClassificationPredictionInstance', -'VideoObjectTrackingPredictionInstance', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_metadata.json b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_metadata.json deleted file mode 100644 index 0ae909d6ea..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_metadata.json +++ /dev/null @@ -1,7 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.aiplatform.v1.schema.predict.instance_v1", - "protoPackage": "google.cloud.aiplatform.v1.schema.predict.instance", - "schema": "1.0" -} diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/py.typed deleted file mode 100644 index f70e7f605a..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1-schema-predict-instance package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/services/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/services/__init__.py deleted file mode 100644 index 4de65971c2..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py deleted file mode 100644 index 80a5332604..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .image_classification import ( - ImageClassificationPredictionInstance, -) -from .image_object_detection import ( - ImageObjectDetectionPredictionInstance, -) -from .image_segmentation import ( - ImageSegmentationPredictionInstance, -) -from .text_classification import ( - TextClassificationPredictionInstance, -) -from .text_extraction import ( - TextExtractionPredictionInstance, -) -from .text_sentiment import ( - TextSentimentPredictionInstance, -) -from .video_action_recognition import ( - VideoActionRecognitionPredictionInstance, -) -from .video_classification import ( - VideoClassificationPredictionInstance, -) -from .video_object_tracking import ( - VideoObjectTrackingPredictionInstance, -) - -__all__ = ( - 'ImageClassificationPredictionInstance', - 'ImageObjectDetectionPredictionInstance', - 'ImageSegmentationPredictionInstance', - 'TextClassificationPredictionInstance', - 'TextExtractionPredictionInstance', - 'TextSentimentPredictionInstance', - 'VideoActionRecognitionPredictionInstance', - 'VideoClassificationPredictionInstance', - 'VideoObjectTrackingPredictionInstance', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py deleted file mode 100644 index 02e95ba9c7..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py +++ /dev/null @@ -1,55 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.instance', - manifest={ - 'ImageClassificationPredictionInstance', - }, -) - - -class ImageClassificationPredictionInstance(proto.Message): - r"""Prediction input format for Image Classification. 
- Attributes: - content (str): - The image bytes or Cloud Storage URI to make - the prediction on. - mime_type (str): - The MIME type of the content of the image. - Only the images in below listed MIME types are - supported. - image/jpeg - - image/gif - - image/png - - image/webp - - image/bmp - - image/tiff - - image/vnd.microsoft.icon - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py deleted file mode 100644 index 0b9aadc101..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py +++ /dev/null @@ -1,55 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.instance', - manifest={ - 'ImageObjectDetectionPredictionInstance', - }, -) - - -class ImageObjectDetectionPredictionInstance(proto.Message): - r"""Prediction input format for Image Object Detection. - Attributes: - content (str): - The image bytes or Cloud Storage URI to make - the prediction on. - mime_type (str): - The MIME type of the content of the image. - Only the images in below listed MIME types are - supported. - image/jpeg - - image/gif - - image/png - - image/webp - - image/bmp - - image/tiff - - image/vnd.microsoft.icon - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py deleted file mode 100644 index f967807e6c..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py +++ /dev/null @@ -1,49 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.instance', - manifest={ - 'ImageSegmentationPredictionInstance', - }, -) - - -class ImageSegmentationPredictionInstance(proto.Message): - r"""Prediction input format for Image Segmentation. - Attributes: - content (str): - The image bytes to make the predictions on. - mime_type (str): - The MIME type of the content of the image. - Only the images in below listed MIME types are - supported. - image/jpeg - - image/png - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py deleted file mode 100644 index 4eec13516c..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py +++ /dev/null @@ -1,48 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.instance', - manifest={ - 'TextClassificationPredictionInstance', - }, -) - - -class TextClassificationPredictionInstance(proto.Message): - r"""Prediction input format for Text Classification. - Attributes: - content (str): - The text snippet to make the predictions on. - mime_type (str): - The MIME type of the text snippet. The - supported MIME types are listed below. - - text/plain - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py deleted file mode 100644 index ffecb3de51..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.instance', - manifest={ - 'TextExtractionPredictionInstance', - }, -) - - -class TextExtractionPredictionInstance(proto.Message): - r"""Prediction input format for Text Extraction. - Attributes: - content (str): - The text snippet to make the predictions on. - mime_type (str): - The MIME type of the text snippet. The - supported MIME types are listed below. - - text/plain - key (str): - This field is only used for batch prediction. - If a key is provided, the batch prediction - result will be mapped to this key. If omitted, - then the batch prediction result will contain - the entire input instance. Vertex AI will not - check if keys in the request are duplicates, so - it is up to the caller to ensure the keys are - unique. - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - key = proto.Field( - proto.STRING, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py deleted file mode 100644 index 5bdfe5d5ba..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py +++ /dev/null @@ -1,48 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.instance', - manifest={ - 'TextSentimentPredictionInstance', - }, -) - - -class TextSentimentPredictionInstance(proto.Message): - r"""Prediction input format for Text Sentiment. - Attributes: - content (str): - The text snippet to make the predictions on. - mime_type (str): - The MIME type of the text snippet. The - supported MIME types are listed below. - - text/plain - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py deleted file mode 100644 index d53782868f..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py +++ /dev/null @@ -1,72 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License.
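# (Sketch, not part of the deleted files: the `key` field defined above only
# matters for batch prediction, where it is echoed back so results can be
# joined to their inputs. The import path assumes the shipped package layout.)
from google.cloud.aiplatform.v1.schema.predict import instance_v1

instance = instance_v1.TextExtractionPredictionInstance(
    content="Vertex AI was announced at Google I/O.",
    mime_type="text/plain",
    key="doc-0001",  # must be unique per instance; the service does not check
)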
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.instance', - manifest={ - 'VideoActionRecognitionPredictionInstance', - }, -) - - -class VideoActionRecognitionPredictionInstance(proto.Message): - r"""Prediction input format for Video Action Recognition. - Attributes: - content (str): - The Google Cloud Storage location of the - video on which to perform the prediction. - mime_type (str): - The MIME type of the content of the video. - Only the following are supported: video/mp4 - video/avi video/quicktime - time_segment_start (str): - The beginning, inclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision. - time_segment_end (str): - The end, exclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision, and "inf" or "Infinity" - is allowed, which means the end of the video. - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.STRING, - number=3, - ) - time_segment_end = proto.Field( - proto.STRING, - number=4, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py deleted file mode 100644 index b51ab464a4..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py +++ /dev/null @@ -1,72 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.instance', - manifest={ - 'VideoClassificationPredictionInstance', - }, -) - - -class VideoClassificationPredictionInstance(proto.Message): - r"""Prediction input format for Video Classification. - Attributes: - content (str): - The Google Cloud Storage location of the - video on which to perform the prediction. - mime_type (str): - The MIME type of the content of the video. 
- Only the following are supported: video/mp4 - video/avi video/quicktime - time_segment_start (str): - The beginning, inclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision. - time_segment_end (str): - The end, exclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision, and "inf" or "Infinity" - is allowed, which means the end of the video. - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.STRING, - number=3, - ) - time_segment_end = proto.Field( - proto.STRING, - number=4, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py deleted file mode 100644 index 8b96f75fd2..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py +++ /dev/null @@ -1,72 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.instance', - manifest={ - 'VideoObjectTrackingPredictionInstance', - }, -) - - -class VideoObjectTrackingPredictionInstance(proto.Message): - r"""Prediction input format for Video Object Tracking. - Attributes: - content (str): - The Google Cloud Storage location of the - video on which to perform the prediction. - mime_type (str): - The MIME type of the content of the video. - Only the following are supported: video/mp4 - video/avi video/quicktime - time_segment_start (str): - The beginning, inclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision. - time_segment_end (str): - The end, exclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision, and "inf" or "Infinity" - is allowed, which means the end of the video. 
- """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.STRING, - number=3, - ) - time_segment_end = proto.Field( - proto.STRING, - number=4, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/__init__.py deleted file mode 100644 index 91ae7f0d5c..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -from google.cloud.aiplatform.v1.schema.predict.params_v1.types.image_classification import ImageClassificationPredictionParams -from google.cloud.aiplatform.v1.schema.predict.params_v1.types.image_object_detection import ImageObjectDetectionPredictionParams -from google.cloud.aiplatform.v1.schema.predict.params_v1.types.image_segmentation import ImageSegmentationPredictionParams -from google.cloud.aiplatform.v1.schema.predict.params_v1.types.video_action_recognition import VideoActionRecognitionPredictionParams -from google.cloud.aiplatform.v1.schema.predict.params_v1.types.video_classification import VideoClassificationPredictionParams -from google.cloud.aiplatform.v1.schema.predict.params_v1.types.video_object_tracking import VideoObjectTrackingPredictionParams - -__all__ = ('ImageClassificationPredictionParams', - 'ImageObjectDetectionPredictionParams', - 'ImageSegmentationPredictionParams', - 'VideoActionRecognitionPredictionParams', - 'VideoClassificationPredictionParams', - 'VideoObjectTrackingPredictionParams', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/py.typed deleted file mode 100644 index df96e61590..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1-schema-predict-params package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py deleted file mode 100644 index 91b718b437..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -from .types.image_classification import ImageClassificationPredictionParams -from .types.image_object_detection import ImageObjectDetectionPredictionParams -from .types.image_segmentation import ImageSegmentationPredictionParams -from .types.video_action_recognition import VideoActionRecognitionPredictionParams -from .types.video_classification import VideoClassificationPredictionParams -from .types.video_object_tracking import VideoObjectTrackingPredictionParams - -__all__ = ( -'ImageClassificationPredictionParams', -'ImageObjectDetectionPredictionParams', -'ImageSegmentationPredictionParams', -'VideoActionRecognitionPredictionParams', -'VideoClassificationPredictionParams', -'VideoObjectTrackingPredictionParams', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_metadata.json b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_metadata.json deleted file mode 100644 index edfffb441b..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_metadata.json +++ /dev/null @@ -1,7 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.aiplatform.v1.schema.predict.params_v1", - "protoPackage": "google.cloud.aiplatform.v1.schema.predict.params", - "schema": "1.0" -} diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/py.typed deleted file mode 100644 index df96e61590..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1-schema-predict-params package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/services/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/services/__init__.py deleted file mode 100644 index 4de65971c2..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py deleted file mode 100644 index 70a92bb59c..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py +++ /dev/null @@ -1,42 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .image_classification import ( - ImageClassificationPredictionParams, -) -from .image_object_detection import ( - ImageObjectDetectionPredictionParams, -) -from .image_segmentation import ( - ImageSegmentationPredictionParams, -) -from .video_action_recognition import ( - VideoActionRecognitionPredictionParams, -) -from .video_classification import ( - VideoClassificationPredictionParams, -) -from .video_object_tracking import ( - VideoObjectTrackingPredictionParams, -) - -__all__ = ( - 'ImageClassificationPredictionParams', - 'ImageObjectDetectionPredictionParams', - 'ImageSegmentationPredictionParams', - 'VideoActionRecognitionPredictionParams', - 'VideoClassificationPredictionParams', - 'VideoObjectTrackingPredictionParams', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py deleted file mode 100644 index 1668600544..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py +++ /dev/null @@ -1,51 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.params', - manifest={ - 'ImageClassificationPredictionParams', - }, -) - - -class ImageClassificationPredictionParams(proto.Message): - r"""Prediction model parameters for Image Classification. - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. Default value is - 0.0 - max_predictions (int): - The Model only returns up to that many top, - by confidence score, predictions per instance. - If this number is very high, the Model may - return fewer predictions. Default value is 10. 
- """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py deleted file mode 100644 index 43c7814607..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.params', - manifest={ - 'ImageObjectDetectionPredictionParams', - }, -) - - -class ImageObjectDetectionPredictionParams(proto.Message): - r"""Prediction model parameters for Image Object Detection. - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. Default value is - 0.0 - max_predictions (int): - The Model only returns up to that many top, - by confidence score, predictions per instance. - Note that number of returned predictions is also - limited by metadata's predictionsLimit. Default - value is 10. - """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py deleted file mode 100644 index 695a3a7745..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py +++ /dev/null @@ -1,44 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.params', - manifest={ - 'ImageSegmentationPredictionParams', - }, -) - - -class ImageSegmentationPredictionParams(proto.Message): - r"""Prediction model parameters for Image Segmentation. 
- Attributes: - confidence_threshold (float): - When the model predicts category of pixels of - the image, it will only provide predictions for - pixels that it is at least this much confident - about. All other pixels will be classified as - background. Default value is 0.5. - """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py deleted file mode 100644 index 88e714e9cf..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.params', - manifest={ - 'VideoActionRecognitionPredictionParams', - }, -) - - -class VideoActionRecognitionPredictionParams(proto.Message): - r"""Prediction model parameters for Video Action Recognition. - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. Default value is - 0.0 - max_predictions (int): - The model only returns up to that many top, - by confidence score, predictions per frame of - the video. If this number is very high, the - Model may return fewer predictions per frame. - Default value is 50. - """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py deleted file mode 100644 index 06b2347eb6..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py +++ /dev/null @@ -1,94 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.params', - manifest={ - 'VideoClassificationPredictionParams', - }, -) - - -class VideoClassificationPredictionParams(proto.Message): - r"""Prediction model parameters for Video Classification. - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. Default value is - 0.0 - max_predictions (int): - The Model only returns up to that many top, - by confidence score, predictions per instance. - If this number is very high, the Model may - return fewer predictions. Default value is - 10,000. - segment_classification (bool): - Set to true to request segment-level - classification. Vertex AI returns labels and - their confidence scores for the entire time - segment of the video that user specified in the - input instance. Default value is true - shot_classification (bool): - Set to true to request shot-level - classification. Vertex AI determines the - boundaries for each camera shot in the entire - time segment of the video that user specified in - the input instance. Vertex AI then returns - labels and their confidence scores for each - detected shot, along with the start and end time - of the shot. - WARNING: Model evaluation is not done for this - classification type, the quality of it depends - on the training data, but there are no metrics - provided to describe that quality. - Default value is false - one_sec_interval_classification (bool): - Set to true to request classification for a - video at one-second intervals. Vertex AI returns - labels and their confidence scores for each - second of the entire time segment of the video - that user specified in the input WARNING: Model - evaluation is not done for this classification - type, the quality of it depends on the training - data, but there are no metrics provided to - describe that quality. Default value is false - """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - segment_classification = proto.Field( - proto.BOOL, - number=3, - ) - shot_classification = proto.Field( - proto.BOOL, - number=4, - ) - one_sec_interval_classification = proto.Field( - proto.BOOL, - number=5, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py deleted file mode 100644 index 820a73e3c6..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.params', - manifest={ - 'VideoObjectTrackingPredictionParams', - }, -) - - -class VideoObjectTrackingPredictionParams(proto.Message): - r"""Prediction model parameters for Video Object Tracking. - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. Default value is - 0.0 - max_predictions (int): - The model only returns up to that many top, - by confidence score, predictions per frame of - the video. If this number is very high, the - Model may return fewer predictions per frame. - Default value is 50. - min_bounding_box_size (float): - Only bounding boxes with shortest edge at - least that long as a relative value of video - frame size are returned. Default value is 0.0. - """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - min_bounding_box_size = proto.Field( - proto.FLOAT, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py deleted file mode 100644 index 27d9f97862..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py +++ /dev/null @@ -1,39 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
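# (Sketch, not part of the deleted files: how typed instances and params feed
# PredictionService.predict, which takes google.protobuf Value payloads. The
# project/endpoint IDs are placeholders, `instance` and `params` are assumed
# to be built as sketched earlier, and the json_format round-trip is one
# common way to convert proto-plus messages, not an API of these modules.)
from google.cloud import aiplatform_v1
from google.protobuf import json_format, struct_pb2


def to_value(msg) -> struct_pb2.Value:
    # proto-plus messages expose the underlying protobuf message as ._pb
    return json_format.ParseDict(
        json_format.MessageToDict(msg._pb), struct_pb2.Value()
    )


client = aiplatform_v1.PredictionServiceClient(
    client_options={"api_endpoint": "us-central1-aiplatform.googleapis.com"}
)
response = client.predict(
    endpoint=client.endpoint_path("my-project", "us-central1", "1234567890"),
    instances=[to_value(instance)],
    parameters=to_value(params),
)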
-# - - -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.classification import ClassificationPredictionResult -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.image_object_detection import ImageObjectDetectionPredictionResult -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.image_segmentation import ImageSegmentationPredictionResult -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.tabular_classification import TabularClassificationPredictionResult -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.tabular_regression import TabularRegressionPredictionResult -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.text_extraction import TextExtractionPredictionResult -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.text_sentiment import TextSentimentPredictionResult -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.video_action_recognition import VideoActionRecognitionPredictionResult -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.video_classification import VideoClassificationPredictionResult -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.video_object_tracking import VideoObjectTrackingPredictionResult - -__all__ = ('ClassificationPredictionResult', - 'ImageObjectDetectionPredictionResult', - 'ImageSegmentationPredictionResult', - 'TabularClassificationPredictionResult', - 'TabularRegressionPredictionResult', - 'TextExtractionPredictionResult', - 'TextSentimentPredictionResult', - 'VideoActionRecognitionPredictionResult', - 'VideoClassificationPredictionResult', - 'VideoObjectTrackingPredictionResult', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/py.typed deleted file mode 100644 index 472fa4d8cc..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1-schema-predict-prediction package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py deleted file mode 100644 index 3cf9304526..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - - -from .types.classification import ClassificationPredictionResult -from .types.image_object_detection import ImageObjectDetectionPredictionResult -from .types.image_segmentation import ImageSegmentationPredictionResult -from .types.tabular_classification import TabularClassificationPredictionResult -from .types.tabular_regression import TabularRegressionPredictionResult -from .types.text_extraction import TextExtractionPredictionResult -from .types.text_sentiment import TextSentimentPredictionResult -from .types.video_action_recognition import VideoActionRecognitionPredictionResult -from .types.video_classification import VideoClassificationPredictionResult -from .types.video_object_tracking import VideoObjectTrackingPredictionResult - -__all__ = ( -'ClassificationPredictionResult', -'ImageObjectDetectionPredictionResult', -'ImageSegmentationPredictionResult', -'TabularClassificationPredictionResult', -'TabularRegressionPredictionResult', -'TextExtractionPredictionResult', -'TextSentimentPredictionResult', -'VideoActionRecognitionPredictionResult', -'VideoClassificationPredictionResult', -'VideoObjectTrackingPredictionResult', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_metadata.json b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_metadata.json deleted file mode 100644 index ba1d67a00c..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_metadata.json +++ /dev/null @@ -1,7 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.aiplatform.v1.schema.predict.prediction_v1", - "protoPackage": "google.cloud.aiplatform.v1.schema.predict.prediction", - "schema": "1.0" -} diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/py.typed deleted file mode 100644 index 472fa4d8cc..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1-schema-predict-prediction package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/services/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/services/__init__.py deleted file mode 100644 index 4de65971c2..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
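# (Note, not part of the deleted files: the two __init__.py files above give
# every result type two import surfaces, a versionless alias and the
# versioned module it re-exports, which resolve to the same class.)
from google.cloud.aiplatform.v1.schema.predict.prediction import (
    ClassificationPredictionResult as versionless,
)
from google.cloud.aiplatform.v1.schema.predict.prediction_v1 import (
    ClassificationPredictionResult as versioned,
)

assert versionless is versioned  # both re-export prediction_v1.types.classification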
-# diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py deleted file mode 100644 index b7b7c056aa..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .classification import ( - ClassificationPredictionResult, -) -from .image_object_detection import ( - ImageObjectDetectionPredictionResult, -) -from .image_segmentation import ( - ImageSegmentationPredictionResult, -) -from .tabular_classification import ( - TabularClassificationPredictionResult, -) -from .tabular_regression import ( - TabularRegressionPredictionResult, -) -from .text_extraction import ( - TextExtractionPredictionResult, -) -from .text_sentiment import ( - TextSentimentPredictionResult, -) -from .video_action_recognition import ( - VideoActionRecognitionPredictionResult, -) -from .video_classification import ( - VideoClassificationPredictionResult, -) -from .video_object_tracking import ( - VideoObjectTrackingPredictionResult, -) - -__all__ = ( - 'ClassificationPredictionResult', - 'ImageObjectDetectionPredictionResult', - 'ImageSegmentationPredictionResult', - 'TabularClassificationPredictionResult', - 'TabularRegressionPredictionResult', - 'TextExtractionPredictionResult', - 'TextSentimentPredictionResult', - 'VideoActionRecognitionPredictionResult', - 'VideoClassificationPredictionResult', - 'VideoObjectTrackingPredictionResult', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py deleted file mode 100644 index f14b084951..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.prediction', - manifest={ - 'ClassificationPredictionResult', - }, -) - - -class ClassificationPredictionResult(proto.Message): - r"""Prediction output format for Image and Text Classification. 
- Attributes: - ids (Sequence[int]): - The resource IDs of the AnnotationSpecs that - had been identified. - display_names (Sequence[str]): - The display names of the AnnotationSpecs that - had been identified, order matches the IDs. - confidences (Sequence[float]): - The Model's confidences in correctness of the - predicted IDs, higher value means higher - confidence. Order matches the Ids. - """ - - ids = proto.RepeatedField( - proto.INT64, - number=1, - ) - display_names = proto.RepeatedField( - proto.STRING, - number=2, - ) - confidences = proto.RepeatedField( - proto.FLOAT, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py deleted file mode 100644 index 74178c5502..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py +++ /dev/null @@ -1,72 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import struct_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.prediction', - manifest={ - 'ImageObjectDetectionPredictionResult', - }, -) - - -class ImageObjectDetectionPredictionResult(proto.Message): - r"""Prediction output format for Image Object Detection. - Attributes: - ids (Sequence[int]): - The resource IDs of the AnnotationSpecs that - had been identified, ordered by the confidence - score descendingly. - display_names (Sequence[str]): - The display names of the AnnotationSpecs that - had been identified, order matches the IDs. - confidences (Sequence[float]): - The Model's confidences in correctness of the - predicted IDs, higher value means higher - confidence. Order matches the Ids. - bboxes (Sequence[google.protobuf.struct_pb2.ListValue]): - Bounding boxes, i.e. the rectangles over the image, that - pinpoint the found AnnotationSpecs. Given in order that - matches the IDs. Each bounding box is an array of 4 numbers - ``xMin``, ``xMax``, ``yMin``, and ``yMax``, which represent - the extremal coordinates of the box. They are relative to - the image size, and the point 0,0 is in the top left of the - image. 
- """ - - ids = proto.RepeatedField( - proto.INT64, - number=1, - ) - display_names = proto.RepeatedField( - proto.STRING, - number=2, - ) - confidences = proto.RepeatedField( - proto.FLOAT, - number=3, - ) - bboxes = proto.RepeatedField( - proto.MESSAGE, - number=4, - message=struct_pb2.ListValue, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py deleted file mode 100644 index e93991222a..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.prediction', - manifest={ - 'ImageSegmentationPredictionResult', - }, -) - - -class ImageSegmentationPredictionResult(proto.Message): - r"""Prediction output format for Image Segmentation. - Attributes: - category_mask (str): - A PNG image where each pixel in the mask - represents the category in which the pixel in - the original image was predicted to belong to. - The size of this image will be the same as the - original image. The mapping between the - AnntoationSpec and the color can be found in - model's metadata. The model will choose the most - likely category and if none of the categories - reach the confidence threshold, the pixel will - be marked as background. - confidence_mask (str): - A one channel image which is encoded as an - 8bit lossless PNG. The size of the image will be - the same as the original image. For a specific - pixel, darker color means less confidence in - correctness of the cateogry in the categoryMask - for the corresponding pixel. Black means no - confidence and white means complete confidence. - """ - - category_mask = proto.Field( - proto.STRING, - number=1, - ) - confidence_mask = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py deleted file mode 100644 index a36bf8f991..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py +++ /dev/null @@ -1,51 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.prediction', - manifest={ - 'TabularClassificationPredictionResult', - }, -) - - -class TabularClassificationPredictionResult(proto.Message): - r"""Prediction output format for Tabular Classification. - Attributes: - classes (Sequence[str]): - The name of the classes being classified, - contains all possible values of the target - column. - scores (Sequence[float]): - The model's confidence in each class being - correct, higher value means higher confidence. - The N-th score corresponds to the N-th class in - classes. - """ - - classes = proto.RepeatedField( - proto.STRING, - number=1, - ) - scores = proto.RepeatedField( - proto.FLOAT, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py deleted file mode 100644 index 56af2af196..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.prediction', - manifest={ - 'TabularRegressionPredictionResult', - }, -) - - -class TabularRegressionPredictionResult(proto.Message): - r"""Prediction output format for Tabular Regression. - Attributes: - value (float): - The regression value. - lower_bound (float): - The lower bound of the prediction interval. - upper_bound (float): - The upper bound of the prediction interval. 
- """ - - value = proto.Field( - proto.FLOAT, - number=1, - ) - lower_bound = proto.Field( - proto.FLOAT, - number=2, - ) - upper_bound = proto.Field( - proto.FLOAT, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py deleted file mode 100644 index 3e7398f165..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py +++ /dev/null @@ -1,77 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.prediction', - manifest={ - 'TextExtractionPredictionResult', - }, -) - - -class TextExtractionPredictionResult(proto.Message): - r"""Prediction output format for Text Extraction. - Attributes: - ids (Sequence[int]): - The resource IDs of the AnnotationSpecs that - had been identified, ordered by the confidence - score descendingly. - display_names (Sequence[str]): - The display names of the AnnotationSpecs that - had been identified, order matches the IDs. - text_segment_start_offsets (Sequence[int]): - The start offsets, inclusive, of the text - segment in which the AnnotationSpec has been - identified. Expressed as a zero-based number of - characters as measured from the start of the - text snippet. - text_segment_end_offsets (Sequence[int]): - The end offsets, inclusive, of the text - segment in which the AnnotationSpec has been - identified. Expressed as a zero-based number of - characters as measured from the start of the - text snippet. - confidences (Sequence[float]): - The Model's confidences in correctness of the - predicted IDs, higher value means higher - confidence. Order matches the Ids. - """ - - ids = proto.RepeatedField( - proto.INT64, - number=1, - ) - display_names = proto.RepeatedField( - proto.STRING, - number=2, - ) - text_segment_start_offsets = proto.RepeatedField( - proto.INT64, - number=3, - ) - text_segment_end_offsets = proto.RepeatedField( - proto.INT64, - number=4, - ) - confidences = proto.RepeatedField( - proto.FLOAT, - number=5, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py deleted file mode 100644 index 135db45729..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py +++ /dev/null @@ -1,47 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import proto # type: ignore
-
-
-__protobuf__ = proto.module(
- package='google.cloud.aiplatform.v1.schema.predict.prediction',
- manifest={
- 'TextSentimentPredictionResult',
- },
-)
-
-
-class TextSentimentPredictionResult(proto.Message):
- r"""Prediction output format for Text Sentiment.
- Attributes:
- sentiment (int):
- The integer sentiment label, between 0
- (inclusive) and sentimentMax (inclusive),
- where 0 maps to the least positive sentiment and
- sentimentMax maps to the most positive one. The
- higher the score, the more positive the
- sentiment in the text snippet. Note:
- sentimentMax is an integer value between 1
- (inclusive) and 10 (inclusive).
- """
-
- sentiment = proto.Field(
- proto.INT32,
- number=1,
- )
-
-
-__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py
deleted file mode 100644
index 5a853655ae..0000000000
--- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import proto # type: ignore
-
-from google.protobuf import duration_pb2 # type: ignore
-from google.protobuf import wrappers_pb2 # type: ignore
-
-
-__protobuf__ = proto.module(
- package='google.cloud.aiplatform.v1.schema.predict.prediction',
- manifest={
- 'VideoActionRecognitionPredictionResult',
- },
-)
-
-
-class VideoActionRecognitionPredictionResult(proto.Message):
- r"""Prediction output format for Video Action Recognition.
- Attributes:
- id (str):
- The resource ID of the AnnotationSpec that
- had been identified.
- display_name (str):
- The display name of the AnnotationSpec that
- had been identified.
- time_segment_start (google.protobuf.duration_pb2.Duration):
- The beginning, inclusive, of the video's time
- segment in which the AnnotationSpec has been
- identified. Expressed as a number of seconds as
- measured from the start of the video, with
- fractions up to a microsecond precision, and
- with "s" appended at the end.
- time_segment_end (google.protobuf.duration_pb2.Duration):
- The end, exclusive, of the video's time
- segment in which the AnnotationSpec has been
- identified. Expressed as a number of seconds as
- measured from the start of the video, with
- fractions up to a microsecond precision, and
- with "s" appended at the end.
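Both time segment fields are protobuf Durations, which serialize in JSON as decimal seconds with a trailing "s" (for example "12.500s"). A small parsing sketch that assumes that encoding and nothing else:

import datetime

def parse_duration(text: str) -> datetime.timedelta:
    # JSON-encoded protobuf Duration: decimal seconds plus an "s" suffix.
    if not text.endswith("s"):
        raise ValueError(f"not a Duration string: {text!r}")
    return datetime.timedelta(seconds=float(text[:-1]))

start = parse_duration("4.000s")
end = parse_duration("12.500s")
print((end - start).total_seconds())  # 8.5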
- confidence (google.protobuf.wrappers_pb2.FloatValue):
- The Model's confidence in the correctness
- of this prediction; a higher value means
- higher confidence.
- """
-
- id = proto.Field(
- proto.STRING,
- number=1,
- )
- display_name = proto.Field(
- proto.STRING,
- number=2,
- )
- time_segment_start = proto.Field(
- proto.MESSAGE,
- number=4,
- message=duration_pb2.Duration,
- )
- time_segment_end = proto.Field(
- proto.MESSAGE,
- number=5,
- message=duration_pb2.Duration,
- )
- confidence = proto.Field(
- proto.MESSAGE,
- number=6,
- message=wrappers_pb2.FloatValue,
- )
-
-
-__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py
deleted file mode 100644
index da14b3253e..0000000000
--- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import proto # type: ignore
-
-from google.protobuf import duration_pb2 # type: ignore
-from google.protobuf import wrappers_pb2 # type: ignore
-
-
-__protobuf__ = proto.module(
- package='google.cloud.aiplatform.v1.schema.predict.prediction',
- manifest={
- 'VideoClassificationPredictionResult',
- },
-)
-
-
-class VideoClassificationPredictionResult(proto.Message):
- r"""Prediction output format for Video Classification.
- Attributes:
- id (str):
- The resource ID of the AnnotationSpec that
- had been identified.
- display_name (str):
- The display name of the AnnotationSpec that
- had been identified.
- type_ (str):
- The type of the prediction. The requested
- types can be configured via parameters. This
- will be one of - segment-classification
- - shot-classification
- - one-sec-interval-classification
- time_segment_start (google.protobuf.duration_pb2.Duration):
- The beginning, inclusive, of the video's time
- segment in which the AnnotationSpec has been
- identified. Expressed as a number of seconds as
- measured from the start of the video, with
- fractions up to a microsecond precision, and
- with "s" appended at the end. Note that for
- 'segment-classification' prediction type, this
- equals the original 'timeSegmentStart' from the
- input instance, for other types it is the start
- of a shot or a 1 second interval respectively.
- time_segment_end (google.protobuf.duration_pb2.Duration):
- The end, exclusive, of the video's time
- segment in which the AnnotationSpec has been
- identified. Expressed as a number of seconds as
- measured from the start of the video, with
- fractions up to a microsecond precision, and
- with "s" appended at the end. Note that for
- 'segment-classification' prediction type, this
- equals the original 'timeSegmentEnd' from the
- input instance, for other types it is the end of
- a shot or a 1 second interval respectively.
- confidence (google.protobuf.wrappers_pb2.FloatValue):
- The Model's confidence in the correctness
- of this prediction; a higher value means
- higher confidence.
- """
-
- id = proto.Field(
- proto.STRING,
- number=1,
- )
- display_name = proto.Field(
- proto.STRING,
- number=2,
- )
- type_ = proto.Field(
- proto.STRING,
- number=3,
- )
- time_segment_start = proto.Field(
- proto.MESSAGE,
- number=4,
- message=duration_pb2.Duration,
- )
- time_segment_end = proto.Field(
- proto.MESSAGE,
- number=5,
- message=duration_pb2.Duration,
- )
- confidence = proto.Field(
- proto.MESSAGE,
- number=6,
- message=wrappers_pb2.FloatValue,
- )
-
-
-__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py
deleted file mode 100644
index 9b70e913cd..0000000000
--- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py
+++ /dev/null
@@ -1,144 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import proto # type: ignore
-
-from google.protobuf import duration_pb2 # type: ignore
-from google.protobuf import wrappers_pb2 # type: ignore
-
-
-__protobuf__ = proto.module(
- package='google.cloud.aiplatform.v1.schema.predict.prediction',
- manifest={
- 'VideoObjectTrackingPredictionResult',
- },
-)
-
-
-class VideoObjectTrackingPredictionResult(proto.Message):
- r"""Prediction output format for Video Object Tracking.
- Attributes:
- id (str):
- The resource ID of the AnnotationSpec that
- had been identified.
- display_name (str):
- The display name of the AnnotationSpec that
- had been identified.
- time_segment_start (google.protobuf.duration_pb2.Duration):
- The beginning, inclusive, of the video's time
- segment in which the object instance has been
- detected. Expressed as a number of seconds as
- measured from the start of the video, with
- fractions up to a microsecond precision, and
- with "s" appended at the end.
- time_segment_end (google.protobuf.duration_pb2.Duration):
- The end, inclusive, of the video's time
- segment in which the object instance has been
- detected. Expressed as a number of seconds as
- measured from the start of the video, with
- fractions up to a microsecond precision, and
- with "s" appended at the end.
- confidence (google.protobuf.wrappers_pb2.FloatValue):
- The Model's confidence in the correctness
- of this prediction; a higher value means
- higher confidence.
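Each of the video prediction messages above wraps its confidence in a wrappers_pb2.FloatValue, so the field may be absent in a JSON payload. A filtering sketch under that assumption (camelCase keys per proto3 JSON; the records are invented):

results = [
    {"id": "1", "displayName": "goal", "type": "shot-classification", "confidence": 0.92},
    {"id": "2", "displayName": "corner", "type": "shot-classification", "confidence": 0.41},
]

# Treat a missing confidence as 0.0 and keep only confident predictions.
confident = [r for r in results if r.get("confidence", 0.0) >= 0.5]
print([r["displayName"] for r in confident])  # ['goal']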
- frames (Sequence[google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.VideoObjectTrackingPredictionResult.Frame]): - All of the frames of the video in which a - single object instance has been detected. The - bounding boxes in the frames identify the same - object. - """ - - class Frame(proto.Message): - r"""The fields ``xMin``, ``xMax``, ``yMin``, and ``yMax`` refer to a - bounding box, i.e. the rectangle over the video frame pinpointing - the found AnnotationSpec. The coordinates are relative to the frame - size, and the point 0,0 is in the top left of the frame. - - Attributes: - time_offset (google.protobuf.duration_pb2.Duration): - A time (frame) of a video in which the object - has been detected. Expressed as a number of - seconds as measured from the start of the video, - with fractions up to a microsecond precision, - and with "s" appended at the end. - x_min (google.protobuf.wrappers_pb2.FloatValue): - The leftmost coordinate of the bounding box. - x_max (google.protobuf.wrappers_pb2.FloatValue): - The rightmost coordinate of the bounding box. - y_min (google.protobuf.wrappers_pb2.FloatValue): - The topmost coordinate of the bounding box. - y_max (google.protobuf.wrappers_pb2.FloatValue): - The bottommost coordinate of the bounding - box. - """ - - time_offset = proto.Field( - proto.MESSAGE, - number=1, - message=duration_pb2.Duration, - ) - x_min = proto.Field( - proto.MESSAGE, - number=2, - message=wrappers_pb2.FloatValue, - ) - x_max = proto.Field( - proto.MESSAGE, - number=3, - message=wrappers_pb2.FloatValue, - ) - y_min = proto.Field( - proto.MESSAGE, - number=4, - message=wrappers_pb2.FloatValue, - ) - y_max = proto.Field( - proto.MESSAGE, - number=5, - message=wrappers_pb2.FloatValue, - ) - - id = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.MESSAGE, - number=3, - message=duration_pb2.Duration, - ) - time_segment_end = proto.Field( - proto.MESSAGE, - number=4, - message=duration_pb2.Duration, - ) - confidence = proto.Field( - proto.MESSAGE, - number=5, - message=wrappers_pb2.FloatValue, - ) - frames = proto.RepeatedField( - proto.MESSAGE, - number=6, - message=Frame, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py deleted file mode 100644 index 0e86266695..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py +++ /dev/null @@ -1,69 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
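Because Frame coordinates are relative to the frame size, the fraction of the image a box covers is just width times height. A sketch over an invented, JSON-shaped frames list (key names follow the Frame docstring above):

frames = [
    {"timeOffset": "1.200s", "xMin": 0.10, "xMax": 0.35, "yMin": 0.40, "yMax": 0.80},
    {"timeOffset": "1.600s", "xMin": 0.12, "xMax": 0.38, "yMin": 0.42, "yMax": 0.83},
]

for frame in frames:
    width = frame["xMax"] - frame["xMin"]
    height = frame["yMax"] - frame["yMin"]
    # Relative coordinates, so this is the covered fraction of the image.
    print(frame["timeOffset"], round(width * height, 4))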
-# - - -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_classification import AutoMlImageClassification -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_classification import AutoMlImageClassificationInputs -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_classification import AutoMlImageClassificationMetadata -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_object_detection import AutoMlImageObjectDetection -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_object_detection import AutoMlImageObjectDetectionInputs -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_object_detection import AutoMlImageObjectDetectionMetadata -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_segmentation import AutoMlImageSegmentation -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_segmentation import AutoMlImageSegmentationInputs -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_segmentation import AutoMlImageSegmentationMetadata -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_tables import AutoMlTables -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_tables import AutoMlTablesInputs -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_tables import AutoMlTablesMetadata -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_classification import AutoMlTextClassification -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_classification import AutoMlTextClassificationInputs -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_extraction import AutoMlTextExtraction -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_extraction import AutoMlTextExtractionInputs -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_sentiment import AutoMlTextSentiment -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_sentiment import AutoMlTextSentimentInputs -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_action_recognition import AutoMlVideoActionRecognition -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_action_recognition import AutoMlVideoActionRecognitionInputs -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_classification import AutoMlVideoClassification -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_classification import AutoMlVideoClassificationInputs -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_object_tracking import AutoMlVideoObjectTracking -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_object_tracking import AutoMlVideoObjectTrackingInputs -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.export_evaluated_data_items_config import ExportEvaluatedDataItemsConfig - -__all__ = ('AutoMlImageClassification', - 'AutoMlImageClassificationInputs', - 'AutoMlImageClassificationMetadata', - 'AutoMlImageObjectDetection', - 'AutoMlImageObjectDetectionInputs', - 'AutoMlImageObjectDetectionMetadata', - 
'AutoMlImageSegmentation', - 'AutoMlImageSegmentationInputs', - 'AutoMlImageSegmentationMetadata', - 'AutoMlTables', - 'AutoMlTablesInputs', - 'AutoMlTablesMetadata', - 'AutoMlTextClassification', - 'AutoMlTextClassificationInputs', - 'AutoMlTextExtraction', - 'AutoMlTextExtractionInputs', - 'AutoMlTextSentiment', - 'AutoMlTextSentimentInputs', - 'AutoMlVideoActionRecognition', - 'AutoMlVideoActionRecognitionInputs', - 'AutoMlVideoClassification', - 'AutoMlVideoClassificationInputs', - 'AutoMlVideoObjectTracking', - 'AutoMlVideoObjectTrackingInputs', - 'ExportEvaluatedDataItemsConfig', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/py.typed deleted file mode 100644 index 1a9d2972a0..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1-schema-trainingjob-definition package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/__init__.py deleted file mode 100644 index f4e2447d46..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/__init__.py +++ /dev/null @@ -1,70 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - - -from .types.automl_image_classification import AutoMlImageClassification -from .types.automl_image_classification import AutoMlImageClassificationInputs -from .types.automl_image_classification import AutoMlImageClassificationMetadata -from .types.automl_image_object_detection import AutoMlImageObjectDetection -from .types.automl_image_object_detection import AutoMlImageObjectDetectionInputs -from .types.automl_image_object_detection import AutoMlImageObjectDetectionMetadata -from .types.automl_image_segmentation import AutoMlImageSegmentation -from .types.automl_image_segmentation import AutoMlImageSegmentationInputs -from .types.automl_image_segmentation import AutoMlImageSegmentationMetadata -from .types.automl_tables import AutoMlTables -from .types.automl_tables import AutoMlTablesInputs -from .types.automl_tables import AutoMlTablesMetadata -from .types.automl_text_classification import AutoMlTextClassification -from .types.automl_text_classification import AutoMlTextClassificationInputs -from .types.automl_text_extraction import AutoMlTextExtraction -from .types.automl_text_extraction import AutoMlTextExtractionInputs -from .types.automl_text_sentiment import AutoMlTextSentiment -from .types.automl_text_sentiment import AutoMlTextSentimentInputs -from .types.automl_video_action_recognition import AutoMlVideoActionRecognition -from .types.automl_video_action_recognition import AutoMlVideoActionRecognitionInputs -from .types.automl_video_classification import AutoMlVideoClassification -from .types.automl_video_classification import AutoMlVideoClassificationInputs -from .types.automl_video_object_tracking import AutoMlVideoObjectTracking -from .types.automl_video_object_tracking import AutoMlVideoObjectTrackingInputs -from .types.export_evaluated_data_items_config import ExportEvaluatedDataItemsConfig - -__all__ = ( -'AutoMlImageClassification', -'AutoMlImageClassificationInputs', -'AutoMlImageClassificationMetadata', -'AutoMlImageObjectDetection', -'AutoMlImageObjectDetectionInputs', -'AutoMlImageObjectDetectionMetadata', -'AutoMlImageSegmentation', -'AutoMlImageSegmentationInputs', -'AutoMlImageSegmentationMetadata', -'AutoMlTables', -'AutoMlTablesInputs', -'AutoMlTablesMetadata', -'AutoMlTextClassification', -'AutoMlTextClassificationInputs', -'AutoMlTextExtraction', -'AutoMlTextExtractionInputs', -'AutoMlTextSentiment', -'AutoMlTextSentimentInputs', -'AutoMlVideoActionRecognition', -'AutoMlVideoActionRecognitionInputs', -'AutoMlVideoClassification', -'AutoMlVideoClassificationInputs', -'AutoMlVideoObjectTracking', -'AutoMlVideoObjectTrackingInputs', -'ExportEvaluatedDataItemsConfig', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_metadata.json b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_metadata.json deleted file mode 100644 index 620ff75f05..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_metadata.json +++ /dev/null @@ -1,7 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.aiplatform.v1.schema.trainingjob.definition_v1", - "protoPackage": "google.cloud.aiplatform.v1.schema.trainingjob.definition", - "schema": "1.0" -} diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/py.typed 
deleted file mode 100644 index 1a9d2972a0..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1-schema-trainingjob-definition package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/services/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/services/__init__.py deleted file mode 100644 index 4de65971c2..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/__init__.py deleted file mode 100644 index 4b8bb9425b..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/__init__.py +++ /dev/null @@ -1,90 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .automl_image_classification import ( - AutoMlImageClassification, - AutoMlImageClassificationInputs, - AutoMlImageClassificationMetadata, -) -from .automl_image_object_detection import ( - AutoMlImageObjectDetection, - AutoMlImageObjectDetectionInputs, - AutoMlImageObjectDetectionMetadata, -) -from .automl_image_segmentation import ( - AutoMlImageSegmentation, - AutoMlImageSegmentationInputs, - AutoMlImageSegmentationMetadata, -) -from .automl_tables import ( - AutoMlTables, - AutoMlTablesInputs, - AutoMlTablesMetadata, -) -from .automl_text_classification import ( - AutoMlTextClassification, - AutoMlTextClassificationInputs, -) -from .automl_text_extraction import ( - AutoMlTextExtraction, - AutoMlTextExtractionInputs, -) -from .automl_text_sentiment import ( - AutoMlTextSentiment, - AutoMlTextSentimentInputs, -) -from .automl_video_action_recognition import ( - AutoMlVideoActionRecognition, - AutoMlVideoActionRecognitionInputs, -) -from .automl_video_classification import ( - AutoMlVideoClassification, - AutoMlVideoClassificationInputs, -) -from .automl_video_object_tracking import ( - AutoMlVideoObjectTracking, - AutoMlVideoObjectTrackingInputs, -) -from .export_evaluated_data_items_config import ( - ExportEvaluatedDataItemsConfig, -) - -__all__ = ( - 'AutoMlImageClassification', - 'AutoMlImageClassificationInputs', - 'AutoMlImageClassificationMetadata', - 'AutoMlImageObjectDetection', - 'AutoMlImageObjectDetectionInputs', - 'AutoMlImageObjectDetectionMetadata', - 'AutoMlImageSegmentation', - 'AutoMlImageSegmentationInputs', - 'AutoMlImageSegmentationMetadata', - 'AutoMlTables', - 'AutoMlTablesInputs', - 'AutoMlTablesMetadata', - 'AutoMlTextClassification', - 'AutoMlTextClassificationInputs', - 'AutoMlTextExtraction', - 'AutoMlTextExtractionInputs', - 'AutoMlTextSentiment', - 'AutoMlTextSentimentInputs', - 'AutoMlVideoActionRecognition', - 'AutoMlVideoActionRecognitionInputs', - 'AutoMlVideoClassification', - 'AutoMlVideoClassificationInputs', - 'AutoMlVideoObjectTracking', - 'AutoMlVideoObjectTrackingInputs', - 'ExportEvaluatedDataItemsConfig', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_classification.py deleted file mode 100644 index 8046ad8725..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_classification.py +++ /dev/null @@ -1,156 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
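The __init__ modules above re-export the same classes: the unversioned definition package imports from definition_v1.types by absolute path, and definition_v1 re-imports them relatively, so both surfaces should resolve to identical objects. A quick check, assuming the staged package were actually installed:

from google.cloud.aiplatform.v1.schema.trainingjob import definition
from google.cloud.aiplatform.v1.schema.trainingjob import definition_v1

# Both surfaces alias the single class defined in definition_v1.types.
assert definition.AutoMlTablesInputs is definition_v1.AutoMlTablesInputs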
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.trainingjob.definition', - manifest={ - 'AutoMlImageClassification', - 'AutoMlImageClassificationInputs', - 'AutoMlImageClassificationMetadata', - }, -) - - -class AutoMlImageClassification(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Image - Classification Model. - - Attributes: - inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageClassificationInputs): - The input parameters of this TrainingJob. - metadata (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageClassificationMetadata): - The metadata information. - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlImageClassificationInputs', - ) - metadata = proto.Field( - proto.MESSAGE, - number=2, - message='AutoMlImageClassificationMetadata', - ) - - -class AutoMlImageClassificationInputs(proto.Message): - r""" - Attributes: - model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageClassificationInputs.ModelType): - - base_model_id (str): - The ID of the ``base`` model. If it is specified, the new - model will be trained based on the ``base`` model. - Otherwise, the new model will be trained from scratch. The - ``base`` model must be in the same Project and Location as - the new Model to train, and have the same modelType. - budget_milli_node_hours (int): - The training budget of creating this model, expressed in - milli node hours i.e. 1,000 value in this field means 1 node - hour. The actual metadata.costMilliNodeHours will be equal - or less than this value. If further model training ceases to - provide any improvements, it will stop without using the - full budget and the metadata.successfulStopReason will be - ``model-converged``. Note, node_hour = actual_hour \* - number_of_nodes_involved. For modelType - ``cloud``\ (default), the budget must be between 8,000 and - 800,000 milli node hours, inclusive. The default value is - 192,000 which represents one day in wall time, considering 8 - nodes are used. For model types ``mobile-tf-low-latency-1``, - ``mobile-tf-versatile-1``, ``mobile-tf-high-accuracy-1``, - the training budget must be between 1,000 and 100,000 milli - node hours, inclusive. The default value is 24,000 which - represents one day in wall time on a single node that is - used. - disable_early_stopping (bool): - Use the entire training budget. This disables - the early stopping feature. When false the early - stopping feature is enabled, which means that - AutoML Image Classification might stop training - before the entire training budget has been used. - multi_label (bool): - If false, a single-label (multi-class) Model - will be trained (i.e. assuming that for each - image just up to one annotation may be - applicable). If true, a multi-label Model will - be trained (i.e. assuming that for each image - multiple annotations may be applicable). 
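The budget arithmetic in the docstring above is worth making concrete: 1,000 milli node hours equal one node hour, and wall-clock time divides by the node count, which is how the 192,000 default comes out to one day on 8 nodes. A small sketch of that arithmetic:

def wall_clock_hours(budget_milli_node_hours: int, nodes: int) -> float:
    # 1,000 milli node hours == 1 node hour; wall time divides by node count.
    node_hours = budget_milli_node_hours / 1000
    return node_hours / nodes

# Documented cloud default for image classification: 192,000 on 8 nodes.
assert wall_clock_hours(192_000, nodes=8) == 24.0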
- """ - class ModelType(proto.Enum): - r"""""" - MODEL_TYPE_UNSPECIFIED = 0 - CLOUD = 1 - MOBILE_TF_LOW_LATENCY_1 = 2 - MOBILE_TF_VERSATILE_1 = 3 - MOBILE_TF_HIGH_ACCURACY_1 = 4 - - model_type = proto.Field( - proto.ENUM, - number=1, - enum=ModelType, - ) - base_model_id = proto.Field( - proto.STRING, - number=2, - ) - budget_milli_node_hours = proto.Field( - proto.INT64, - number=3, - ) - disable_early_stopping = proto.Field( - proto.BOOL, - number=4, - ) - multi_label = proto.Field( - proto.BOOL, - number=5, - ) - - -class AutoMlImageClassificationMetadata(proto.Message): - r""" - Attributes: - cost_milli_node_hours (int): - The actual training cost of creating this - model, expressed in milli node hours, i.e. 1,000 - value in this field means 1 node hour. - Guaranteed to not exceed - inputs.budgetMilliNodeHours. - successful_stop_reason (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageClassificationMetadata.SuccessfulStopReason): - For successful job completions, this is the - reason why the job has finished. - """ - class SuccessfulStopReason(proto.Enum): - r"""""" - SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 - BUDGET_REACHED = 1 - MODEL_CONVERGED = 2 - - cost_milli_node_hours = proto.Field( - proto.INT64, - number=1, - ) - successful_stop_reason = proto.Field( - proto.ENUM, - number=2, - enum=SuccessfulStopReason, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py deleted file mode 100644 index 52b7bbee80..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py +++ /dev/null @@ -1,137 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.trainingjob.definition', - manifest={ - 'AutoMlImageObjectDetection', - 'AutoMlImageObjectDetectionInputs', - 'AutoMlImageObjectDetectionMetadata', - }, -) - - -class AutoMlImageObjectDetection(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Image Object - Detection Model. - - Attributes: - inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageObjectDetectionInputs): - The input parameters of this TrainingJob. 
- metadata (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageObjectDetectionMetadata): - The metadata information - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlImageObjectDetectionInputs', - ) - metadata = proto.Field( - proto.MESSAGE, - number=2, - message='AutoMlImageObjectDetectionMetadata', - ) - - -class AutoMlImageObjectDetectionInputs(proto.Message): - r""" - Attributes: - model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageObjectDetectionInputs.ModelType): - - budget_milli_node_hours (int): - The training budget of creating this model, expressed in - milli node hours i.e. 1,000 value in this field means 1 node - hour. The actual metadata.costMilliNodeHours will be equal - or less than this value. If further model training ceases to - provide any improvements, it will stop without using the - full budget and the metadata.successfulStopReason will be - ``model-converged``. Note, node_hour = actual_hour \* - number_of_nodes_involved. For modelType - ``cloud``\ (default), the budget must be between 20,000 and - 900,000 milli node hours, inclusive. The default value is - 216,000 which represents one day in wall time, considering 9 - nodes are used. For model types ``mobile-tf-low-latency-1``, - ``mobile-tf-versatile-1``, ``mobile-tf-high-accuracy-1`` the - training budget must be between 1,000 and 100,000 milli node - hours, inclusive. The default value is 24,000 which - represents one day in wall time on a single node that is - used. - disable_early_stopping (bool): - Use the entire training budget. This disables - the early stopping feature. When false the early - stopping feature is enabled, which means that - AutoML Image Object Detection might stop - training before the entire training budget has - been used. - """ - class ModelType(proto.Enum): - r"""""" - MODEL_TYPE_UNSPECIFIED = 0 - CLOUD_HIGH_ACCURACY_1 = 1 - CLOUD_LOW_LATENCY_1 = 2 - MOBILE_TF_LOW_LATENCY_1 = 3 - MOBILE_TF_VERSATILE_1 = 4 - MOBILE_TF_HIGH_ACCURACY_1 = 5 - - model_type = proto.Field( - proto.ENUM, - number=1, - enum=ModelType, - ) - budget_milli_node_hours = proto.Field( - proto.INT64, - number=2, - ) - disable_early_stopping = proto.Field( - proto.BOOL, - number=3, - ) - - -class AutoMlImageObjectDetectionMetadata(proto.Message): - r""" - Attributes: - cost_milli_node_hours (int): - The actual training cost of creating this - model, expressed in milli node hours, i.e. 1,000 - value in this field means 1 node hour. - Guaranteed to not exceed - inputs.budgetMilliNodeHours. - successful_stop_reason (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageObjectDetectionMetadata.SuccessfulStopReason): - For successful job completions, this is the - reason why the job has finished. 
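The metadata messages pair the actual cost with the reason training stopped. A reading sketch over an invented JSON payload, assuming the enum serializes by the names defined below (note the budget docstrings spell the reason ``model-converged`` in their own notation):

metadata = {
    "costMilliNodeHours": 150_000,
    "successfulStopReason": "MODEL_CONVERGED",
}

if metadata["successfulStopReason"] == "MODEL_CONVERGED":
    # Early convergence means the cost stayed under the requested budget.
    print(f"converged after {metadata['costMilliNodeHours'] / 1000:.0f} node hours")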
- """ - class SuccessfulStopReason(proto.Enum): - r"""""" - SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 - BUDGET_REACHED = 1 - MODEL_CONVERGED = 2 - - cost_milli_node_hours = proto.Field( - proto.INT64, - number=1, - ) - successful_stop_reason = proto.Field( - proto.ENUM, - number=2, - enum=SuccessfulStopReason, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_segmentation.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_segmentation.py deleted file mode 100644 index 8e3728f200..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_segmentation.py +++ /dev/null @@ -1,131 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.trainingjob.definition', - manifest={ - 'AutoMlImageSegmentation', - 'AutoMlImageSegmentationInputs', - 'AutoMlImageSegmentationMetadata', - }, -) - - -class AutoMlImageSegmentation(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Image - Segmentation Model. - - Attributes: - inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageSegmentationInputs): - The input parameters of this TrainingJob. - metadata (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageSegmentationMetadata): - The metadata information. - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlImageSegmentationInputs', - ) - metadata = proto.Field( - proto.MESSAGE, - number=2, - message='AutoMlImageSegmentationMetadata', - ) - - -class AutoMlImageSegmentationInputs(proto.Message): - r""" - Attributes: - model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageSegmentationInputs.ModelType): - - budget_milli_node_hours (int): - The training budget of creating this model, expressed in - milli node hours i.e. 1,000 value in this field means 1 node - hour. The actual metadata.costMilliNodeHours will be equal - or less than this value. If further model training ceases to - provide any improvements, it will stop without using the - full budget and the metadata.successfulStopReason will be - ``model-converged``. Note, node_hour = actual_hour \* - number_of_nodes_involved. Or actaul_wall_clock_hours = - train_budget_milli_node_hours / (number_of_nodes_involved \* - 1000) For modelType ``cloud-high-accuracy-1``\ (default), - the budget must be between 20,000 and 2,000,000 milli node - hours, inclusive. The default value is 192,000 which - represents one day in wall time (1000 milli \* 24 hours \* 8 - nodes). - base_model_id (str): - The ID of the ``base`` model. If it is specified, the new - model will be trained based on the ``base`` model. - Otherwise, the new model will be trained from scratch. 
The - ``base`` model must be in the same Project and Location as - the new Model to train, and have the same modelType. - """ - class ModelType(proto.Enum): - r"""""" - MODEL_TYPE_UNSPECIFIED = 0 - CLOUD_HIGH_ACCURACY_1 = 1 - CLOUD_LOW_ACCURACY_1 = 2 - MOBILE_TF_LOW_LATENCY_1 = 3 - - model_type = proto.Field( - proto.ENUM, - number=1, - enum=ModelType, - ) - budget_milli_node_hours = proto.Field( - proto.INT64, - number=2, - ) - base_model_id = proto.Field( - proto.STRING, - number=3, - ) - - -class AutoMlImageSegmentationMetadata(proto.Message): - r""" - Attributes: - cost_milli_node_hours (int): - The actual training cost of creating this - model, expressed in milli node hours, i.e. 1,000 - value in this field means 1 node hour. - Guaranteed to not exceed - inputs.budgetMilliNodeHours. - successful_stop_reason (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageSegmentationMetadata.SuccessfulStopReason): - For successful job completions, this is the - reason why the job has finished. - """ - class SuccessfulStopReason(proto.Enum): - r"""""" - SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 - BUDGET_REACHED = 1 - MODEL_CONVERGED = 2 - - cost_milli_node_hours = proto.Field( - proto.INT64, - number=1, - ) - successful_stop_reason = proto.Field( - proto.ENUM, - number=2, - enum=SuccessfulStopReason, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py deleted file mode 100644 index 26823412a8..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py +++ /dev/null @@ -1,499 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types import export_evaluated_data_items_config as gcastd_export_evaluated_data_items_config - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.trainingjob.definition', - manifest={ - 'AutoMlTables', - 'AutoMlTablesInputs', - 'AutoMlTablesMetadata', - }, -) - - -class AutoMlTables(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Tables Model. - Attributes: - inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs): - The input parameters of this TrainingJob. - metadata (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesMetadata): - The metadata information. 
- """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlTablesInputs', - ) - metadata = proto.Field( - proto.MESSAGE, - number=2, - message='AutoMlTablesMetadata', - ) - - -class AutoMlTablesInputs(proto.Message): - r""" - Attributes: - optimization_objective_recall_value (float): - Required when optimization_objective is - "maximize-precision-at-recall". Must be between 0 and 1, - inclusive. - optimization_objective_precision_value (float): - Required when optimization_objective is - "maximize-recall-at-precision". Must be between 0 and 1, - inclusive. - prediction_type (str): - The type of prediction the Model is to - produce. "classification" - Predict one out of - multiple target values is - picked for each row. - "regression" - Predict a value based on its - relation to other values. This - type is available only to columns that contain - semantically numeric values, i.e. integers or - floating point number, even if - stored as e.g. strings. - target_column (str): - The column name of the target column that the - model is to predict. - transformations (Sequence[google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation]): - Each transformation will apply transform - function to given input column. And the result - will be used for training. When creating - transformation for BigQuery Struct column, the - column should be flattened using "." as the - delimiter. - optimization_objective (str): - Objective function the model is optimizing - towards. The training process creates a model - that maximizes/minimizes the value of the - objective function over the validation set. - - The supported optimization objectives depend on - the prediction type. If the field is not set, a - default objective function is used. - classification (binary): - "maximize-au-roc" (default) - Maximize the - area under the receiver - operating characteristic (ROC) curve. - "minimize-log-loss" - Minimize log loss. - "maximize-au-prc" - Maximize the area under - the precision-recall curve. "maximize- - precision-at-recall" - Maximize precision for a - specified - recall value. "maximize-recall-at-precision" - - Maximize recall for a specified - precision value. - classification (multi-class): - "minimize-log-loss" (default) - Minimize log - loss. - regression: - "minimize-rmse" (default) - Minimize root- - mean-squared error (RMSE). "minimize-mae" - - Minimize mean-absolute error (MAE). "minimize- - rmsle" - Minimize root-mean-squared log error - (RMSLE). - train_budget_milli_node_hours (int): - Required. The train budget of creating this - model, expressed in milli node hours i.e. 1,000 - value in this field means 1 node hour. - The training cost of the model will not exceed - this budget. The final cost will be attempted to - be close to the budget, though may end up being - (even) noticeably smaller - at the backend's - discretion. This especially may happen when - further model training ceases to provide any - improvements. - If the budget is set to a value known to be - insufficient to train a model for the given - dataset, the training won't be attempted and - will error. - - The train budget must be between 1,000 and - 72,000 milli node hours, inclusive. - disable_early_stopping (bool): - Use the entire training budget. This disables - the early stopping feature. By default, the - early stopping feature is enabled, which means - that AutoML Tables might stop training before - the entire training budget has been used. 
- weight_column_name (str):
- Column name that should be used as the weight
- column. Higher values in this column give more
- importance to the row during model training. The
- column must have numeric values between 0 and
- 10000 inclusively; 0 means the row is ignored
- for training. If weight column field is not set,
- then all rows are assumed to have equal weight
- of 1.
- export_evaluated_data_items_config (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.ExportEvaluatedDataItemsConfig):
- Configuration for exporting test set
- predictions to a BigQuery table. If this
- configuration is absent, then the export is not
- performed.
- additional_experiments (Sequence[str]):
- Additional experiment flags for the Tables
- training pipeline.
- """
-
- class Transformation(proto.Message):
- r"""
- Attributes:
- auto (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.AutoTransformation):
-
- numeric (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.NumericTransformation):
-
- categorical (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.CategoricalTransformation):
-
- timestamp (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.TimestampTransformation):
-
- text (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.TextTransformation):
-
- repeated_numeric (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.NumericArrayTransformation):
-
- repeated_categorical (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.CategoricalArrayTransformation):
-
- repeated_text (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.TextArrayTransformation):
-
- """
-
- class AutoTransformation(proto.Message):
- r"""Training pipeline will infer the proper transformation based
- on the statistic of dataset.
-
- Attributes:
- column_name (str):
-
- """
-
- column_name = proto.Field(
- proto.STRING,
- number=1,
- )
-
- class NumericTransformation(proto.Message):
- r"""Training pipeline will perform following transformation functions.
-
- - The value converted to float32.
- - The z_score of the value.
- - log(value+1) when the value is greater than or equal to 0.
- Otherwise, this transformation is not applied and the value is
- considered a missing value.
- - z_score of log(value+1) when the value is greater than or equal
- to 0. Otherwise, this transformation is not applied and the value
- is considered a missing value.
- - A boolean value that indicates whether the value is valid.
-
- Attributes:
- column_name (str):
-
- invalid_values_allowed (bool):
- If invalid values are allowed, the training
- pipeline will create a boolean feature that
- indicates whether the value is valid. Otherwise,
- the training pipeline will discard the input row
- from training data.
- """
-
- column_name = proto.Field(
- proto.STRING,
- number=1,
- )
- invalid_values_allowed = proto.Field(
- proto.BOOL,
- number=2,
- )
-
- class CategoricalTransformation(proto.Message):
- r"""Training pipeline will perform following transformation functions.
-
- - The categorical string as is--no change to case, punctuation,
- spelling, tense, and so on.
- - Convert the category name to a dictionary lookup index and
- generate an embedding for each index.
- - Categories that appear less than 5 times in the training dataset
- are treated as the "unknown" category. The "unknown" category
- gets its own special lookup index and resulting embedding.
-
- Attributes:
- column_name (str):
-
- """
-
- column_name = proto.Field(
- proto.STRING,
- number=1,
- )
-
- class TimestampTransformation(proto.Message):
- r"""Training pipeline will perform following transformation functions.
-
- - Apply the transformation functions for Numerical columns.
- - Determine the year, month, day, and weekday. Treat each value
- from the timestamp as a Categorical column.
- - Invalid numerical values (for example, values that fall outside
- of a typical timestamp range, or are extreme values) receive no
- special treatment and are not removed.
-
- Attributes:
- column_name (str):
-
- time_format (str):
- The format in which that time field is expressed. The
- time_format must either be one of:
-
- - ``unix-seconds``
- - ``unix-milliseconds``
- - ``unix-microseconds``
- - ``unix-nanoseconds`` (for respectively number of seconds,
- milliseconds, microseconds and nanoseconds since start of
- the Unix epoch); or be written in ``strftime`` syntax. If
- time_format is not set, then the default format is RFC
- 3339 ``date-time`` format, where ``time-offset`` =
- ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z)
- invalid_values_allowed (bool):
- If invalid values are allowed, the training
- pipeline will create a boolean feature that
- indicates whether the value is valid. Otherwise,
- the training pipeline will discard the input row
- from training data.
- """
-
- column_name = proto.Field(
- proto.STRING,
- number=1,
- )
- time_format = proto.Field(
- proto.STRING,
- number=2,
- )
- invalid_values_allowed = proto.Field(
- proto.BOOL,
- number=3,
- )
-
- class TextTransformation(proto.Message):
- r"""Training pipeline will perform following transformation functions.
-
- - The text as is--no change to case, punctuation, spelling, tense,
- and so on.
- - Tokenize text to words. Convert each word to a dictionary lookup
- index and generate an embedding for each index. Combine the
- embedding of all elements into a single embedding using the mean.
- - Tokenization is based on unicode script boundaries.
- - Missing values get their own lookup index and resulting
- embedding.
- - Stop-words receive no special treatment and are not removed.
-
- Attributes:
- column_name (str):
-
- """
-
- column_name = proto.Field(
- proto.STRING,
- number=1,
- )
-
- class NumericArrayTransformation(proto.Message):
- r"""Treats the column as numerical array and performs following
- transformation functions.
-
- - All transformations for Numerical types applied to the average of
- all the elements.
- - The average of empty arrays is treated as zero.
-
- Attributes:
- column_name (str):
-
- invalid_values_allowed (bool):
- If invalid values are allowed, the training
- pipeline will create a boolean feature that
- indicates whether the value is valid. Otherwise,
- the training pipeline will discard the input row
- from training data.
- """
-
- column_name = proto.Field(
- proto.STRING,
- number=1,
- )
- invalid_values_allowed = proto.Field(
- proto.BOOL,
- number=2,
- )
-
- class CategoricalArrayTransformation(proto.Message):
- r"""Treats the column as categorical array and performs following
- transformation functions.
- - - For each element in the array, convert the category name to a - dictionary lookup index and generate an embedding for each index. - Combine the embedding of all elements into a single embedding - using the mean. - - Empty arrays treated as an embedding of zeroes. - - Attributes: - column_name (str): - - """ - - column_name = proto.Field( - proto.STRING, - number=1, - ) - - class TextArrayTransformation(proto.Message): - r"""Treats the column as text array and performs following - transformation functions. - - - Concatenate all text values in the array into a single text value - using a space (" ") as a delimiter, and then treat the result as - a single text value. Apply the transformations for Text columns. - - Empty arrays treated as an empty text. - - Attributes: - column_name (str): - - """ - - column_name = proto.Field( - proto.STRING, - number=1, - ) - - auto = proto.Field( - proto.MESSAGE, - number=1, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.AutoTransformation', - ) - numeric = proto.Field( - proto.MESSAGE, - number=2, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.NumericTransformation', - ) - categorical = proto.Field( - proto.MESSAGE, - number=3, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.CategoricalTransformation', - ) - timestamp = proto.Field( - proto.MESSAGE, - number=4, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.TimestampTransformation', - ) - text = proto.Field( - proto.MESSAGE, - number=5, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.TextTransformation', - ) - repeated_numeric = proto.Field( - proto.MESSAGE, - number=6, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.NumericArrayTransformation', - ) - repeated_categorical = proto.Field( - proto.MESSAGE, - number=7, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.CategoricalArrayTransformation', - ) - repeated_text = proto.Field( - proto.MESSAGE, - number=8, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.TextArrayTransformation', - ) - - optimization_objective_recall_value = proto.Field( - proto.FLOAT, - number=5, - oneof='additional_optimization_objective_config', - ) - optimization_objective_precision_value = proto.Field( - proto.FLOAT, - number=6, - oneof='additional_optimization_objective_config', - ) - prediction_type = proto.Field( - proto.STRING, - number=1, - ) - target_column = proto.Field( - proto.STRING, - number=2, - ) - transformations = proto.RepeatedField( - proto.MESSAGE, - number=3, - message=Transformation, - ) - optimization_objective = proto.Field( - proto.STRING, - number=4, - ) - train_budget_milli_node_hours = proto.Field( - proto.INT64, - number=7, - ) - disable_early_stopping = proto.Field( - proto.BOOL, - number=8, - ) - weight_column_name = proto.Field( - proto.STRING, - number=9, - ) - export_evaluated_data_items_config = proto.Field( - proto.MESSAGE, - number=10, - message=gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig, - ) - additional_experiments = proto.RepeatedField( - proto.STRING, - number=11, - ) - - -class AutoMlTablesMetadata(proto.Message): - r"""Model metadata specific to AutoML Tables. - Attributes: - train_cost_milli_node_hours (int): - Output only. The actual training cost of the - model, expressed in milli node hours, i.e. 1,000 - value in this field means 1 node hour. 
- Guaranteed to not exceed the train budget. - """ - - train_cost_milli_node_hours = proto.Field( - proto.INT64, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_classification.py deleted file mode 100644 index c1fb171c48..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_classification.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.trainingjob.definition', - manifest={ - 'AutoMlTextClassification', - 'AutoMlTextClassificationInputs', - }, -) - - -class AutoMlTextClassification(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Text - Classification Model. - - Attributes: - inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTextClassificationInputs): - The input parameters of this TrainingJob. - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlTextClassificationInputs', - ) - - -class AutoMlTextClassificationInputs(proto.Message): - r""" - Attributes: - multi_label (bool): - - """ - - multi_label = proto.Field( - proto.BOOL, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_extraction.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_extraction.py deleted file mode 100644 index 50963784c9..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_extraction.py +++ /dev/null @@ -1,48 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.trainingjob.definition', - manifest={ - 'AutoMlTextExtraction', - 'AutoMlTextExtractionInputs', - }, -) - - -class AutoMlTextExtraction(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Text - Extraction Model. 
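A one-line illustration of the ``multi_label`` switch on the text-classification inputs deleted above (a sketch, not taken from the library's documentation):

```python
from google.cloud.aiplatform.v1.schema.trainingjob import definition_v1

# multi_label=True lets a single document carry several labels at once.
job = definition_v1.AutoMlTextClassification(
    inputs=definition_v1.AutoMlTextClassificationInputs(multi_label=True),
)
```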
- - Attributes: - inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTextExtractionInputs): - The input parameters of this TrainingJob. - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlTextExtractionInputs', - ) - - -class AutoMlTextExtractionInputs(proto.Message): - r""" """ - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_sentiment.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_sentiment.py deleted file mode 100644 index 9f571275b7..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_sentiment.py +++ /dev/null @@ -1,66 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.trainingjob.definition', - manifest={ - 'AutoMlTextSentiment', - 'AutoMlTextSentimentInputs', - }, -) - - -class AutoMlTextSentiment(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Text - Sentiment Model. - - Attributes: - inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTextSentimentInputs): - The input parameters of this TrainingJob. - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlTextSentimentInputs', - ) - - -class AutoMlTextSentimentInputs(proto.Message): - r""" - Attributes: - sentiment_max (int): - A sentiment is expressed as an integer - ordinal, where higher value means a more - positive sentiment. The range of sentiments that - will be used is between 0 and sentimentMax - (inclusive on both ends), and all the values in - the range must be represented in the dataset - before a model can be created. - Only the Annotations with this sentimentMax will - be used for training. sentimentMax value must be - between 1 and 10 (inclusive). - """ - - sentiment_max = proto.Field( - proto.INT32, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_action_recognition.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_action_recognition.py deleted file mode 100644 index 437445c667..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_action_recognition.py +++ /dev/null @@ -1,65 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
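To make the ``sentiment_max`` contract above concrete, a short hypothetical sketch; the value 4 is arbitrary, it must lie in [1, 10], and every ordinal in [0, 4] must appear in the dataset:

```python
from google.cloud.aiplatform.v1.schema.trainingjob import definition_v1

job = definition_v1.AutoMlTextSentiment(
    inputs=definition_v1.AutoMlTextSentimentInputs(sentiment_max=4),
)
```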
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.trainingjob.definition', - manifest={ - 'AutoMlVideoActionRecognition', - 'AutoMlVideoActionRecognitionInputs', - }, -) - - -class AutoMlVideoActionRecognition(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Video Action - Recognition Model. - - Attributes: - inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoActionRecognitionInputs): - The input parameters of this TrainingJob. - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlVideoActionRecognitionInputs', - ) - - -class AutoMlVideoActionRecognitionInputs(proto.Message): - r""" - Attributes: - model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoActionRecognitionInputs.ModelType): - - """ - class ModelType(proto.Enum): - r"""""" - MODEL_TYPE_UNSPECIFIED = 0 - CLOUD = 1 - MOBILE_VERSATILE_1 = 2 - MOBILE_JETSON_VERSATILE_1 = 3 - MOBILE_CORAL_VERSATILE_1 = 4 - - model_type = proto.Field( - proto.ENUM, - number=1, - enum=ModelType, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_classification.py deleted file mode 100644 index d78158615a..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_classification.py +++ /dev/null @@ -1,64 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.trainingjob.definition', - manifest={ - 'AutoMlVideoClassification', - 'AutoMlVideoClassificationInputs', - }, -) - - -class AutoMlVideoClassification(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Video - Classification Model. - - Attributes: - inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoClassificationInputs): - The input parameters of this TrainingJob. 
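``MOBILE_JETSON_VERSATILE_1`` and ``MOBILE_CORAL_VERSATILE_1`` are the two ModelType constants this change introduces for Video Action Recognition; a hypothetical sketch of selecting one:

```python
from google.cloud.aiplatform.v1.schema.trainingjob import definition_v1

Inputs = definition_v1.AutoMlVideoActionRecognitionInputs
job = definition_v1.AutoMlVideoActionRecognition(
    inputs=Inputs(model_type=Inputs.ModelType.MOBILE_CORAL_VERSATILE_1),
)
```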
- """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlVideoClassificationInputs', - ) - - -class AutoMlVideoClassificationInputs(proto.Message): - r""" - Attributes: - model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoClassificationInputs.ModelType): - - """ - class ModelType(proto.Enum): - r"""""" - MODEL_TYPE_UNSPECIFIED = 0 - CLOUD = 1 - MOBILE_VERSATILE_1 = 2 - MOBILE_JETSON_VERSATILE_1 = 3 - - model_type = proto.Field( - proto.ENUM, - number=1, - enum=ModelType, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_object_tracking.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_object_tracking.py deleted file mode 100644 index 8ec377878b..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_object_tracking.py +++ /dev/null @@ -1,67 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.trainingjob.definition', - manifest={ - 'AutoMlVideoObjectTracking', - 'AutoMlVideoObjectTrackingInputs', - }, -) - - -class AutoMlVideoObjectTracking(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Video - ObjectTracking Model. - - Attributes: - inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoObjectTrackingInputs): - The input parameters of this TrainingJob. - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlVideoObjectTrackingInputs', - ) - - -class AutoMlVideoObjectTrackingInputs(proto.Message): - r""" - Attributes: - model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoObjectTrackingInputs.ModelType): - - """ - class ModelType(proto.Enum): - r"""""" - MODEL_TYPE_UNSPECIFIED = 0 - CLOUD = 1 - MOBILE_VERSATILE_1 = 2 - MOBILE_CORAL_VERSATILE_1 = 3 - MOBILE_CORAL_LOW_LATENCY_1 = 4 - MOBILE_JETSON_VERSATILE_1 = 5 - MOBILE_JETSON_LOW_LATENCY_1 = 6 - - model_type = proto.Field( - proto.ENUM, - number=1, - enum=ModelType, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/export_evaluated_data_items_config.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/export_evaluated_data_items_config.py deleted file mode 100644 index 8ac69278ee..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/export_evaluated_data_items_config.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.trainingjob.definition', - manifest={ - 'ExportEvaluatedDataItemsConfig', - }, -) - - -class ExportEvaluatedDataItemsConfig(proto.Message): - r"""Configuration for exporting test set predictions to a - BigQuery table. - - Attributes: - destination_bigquery_uri (str): - URI of desired destination BigQuery table. Expected format: - bq://:: - - If not specified, then results are exported to the following - auto-created BigQuery table: - :export_evaluated_examples__.evaluated_examples - override_existing_table (bool): - If true and an export destination is - specified, then the contents of the destination - are overwritten. Otherwise, if the export - destination already exists, then the export - operation fails. - """ - - destination_bigquery_uri = proto.Field( - proto.STRING, - number=1, - ) - override_existing_table = proto.Field( - proto.BOOL, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/__init__.py deleted file mode 100644 index ec7999298c..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/__init__.py +++ /dev/null @@ -1,390 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
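For the ``ExportEvaluatedDataItemsConfig`` message deleted just above, a sketch combining its two fields; the table URI is a placeholder, since the expected ``bq://`` format string is elided in the docstring:

```python
from google.cloud.aiplatform.v1.schema.trainingjob import definition_v1

config = definition_v1.ExportEvaluatedDataItemsConfig(
    destination_bigquery_uri="bq://my-project:my_dataset:my_table",  # placeholder URI
    # False: fail if the destination table already exists, rather than overwrite it.
    override_existing_table=False,
)
```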
-# - -from .services.dataset_service import DatasetServiceClient -from .services.dataset_service import DatasetServiceAsyncClient -from .services.endpoint_service import EndpointServiceClient -from .services.endpoint_service import EndpointServiceAsyncClient -from .services.job_service import JobServiceClient -from .services.job_service import JobServiceAsyncClient -from .services.migration_service import MigrationServiceClient -from .services.migration_service import MigrationServiceAsyncClient -from .services.model_service import ModelServiceClient -from .services.model_service import ModelServiceAsyncClient -from .services.pipeline_service import PipelineServiceClient -from .services.pipeline_service import PipelineServiceAsyncClient -from .services.prediction_service import PredictionServiceClient -from .services.prediction_service import PredictionServiceAsyncClient -from .services.specialist_pool_service import SpecialistPoolServiceClient -from .services.specialist_pool_service import SpecialistPoolServiceAsyncClient - -from .types.accelerator_type import AcceleratorType -from .types.annotation import Annotation -from .types.annotation_spec import AnnotationSpec -from .types.artifact import Artifact -from .types.batch_prediction_job import BatchPredictionJob -from .types.completion_stats import CompletionStats -from .types.context import Context -from .types.custom_job import ContainerSpec -from .types.custom_job import CustomJob -from .types.custom_job import CustomJobSpec -from .types.custom_job import PythonPackageSpec -from .types.custom_job import Scheduling -from .types.custom_job import WorkerPoolSpec -from .types.data_item import DataItem -from .types.data_labeling_job import ActiveLearningConfig -from .types.data_labeling_job import DataLabelingJob -from .types.data_labeling_job import SampleConfig -from .types.data_labeling_job import TrainingConfig -from .types.dataset import Dataset -from .types.dataset import ExportDataConfig -from .types.dataset import ImportDataConfig -from .types.dataset_service import CreateDatasetOperationMetadata -from .types.dataset_service import CreateDatasetRequest -from .types.dataset_service import DeleteDatasetRequest -from .types.dataset_service import ExportDataOperationMetadata -from .types.dataset_service import ExportDataRequest -from .types.dataset_service import ExportDataResponse -from .types.dataset_service import GetAnnotationSpecRequest -from .types.dataset_service import GetDatasetRequest -from .types.dataset_service import ImportDataOperationMetadata -from .types.dataset_service import ImportDataRequest -from .types.dataset_service import ImportDataResponse -from .types.dataset_service import ListAnnotationsRequest -from .types.dataset_service import ListAnnotationsResponse -from .types.dataset_service import ListDataItemsRequest -from .types.dataset_service import ListDataItemsResponse -from .types.dataset_service import ListDatasetsRequest -from .types.dataset_service import ListDatasetsResponse -from .types.dataset_service import UpdateDatasetRequest -from .types.deployed_model_ref import DeployedModelRef -from .types.encryption_spec import EncryptionSpec -from .types.endpoint import DeployedModel -from .types.endpoint import Endpoint -from .types.endpoint_service import CreateEndpointOperationMetadata -from .types.endpoint_service import CreateEndpointRequest -from .types.endpoint_service import DeleteEndpointRequest -from .types.endpoint_service import DeployModelOperationMetadata -from .types.endpoint_service import 
DeployModelRequest -from .types.endpoint_service import DeployModelResponse -from .types.endpoint_service import GetEndpointRequest -from .types.endpoint_service import ListEndpointsRequest -from .types.endpoint_service import ListEndpointsResponse -from .types.endpoint_service import UndeployModelOperationMetadata -from .types.endpoint_service import UndeployModelRequest -from .types.endpoint_service import UndeployModelResponse -from .types.endpoint_service import UpdateEndpointRequest -from .types.env_var import EnvVar -from .types.execution import Execution -from .types.hyperparameter_tuning_job import HyperparameterTuningJob -from .types.io import BigQueryDestination -from .types.io import BigQuerySource -from .types.io import ContainerRegistryDestination -from .types.io import GcsDestination -from .types.io import GcsSource -from .types.job_service import CancelBatchPredictionJobRequest -from .types.job_service import CancelCustomJobRequest -from .types.job_service import CancelDataLabelingJobRequest -from .types.job_service import CancelHyperparameterTuningJobRequest -from .types.job_service import CreateBatchPredictionJobRequest -from .types.job_service import CreateCustomJobRequest -from .types.job_service import CreateDataLabelingJobRequest -from .types.job_service import CreateHyperparameterTuningJobRequest -from .types.job_service import DeleteBatchPredictionJobRequest -from .types.job_service import DeleteCustomJobRequest -from .types.job_service import DeleteDataLabelingJobRequest -from .types.job_service import DeleteHyperparameterTuningJobRequest -from .types.job_service import GetBatchPredictionJobRequest -from .types.job_service import GetCustomJobRequest -from .types.job_service import GetDataLabelingJobRequest -from .types.job_service import GetHyperparameterTuningJobRequest -from .types.job_service import ListBatchPredictionJobsRequest -from .types.job_service import ListBatchPredictionJobsResponse -from .types.job_service import ListCustomJobsRequest -from .types.job_service import ListCustomJobsResponse -from .types.job_service import ListDataLabelingJobsRequest -from .types.job_service import ListDataLabelingJobsResponse -from .types.job_service import ListHyperparameterTuningJobsRequest -from .types.job_service import ListHyperparameterTuningJobsResponse -from .types.job_state import JobState -from .types.machine_resources import AutomaticResources -from .types.machine_resources import AutoscalingMetricSpec -from .types.machine_resources import BatchDedicatedResources -from .types.machine_resources import DedicatedResources -from .types.machine_resources import DiskSpec -from .types.machine_resources import MachineSpec -from .types.machine_resources import ResourcesConsumed -from .types.manual_batch_tuning_parameters import ManualBatchTuningParameters -from .types.migratable_resource import MigratableResource -from .types.migration_service import BatchMigrateResourcesOperationMetadata -from .types.migration_service import BatchMigrateResourcesRequest -from .types.migration_service import BatchMigrateResourcesResponse -from .types.migration_service import MigrateResourceRequest -from .types.migration_service import MigrateResourceResponse -from .types.migration_service import SearchMigratableResourcesRequest -from .types.migration_service import SearchMigratableResourcesResponse -from .types.model import Model -from .types.model import ModelContainerSpec -from .types.model import Port -from .types.model import PredictSchemata -from .types.model_evaluation import 
ModelEvaluation -from .types.model_evaluation_slice import ModelEvaluationSlice -from .types.model_service import DeleteModelRequest -from .types.model_service import ExportModelOperationMetadata -from .types.model_service import ExportModelRequest -from .types.model_service import ExportModelResponse -from .types.model_service import GetModelEvaluationRequest -from .types.model_service import GetModelEvaluationSliceRequest -from .types.model_service import GetModelRequest -from .types.model_service import ListModelEvaluationSlicesRequest -from .types.model_service import ListModelEvaluationSlicesResponse -from .types.model_service import ListModelEvaluationsRequest -from .types.model_service import ListModelEvaluationsResponse -from .types.model_service import ListModelsRequest -from .types.model_service import ListModelsResponse -from .types.model_service import UpdateModelRequest -from .types.model_service import UploadModelOperationMetadata -from .types.model_service import UploadModelRequest -from .types.model_service import UploadModelResponse -from .types.operation import DeleteOperationMetadata -from .types.operation import GenericOperationMetadata -from .types.pipeline_job import PipelineJob -from .types.pipeline_job import PipelineJobDetail -from .types.pipeline_job import PipelineTaskDetail -from .types.pipeline_job import PipelineTaskExecutorDetail -from .types.pipeline_service import CancelPipelineJobRequest -from .types.pipeline_service import CancelTrainingPipelineRequest -from .types.pipeline_service import CreatePipelineJobRequest -from .types.pipeline_service import CreateTrainingPipelineRequest -from .types.pipeline_service import DeletePipelineJobRequest -from .types.pipeline_service import DeleteTrainingPipelineRequest -from .types.pipeline_service import GetPipelineJobRequest -from .types.pipeline_service import GetTrainingPipelineRequest -from .types.pipeline_service import ListPipelineJobsRequest -from .types.pipeline_service import ListPipelineJobsResponse -from .types.pipeline_service import ListTrainingPipelinesRequest -from .types.pipeline_service import ListTrainingPipelinesResponse -from .types.pipeline_state import PipelineState -from .types.prediction_service import PredictRequest -from .types.prediction_service import PredictResponse -from .types.specialist_pool import SpecialistPool -from .types.specialist_pool_service import CreateSpecialistPoolOperationMetadata -from .types.specialist_pool_service import CreateSpecialistPoolRequest -from .types.specialist_pool_service import DeleteSpecialistPoolRequest -from .types.specialist_pool_service import GetSpecialistPoolRequest -from .types.specialist_pool_service import ListSpecialistPoolsRequest -from .types.specialist_pool_service import ListSpecialistPoolsResponse -from .types.specialist_pool_service import UpdateSpecialistPoolOperationMetadata -from .types.specialist_pool_service import UpdateSpecialistPoolRequest -from .types.study import Measurement -from .types.study import StudySpec -from .types.study import Trial -from .types.training_pipeline import FilterSplit -from .types.training_pipeline import FractionSplit -from .types.training_pipeline import InputDataConfig -from .types.training_pipeline import PredefinedSplit -from .types.training_pipeline import TimestampSplit -from .types.training_pipeline import TrainingPipeline -from .types.user_action_reference import UserActionReference -from .types.value import Value - -__all__ = ( - 'DatasetServiceAsyncClient', - 'EndpointServiceAsyncClient', - 
'JobServiceAsyncClient', - 'MigrationServiceAsyncClient', - 'ModelServiceAsyncClient', - 'PipelineServiceAsyncClient', - 'PredictionServiceAsyncClient', - 'SpecialistPoolServiceAsyncClient', -'AcceleratorType', -'ActiveLearningConfig', -'Annotation', -'AnnotationSpec', -'Artifact', -'AutomaticResources', -'AutoscalingMetricSpec', -'BatchDedicatedResources', -'BatchMigrateResourcesOperationMetadata', -'BatchMigrateResourcesRequest', -'BatchMigrateResourcesResponse', -'BatchPredictionJob', -'BigQueryDestination', -'BigQuerySource', -'CancelBatchPredictionJobRequest', -'CancelCustomJobRequest', -'CancelDataLabelingJobRequest', -'CancelHyperparameterTuningJobRequest', -'CancelPipelineJobRequest', -'CancelTrainingPipelineRequest', -'CompletionStats', -'ContainerRegistryDestination', -'ContainerSpec', -'Context', -'CreateBatchPredictionJobRequest', -'CreateCustomJobRequest', -'CreateDataLabelingJobRequest', -'CreateDatasetOperationMetadata', -'CreateDatasetRequest', -'CreateEndpointOperationMetadata', -'CreateEndpointRequest', -'CreateHyperparameterTuningJobRequest', -'CreatePipelineJobRequest', -'CreateSpecialistPoolOperationMetadata', -'CreateSpecialistPoolRequest', -'CreateTrainingPipelineRequest', -'CustomJob', -'CustomJobSpec', -'DataItem', -'DataLabelingJob', -'Dataset', -'DatasetServiceClient', -'DedicatedResources', -'DeleteBatchPredictionJobRequest', -'DeleteCustomJobRequest', -'DeleteDataLabelingJobRequest', -'DeleteDatasetRequest', -'DeleteEndpointRequest', -'DeleteHyperparameterTuningJobRequest', -'DeleteModelRequest', -'DeleteOperationMetadata', -'DeletePipelineJobRequest', -'DeleteSpecialistPoolRequest', -'DeleteTrainingPipelineRequest', -'DeployModelOperationMetadata', -'DeployModelRequest', -'DeployModelResponse', -'DeployedModel', -'DeployedModelRef', -'DiskSpec', -'EncryptionSpec', -'Endpoint', -'EndpointServiceClient', -'EnvVar', -'Execution', -'ExportDataConfig', -'ExportDataOperationMetadata', -'ExportDataRequest', -'ExportDataResponse', -'ExportModelOperationMetadata', -'ExportModelRequest', -'ExportModelResponse', -'FilterSplit', -'FractionSplit', -'GcsDestination', -'GcsSource', -'GenericOperationMetadata', -'GetAnnotationSpecRequest', -'GetBatchPredictionJobRequest', -'GetCustomJobRequest', -'GetDataLabelingJobRequest', -'GetDatasetRequest', -'GetEndpointRequest', -'GetHyperparameterTuningJobRequest', -'GetModelEvaluationRequest', -'GetModelEvaluationSliceRequest', -'GetModelRequest', -'GetPipelineJobRequest', -'GetSpecialistPoolRequest', -'GetTrainingPipelineRequest', -'HyperparameterTuningJob', -'ImportDataConfig', -'ImportDataOperationMetadata', -'ImportDataRequest', -'ImportDataResponse', -'InputDataConfig', -'JobServiceClient', -'JobState', -'ListAnnotationsRequest', -'ListAnnotationsResponse', -'ListBatchPredictionJobsRequest', -'ListBatchPredictionJobsResponse', -'ListCustomJobsRequest', -'ListCustomJobsResponse', -'ListDataItemsRequest', -'ListDataItemsResponse', -'ListDataLabelingJobsRequest', -'ListDataLabelingJobsResponse', -'ListDatasetsRequest', -'ListDatasetsResponse', -'ListEndpointsRequest', -'ListEndpointsResponse', -'ListHyperparameterTuningJobsRequest', -'ListHyperparameterTuningJobsResponse', -'ListModelEvaluationSlicesRequest', -'ListModelEvaluationSlicesResponse', -'ListModelEvaluationsRequest', -'ListModelEvaluationsResponse', -'ListModelsRequest', -'ListModelsResponse', -'ListPipelineJobsRequest', -'ListPipelineJobsResponse', -'ListSpecialistPoolsRequest', -'ListSpecialistPoolsResponse', -'ListTrainingPipelinesRequest', 
-'ListTrainingPipelinesResponse', -'MachineSpec', -'ManualBatchTuningParameters', -'Measurement', -'MigratableResource', -'MigrateResourceRequest', -'MigrateResourceResponse', -'MigrationServiceClient', -'Model', -'ModelContainerSpec', -'ModelEvaluation', -'ModelEvaluationSlice', -'ModelServiceClient', -'PipelineJob', -'PipelineJobDetail', -'PipelineServiceClient', -'PipelineState', -'PipelineTaskDetail', -'PipelineTaskExecutorDetail', -'Port', -'PredefinedSplit', -'PredictRequest', -'PredictResponse', -'PredictSchemata', -'PredictionServiceClient', -'PythonPackageSpec', -'ResourcesConsumed', -'SampleConfig', -'Scheduling', -'SearchMigratableResourcesRequest', -'SearchMigratableResourcesResponse', -'SpecialistPool', -'SpecialistPoolServiceClient', -'StudySpec', -'TimestampSplit', -'TrainingConfig', -'TrainingPipeline', -'Trial', -'UndeployModelOperationMetadata', -'UndeployModelRequest', -'UndeployModelResponse', -'UpdateDatasetRequest', -'UpdateEndpointRequest', -'UpdateModelRequest', -'UpdateSpecialistPoolOperationMetadata', -'UpdateSpecialistPoolRequest', -'UploadModelOperationMetadata', -'UploadModelRequest', -'UploadModelResponse', -'UserActionReference', -'Value', -'WorkerPoolSpec', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/gapic_metadata.json b/owl-bot-staging/v1/google/cloud/aiplatform_v1/gapic_metadata.json deleted file mode 100644 index 4b22c95676..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/gapic_metadata.json +++ /dev/null @@ -1,771 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.aiplatform_v1", - "protoPackage": "google.cloud.aiplatform.v1", - "schema": "1.0", - "services": { - "DatasetService": { - "clients": { - "grpc": { - "libraryClient": "DatasetServiceClient", - "rpcs": { - "CreateDataset": { - "methods": [ - "create_dataset" - ] - }, - "DeleteDataset": { - "methods": [ - "delete_dataset" - ] - }, - "ExportData": { - "methods": [ - "export_data" - ] - }, - "GetAnnotationSpec": { - "methods": [ - "get_annotation_spec" - ] - }, - "GetDataset": { - "methods": [ - "get_dataset" - ] - }, - "ImportData": { - "methods": [ - "import_data" - ] - }, - "ListAnnotations": { - "methods": [ - "list_annotations" - ] - }, - "ListDataItems": { - "methods": [ - "list_data_items" - ] - }, - "ListDatasets": { - "methods": [ - "list_datasets" - ] - }, - "UpdateDataset": { - "methods": [ - "update_dataset" - ] - } - } - }, - "grpc-async": { - "libraryClient": "DatasetServiceAsyncClient", - "rpcs": { - "CreateDataset": { - "methods": [ - "create_dataset" - ] - }, - "DeleteDataset": { - "methods": [ - "delete_dataset" - ] - }, - "ExportData": { - "methods": [ - "export_data" - ] - }, - "GetAnnotationSpec": { - "methods": [ - "get_annotation_spec" - ] - }, - "GetDataset": { - "methods": [ - "get_dataset" - ] - }, - "ImportData": { - "methods": [ - "import_data" - ] - }, - "ListAnnotations": { - "methods": [ - "list_annotations" - ] - }, - "ListDataItems": { - "methods": [ - "list_data_items" - ] - }, - "ListDatasets": { - "methods": [ - "list_datasets" - ] - }, - "UpdateDataset": { - "methods": [ - "update_dataset" - ] - } - } - } - } - }, - "EndpointService": { - "clients": { - "grpc": { - "libraryClient": "EndpointServiceClient", - "rpcs": { - "CreateEndpoint": { - "methods": [ - "create_endpoint" - ] - }, - "DeleteEndpoint": { - "methods": [ - "delete_endpoint" - ] - }, - "DeployModel": { - "methods": [ - "deploy_model" - ] - }, - 
"GetEndpoint": { - "methods": [ - "get_endpoint" - ] - }, - "ListEndpoints": { - "methods": [ - "list_endpoints" - ] - }, - "UndeployModel": { - "methods": [ - "undeploy_model" - ] - }, - "UpdateEndpoint": { - "methods": [ - "update_endpoint" - ] - } - } - }, - "grpc-async": { - "libraryClient": "EndpointServiceAsyncClient", - "rpcs": { - "CreateEndpoint": { - "methods": [ - "create_endpoint" - ] - }, - "DeleteEndpoint": { - "methods": [ - "delete_endpoint" - ] - }, - "DeployModel": { - "methods": [ - "deploy_model" - ] - }, - "GetEndpoint": { - "methods": [ - "get_endpoint" - ] - }, - "ListEndpoints": { - "methods": [ - "list_endpoints" - ] - }, - "UndeployModel": { - "methods": [ - "undeploy_model" - ] - }, - "UpdateEndpoint": { - "methods": [ - "update_endpoint" - ] - } - } - } - } - }, - "JobService": { - "clients": { - "grpc": { - "libraryClient": "JobServiceClient", - "rpcs": { - "CancelBatchPredictionJob": { - "methods": [ - "cancel_batch_prediction_job" - ] - }, - "CancelCustomJob": { - "methods": [ - "cancel_custom_job" - ] - }, - "CancelDataLabelingJob": { - "methods": [ - "cancel_data_labeling_job" - ] - }, - "CancelHyperparameterTuningJob": { - "methods": [ - "cancel_hyperparameter_tuning_job" - ] - }, - "CreateBatchPredictionJob": { - "methods": [ - "create_batch_prediction_job" - ] - }, - "CreateCustomJob": { - "methods": [ - "create_custom_job" - ] - }, - "CreateDataLabelingJob": { - "methods": [ - "create_data_labeling_job" - ] - }, - "CreateHyperparameterTuningJob": { - "methods": [ - "create_hyperparameter_tuning_job" - ] - }, - "DeleteBatchPredictionJob": { - "methods": [ - "delete_batch_prediction_job" - ] - }, - "DeleteCustomJob": { - "methods": [ - "delete_custom_job" - ] - }, - "DeleteDataLabelingJob": { - "methods": [ - "delete_data_labeling_job" - ] - }, - "DeleteHyperparameterTuningJob": { - "methods": [ - "delete_hyperparameter_tuning_job" - ] - }, - "GetBatchPredictionJob": { - "methods": [ - "get_batch_prediction_job" - ] - }, - "GetCustomJob": { - "methods": [ - "get_custom_job" - ] - }, - "GetDataLabelingJob": { - "methods": [ - "get_data_labeling_job" - ] - }, - "GetHyperparameterTuningJob": { - "methods": [ - "get_hyperparameter_tuning_job" - ] - }, - "ListBatchPredictionJobs": { - "methods": [ - "list_batch_prediction_jobs" - ] - }, - "ListCustomJobs": { - "methods": [ - "list_custom_jobs" - ] - }, - "ListDataLabelingJobs": { - "methods": [ - "list_data_labeling_jobs" - ] - }, - "ListHyperparameterTuningJobs": { - "methods": [ - "list_hyperparameter_tuning_jobs" - ] - } - } - }, - "grpc-async": { - "libraryClient": "JobServiceAsyncClient", - "rpcs": { - "CancelBatchPredictionJob": { - "methods": [ - "cancel_batch_prediction_job" - ] - }, - "CancelCustomJob": { - "methods": [ - "cancel_custom_job" - ] - }, - "CancelDataLabelingJob": { - "methods": [ - "cancel_data_labeling_job" - ] - }, - "CancelHyperparameterTuningJob": { - "methods": [ - "cancel_hyperparameter_tuning_job" - ] - }, - "CreateBatchPredictionJob": { - "methods": [ - "create_batch_prediction_job" - ] - }, - "CreateCustomJob": { - "methods": [ - "create_custom_job" - ] - }, - "CreateDataLabelingJob": { - "methods": [ - "create_data_labeling_job" - ] - }, - "CreateHyperparameterTuningJob": { - "methods": [ - "create_hyperparameter_tuning_job" - ] - }, - "DeleteBatchPredictionJob": { - "methods": [ - "delete_batch_prediction_job" - ] - }, - "DeleteCustomJob": { - "methods": [ - "delete_custom_job" - ] - }, - "DeleteDataLabelingJob": { - "methods": [ - "delete_data_labeling_job" - ] - }, - 
"DeleteHyperparameterTuningJob": { - "methods": [ - "delete_hyperparameter_tuning_job" - ] - }, - "GetBatchPredictionJob": { - "methods": [ - "get_batch_prediction_job" - ] - }, - "GetCustomJob": { - "methods": [ - "get_custom_job" - ] - }, - "GetDataLabelingJob": { - "methods": [ - "get_data_labeling_job" - ] - }, - "GetHyperparameterTuningJob": { - "methods": [ - "get_hyperparameter_tuning_job" - ] - }, - "ListBatchPredictionJobs": { - "methods": [ - "list_batch_prediction_jobs" - ] - }, - "ListCustomJobs": { - "methods": [ - "list_custom_jobs" - ] - }, - "ListDataLabelingJobs": { - "methods": [ - "list_data_labeling_jobs" - ] - }, - "ListHyperparameterTuningJobs": { - "methods": [ - "list_hyperparameter_tuning_jobs" - ] - } - } - } - } - }, - "MigrationService": { - "clients": { - "grpc": { - "libraryClient": "MigrationServiceClient", - "rpcs": { - "BatchMigrateResources": { - "methods": [ - "batch_migrate_resources" - ] - }, - "SearchMigratableResources": { - "methods": [ - "search_migratable_resources" - ] - } - } - }, - "grpc-async": { - "libraryClient": "MigrationServiceAsyncClient", - "rpcs": { - "BatchMigrateResources": { - "methods": [ - "batch_migrate_resources" - ] - }, - "SearchMigratableResources": { - "methods": [ - "search_migratable_resources" - ] - } - } - } - } - }, - "ModelService": { - "clients": { - "grpc": { - "libraryClient": "ModelServiceClient", - "rpcs": { - "DeleteModel": { - "methods": [ - "delete_model" - ] - }, - "ExportModel": { - "methods": [ - "export_model" - ] - }, - "GetModel": { - "methods": [ - "get_model" - ] - }, - "GetModelEvaluation": { - "methods": [ - "get_model_evaluation" - ] - }, - "GetModelEvaluationSlice": { - "methods": [ - "get_model_evaluation_slice" - ] - }, - "ListModelEvaluationSlices": { - "methods": [ - "list_model_evaluation_slices" - ] - }, - "ListModelEvaluations": { - "methods": [ - "list_model_evaluations" - ] - }, - "ListModels": { - "methods": [ - "list_models" - ] - }, - "UpdateModel": { - "methods": [ - "update_model" - ] - }, - "UploadModel": { - "methods": [ - "upload_model" - ] - } - } - }, - "grpc-async": { - "libraryClient": "ModelServiceAsyncClient", - "rpcs": { - "DeleteModel": { - "methods": [ - "delete_model" - ] - }, - "ExportModel": { - "methods": [ - "export_model" - ] - }, - "GetModel": { - "methods": [ - "get_model" - ] - }, - "GetModelEvaluation": { - "methods": [ - "get_model_evaluation" - ] - }, - "GetModelEvaluationSlice": { - "methods": [ - "get_model_evaluation_slice" - ] - }, - "ListModelEvaluationSlices": { - "methods": [ - "list_model_evaluation_slices" - ] - }, - "ListModelEvaluations": { - "methods": [ - "list_model_evaluations" - ] - }, - "ListModels": { - "methods": [ - "list_models" - ] - }, - "UpdateModel": { - "methods": [ - "update_model" - ] - }, - "UploadModel": { - "methods": [ - "upload_model" - ] - } - } - } - } - }, - "PipelineService": { - "clients": { - "grpc": { - "libraryClient": "PipelineServiceClient", - "rpcs": { - "CancelPipelineJob": { - "methods": [ - "cancel_pipeline_job" - ] - }, - "CancelTrainingPipeline": { - "methods": [ - "cancel_training_pipeline" - ] - }, - "CreatePipelineJob": { - "methods": [ - "create_pipeline_job" - ] - }, - "CreateTrainingPipeline": { - "methods": [ - "create_training_pipeline" - ] - }, - "DeletePipelineJob": { - "methods": [ - "delete_pipeline_job" - ] - }, - "DeleteTrainingPipeline": { - "methods": [ - "delete_training_pipeline" - ] - }, - "GetPipelineJob": { - "methods": [ - "get_pipeline_job" - ] - }, - "GetTrainingPipeline": { - "methods": [ - 
"get_training_pipeline" - ] - }, - "ListPipelineJobs": { - "methods": [ - "list_pipeline_jobs" - ] - }, - "ListTrainingPipelines": { - "methods": [ - "list_training_pipelines" - ] - } - } - }, - "grpc-async": { - "libraryClient": "PipelineServiceAsyncClient", - "rpcs": { - "CancelPipelineJob": { - "methods": [ - "cancel_pipeline_job" - ] - }, - "CancelTrainingPipeline": { - "methods": [ - "cancel_training_pipeline" - ] - }, - "CreatePipelineJob": { - "methods": [ - "create_pipeline_job" - ] - }, - "CreateTrainingPipeline": { - "methods": [ - "create_training_pipeline" - ] - }, - "DeletePipelineJob": { - "methods": [ - "delete_pipeline_job" - ] - }, - "DeleteTrainingPipeline": { - "methods": [ - "delete_training_pipeline" - ] - }, - "GetPipelineJob": { - "methods": [ - "get_pipeline_job" - ] - }, - "GetTrainingPipeline": { - "methods": [ - "get_training_pipeline" - ] - }, - "ListPipelineJobs": { - "methods": [ - "list_pipeline_jobs" - ] - }, - "ListTrainingPipelines": { - "methods": [ - "list_training_pipelines" - ] - } - } - } - } - }, - "PredictionService": { - "clients": { - "grpc": { - "libraryClient": "PredictionServiceClient", - "rpcs": { - "Predict": { - "methods": [ - "predict" - ] - } - } - }, - "grpc-async": { - "libraryClient": "PredictionServiceAsyncClient", - "rpcs": { - "Predict": { - "methods": [ - "predict" - ] - } - } - } - } - }, - "SpecialistPoolService": { - "clients": { - "grpc": { - "libraryClient": "SpecialistPoolServiceClient", - "rpcs": { - "CreateSpecialistPool": { - "methods": [ - "create_specialist_pool" - ] - }, - "DeleteSpecialistPool": { - "methods": [ - "delete_specialist_pool" - ] - }, - "GetSpecialistPool": { - "methods": [ - "get_specialist_pool" - ] - }, - "ListSpecialistPools": { - "methods": [ - "list_specialist_pools" - ] - }, - "UpdateSpecialistPool": { - "methods": [ - "update_specialist_pool" - ] - } - } - }, - "grpc-async": { - "libraryClient": "SpecialistPoolServiceAsyncClient", - "rpcs": { - "CreateSpecialistPool": { - "methods": [ - "create_specialist_pool" - ] - }, - "DeleteSpecialistPool": { - "methods": [ - "delete_specialist_pool" - ] - }, - "GetSpecialistPool": { - "methods": [ - "get_specialist_pool" - ] - }, - "ListSpecialistPools": { - "methods": [ - "list_specialist_pools" - ] - }, - "UpdateSpecialistPool": { - "methods": [ - "update_specialist_pool" - ] - } - } - } - } - } - } -} diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform_v1/py.typed deleted file mode 100644 index 228f1c51c6..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/__init__.py deleted file mode 100644 index 4de65971c2..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/__init__.py deleted file mode 100644 index 44e8fb2115..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import DatasetServiceClient -from .async_client import DatasetServiceAsyncClient - -__all__ = ( - 'DatasetServiceClient', - 'DatasetServiceAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/async_client.py deleted file mode 100644 index 1916feda8c..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/async_client.py +++ /dev/null @@ -1,1074 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
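The ``gapic_metadata.json`` removed above maps proto RPC names to the generated client method names. A sketch of consuming it (the local file path is hypothetical):

```python
import json

with open("gapic_metadata.json") as f:  # hypothetical local copy
    metadata = json.load(f)

# Resolve which client method implements DatasetService.CreateDataset.
rpcs = metadata["services"]["DatasetService"]["clients"]["grpc"]["rpcs"]
print(rpcs["CreateDataset"]["methods"])  # -> ["create_dataset"]
```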
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.dataset_service import pagers -from google.cloud.aiplatform_v1.types import annotation -from google.cloud.aiplatform_v1.types import annotation_spec -from google.cloud.aiplatform_v1.types import data_item -from google.cloud.aiplatform_v1.types import dataset -from google.cloud.aiplatform_v1.types import dataset as gca_dataset -from google.cloud.aiplatform_v1.types import dataset_service -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import DatasetServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import DatasetServiceGrpcAsyncIOTransport -from .client import DatasetServiceClient - - -class DatasetServiceAsyncClient: - """""" - - _client: DatasetServiceClient - - DEFAULT_ENDPOINT = DatasetServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = DatasetServiceClient.DEFAULT_MTLS_ENDPOINT - - annotation_path = staticmethod(DatasetServiceClient.annotation_path) - parse_annotation_path = staticmethod(DatasetServiceClient.parse_annotation_path) - annotation_spec_path = staticmethod(DatasetServiceClient.annotation_spec_path) - parse_annotation_spec_path = staticmethod(DatasetServiceClient.parse_annotation_spec_path) - data_item_path = staticmethod(DatasetServiceClient.data_item_path) - parse_data_item_path = staticmethod(DatasetServiceClient.parse_data_item_path) - dataset_path = staticmethod(DatasetServiceClient.dataset_path) - parse_dataset_path = staticmethod(DatasetServiceClient.parse_dataset_path) - common_billing_account_path = staticmethod(DatasetServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(DatasetServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(DatasetServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(DatasetServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(DatasetServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(DatasetServiceClient.parse_common_organization_path) - common_project_path = staticmethod(DatasetServiceClient.common_project_path) - parse_common_project_path = staticmethod(DatasetServiceClient.parse_common_project_path) - common_location_path = staticmethod(DatasetServiceClient.common_location_path) - parse_common_location_path = staticmethod(DatasetServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. 
- - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - DatasetServiceAsyncClient: The constructed client. - """ - return DatasetServiceClient.from_service_account_info.__func__(DatasetServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - DatasetServiceAsyncClient: The constructed client. - """ - return DatasetServiceClient.from_service_account_file.__func__(DatasetServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> DatasetServiceTransport: - """Returns the transport used by the client instance. - - Returns: - DatasetServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(DatasetServiceClient).get_transport_class, type(DatasetServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, DatasetServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the dataset service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.DatasetServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. 
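A sketch of the two construction paths the classmethod docstrings above describe; the key file path is a placeholder:

```python
from google.cloud.aiplatform_v1 import DatasetServiceAsyncClient

# Explicit service-account credentials:
client = DatasetServiceAsyncClient.from_service_account_file("key.json")  # placeholder path

# Or fall back to Application Default Credentials:
client = DatasetServiceAsyncClient()
```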
- """ - self._client = DatasetServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_dataset(self, - request: dataset_service.CreateDatasetRequest = None, - *, - parent: str = None, - dataset: gca_dataset.Dataset = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a Dataset. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.CreateDatasetRequest`): - The request object. Request message for - [DatasetService.CreateDataset][google.cloud.aiplatform.v1.DatasetService.CreateDataset]. - parent (:class:`str`): - Required. The resource name of the Location to create - the Dataset in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - dataset (:class:`google.cloud.aiplatform_v1.types.Dataset`): - Required. The Dataset to create. - This corresponds to the ``dataset`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.Dataset` A - collection of DataItems and Annotations on them. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, dataset]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.CreateDatasetRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if dataset is not None: - request.dataset = dataset - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_dataset, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_dataset.Dataset, - metadata_type=dataset_service.CreateDatasetOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_dataset(self, - request: dataset_service.GetDatasetRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> dataset.Dataset: - r"""Gets a Dataset. 
- - Args: - request (:class:`google.cloud.aiplatform_v1.types.GetDatasetRequest`): - The request object. Request message for - [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset]. - name (:class:`str`): - Required. The name of the Dataset - resource. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Dataset: - A collection of DataItems and - Annotations on them. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.GetDatasetRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_dataset, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_dataset(self, - request: dataset_service.UpdateDatasetRequest = None, - *, - dataset: gca_dataset.Dataset = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_dataset.Dataset: - r"""Updates a Dataset. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.UpdateDatasetRequest`): - The request object. Request message for - [DatasetService.UpdateDataset][google.cloud.aiplatform.v1.DatasetService.UpdateDataset]. - dataset (:class:`google.cloud.aiplatform_v1.types.Dataset`): - Required. The Dataset which replaces - the resource on the server. - - This corresponds to the ``dataset`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The update mask applies to the resource. For - the ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - Updatable fields: - - - ``display_name`` - - ``description`` - - ``labels`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
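To illustrate the ``update_mask`` contract (only ``display_name``, ``description``, and ``labels`` are updatable), a sketch assuming an async context and an already-constructed client; the dataset resource name is a placeholder:

```python
from google.cloud import aiplatform_v1
from google.protobuf import field_mask_pb2


async def rename(client: aiplatform_v1.DatasetServiceAsyncClient) -> None:
    dataset = aiplatform_v1.Dataset(
        name="projects/my-project/locations/us-central1/datasets/123",  # placeholder
        display_name="renamed-dataset",
    )
    # Only fields named in the mask are written; all others are left untouched.
    await client.update_dataset(
        dataset=dataset,
        update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
    )
```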
- - Returns: - google.cloud.aiplatform_v1.types.Dataset: - A collection of DataItems and - Annotations on them. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([dataset, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.UpdateDatasetRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if dataset is not None: - request.dataset = dataset - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_dataset, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("dataset.name", request.dataset.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_datasets(self, - request: dataset_service.ListDatasetsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListDatasetsAsyncPager: - r"""Lists Datasets in a Location. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.ListDatasetsRequest`): - The request object. Request message for - [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. - parent (:class:`str`): - Required. The name of the Dataset's parent resource. - Format: ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDatasetsAsyncPager: - Response message for - [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.ListDatasetsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_datasets, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListDatasetsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_dataset(self, - request: dataset_service.DeleteDatasetRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a Dataset. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.DeleteDatasetRequest`): - The request object. Request message for - [DatasetService.DeleteDataset][google.cloud.aiplatform.v1.DatasetService.DeleteDataset]. - name (:class:`str`): - Required. The resource name of the Dataset to delete. - Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.DeleteDatasetRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_dataset, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def import_data(self, - request: dataset_service.ImportDataRequest = None, - *, - name: str = None, - import_configs: Sequence[dataset.ImportDataConfig] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Imports data into a Dataset. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.ImportDataRequest`): - The request object. Request message for - [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. - name (:class:`str`): - Required. The name of the Dataset resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - import_configs (:class:`Sequence[google.cloud.aiplatform_v1.types.ImportDataConfig]`): - Required. The desired input - locations. The contents of all input - locations will be imported in one batch. - - This corresponds to the ``import_configs`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.ImportDataResponse` - Response message for - [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, import_configs]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.ImportDataRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if import_configs: - request.import_configs.extend(import_configs) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.import_data, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
- response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - dataset_service.ImportDataResponse, - metadata_type=dataset_service.ImportDataOperationMetadata, - ) - - # Done; return the response. - return response - - async def export_data(self, - request: dataset_service.ExportDataRequest = None, - *, - name: str = None, - export_config: dataset.ExportDataConfig = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Exports data from a Dataset. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.ExportDataRequest`): - The request object. Request message for - [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. - name (:class:`str`): - Required. The name of the Dataset resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - export_config (:class:`google.cloud.aiplatform_v1.types.ExportDataConfig`): - Required. The desired output - location. - - This corresponds to the ``export_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.ExportDataResponse` - Response message for - [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, export_config]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.ExportDataRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if export_config is not None: - request.export_config = export_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.export_data, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - dataset_service.ExportDataResponse, - metadata_type=dataset_service.ExportDataOperationMetadata, - ) - - # Done; return the response. 
- return response - - async def list_data_items(self, - request: dataset_service.ListDataItemsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListDataItemsAsyncPager: - r"""Lists DataItems in a Dataset. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.ListDataItemsRequest`): - The request object. Request message for - [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. - parent (:class:`str`): - Required. The resource name of the Dataset to list - DataItems from. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDataItemsAsyncPager: - Response message for - [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.ListDataItemsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_data_items, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListDataItemsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_annotation_spec(self, - request: dataset_service.GetAnnotationSpecRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> annotation_spec.AnnotationSpec: - r"""Gets an AnnotationSpec. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.GetAnnotationSpecRequest`): - The request object. Request message for - [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1.DatasetService.GetAnnotationSpec]. - name (:class:`str`): - Required. The name of the AnnotationSpec resource. 
- Format:
- ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}``
-
- This corresponds to the ``name`` field
- on the ``request`` instance; if ``request`` is provided, this
- should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
- should be retried.
- timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
-
- Returns:
- google.cloud.aiplatform_v1.types.AnnotationSpec:
- Identifies a concept with which
- DataItems may be annotated.
-
- """
- # Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([name])
- if request is not None and has_flattened_params:
- raise ValueError("If the `request` argument is set, then none of "
- "the individual field arguments should be set.")
-
- request = dataset_service.GetAnnotationSpecRequest(request)
-
- # If we have keyword arguments corresponding to fields on the
- # request, apply these.
- if name is not None:
- request.name = name
-
- # Wrap the RPC method; this adds retry and timeout information,
- # and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.get_annotation_spec,
- default_timeout=5.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
-
- # Certain fields should be provided within the metadata header;
- # add these here.
- metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata((
- ("name", request.name),
- )),
- )
-
- # Send the request.
- response = await rpc(
- request,
- retry=retry,
- timeout=timeout,
- metadata=metadata,
- )
-
- # Done; return the response.
- return response
-
- async def list_annotations(self,
- request: dataset_service.ListAnnotationsRequest = None,
- *,
- parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> pagers.ListAnnotationsAsyncPager:
- r"""Lists Annotations belonging to a DataItem.
-
- Args:
- request (:class:`google.cloud.aiplatform_v1.types.ListAnnotationsRequest`):
- The request object. Request message for
- [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations].
- parent (:class:`str`):
- Required. The resource name of the DataItem to list
- Annotations from. Format:
- ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}``
-
- This corresponds to the ``parent`` field
- on the ``request`` instance; if ``request`` is provided, this
- should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
- should be retried.
- timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
-
- Returns:
- google.cloud.aiplatform_v1.services.dataset_service.pagers.ListAnnotationsAsyncPager:
- Response message for
- [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations].
-
- Iterating over this object will yield results and
- resolve additional pages automatically.
-
- """
- # Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
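# Aside: this request-vs-flattened-arguments check is stamped into every
# method of the client. A hypothetical illustration of the rule, relying on
# the module's own dataset_service import:
async def annotations_for(client, data_item_name):
    request = dataset_service.ListAnnotationsRequest(parent=data_item_name)
    await client.list_annotations(request=request)        # OK: request object only
    await client.list_annotations(parent=data_item_name)  # OK: flattened field only
    # Passing both raises ValueError: the two styles are mutually exclusive.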
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.ListAnnotationsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_annotations, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListAnnotationsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "DatasetServiceAsyncClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/client.py deleted file mode 100644 index 05e5c9a88e..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/client.py +++ /dev/null @@ -1,1288 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
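# Aside (editorial, not part of the deleted file): the synchronous client
# defined below is typically constructed in one of two ways; the key file
# path here is hypothetical.
from google.cloud import aiplatform_v1

client = aiplatform_v1.DatasetServiceClient()  # Application Default Credentials
client = aiplatform_v1.DatasetServiceClient.from_service_account_file(
    "service-account.json",  # hypothetical key file
)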
-#
-from collections import OrderedDict
-from distutils import util
-import os
-import re
-from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
-import pkg_resources
-
-from google.api_core import client_options as client_options_lib # type: ignore
-from google.api_core import exceptions as core_exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
-from google.auth import credentials as ga_credentials # type: ignore
-from google.auth.transport import mtls # type: ignore
-from google.auth.transport.grpc import SslCredentials # type: ignore
-from google.auth.exceptions import MutualTLSChannelError # type: ignore
-from google.oauth2 import service_account # type: ignore
-
-from google.api_core import operation as gac_operation # type: ignore
-from google.api_core import operation_async # type: ignore
-from google.cloud.aiplatform_v1.services.dataset_service import pagers
-from google.cloud.aiplatform_v1.types import annotation
-from google.cloud.aiplatform_v1.types import annotation_spec
-from google.cloud.aiplatform_v1.types import data_item
-from google.cloud.aiplatform_v1.types import dataset
-from google.cloud.aiplatform_v1.types import dataset as gca_dataset
-from google.cloud.aiplatform_v1.types import dataset_service
-from google.cloud.aiplatform_v1.types import encryption_spec
-from google.cloud.aiplatform_v1.types import operation as gca_operation
-from google.protobuf import empty_pb2 # type: ignore
-from google.protobuf import field_mask_pb2 # type: ignore
-from google.protobuf import struct_pb2 # type: ignore
-from google.protobuf import timestamp_pb2 # type: ignore
-from .transports.base import DatasetServiceTransport, DEFAULT_CLIENT_INFO
-from .transports.grpc import DatasetServiceGrpcTransport
-from .transports.grpc_asyncio import DatasetServiceGrpcAsyncIOTransport
-
-
-class DatasetServiceClientMeta(type):
- """Metaclass for the DatasetService client.
-
- This provides class-level methods for building and retrieving
- support objects (e.g. transport) without polluting the client instance
- objects.
- """
- _transport_registry = OrderedDict() # type: Dict[str, Type[DatasetServiceTransport]]
- _transport_registry["grpc"] = DatasetServiceGrpcTransport
- _transport_registry["grpc_asyncio"] = DatasetServiceGrpcAsyncIOTransport
-
- def get_transport_class(cls,
- label: str = None,
- ) -> Type[DatasetServiceTransport]:
- """Returns an appropriate transport class.
-
- Args:
- label: The name of the desired transport. If none is
- provided, then the first transport in the registry is used.
-
- Returns:
- The transport class to use.
- """
- # If a specific transport is requested, return that one.
- if label:
- return cls._transport_registry[label]
-
- # No transport is requested; return the default (that is, the first one
- # in the dictionary).
- return next(iter(cls._transport_registry.values()))
-
-
-class DatasetServiceClient(metaclass=DatasetServiceClientMeta):
- """The service that handles the CRUD of Vertex AI Dataset and its children resources."""
-
- @staticmethod
- def _get_default_mtls_endpoint(api_endpoint):
- """Converts api endpoint to mTLS endpoint.
-
- Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
- "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
- Args:
- api_endpoint (Optional[str]): the api endpoint to convert.
- Returns:
- str: converted mTLS api endpoint.
- """
- if not api_endpoint:
- return api_endpoint
-
- mtls_endpoint_re = re.compile(
- r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
- ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - DatasetServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - DatasetServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> DatasetServiceTransport: - """Returns the transport used by the client instance. - - Returns: - DatasetServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def annotation_path(project: str,location: str,dataset: str,data_item: str,annotation: str,) -> str: - """Returns a fully-qualified annotation string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}/annotations/{annotation}".format(project=project, location=location, dataset=dataset, data_item=data_item, annotation=annotation, ) - - @staticmethod - def parse_annotation_path(path: str) -> Dict[str,str]: - """Parses a annotation path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/dataItems/(?P.+?)/annotations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def annotation_spec_path(project: str,location: str,dataset: str,annotation_spec: str,) -> str: - """Returns a fully-qualified annotation_spec string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(project=project, location=location, dataset=dataset, annotation_spec=annotation_spec, ) - - @staticmethod - def parse_annotation_spec_path(path: str) -> Dict[str,str]: - """Parses a annotation_spec path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/annotationSpecs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def data_item_path(project: str,location: str,dataset: str,data_item: str,) -> str: - """Returns a fully-qualified data_item string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}".format(project=project, location=location, dataset=dataset, data_item=data_item, ) - - @staticmethod - def parse_data_item_path(path: str) -> Dict[str,str]: - """Parses a data_item path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/dataItems/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def dataset_path(project: str,location: str,dataset: str,) -> str: - """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - - @staticmethod - def parse_dataset_path(path: str) -> Dict[str,str]: - """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - 
- @staticmethod
- def parse_common_organization_path(path: str) -> Dict[str,str]:
- """Parse an organization path into its component segments."""
- m = re.match(r"^organizations/(?P<organization>.+?)$", path)
- return m.groupdict() if m else {}
-
- @staticmethod
- def common_project_path(project: str, ) -> str:
- """Returns a fully-qualified project string."""
- return "projects/{project}".format(project=project, )
-
- @staticmethod
- def parse_common_project_path(path: str) -> Dict[str,str]:
- """Parse a project path into its component segments."""
- m = re.match(r"^projects/(?P<project>.+?)$", path)
- return m.groupdict() if m else {}
-
- @staticmethod
- def common_location_path(project: str, location: str, ) -> str:
- """Returns a fully-qualified location string."""
- return "projects/{project}/locations/{location}".format(project=project, location=location, )
-
- @staticmethod
- def parse_common_location_path(path: str) -> Dict[str,str]:
- """Parse a location path into its component segments."""
- m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
- return m.groupdict() if m else {}
-
- def __init__(self, *,
- credentials: Optional[ga_credentials.Credentials] = None,
- transport: Union[str, DatasetServiceTransport, None] = None,
- client_options: Optional[client_options_lib.ClientOptions] = None,
- client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
- ) -> None:
- """Instantiates the dataset service client.
-
- Args:
- credentials (Optional[google.auth.credentials.Credentials]): The
- authorization credentials to attach to requests. These
- credentials identify the application to the service; if none
- are specified, the client will attempt to ascertain the
- credentials from the environment.
- transport (Union[str, DatasetServiceTransport]): The
- transport to use. If set to None, a transport is chosen
- automatically.
- client_options (google.api_core.client_options.ClientOptions): Custom options for the
- client. It won't take effect if a ``transport`` instance is provided.
- (1) The ``api_endpoint`` property can be used to override the
- default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
- environment variable can also be used to override the endpoint:
- "always" (always use the default mTLS endpoint), "never" (always
- use the default regular endpoint) and "auto" (auto switch to the
- default mTLS endpoint if client certificate is present, this is
- the default value). However, the ``api_endpoint`` property takes
- precedence if provided.
- (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
- is "true", then the ``client_cert_source`` property can be used
- to provide client certificate for mutual TLS transport. If
- not provided, the default SSL client certificate will be used if
- present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
- set, no client certificate will be used.
- client_info (google.api_core.gapic_v1.client_info.ClientInfo):
- The client info used to send a user-agent string along with
- API requests. If ``None``, then default info will be used.
- Generally, you only need to set this if you're developing
- your own client library.
-
- Raises:
- google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
- creation failed for any reason.
- """
- if isinstance(client_options, dict):
- client_options = client_options_lib.from_dict(client_options)
- if client_options is None:
- client_options = client_options_lib.ClientOptions()
-
- # Create SSL credentials for mutual TLS if needed.
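# Aside: the negotiation below is driven by GOOGLE_API_USE_CLIENT_CERTIFICATE,
# GOOGLE_API_USE_MTLS_ENDPOINT, and client_options. A hedged sketch of the two
# common overrides; the regional endpoint shown is illustrative.
import os
from google.api_core.client_options import ClientOptions

os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "never"  # pin the plain endpoint
client = DatasetServiceClient(
    client_options=ClientOptions(
        api_endpoint="us-central1-aiplatform.googleapis.com",
    ),
)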
- use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, DatasetServiceTransport): - # transport is a DatasetServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - ) - - def create_dataset(self, - request: dataset_service.CreateDatasetRequest = None, - *, - parent: str = None, - dataset: gca_dataset.Dataset = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates a Dataset. - - Args: - request (google.cloud.aiplatform_v1.types.CreateDatasetRequest): - The request object. Request message for - [DatasetService.CreateDataset][google.cloud.aiplatform.v1.DatasetService.CreateDataset]. - parent (str): - Required. The resource name of the Location to create - the Dataset in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - dataset (google.cloud.aiplatform_v1.types.Dataset): - Required. The Dataset to create. - This corresponds to the ``dataset`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
- - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.Dataset` A - collection of DataItems and Annotations on them. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, dataset]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.CreateDatasetRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.CreateDatasetRequest): - request = dataset_service.CreateDatasetRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if dataset is not None: - request.dataset = dataset - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_dataset] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_dataset.Dataset, - metadata_type=dataset_service.CreateDatasetOperationMetadata, - ) - - # Done; return the response. - return response - - def get_dataset(self, - request: dataset_service.GetDatasetRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> dataset.Dataset: - r"""Gets a Dataset. - - Args: - request (google.cloud.aiplatform_v1.types.GetDatasetRequest): - The request object. Request message for - [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset]. - name (str): - Required. The name of the Dataset - resource. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Dataset: - A collection of DataItems and - Annotations on them. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.GetDatasetRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
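# Aside: note the contrast with the async client earlier in this patch, which
# always re-wraps the request; here an isinstance() check (below) lets a
# caller-supplied proto-plus request pass through without a defensive copy.
def get_ds(client, dataset_name):  # dataset_name is hypothetical
    req = dataset_service.GetDatasetRequest(name=dataset_name)
    return client.get_dataset(request=req)  # req is used as-is, not copied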
- if not isinstance(request, dataset_service.GetDatasetRequest): - request = dataset_service.GetDatasetRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_dataset] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_dataset(self, - request: dataset_service.UpdateDatasetRequest = None, - *, - dataset: gca_dataset.Dataset = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_dataset.Dataset: - r"""Updates a Dataset. - - Args: - request (google.cloud.aiplatform_v1.types.UpdateDatasetRequest): - The request object. Request message for - [DatasetService.UpdateDataset][google.cloud.aiplatform.v1.DatasetService.UpdateDataset]. - dataset (google.cloud.aiplatform_v1.types.Dataset): - Required. The Dataset which replaces - the resource on the server. - - This corresponds to the ``dataset`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the resource. For - the ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - Updatable fields: - - - ``display_name`` - - ``description`` - - ``labels`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Dataset: - A collection of DataItems and - Annotations on them. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([dataset, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.UpdateDatasetRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.UpdateDatasetRequest): - request = dataset_service.UpdateDatasetRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if dataset is not None: - request.dataset = dataset - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
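# Hedged sketch: applying the FieldMask documented above to change only the
# display name; `ds` is assumed to be a previously fetched Dataset.
from google.protobuf import field_mask_pb2

def rename_dataset(client, ds):
    ds.display_name = "renamed-dataset"
    return client.update_dataset(
        dataset=ds,
        update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
    )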
- rpc = self._transport._wrapped_methods[self._transport.update_dataset] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("dataset.name", request.dataset.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_datasets(self, - request: dataset_service.ListDatasetsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListDatasetsPager: - r"""Lists Datasets in a Location. - - Args: - request (google.cloud.aiplatform_v1.types.ListDatasetsRequest): - The request object. Request message for - [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. - parent (str): - Required. The name of the Dataset's parent resource. - Format: ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDatasetsPager: - Response message for - [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.ListDatasetsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.ListDatasetsRequest): - request = dataset_service.ListDatasetsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_datasets] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListDatasetsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
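# Aside: the synchronous pager supports plain iteration (its `.pages` property
# walks page-by-page instead). A minimal sketch:
def count_datasets(client, parent):
    return sum(1 for _ in client.list_datasets(parent=parent))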
- return response - - def delete_dataset(self, - request: dataset_service.DeleteDatasetRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a Dataset. - - Args: - request (google.cloud.aiplatform_v1.types.DeleteDatasetRequest): - The request object. Request message for - [DatasetService.DeleteDataset][google.cloud.aiplatform.v1.DatasetService.DeleteDataset]. - name (str): - Required. The resource name of the Dataset to delete. - Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.DeleteDatasetRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.DeleteDatasetRequest): - request = dataset_service.DeleteDatasetRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_dataset] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
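# Hedged sketch: the delete LRO's result type is Empty, so callers usually
# just block on completion; result() re-raises any operation error.
def delete_dataset_blocking(client, name):
    client.delete_dataset(name=name).result(timeout=300)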
- return response - - def import_data(self, - request: dataset_service.ImportDataRequest = None, - *, - name: str = None, - import_configs: Sequence[dataset.ImportDataConfig] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Imports data into a Dataset. - - Args: - request (google.cloud.aiplatform_v1.types.ImportDataRequest): - The request object. Request message for - [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. - name (str): - Required. The name of the Dataset resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - import_configs (Sequence[google.cloud.aiplatform_v1.types.ImportDataConfig]): - Required. The desired input - locations. The contents of all input - locations will be imported in one batch. - - This corresponds to the ``import_configs`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.ImportDataResponse` - Response message for - [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, import_configs]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.ImportDataRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.ImportDataRequest): - request = dataset_service.ImportDataRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if import_configs is not None: - request.import_configs = import_configs - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.import_data] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - dataset_service.ImportDataResponse, - metadata_type=dataset_service.ImportDataOperationMetadata, - ) - - # Done; return the response. 
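# Aside: proto-plus repeated fields accept wholesale assignment (as in the
# sync client above) as well as extend() (as in the async variant); both copy
# the given sequence into the message. Illustrative values only.
from google.cloud import aiplatform_v1

cfg = aiplatform_v1.ImportDataConfig(
    gcs_source=aiplatform_v1.GcsSource(uris=["gs://my-bucket/items.jsonl"]),
)
req = aiplatform_v1.ImportDataRequest(name="projects/p/locations/l/datasets/d")
req.import_configs = [cfg]        # assignment, sync-client style
req.import_configs.extend([cfg])  # extend, async-client style (now two configs)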
- return response - - def export_data(self, - request: dataset_service.ExportDataRequest = None, - *, - name: str = None, - export_config: dataset.ExportDataConfig = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Exports data from a Dataset. - - Args: - request (google.cloud.aiplatform_v1.types.ExportDataRequest): - The request object. Request message for - [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. - name (str): - Required. The name of the Dataset resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - export_config (google.cloud.aiplatform_v1.types.ExportDataConfig): - Required. The desired output - location. - - This corresponds to the ``export_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.ExportDataResponse` - Response message for - [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, export_config]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.ExportDataRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.ExportDataRequest): - request = dataset_service.ExportDataRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if export_config is not None: - request.export_config = export_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.export_data] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - dataset_service.ExportDataResponse, - metadata_type=dataset_service.ExportDataOperationMetadata, - ) - - # Done; return the response. 
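# Aside: the 5.0s default timeout baked in via _wrapped_methods can be
# overridden per call. A hedged sketch; client, dataset_name and cfg are
# assumed from the surrounding examples.
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries

patient_retry = retries.Retry(
    predicate=retries.if_exception_type(core_exceptions.ServiceUnavailable),
    initial=1.0, maximum=32.0, multiplier=2.0, deadline=120.0,
)

def export_with_retry(client, dataset_name, cfg):
    return client.export_data(name=dataset_name, export_config=cfg,
                              retry=patient_retry, timeout=60.0)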
- return response - - def list_data_items(self, - request: dataset_service.ListDataItemsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListDataItemsPager: - r"""Lists DataItems in a Dataset. - - Args: - request (google.cloud.aiplatform_v1.types.ListDataItemsRequest): - The request object. Request message for - [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. - parent (str): - Required. The resource name of the Dataset to list - DataItems from. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDataItemsPager: - Response message for - [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.ListDataItemsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.ListDataItemsRequest): - request = dataset_service.ListDataItemsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_data_items] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListDataItemsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_annotation_spec(self, - request: dataset_service.GetAnnotationSpecRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> annotation_spec.AnnotationSpec: - r"""Gets an AnnotationSpec. - - Args: - request (google.cloud.aiplatform_v1.types.GetAnnotationSpecRequest): - The request object. Request message for - [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1.DatasetService.GetAnnotationSpec]. 
- name (str):
- Required. The name of the AnnotationSpec resource.
- Format:
- ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}``
-
- This corresponds to the ``name`` field
- on the ``request`` instance; if ``request`` is provided, this
- should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
- should be retried.
- timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
-
- Returns:
- google.cloud.aiplatform_v1.types.AnnotationSpec:
- Identifies a concept with which
- DataItems may be annotated.
-
- """
- # Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([name])
- if request is not None and has_flattened_params:
- raise ValueError('If the `request` argument is set, then none of '
- 'the individual field arguments should be set.')
-
- # Minor optimization to avoid making a copy if the user passes
- # in a dataset_service.GetAnnotationSpecRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
- if not isinstance(request, dataset_service.GetAnnotationSpecRequest):
- request = dataset_service.GetAnnotationSpecRequest(request)
- # If we have keyword arguments corresponding to fields on the
- # request, apply these.
- if name is not None:
- request.name = name
-
- # Wrap the RPC method; this adds retry and timeout information,
- # and friendly error handling.
- rpc = self._transport._wrapped_methods[self._transport.get_annotation_spec]
-
- # Certain fields should be provided within the metadata header;
- # add these here.
- metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata((
- ("name", request.name),
- )),
- )
-
- # Send the request.
- response = rpc(
- request,
- retry=retry,
- timeout=timeout,
- metadata=metadata,
- )
-
- # Done; return the response.
- return response
-
- def list_annotations(self,
- request: dataset_service.ListAnnotationsRequest = None,
- *,
- parent: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> pagers.ListAnnotationsPager:
- r"""Lists Annotations that belong to a DataItem.
-
- Args:
- request (google.cloud.aiplatform_v1.types.ListAnnotationsRequest):
- The request object. Request message for
- [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations].
- parent (str):
- Required. The resource name of the DataItem to list
- Annotations from. Format:
- ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}``
-
- This corresponds to the ``parent`` field
- on the ``request`` instance; if ``request`` is provided, this
- should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
- should be retried.
- timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
-
- Returns:
- google.cloud.aiplatform_v1.services.dataset_service.pagers.ListAnnotationsPager:
- Response message for
- [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations].
-
- Iterating over this object will yield results and
- resolve additional pages automatically.
- - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.ListAnnotationsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.ListAnnotationsRequest): - request = dataset_service.ListAnnotationsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_annotations] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListAnnotationsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "DatasetServiceClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/pagers.py deleted file mode 100644 index 6621ccddce..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/pagers.py +++ /dev/null @@ -1,387 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.cloud.aiplatform_v1.types import annotation -from google.cloud.aiplatform_v1.types import data_item -from google.cloud.aiplatform_v1.types import dataset -from google.cloud.aiplatform_v1.types import dataset_service - - -class ListDatasetsPager: - """A pager for iterating through ``list_datasets`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListDatasetsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``datasets`` field. 
- - If there are more pages, the ``__iter__`` method will make additional - ``ListDatasets`` requests and continue to iterate - through the ``datasets`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListDatasetsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., dataset_service.ListDatasetsResponse], - request: dataset_service.ListDatasetsRequest, - response: dataset_service.ListDatasetsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListDatasetsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListDatasetsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = dataset_service.ListDatasetsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[dataset_service.ListDatasetsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[dataset.Dataset]: - for page in self.pages: - yield from page.datasets - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListDatasetsAsyncPager: - """A pager for iterating through ``list_datasets`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListDatasetsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``datasets`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListDatasets`` requests and continue to iterate - through the ``datasets`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListDatasetsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[dataset_service.ListDatasetsResponse]], - request: dataset_service.ListDatasetsRequest, - response: dataset_service.ListDatasetsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListDatasetsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListDatasetsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = dataset_service.ListDatasetsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[dataset_service.ListDatasetsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[dataset.Dataset]: - async def async_generator(): - async for page in self.pages: - for response in page.datasets: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListDataItemsPager: - """A pager for iterating through ``list_data_items`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListDataItemsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``data_items`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListDataItems`` requests and continue to iterate - through the ``data_items`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListDataItemsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., dataset_service.ListDataItemsResponse], - request: dataset_service.ListDataItemsRequest, - response: dataset_service.ListDataItemsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListDataItemsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListDataItemsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = dataset_service.ListDataItemsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[dataset_service.ListDataItemsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[data_item.DataItem]: - for page in self.pages: - yield from page.data_items - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListDataItemsAsyncPager: - """A pager for iterating through ``list_data_items`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListDataItemsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``data_items`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListDataItems`` requests and continue to iterate - through the ``data_items`` field on the - corresponding responses. 
- - All the usual :class:`google.cloud.aiplatform_v1.types.ListDataItemsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[dataset_service.ListDataItemsResponse]], - request: dataset_service.ListDataItemsRequest, - response: dataset_service.ListDataItemsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListDataItemsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListDataItemsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = dataset_service.ListDataItemsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[dataset_service.ListDataItemsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[data_item.DataItem]: - async def async_generator(): - async for page in self.pages: - for response in page.data_items: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListAnnotationsPager: - """A pager for iterating through ``list_annotations`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListAnnotationsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``annotations`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListAnnotations`` requests and continue to iterate - through the ``annotations`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListAnnotationsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., dataset_service.ListAnnotationsResponse], - request: dataset_service.ListAnnotationsRequest, - response: dataset_service.ListAnnotationsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListAnnotationsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListAnnotationsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = dataset_service.ListAnnotationsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[dataset_service.ListAnnotationsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[annotation.Annotation]: - for page in self.pages: - yield from page.annotations - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListAnnotationsAsyncPager: - """A pager for iterating through ``list_annotations`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListAnnotationsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``annotations`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListAnnotations`` requests and continue to iterate - through the ``annotations`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListAnnotationsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[dataset_service.ListAnnotationsResponse]], - request: dataset_service.ListAnnotationsRequest, - response: dataset_service.ListAnnotationsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListAnnotationsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListAnnotationsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = dataset_service.ListAnnotationsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[dataset_service.ListAnnotationsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[annotation.Annotation]: - async def async_generator(): - async for page in self.pages: - for response in page.annotations: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/__init__.py deleted file mode 100644 index 561b0c5cfd..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import DatasetServiceTransport -from .grpc import DatasetServiceGrpcTransport -from .grpc_asyncio import DatasetServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[DatasetServiceTransport]] -_transport_registry['grpc'] = DatasetServiceGrpcTransport -_transport_registry['grpc_asyncio'] = DatasetServiceGrpcAsyncIOTransport - -__all__ = ( - 'DatasetServiceTransport', - 'DatasetServiceGrpcTransport', - 'DatasetServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py deleted file mode 100644 index 4f895a87f0..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py +++ /dev/null @@ -1,304 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1.types import annotation_spec -from google.cloud.aiplatform_v1.types import dataset -from google.cloud.aiplatform_v1.types import dataset as gca_dataset -from google.cloud.aiplatform_v1.types import dataset_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class DatasetServiceTransport(abc.ABC): - """Abstract transport class for DatasetService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. 
- if credentials and credentials_file:
- raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
-
- if credentials_file is not None:
- credentials, _ = google.auth.load_credentials_from_file(
- credentials_file,
- **scopes_kwargs,
- quota_project_id=quota_project_id
- )
-
- elif credentials is None:
- credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
-
- # If the credentials are service account credentials, then always try to use self signed JWT.
- if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
- credentials = credentials.with_always_use_jwt_access(True)
-
- # Save the credentials.
- self._credentials = credentials
-
- # TODO(busunkim): This method is in the base transport
- # to avoid duplicating code across the transport classes. These functions
- # should be deleted once the minimum required version of google-auth is increased.
-
- # TODO: Remove this function once google-auth >= 1.25.0 is required
- @classmethod
- def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
- """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
-
- scopes_kwargs = {}
-
- if _GOOGLE_AUTH_VERSION and (
- packaging.version.parse(_GOOGLE_AUTH_VERSION)
- >= packaging.version.parse("1.25.0")
- ):
- scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
- else:
- scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
-
- return scopes_kwargs
-
- def _prep_wrapped_messages(self, client_info):
- # Precompute the wrapped methods.
- self._wrapped_methods = {
- self.create_dataset: gapic_v1.method.wrap_method(
- self.create_dataset,
- default_timeout=5.0,
- client_info=client_info,
- ),
- self.get_dataset: gapic_v1.method.wrap_method(
- self.get_dataset,
- default_timeout=5.0,
- client_info=client_info,
- ),
- self.update_dataset: gapic_v1.method.wrap_method(
- self.update_dataset,
- default_timeout=5.0,
- client_info=client_info,
- ),
- self.list_datasets: gapic_v1.method.wrap_method(
- self.list_datasets,
- default_timeout=5.0,
- client_info=client_info,
- ),
- self.delete_dataset: gapic_v1.method.wrap_method(
- self.delete_dataset,
- default_timeout=5.0,
- client_info=client_info,
- ),
- self.import_data: gapic_v1.method.wrap_method(
- self.import_data,
- default_timeout=5.0,
- client_info=client_info,
- ),
- self.export_data: gapic_v1.method.wrap_method(
- self.export_data,
- default_timeout=5.0,
- client_info=client_info,
- ),
- self.list_data_items: gapic_v1.method.wrap_method(
- self.list_data_items,
- default_timeout=5.0,
- client_info=client_info,
- ),
- self.get_annotation_spec: gapic_v1.method.wrap_method(
- self.get_annotation_spec,
- default_timeout=5.0,
- client_info=client_info,
- ),
- self.list_annotations: gapic_v1.method.wrap_method(
- self.list_annotations,
- default_timeout=5.0,
- client_info=client_info,
- ),
- }
-
- @property
- def operations_client(self) -> operations_v1.OperationsClient:
- """Return the client designed to process long-running operations."""
- raise NotImplementedError()
-
- @property
- def create_dataset(self) -> Callable[
- [dataset_service.CreateDatasetRequest],
- Union[
- operations_pb2.Operation,
- Awaitable[operations_pb2.Operation]
- ]]:
- raise NotImplementedError()
-
- @property
- def get_dataset(self) -> Callable[
- [dataset_service.GetDatasetRequest],
- Union[
- 
dataset.Dataset, - Awaitable[dataset.Dataset] - ]]: - raise NotImplementedError() - - @property - def update_dataset(self) -> Callable[ - [dataset_service.UpdateDatasetRequest], - Union[ - gca_dataset.Dataset, - Awaitable[gca_dataset.Dataset] - ]]: - raise NotImplementedError() - - @property - def list_datasets(self) -> Callable[ - [dataset_service.ListDatasetsRequest], - Union[ - dataset_service.ListDatasetsResponse, - Awaitable[dataset_service.ListDatasetsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_dataset(self) -> Callable[ - [dataset_service.DeleteDatasetRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def import_data(self) -> Callable[ - [dataset_service.ImportDataRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def export_data(self) -> Callable[ - [dataset_service.ExportDataRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def list_data_items(self) -> Callable[ - [dataset_service.ListDataItemsRequest], - Union[ - dataset_service.ListDataItemsResponse, - Awaitable[dataset_service.ListDataItemsResponse] - ]]: - raise NotImplementedError() - - @property - def get_annotation_spec(self) -> Callable[ - [dataset_service.GetAnnotationSpecRequest], - Union[ - annotation_spec.AnnotationSpec, - Awaitable[annotation_spec.AnnotationSpec] - ]]: - raise NotImplementedError() - - @property - def list_annotations(self) -> Callable[ - [dataset_service.ListAnnotationsRequest], - Union[ - dataset_service.ListAnnotationsResponse, - Awaitable[dataset_service.ListAnnotationsResponse] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'DatasetServiceTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py deleted file mode 100644 index d3b6f3ff83..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py +++ /dev/null @@ -1,506 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
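Since ``_prep_wrapped_messages`` above wraps every DatasetService RPC with a
5.0-second default timeout, individual calls can override it through the
``retry`` and ``timeout`` parameters; a rough sketch with placeholder values:

    from google.api_core import exceptions as core_exceptions
    from google.api_core import retry as retries
    from google.cloud import aiplatform_v1

    client = aiplatform_v1.DatasetServiceClient()

    # Retry only transient UNAVAILABLE errors, with exponential backoff.
    custom_retry = retries.Retry(
        predicate=retries.if_exception_type(core_exceptions.ServiceUnavailable),
        initial=1.0,
        maximum=10.0,
        multiplier=2.0,
        deadline=120.0,
    )

    dataset = client.get_dataset(
        name="projects/my-project/locations/us-central1/datasets/123",  # placeholder
        retry=custom_retry,
        timeout=30.0,  # overrides the wrapped 5.0s default for this call
    )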
-#
-import warnings
-from typing import Callable, Dict, Optional, Sequence, Tuple, Union
-
-from google.api_core import grpc_helpers # type: ignore
-from google.api_core import operations_v1 # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-import google.auth # type: ignore
-from google.auth import credentials as ga_credentials # type: ignore
-from google.auth.transport.grpc import SslCredentials # type: ignore
-
-import grpc # type: ignore
-
-from google.cloud.aiplatform_v1.types import annotation_spec
-from google.cloud.aiplatform_v1.types import dataset
-from google.cloud.aiplatform_v1.types import dataset as gca_dataset
-from google.cloud.aiplatform_v1.types import dataset_service
-from google.longrunning import operations_pb2 # type: ignore
-from .base import DatasetServiceTransport, DEFAULT_CLIENT_INFO
-
-
-class DatasetServiceGrpcTransport(DatasetServiceTransport):
- """gRPC backend transport for DatasetService.
-
- This class defines the same methods as the primary client, so the
- primary client can load the underlying transport implementation
- and call it.
-
- It sends protocol buffers over the wire using gRPC (which is built on
- top of HTTP/2); the ``grpcio`` package must be installed.
- """
- _stubs: Dict[str, Callable]
-
- def __init__(self, *,
- host: str = 'aiplatform.googleapis.com',
- credentials: ga_credentials.Credentials = None,
- credentials_file: str = None,
- scopes: Sequence[str] = None,
- channel: grpc.Channel = None,
- api_mtls_endpoint: str = None,
- client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
- ssl_channel_credentials: grpc.ChannelCredentials = None,
- client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
- quota_project_id: Optional[str] = None,
- client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
- always_use_jwt_access: Optional[bool] = False,
- ) -> None:
- """Instantiate the transport.
-
- Args:
- host (Optional[str]):
- The hostname to connect to.
- credentials (Optional[google.auth.credentials.Credentials]): The
- authorization credentials to attach to requests. These
- credentials identify the application to the service; if none
- are specified, the client will attempt to ascertain the
- credentials from the environment.
- This argument is ignored if ``channel`` is provided.
- credentials_file (Optional[str]): A file with credentials that can
- be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is ignored if ``channel`` is provided.
- scopes (Optional[Sequence[str]]): A list of scopes. This argument is
- ignored if ``channel`` is provided.
- channel (Optional[grpc.Channel]): A ``Channel`` instance through
- which to make calls.
- api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
- If provided, it overrides the ``host`` argument and tries to create
- a mutual TLS channel with client SSL credentials from
- ``client_cert_source`` or application default SSL credentials.
- client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
- Deprecated. A callback to provide client SSL certificate bytes and
- private key bytes, both in PEM format. It is ignored if
- ``api_mtls_endpoint`` is None.
- ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
- for grpc channel. It is ignored if ``channel`` is provided.
- client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
- A callback to provide client certificate bytes and private key bytes,
- both in PEM format. It is used to configure a mutual TLS channel.
It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can
- be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is mutually exclusive with credentials.
- scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
- service. These are only used when credentials are not specified and
- are passed to :func:`google.auth.default`.
- quota_project_id (Optional[str]): An optional project to use for billing
- and quota.
- kwargs (Optional[dict]): Keyword arguments, which are passed to the
- channel creation.
- Returns:
- grpc.Channel: A gRPC channel object.
-
- Raises:
- google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
- and ``credentials_file`` are passed.
- """
-
- return grpc_helpers.create_channel(
- host,
- credentials=credentials,
- credentials_file=credentials_file,
- quota_project_id=quota_project_id,
- default_scopes=cls.AUTH_SCOPES,
- scopes=scopes,
- default_host=cls.DEFAULT_HOST,
- **kwargs
- )
-
- @property
- def grpc_channel(self) -> grpc.Channel:
- """Return the channel designed to connect to this service.
- """
- return self._grpc_channel
-
- @property
- def operations_client(self) -> operations_v1.OperationsClient:
- """Create the client designed to process long-running operations.
-
- This property caches on the instance; repeated calls return the same
- client.
- """
- # Sanity check: Only create a new client if we do not already have one.
- if self._operations_client is None:
- self._operations_client = operations_v1.OperationsClient(
- self.grpc_channel
- )
-
- # Return the client from cache.
- return self._operations_client
-
- @property
- def create_dataset(self) -> Callable[
- [dataset_service.CreateDatasetRequest],
- operations_pb2.Operation]:
- r"""Return a callable for the create dataset method over gRPC.
-
- Creates a Dataset.
-
- Returns:
- Callable[[~.CreateDatasetRequest],
- ~.Operation]:
- A function that, when called, will call the underlying RPC
- on the server.
- """
- # Generate a "stub function" on-the-fly which will actually make
- # the request.
- # gRPC handles serialization and deserialization, so we just need
- # to pass in the functions for each.
- if 'create_dataset' not in self._stubs:
- self._stubs['create_dataset'] = self.grpc_channel.unary_unary(
- '/google.cloud.aiplatform.v1.DatasetService/CreateDataset',
- request_serializer=dataset_service.CreateDatasetRequest.serialize,
- response_deserializer=operations_pb2.Operation.FromString,
- )
- return self._stubs['create_dataset']
-
- @property
- def get_dataset(self) -> Callable[
- [dataset_service.GetDatasetRequest],
- dataset.Dataset]:
- r"""Return a callable for the get dataset method over gRPC.
-
- Gets a Dataset.
-
- Returns:
- Callable[[~.GetDatasetRequest],
- ~.Dataset]:
- A function that, when called, will call the underlying RPC
- on the server.
- """
- # Generate a "stub function" on-the-fly which will actually make
- # the request.
- # gRPC handles serialization and deserialization, so we just need
- # to pass in the functions for each.
- if 'get_dataset' not in self._stubs: - self._stubs['get_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/GetDataset', - request_serializer=dataset_service.GetDatasetRequest.serialize, - response_deserializer=dataset.Dataset.deserialize, - ) - return self._stubs['get_dataset'] - - @property - def update_dataset(self) -> Callable[ - [dataset_service.UpdateDatasetRequest], - gca_dataset.Dataset]: - r"""Return a callable for the update dataset method over gRPC. - - Updates a Dataset. - - Returns: - Callable[[~.UpdateDatasetRequest], - ~.Dataset]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_dataset' not in self._stubs: - self._stubs['update_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/UpdateDataset', - request_serializer=dataset_service.UpdateDatasetRequest.serialize, - response_deserializer=gca_dataset.Dataset.deserialize, - ) - return self._stubs['update_dataset'] - - @property - def list_datasets(self) -> Callable[ - [dataset_service.ListDatasetsRequest], - dataset_service.ListDatasetsResponse]: - r"""Return a callable for the list datasets method over gRPC. - - Lists Datasets in a Location. - - Returns: - Callable[[~.ListDatasetsRequest], - ~.ListDatasetsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_datasets' not in self._stubs: - self._stubs['list_datasets'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/ListDatasets', - request_serializer=dataset_service.ListDatasetsRequest.serialize, - response_deserializer=dataset_service.ListDatasetsResponse.deserialize, - ) - return self._stubs['list_datasets'] - - @property - def delete_dataset(self) -> Callable[ - [dataset_service.DeleteDatasetRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete dataset method over gRPC. - - Deletes a Dataset. - - Returns: - Callable[[~.DeleteDatasetRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_dataset' not in self._stubs: - self._stubs['delete_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/DeleteDataset', - request_serializer=dataset_service.DeleteDatasetRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_dataset'] - - @property - def import_data(self) -> Callable[ - [dataset_service.ImportDataRequest], - operations_pb2.Operation]: - r"""Return a callable for the import data method over gRPC. - - Imports data into a Dataset. - - Returns: - Callable[[~.ImportDataRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'import_data' not in self._stubs: - self._stubs['import_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/ImportData', - request_serializer=dataset_service.ImportDataRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['import_data'] - - @property - def export_data(self) -> Callable[ - [dataset_service.ExportDataRequest], - operations_pb2.Operation]: - r"""Return a callable for the export data method over gRPC. - - Exports data from a Dataset. - - Returns: - Callable[[~.ExportDataRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'export_data' not in self._stubs: - self._stubs['export_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/ExportData', - request_serializer=dataset_service.ExportDataRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_data'] - - @property - def list_data_items(self) -> Callable[ - [dataset_service.ListDataItemsRequest], - dataset_service.ListDataItemsResponse]: - r"""Return a callable for the list data items method over gRPC. - - Lists DataItems in a Dataset. - - Returns: - Callable[[~.ListDataItemsRequest], - ~.ListDataItemsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_data_items' not in self._stubs: - self._stubs['list_data_items'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/ListDataItems', - request_serializer=dataset_service.ListDataItemsRequest.serialize, - response_deserializer=dataset_service.ListDataItemsResponse.deserialize, - ) - return self._stubs['list_data_items'] - - @property - def get_annotation_spec(self) -> Callable[ - [dataset_service.GetAnnotationSpecRequest], - annotation_spec.AnnotationSpec]: - r"""Return a callable for the get annotation spec method over gRPC. - - Gets an AnnotationSpec. - - Returns: - Callable[[~.GetAnnotationSpecRequest], - ~.AnnotationSpec]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_annotation_spec' not in self._stubs: - self._stubs['get_annotation_spec'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/GetAnnotationSpec', - request_serializer=dataset_service.GetAnnotationSpecRequest.serialize, - response_deserializer=annotation_spec.AnnotationSpec.deserialize, - ) - return self._stubs['get_annotation_spec'] - - @property - def list_annotations(self) -> Callable[ - [dataset_service.ListAnnotationsRequest], - dataset_service.ListAnnotationsResponse]: - r"""Return a callable for the list annotations method over gRPC. 
-
- Lists Annotations that belong to a DataItem.
-
- Returns:
- Callable[[~.ListAnnotationsRequest],
- ~.ListAnnotationsResponse]:
- A function that, when called, will call the underlying RPC
- on the server.
- """
- # Generate a "stub function" on-the-fly which will actually make
- # the request.
- # gRPC handles serialization and deserialization, so we just need
- # to pass in the functions for each.
- if 'list_annotations' not in self._stubs:
- self._stubs['list_annotations'] = self.grpc_channel.unary_unary(
- '/google.cloud.aiplatform.v1.DatasetService/ListAnnotations',
- request_serializer=dataset_service.ListAnnotationsRequest.serialize,
- response_deserializer=dataset_service.ListAnnotationsResponse.deserialize,
- )
- return self._stubs['list_annotations']
-
-
-__all__ = (
- 'DatasetServiceGrpcTransport',
-)
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py
deleted file mode 100644
index 3bf60c814a..0000000000
--- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py
+++ /dev/null
@@ -1,510 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import warnings
-from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
-
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import grpc_helpers_async # type: ignore
-from google.api_core import operations_v1 # type: ignore
-from google.auth import credentials as ga_credentials # type: ignore
-from google.auth.transport.grpc import SslCredentials # type: ignore
-import packaging.version
-
-import grpc # type: ignore
-from grpc.experimental import aio # type: ignore
-
-from google.cloud.aiplatform_v1.types import annotation_spec
-from google.cloud.aiplatform_v1.types import dataset
-from google.cloud.aiplatform_v1.types import dataset as gca_dataset
-from google.cloud.aiplatform_v1.types import dataset_service
-from google.longrunning import operations_pb2 # type: ignore
-from .base import DatasetServiceTransport, DEFAULT_CLIENT_INFO
-from .grpc import DatasetServiceGrpcTransport
-
-
-class DatasetServiceGrpcAsyncIOTransport(DatasetServiceTransport):
- """gRPC AsyncIO backend transport for DatasetService.
-
- This class defines the same methods as the primary client, so the
- primary client can load the underlying transport implementation
- and call it.
-
- It sends protocol buffers over the wire using gRPC (which is built on
- top of HTTP/2); the ``grpcio`` package must be installed.
- """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. 
A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. 
- return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_dataset(self) -> Callable[ - [dataset_service.CreateDatasetRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create dataset method over gRPC. - - Creates a Dataset. - - Returns: - Callable[[~.CreateDatasetRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_dataset' not in self._stubs: - self._stubs['create_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/CreateDataset', - request_serializer=dataset_service.CreateDatasetRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_dataset'] - - @property - def get_dataset(self) -> Callable[ - [dataset_service.GetDatasetRequest], - Awaitable[dataset.Dataset]]: - r"""Return a callable for the get dataset method over gRPC. - - Gets a Dataset. - - Returns: - Callable[[~.GetDatasetRequest], - Awaitable[~.Dataset]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_dataset' not in self._stubs: - self._stubs['get_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/GetDataset', - request_serializer=dataset_service.GetDatasetRequest.serialize, - response_deserializer=dataset.Dataset.deserialize, - ) - return self._stubs['get_dataset'] - - @property - def update_dataset(self) -> Callable[ - [dataset_service.UpdateDatasetRequest], - Awaitable[gca_dataset.Dataset]]: - r"""Return a callable for the update dataset method over gRPC. - - Updates a Dataset. - - Returns: - Callable[[~.UpdateDatasetRequest], - Awaitable[~.Dataset]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_dataset' not in self._stubs: - self._stubs['update_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/UpdateDataset', - request_serializer=dataset_service.UpdateDatasetRequest.serialize, - response_deserializer=gca_dataset.Dataset.deserialize, - ) - return self._stubs['update_dataset'] - - @property - def list_datasets(self) -> Callable[ - [dataset_service.ListDatasetsRequest], - Awaitable[dataset_service.ListDatasetsResponse]]: - r"""Return a callable for the list datasets method over gRPC. - - Lists Datasets in a Location. 
- - Returns: - Callable[[~.ListDatasetsRequest], - Awaitable[~.ListDatasetsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_datasets' not in self._stubs: - self._stubs['list_datasets'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/ListDatasets', - request_serializer=dataset_service.ListDatasetsRequest.serialize, - response_deserializer=dataset_service.ListDatasetsResponse.deserialize, - ) - return self._stubs['list_datasets'] - - @property - def delete_dataset(self) -> Callable[ - [dataset_service.DeleteDatasetRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete dataset method over gRPC. - - Deletes a Dataset. - - Returns: - Callable[[~.DeleteDatasetRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_dataset' not in self._stubs: - self._stubs['delete_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/DeleteDataset', - request_serializer=dataset_service.DeleteDatasetRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_dataset'] - - @property - def import_data(self) -> Callable[ - [dataset_service.ImportDataRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the import data method over gRPC. - - Imports data into a Dataset. - - Returns: - Callable[[~.ImportDataRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'import_data' not in self._stubs: - self._stubs['import_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/ImportData', - request_serializer=dataset_service.ImportDataRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['import_data'] - - @property - def export_data(self) -> Callable[ - [dataset_service.ExportDataRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the export data method over gRPC. - - Exports data from a Dataset. - - Returns: - Callable[[~.ExportDataRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
-        if 'export_data' not in self._stubs:
-            self._stubs['export_data'] = self.grpc_channel.unary_unary(
-                '/google.cloud.aiplatform.v1.DatasetService/ExportData',
-                request_serializer=dataset_service.ExportDataRequest.serialize,
-                response_deserializer=operations_pb2.Operation.FromString,
-            )
-        return self._stubs['export_data']
-
-    @property
-    def list_data_items(self) -> Callable[
-            [dataset_service.ListDataItemsRequest],
-            Awaitable[dataset_service.ListDataItemsResponse]]:
-        r"""Return a callable for the list data items method over gRPC.
-
-        Lists DataItems in a Dataset.
-
-        Returns:
-            Callable[[~.ListDataItemsRequest],
-                    Awaitable[~.ListDataItemsResponse]]:
-                A function that, when called, will call the underlying RPC
-                on the server.
-        """
-        # Generate a "stub function" on-the-fly which will actually make
-        # the request.
-        # gRPC handles serialization and deserialization, so we just need
-        # to pass in the functions for each.
-        if 'list_data_items' not in self._stubs:
-            self._stubs['list_data_items'] = self.grpc_channel.unary_unary(
-                '/google.cloud.aiplatform.v1.DatasetService/ListDataItems',
-                request_serializer=dataset_service.ListDataItemsRequest.serialize,
-                response_deserializer=dataset_service.ListDataItemsResponse.deserialize,
-            )
-        return self._stubs['list_data_items']
-
-    @property
-    def get_annotation_spec(self) -> Callable[
-            [dataset_service.GetAnnotationSpecRequest],
-            Awaitable[annotation_spec.AnnotationSpec]]:
-        r"""Return a callable for the get annotation spec method over gRPC.
-
-        Gets an AnnotationSpec.
-
-        Returns:
-            Callable[[~.GetAnnotationSpecRequest],
-                    Awaitable[~.AnnotationSpec]]:
-                A function that, when called, will call the underlying RPC
-                on the server.
-        """
-        # Generate a "stub function" on-the-fly which will actually make
-        # the request.
-        # gRPC handles serialization and deserialization, so we just need
-        # to pass in the functions for each.
-        if 'get_annotation_spec' not in self._stubs:
-            self._stubs['get_annotation_spec'] = self.grpc_channel.unary_unary(
-                '/google.cloud.aiplatform.v1.DatasetService/GetAnnotationSpec',
-                request_serializer=dataset_service.GetAnnotationSpecRequest.serialize,
-                response_deserializer=annotation_spec.AnnotationSpec.deserialize,
-            )
-        return self._stubs['get_annotation_spec']
-
-    @property
-    def list_annotations(self) -> Callable[
-            [dataset_service.ListAnnotationsRequest],
-            Awaitable[dataset_service.ListAnnotationsResponse]]:
-        r"""Return a callable for the list annotations method over gRPC.
-
-        Lists Annotations belonging to a DataItem.
-
-        Returns:
-            Callable[[~.ListAnnotationsRequest],
-                    Awaitable[~.ListAnnotationsResponse]]:
-                A function that, when called, will call the underlying RPC
-                on the server.
-        """
-        # Generate a "stub function" on-the-fly which will actually make
-        # the request.
-        # gRPC handles serialization and deserialization, so we just need
-        # to pass in the functions for each.
- if 'list_annotations' not in self._stubs: - self._stubs['list_annotations'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/ListAnnotations', - request_serializer=dataset_service.ListAnnotationsRequest.serialize, - response_deserializer=dataset_service.ListAnnotationsResponse.deserialize, - ) - return self._stubs['list_annotations'] - - -__all__ = ( - 'DatasetServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/__init__.py deleted file mode 100644 index 7db43e768e..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import EndpointServiceClient -from .async_client import EndpointServiceAsyncClient - -__all__ = ( - 'EndpointServiceClient', - 'EndpointServiceAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py deleted file mode 100644 index 16b4fe3f32..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py +++ /dev/null @@ -1,860 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
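The file removed below, owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py, carries the asynchronous endpoint client. For orientation, the generated surface is typically driven as in this minimal sketch (not part of the patch; the project and location values are placeholders, and credentials are assumed to come from Application Default Credentials):

import asyncio

from google.cloud import aiplatform_v1


async def list_endpoint_names() -> None:
    # The async client resolves credentials from the environment by default.
    client = aiplatform_v1.EndpointServiceAsyncClient()

    # Placeholder resource name for the parent Location.
    parent = "projects/my-project/locations/us-central1"

    # list_endpoints returns an async pager; iterating it resolves
    # additional pages transparently.
    pager = await client.list_endpoints(parent=parent)
    async for endpoint in pager:
        print(endpoint.name)


asyncio.run(list_endpoint_names())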
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.endpoint_service import pagers -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import endpoint -from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint -from google.cloud.aiplatform_v1.types import endpoint_service -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import EndpointServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import EndpointServiceGrpcAsyncIOTransport -from .client import EndpointServiceClient - - -class EndpointServiceAsyncClient: - """""" - - _client: EndpointServiceClient - - DEFAULT_ENDPOINT = EndpointServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = EndpointServiceClient.DEFAULT_MTLS_ENDPOINT - - endpoint_path = staticmethod(EndpointServiceClient.endpoint_path) - parse_endpoint_path = staticmethod(EndpointServiceClient.parse_endpoint_path) - model_path = staticmethod(EndpointServiceClient.model_path) - parse_model_path = staticmethod(EndpointServiceClient.parse_model_path) - common_billing_account_path = staticmethod(EndpointServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(EndpointServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(EndpointServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(EndpointServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(EndpointServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(EndpointServiceClient.parse_common_organization_path) - common_project_path = staticmethod(EndpointServiceClient.common_project_path) - parse_common_project_path = staticmethod(EndpointServiceClient.parse_common_project_path) - common_location_path = staticmethod(EndpointServiceClient.common_location_path) - parse_common_location_path = staticmethod(EndpointServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - EndpointServiceAsyncClient: The constructed client. - """ - return EndpointServiceClient.from_service_account_info.__func__(EndpointServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. 
- - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - EndpointServiceAsyncClient: The constructed client. - """ - return EndpointServiceClient.from_service_account_file.__func__(EndpointServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> EndpointServiceTransport: - """Returns the transport used by the client instance. - - Returns: - EndpointServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(EndpointServiceClient).get_transport_class, type(EndpointServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, EndpointServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the endpoint service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.EndpointServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = EndpointServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_endpoint(self, - request: endpoint_service.CreateEndpointRequest = None, - *, - parent: str = None, - endpoint: gca_endpoint.Endpoint = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates an Endpoint. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.CreateEndpointRequest`): - The request object. Request message for - [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1.EndpointService.CreateEndpoint]. - parent (:class:`str`): - Required. The resource name of the Location to create - the Endpoint in. 
Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - endpoint (:class:`google.cloud.aiplatform_v1.types.Endpoint`): - Required. The Endpoint to create. - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Endpoint` Models are deployed into it, and afterwards Endpoint is called to obtain - predictions and explanations. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, endpoint]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = endpoint_service.CreateEndpointRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if endpoint is not None: - request.endpoint = endpoint - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_endpoint, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_endpoint.Endpoint, - metadata_type=endpoint_service.CreateEndpointOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_endpoint(self, - request: endpoint_service.GetEndpointRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> endpoint.Endpoint: - r"""Gets an Endpoint. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.GetEndpointRequest`): - The request object. Request message for - [EndpointService.GetEndpoint][google.cloud.aiplatform.v1.EndpointService.GetEndpoint] - name (:class:`str`): - Required. The name of the Endpoint resource. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1.types.Endpoint: - Models are deployed into it, and - afterwards Endpoint is called to obtain - predictions and explanations. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = endpoint_service.GetEndpointRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_endpoint, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_endpoints(self, - request: endpoint_service.ListEndpointsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListEndpointsAsyncPager: - r"""Lists Endpoints in a Location. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.ListEndpointsRequest`): - The request object. Request message for - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints]. - parent (:class:`str`): - Required. The resource name of the Location from which - to list the Endpoints. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.endpoint_service.pagers.ListEndpointsAsyncPager: - Response message for - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = endpoint_service.ListEndpointsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_endpoints, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListEndpointsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_endpoint(self, - request: endpoint_service.UpdateEndpointRequest = None, - *, - endpoint: gca_endpoint.Endpoint = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_endpoint.Endpoint: - r"""Updates an Endpoint. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.UpdateEndpointRequest`): - The request object. Request message for - [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint]. - endpoint (:class:`google.cloud.aiplatform_v1.types.Endpoint`): - Required. The Endpoint which replaces - the resource on the server. - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The update mask applies to the resource. See - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Endpoint: - Models are deployed into it, and - afterwards Endpoint is called to obtain - predictions and explanations. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = endpoint_service.UpdateEndpointRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_endpoint, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint.name", request.endpoint.name), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_endpoint(self, - request: endpoint_service.DeleteEndpointRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes an Endpoint. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.DeleteEndpointRequest`): - The request object. Request message for - [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1.EndpointService.DeleteEndpoint]. - name (:class:`str`): - Required. The name of the Endpoint resource to be - deleted. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = endpoint_service.DeleteEndpointRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_endpoint, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
- return response - - async def deploy_model(self, - request: endpoint_service.DeployModelRequest = None, - *, - endpoint: str = None, - deployed_model: gca_endpoint.DeployedModel = None, - traffic_split: Sequence[endpoint_service.DeployModelRequest.TrafficSplitEntry] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deploys a Model into this Endpoint, creating a - DeployedModel within it. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.DeployModelRequest`): - The request object. Request message for - [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel]. - endpoint (:class:`str`): - Required. The name of the Endpoint resource into which - to deploy a Model. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_model (:class:`google.cloud.aiplatform_v1.types.DeployedModel`): - Required. The DeployedModel to be created within the - Endpoint. Note that - [Endpoint.traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] - must be updated for the DeployedModel to start receiving - traffic, either as part of this call, or via - [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint]. - - This corresponds to the ``deployed_model`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - traffic_split (:class:`Sequence[google.cloud.aiplatform_v1.types.DeployModelRequest.TrafficSplitEntry]`): - A map from a DeployedModel's ID to the percentage of - this Endpoint's traffic that should be forwarded to that - DeployedModel. - - If this field is non-empty, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] - will be overwritten with it. To refer to the ID of the - just being deployed Model, a "0" should be used, and the - actual ID of the new DeployedModel will be filled in its - place by this method. The traffic percentage values must - add up to 100. - - If this field is empty, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] - is not updated. - - This corresponds to the ``traffic_split`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.DeployModelResponse` - Response message for - [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([endpoint, deployed_model, traffic_split]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = endpoint_service.DeployModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if deployed_model is not None: - request.deployed_model = deployed_model - - if traffic_split: - request.traffic_split.update(traffic_split) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.deploy_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - endpoint_service.DeployModelResponse, - metadata_type=endpoint_service.DeployModelOperationMetadata, - ) - - # Done; return the response. - return response - - async def undeploy_model(self, - request: endpoint_service.UndeployModelRequest = None, - *, - endpoint: str = None, - deployed_model_id: str = None, - traffic_split: Sequence[endpoint_service.UndeployModelRequest.TrafficSplitEntry] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Undeploys a Model from an Endpoint, removing a - DeployedModel from it, and freeing all resources it's - using. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.UndeployModelRequest`): - The request object. Request message for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel]. - endpoint (:class:`str`): - Required. The name of the Endpoint resource from which - to undeploy a Model. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_model_id (:class:`str`): - Required. The ID of the DeployedModel - to be undeployed from the Endpoint. - - This corresponds to the ``deployed_model_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - traffic_split (:class:`Sequence[google.cloud.aiplatform_v1.types.UndeployModelRequest.TrafficSplitEntry]`): - If this field is provided, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] - will be overwritten with it. If last DeployedModel is - being undeployed from the Endpoint, the - [Endpoint.traffic_split] will always end up empty when - this call returns. A DeployedModel will be successfully - undeployed only if it doesn't have any traffic assigned - to it when this method executes, or if this field - unassigns any traffic to it. - - This corresponds to the ``traffic_split`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.UndeployModelResponse` - Response message for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, deployed_model_id, traffic_split]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = endpoint_service.UndeployModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if deployed_model_id is not None: - request.deployed_model_id = deployed_model_id - - if traffic_split: - request.traffic_split.update(traffic_split) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.undeploy_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - endpoint_service.UndeployModelResponse, - metadata_type=endpoint_service.UndeployModelOperationMetadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "EndpointServiceAsyncClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/client.py deleted file mode 100644 index eb2fa99da5..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/client.py +++ /dev/null @@ -1,1054 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
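The deploy_model and undeploy_model docstrings above spell out the traffic-split contract: the literal key "0" stands for the DeployedModel being created by the call itself, and the percentage values must sum to 100. A minimal sketch of that contract against the synchronous client whose file is deleted below (resource names, machine type, and replica count are illustrative assumptions):

from google.cloud import aiplatform_v1


def deploy_with_full_traffic(endpoint_name: str, model_name: str) -> None:
    client = aiplatform_v1.EndpointServiceClient()

    # Illustrative serving hardware; any supported machine type works.
    deployed_model = aiplatform_v1.DeployedModel(
        model=model_name,
        dedicated_resources=aiplatform_v1.DedicatedResources(
            machine_spec=aiplatform_v1.MachineSpec(machine_type="n1-standard-2"),
            min_replica_count=1,
        ),
    )

    # "0" refers to the DeployedModel created by this very call; the
    # values across the map must add up to 100.
    operation = client.deploy_model(
        endpoint=endpoint_name,
        deployed_model=deployed_model,
        traffic_split={"0": 100},
    )
    operation.result()  # Block until the long-running operation completes.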
-#
-from collections import OrderedDict
-from distutils import util
-import os
-import re
-from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
-import pkg_resources
-
-from google.api_core import client_options as client_options_lib  # type: ignore
-from google.api_core import exceptions as core_exceptions  # type: ignore
-from google.api_core import gapic_v1  # type: ignore
-from google.api_core import retry as retries  # type: ignore
-from google.auth import credentials as ga_credentials  # type: ignore
-from google.auth.transport import mtls  # type: ignore
-from google.auth.transport.grpc import SslCredentials  # type: ignore
-from google.auth.exceptions import MutualTLSChannelError  # type: ignore
-from google.oauth2 import service_account  # type: ignore
-
-from google.api_core import operation as gac_operation  # type: ignore
-from google.api_core import operation_async  # type: ignore
-from google.cloud.aiplatform_v1.services.endpoint_service import pagers
-from google.cloud.aiplatform_v1.types import encryption_spec
-from google.cloud.aiplatform_v1.types import endpoint
-from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint
-from google.cloud.aiplatform_v1.types import endpoint_service
-from google.cloud.aiplatform_v1.types import operation as gca_operation
-from google.protobuf import empty_pb2  # type: ignore
-from google.protobuf import field_mask_pb2  # type: ignore
-from google.protobuf import timestamp_pb2  # type: ignore
-from .transports.base import EndpointServiceTransport, DEFAULT_CLIENT_INFO
-from .transports.grpc import EndpointServiceGrpcTransport
-from .transports.grpc_asyncio import EndpointServiceGrpcAsyncIOTransport
-
-
-class EndpointServiceClientMeta(type):
-    """Metaclass for the EndpointService client.
-
-    This provides class-level methods for building and retrieving
-    support objects (e.g. transport) without polluting the client instance
-    objects.
-    """
-    _transport_registry = OrderedDict()  # type: Dict[str, Type[EndpointServiceTransport]]
-    _transport_registry["grpc"] = EndpointServiceGrpcTransport
-    _transport_registry["grpc_asyncio"] = EndpointServiceGrpcAsyncIOTransport
-
-    def get_transport_class(cls,
-            label: str = None,
-        ) -> Type[EndpointServiceTransport]:
-        """Returns an appropriate transport class.
-
-        Args:
-            label: The name of the desired transport. If none is
-                provided, then the first transport in the registry is used.
-
-        Returns:
-            The transport class to use.
-        """
-        # If a specific transport is requested, return that one.
-        if label:
-            return cls._transport_registry[label]
-
-        # No transport is requested; return the default (that is, the first one
-        # in the dictionary).
-        return next(iter(cls._transport_registry.values()))
-
-
-class EndpointServiceClient(metaclass=EndpointServiceClientMeta):
-    """"""
-
-    @staticmethod
-    def _get_default_mtls_endpoint(api_endpoint):
-        """Converts api endpoint to mTLS endpoint.
-
-        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
-        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
-        Args:
-            api_endpoint (Optional[str]): the api endpoint to convert.
-        Returns:
-            str: converted mTLS api endpoint.
-        """
-        if not api_endpoint:
-            return api_endpoint
-
-        mtls_endpoint_re = re.compile(
-            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
- ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - EndpointServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - EndpointServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> EndpointServiceTransport: - """Returns the transport used by the client instance. - - Returns: - EndpointServiceTransport: The transport used by the client - instance. 
-        """
-        return self._transport
-
-    @staticmethod
-    def endpoint_path(project: str,location: str,endpoint: str,) -> str:
-        """Returns a fully-qualified endpoint string."""
-        return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, )
-
-    @staticmethod
-    def parse_endpoint_path(path: str) -> Dict[str,str]:
-        """Parses an endpoint path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/endpoints/(?P<endpoint>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def model_path(project: str,location: str,model: str,) -> str:
-        """Returns a fully-qualified model string."""
-        return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, )
-
-    @staticmethod
-    def parse_model_path(path: str) -> Dict[str,str]:
-        """Parses a model path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_billing_account_path(billing_account: str, ) -> str:
-        """Returns a fully-qualified billing_account string."""
-        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
-
-    @staticmethod
-    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
-        """Parse a billing_account path into its component segments."""
-        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_folder_path(folder: str, ) -> str:
-        """Returns a fully-qualified folder string."""
-        return "folders/{folder}".format(folder=folder, )
-
-    @staticmethod
-    def parse_common_folder_path(path: str) -> Dict[str,str]:
-        """Parse a folder path into its component segments."""
-        m = re.match(r"^folders/(?P<folder>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_organization_path(organization: str, ) -> str:
-        """Returns a fully-qualified organization string."""
-        return "organizations/{organization}".format(organization=organization, )
-
-    @staticmethod
-    def parse_common_organization_path(path: str) -> Dict[str,str]:
-        """Parse an organization path into its component segments."""
-        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_project_path(project: str, ) -> str:
-        """Returns a fully-qualified project string."""
-        return "projects/{project}".format(project=project, )
-
-    @staticmethod
-    def parse_common_project_path(path: str) -> Dict[str,str]:
-        """Parse a project path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_location_path(project: str, location: str, ) -> str:
-        """Returns a fully-qualified location string."""
-        return "projects/{project}/locations/{location}".format(project=project, location=location, )
-
-    @staticmethod
-    def parse_common_location_path(path: str) -> Dict[str,str]:
-        """Parse a location path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    def __init__(self, *,
-            credentials: Optional[ga_credentials.Credentials] = None,
-            transport: Union[str, EndpointServiceTransport, None] = None,
-            client_options: Optional[client_options_lib.ClientOptions] = None,
-            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
-            ) -> None:
-        """Instantiates the endpoint service client.
- - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, EndpointServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, EndpointServiceTransport): - # transport is a EndpointServiceTransport instance. 
-            if credentials or client_options.credentials_file:
-                raise ValueError("When providing a transport instance, "
-                                 "provide its credentials directly.")
-            if client_options.scopes:
-                raise ValueError(
-                    "When providing a transport instance, provide its scopes "
-                    "directly."
-                )
-            self._transport = transport
-        else:
-            Transport = type(self).get_transport_class(transport)
-            self._transport = Transport(
-                credentials=credentials,
-                credentials_file=client_options.credentials_file,
-                host=api_endpoint,
-                scopes=client_options.scopes,
-                client_cert_source_for_mtls=client_cert_source_func,
-                quota_project_id=client_options.quota_project_id,
-                client_info=client_info,
-            )
-
-    def create_endpoint(self,
-            request: endpoint_service.CreateEndpointRequest = None,
-            *,
-            parent: str = None,
-            endpoint: gca_endpoint.Endpoint = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> gac_operation.Operation:
-        r"""Creates an Endpoint.
-
-        Args:
-            request (google.cloud.aiplatform_v1.types.CreateEndpointRequest):
-                The request object. Request message for
-                [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1.EndpointService.CreateEndpoint].
-            parent (str):
-                Required. The resource name of the Location to create
-                the Endpoint in. Format:
-                ``projects/{project}/locations/{location}``
-
-                This corresponds to the ``parent`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            endpoint (google.cloud.aiplatform_v1.types.Endpoint):
-                Required. The Endpoint to create.
-                This corresponds to the ``endpoint`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.api_core.operation.Operation:
-                An object representing a long-running operation.
-
-                The result type for the operation will be
-                :class:`google.cloud.aiplatform_v1.types.Endpoint`, the
-                resource into which Models are deployed and which is
-                afterwards called to obtain predictions and explanations.
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([parent, endpoint])
-        if request is not None and has_flattened_params:
-            raise ValueError('If the `request` argument is set, then none of '
-                             'the individual field arguments should be set.')
-
-        # Minor optimization to avoid making a copy if the user passes
-        # in an endpoint_service.CreateEndpointRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, endpoint_service.CreateEndpointRequest):
-            request = endpoint_service.CreateEndpointRequest(request)
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if parent is not None:
-            request.parent = parent
-        if endpoint is not None:
-            request.endpoint = endpoint
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.create_endpoint]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
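-        # The routing header travels as ``x-goog-request-params``; for this
-        # RPC it carries the parent, e.g. (an illustrative value):
-        #   x-goog-request-params: parent=projects/my-project/locations/us-central1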
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("parent", request.parent),
-            )),
-        )
-
-        # Send the request.
-        response = rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Wrap the response in an operation future.
-        response = gac_operation.from_gapic(
-            response,
-            self._transport.operations_client,
-            gca_endpoint.Endpoint,
-            metadata_type=endpoint_service.CreateEndpointOperationMetadata,
-        )
-
-        # Done; return the response.
-        return response
-
-    def get_endpoint(self,
-            request: endpoint_service.GetEndpointRequest = None,
-            *,
-            name: str = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> endpoint.Endpoint:
-        r"""Gets an Endpoint.
-
-        Args:
-            request (google.cloud.aiplatform_v1.types.GetEndpointRequest):
-                The request object. Request message for
-                [EndpointService.GetEndpoint][google.cloud.aiplatform.v1.EndpointService.GetEndpoint].
-            name (str):
-                Required. The name of the Endpoint resource. Format:
-                ``projects/{project}/locations/{location}/endpoints/{endpoint}``
-
-                This corresponds to the ``name`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.cloud.aiplatform_v1.types.Endpoint:
-                The resource into which Models are deployed, and which is
-                afterwards called to obtain predictions and explanations.
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([name])
-        if request is not None and has_flattened_params:
-            raise ValueError('If the `request` argument is set, then none of '
-                             'the individual field arguments should be set.')
-
-        # Minor optimization to avoid making a copy if the user passes
-        # in an endpoint_service.GetEndpointRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, endpoint_service.GetEndpointRequest):
-            request = endpoint_service.GetEndpointRequest(request)
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if name is not None:
-            request.name = name
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.get_endpoint]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("name", request.name),
-            )),
-        )
-
-        # Send the request.
-        response = rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Done; return the response.
-        return response
-
-    def list_endpoints(self,
-            request: endpoint_service.ListEndpointsRequest = None,
-            *,
-            parent: str = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> pagers.ListEndpointsPager:
-        r"""Lists Endpoints in a Location.
-
-        Args:
-            request (google.cloud.aiplatform_v1.types.ListEndpointsRequest):
-                The request object. Request message for
-                [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints].
-            parent (str):
-                Required. The resource name of the Location from which
-                to list the Endpoints. Format:
-                ``projects/{project}/locations/{location}``
-
-                This corresponds to the ``parent`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.cloud.aiplatform_v1.services.endpoint_service.pagers.ListEndpointsPager:
-                Response message for
-                [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints].
-
-                Iterating over this object will yield results and
-                resolve additional pages automatically.
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([parent])
-        if request is not None and has_flattened_params:
-            raise ValueError('If the `request` argument is set, then none of '
-                             'the individual field arguments should be set.')
-
-        # Minor optimization to avoid making a copy if the user passes
-        # in an endpoint_service.ListEndpointsRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, endpoint_service.ListEndpointsRequest):
-            request = endpoint_service.ListEndpointsRequest(request)
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if parent is not None:
-            request.parent = parent
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.list_endpoints]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("parent", request.parent),
-            )),
-        )
-
-        # Send the request.
-        response = rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # This method is paged; wrap the response in a pager, which provides
-        # an `__iter__` convenience method.
-        response = pagers.ListEndpointsPager(
-            method=rpc,
-            request=request,
-            response=response,
-            metadata=metadata,
-        )
-
-        # Done; return the response.
-        return response
-
-    def update_endpoint(self,
-            request: endpoint_service.UpdateEndpointRequest = None,
-            *,
-            endpoint: gca_endpoint.Endpoint = None,
-            update_mask: field_mask_pb2.FieldMask = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> gca_endpoint.Endpoint:
-        r"""Updates an Endpoint.
-
-        Args:
-            request (google.cloud.aiplatform_v1.types.UpdateEndpointRequest):
-                The request object. Request message for
-                [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint].
-            endpoint (google.cloud.aiplatform_v1.types.Endpoint):
-                Required. The Endpoint which replaces
-                the resource on the server.
-
-                This corresponds to the ``endpoint`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            update_mask (google.protobuf.field_mask_pb2.FieldMask):
-                Required. The update mask applies to the resource. See
-                [google.protobuf.FieldMask][google.protobuf.FieldMask].
-
-                This corresponds to the ``update_mask`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.cloud.aiplatform_v1.types.Endpoint:
-                The resource into which Models are deployed, and which is
-                afterwards called to obtain predictions and explanations.
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([endpoint, update_mask])
-        if request is not None and has_flattened_params:
-            raise ValueError('If the `request` argument is set, then none of '
-                             'the individual field arguments should be set.')
-
-        # Minor optimization to avoid making a copy if the user passes
-        # in an endpoint_service.UpdateEndpointRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, endpoint_service.UpdateEndpointRequest):
-            request = endpoint_service.UpdateEndpointRequest(request)
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if endpoint is not None:
-            request.endpoint = endpoint
-        if update_mask is not None:
-            request.update_mask = update_mask
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.update_endpoint]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("endpoint.name", request.endpoint.name),
-            )),
-        )
-
-        # Send the request.
-        response = rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Done; return the response.
-        return response
-
-    def delete_endpoint(self,
-            request: endpoint_service.DeleteEndpointRequest = None,
-            *,
-            name: str = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> gac_operation.Operation:
-        r"""Deletes an Endpoint.
-
-        Args:
-            request (google.cloud.aiplatform_v1.types.DeleteEndpointRequest):
-                The request object. Request message for
-                [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1.EndpointService.DeleteEndpoint].
-            name (str):
-                Required. The name of the Endpoint resource to be
-                deleted. Format:
-                ``projects/{project}/locations/{location}/endpoints/{endpoint}``
-
-                This corresponds to the ``name`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.api_core.operation.Operation:
-                An object representing a long-running operation.
-
-                The result type for the operation will be
-                :class:`google.protobuf.empty_pb2.Empty`, a generic empty
-                message that you can re-use to avoid defining duplicated
-                empty messages in your APIs. A typical example is to
-                use it as the request or the response type of an API
-                method. For instance:
-
-                    service Foo {
-                        rpc Bar(google.protobuf.Empty) returns
-                        (google.protobuf.Empty);
-                    }
-
-                The JSON representation for Empty is an empty JSON
-                object ``{}``.
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([name])
-        if request is not None and has_flattened_params:
-            raise ValueError('If the `request` argument is set, then none of '
-                             'the individual field arguments should be set.')
-
-        # Minor optimization to avoid making a copy if the user passes
-        # in an endpoint_service.DeleteEndpointRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, endpoint_service.DeleteEndpointRequest):
-            request = endpoint_service.DeleteEndpointRequest(request)
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if name is not None:
-            request.name = name
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.delete_endpoint]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("name", request.name),
-            )),
-        )
-
-        # Send the request.
-        response = rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Wrap the response in an operation future.
-        response = gac_operation.from_gapic(
-            response,
-            self._transport.operations_client,
-            empty_pb2.Empty,
-            metadata_type=gca_operation.DeleteOperationMetadata,
-        )
-
-        # Done; return the response.
-        return response
-
-    def deploy_model(self,
-            request: endpoint_service.DeployModelRequest = None,
-            *,
-            endpoint: str = None,
-            deployed_model: gca_endpoint.DeployedModel = None,
-            traffic_split: Sequence[endpoint_service.DeployModelRequest.TrafficSplitEntry] = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> gac_operation.Operation:
-        r"""Deploys a Model into this Endpoint, creating a
-        DeployedModel within it.
-
-        Args:
-            request (google.cloud.aiplatform_v1.types.DeployModelRequest):
-                The request object. Request message for
-                [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel].
-            endpoint (str):
-                Required. The name of the Endpoint resource into which
-                to deploy a Model. Format:
-                ``projects/{project}/locations/{location}/endpoints/{endpoint}``
-
-                This corresponds to the ``endpoint`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            deployed_model (google.cloud.aiplatform_v1.types.DeployedModel):
-                Required. The DeployedModel to be created within the
-                Endpoint. Note that
-                [Endpoint.traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]
-                must be updated for the DeployedModel to start receiving
-                traffic, either as part of this call, or via
-                [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint].
-
-                This corresponds to the ``deployed_model`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            traffic_split (Sequence[google.cloud.aiplatform_v1.types.DeployModelRequest.TrafficSplitEntry]):
-                A map from a DeployedModel's ID to the percentage of
-                this Endpoint's traffic that should be forwarded to that
-                DeployedModel.
-
-                If this field is non-empty, then the Endpoint's
-                [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]
-                will be overwritten with it. To refer to the ID of the
-                Model being deployed by this request, use the key "0";
-                the actual ID of the new DeployedModel will be filled in
-                its place by this method. The traffic percentage values
-                must add up to 100.
-
-                If this field is empty, then the Endpoint's
-                [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]
-                is not updated.
-
-                This corresponds to the ``traffic_split`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.api_core.operation.Operation:
-                An object representing a long-running operation.
-
-                The result type for the operation will be
-                :class:`google.cloud.aiplatform_v1.types.DeployModelResponse`,
-                the response message for
-                [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel].
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([endpoint, deployed_model, traffic_split])
-        if request is not None and has_flattened_params:
-            raise ValueError('If the `request` argument is set, then none of '
-                             'the individual field arguments should be set.')
-
-        # Minor optimization to avoid making a copy if the user passes
-        # in an endpoint_service.DeployModelRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, endpoint_service.DeployModelRequest):
-            request = endpoint_service.DeployModelRequest(request)
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if endpoint is not None:
-            request.endpoint = endpoint
-        if deployed_model is not None:
-            request.deployed_model = deployed_model
-        if traffic_split is not None:
-            request.traffic_split = traffic_split
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.deploy_model]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("endpoint", request.endpoint),
-            )),
-        )
-
-        # Send the request.
-        response = rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Wrap the response in an operation future.
-        response = gac_operation.from_gapic(
-            response,
-            self._transport.operations_client,
-            endpoint_service.DeployModelResponse,
-            metadata_type=endpoint_service.DeployModelOperationMetadata,
-        )
-
-        # Done; return the response.
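-        # The returned object is a long-running-operation future; a caller
-        # typically blocks on it, e.g. (an illustrative sketch):
-        #   deploy_response = client.deploy_model(...).result(timeout=3600)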
-        return response
-
-    def undeploy_model(self,
-            request: endpoint_service.UndeployModelRequest = None,
-            *,
-            endpoint: str = None,
-            deployed_model_id: str = None,
-            traffic_split: Sequence[endpoint_service.UndeployModelRequest.TrafficSplitEntry] = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> gac_operation.Operation:
-        r"""Undeploys a Model from an Endpoint, removing a
-        DeployedModel from it, and freeing all resources it's
-        using.
-
-        Args:
-            request (google.cloud.aiplatform_v1.types.UndeployModelRequest):
-                The request object. Request message for
-                [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel].
-            endpoint (str):
-                Required. The name of the Endpoint resource from which
-                to undeploy a Model. Format:
-                ``projects/{project}/locations/{location}/endpoints/{endpoint}``
-
-                This corresponds to the ``endpoint`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            deployed_model_id (str):
-                Required. The ID of the DeployedModel
-                to be undeployed from the Endpoint.
-
-                This corresponds to the ``deployed_model_id`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            traffic_split (Sequence[google.cloud.aiplatform_v1.types.UndeployModelRequest.TrafficSplitEntry]):
-                If this field is provided, then the Endpoint's
-                [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]
-                will be overwritten with it. If the last DeployedModel is
-                being undeployed from the Endpoint, the
-                [Endpoint.traffic_split] will always end up empty when
-                this call returns. A DeployedModel will be successfully
-                undeployed only if it doesn't have any traffic assigned
-                to it when this method executes, or if this field
-                unassigns any traffic from it.
-
-                This corresponds to the ``traffic_split`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.api_core.operation.Operation:
-                An object representing a long-running operation.
-
-                The result type for the operation will be
-                :class:`google.cloud.aiplatform_v1.types.UndeployModelResponse`,
-                the response message for
-                [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel].
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([endpoint, deployed_model_id, traffic_split])
-        if request is not None and has_flattened_params:
-            raise ValueError('If the `request` argument is set, then none of '
-                             'the individual field arguments should be set.')
-
-        # Minor optimization to avoid making a copy if the user passes
-        # in an endpoint_service.UndeployModelRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, endpoint_service.UndeployModelRequest):
-            request = endpoint_service.UndeployModelRequest(request)
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
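-        # An illustrative ``traffic_split`` for one remaining DeployedModel:
-        #   {"1234567890": 100}
-        # (the ID and percentage are placeholders, not real values)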
- if endpoint is not None: - request.endpoint = endpoint - if deployed_model_id is not None: - request.deployed_model_id = deployed_model_id - if traffic_split is not None: - request.traffic_split = traffic_split - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.undeploy_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - endpoint_service.UndeployModelResponse, - metadata_type=endpoint_service.UndeployModelOperationMetadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "EndpointServiceClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py deleted file mode 100644 index 98d4f5eda7..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py +++ /dev/null @@ -1,141 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.cloud.aiplatform_v1.types import endpoint -from google.cloud.aiplatform_v1.types import endpoint_service - - -class ListEndpointsPager: - """A pager for iterating through ``list_endpoints`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListEndpointsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``endpoints`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListEndpoints`` requests and continue to iterate - through the ``endpoints`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListEndpointsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., endpoint_service.ListEndpointsResponse], - request: endpoint_service.ListEndpointsRequest, - response: endpoint_service.ListEndpointsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. 
- - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListEndpointsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListEndpointsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = endpoint_service.ListEndpointsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[endpoint_service.ListEndpointsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[endpoint.Endpoint]: - for page in self.pages: - yield from page.endpoints - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListEndpointsAsyncPager: - """A pager for iterating through ``list_endpoints`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListEndpointsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``endpoints`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListEndpoints`` requests and continue to iterate - through the ``endpoints`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListEndpointsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[endpoint_service.ListEndpointsResponse]], - request: endpoint_service.ListEndpointsRequest, - response: endpoint_service.ListEndpointsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListEndpointsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListEndpointsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
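-
-        Illustrative usage (a minimal sketch; assumes an already-configured
-        ``EndpointServiceAsyncClient``)::
-
-            pager = await client.list_endpoints(parent=parent)
-            async for endpoint in pager:
-                print(endpoint.name)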
- """ - self._method = method - self._request = endpoint_service.ListEndpointsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[endpoint_service.ListEndpointsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[endpoint.Endpoint]: - async def async_generator(): - async for page in self.pages: - for response in page.endpoints: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/__init__.py deleted file mode 100644 index a062fc074c..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import EndpointServiceTransport -from .grpc import EndpointServiceGrpcTransport -from .grpc_asyncio import EndpointServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[EndpointServiceTransport]] -_transport_registry['grpc'] = EndpointServiceGrpcTransport -_transport_registry['grpc_asyncio'] = EndpointServiceGrpcAsyncIOTransport - -__all__ = ( - 'EndpointServiceTransport', - 'EndpointServiceGrpcTransport', - 'EndpointServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py deleted file mode 100644 index fa9af900e8..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py +++ /dev/null @@ -1,261 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1.types import endpoint -from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint -from google.cloud.aiplatform_v1.types import endpoint_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class EndpointServiceTransport(abc.ABC): - """Abstract transport class for EndpointService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. 
-        if credentials and credentials_file:
-            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
-
-        if credentials_file is not None:
-            credentials, _ = google.auth.load_credentials_from_file(
-                                credentials_file,
-                                **scopes_kwargs,
-                                quota_project_id=quota_project_id
-                            )
-
-        elif credentials is None:
-            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
-
-        # If the credentials are service account credentials, then always try to use self signed JWT.
-        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
-            credentials = credentials.with_always_use_jwt_access(True)
-
-        # Save the credentials.
-        self._credentials = credentials
-
-    # TODO(busunkim): This method is in the base transport
-    # to avoid duplicating code across the transport classes. It
-    # should be deleted once the minimum required version of google-auth is increased.
-
-    # TODO: Remove this function once google-auth >= 1.25.0 is required
-    @classmethod
-    def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
-        """Returns scopes kwargs to pass to google-auth methods, depending on the google-auth version."""
-
-        scopes_kwargs = {}
-
-        if _GOOGLE_AUTH_VERSION and (
-            packaging.version.parse(_GOOGLE_AUTH_VERSION)
-            >= packaging.version.parse("1.25.0")
-        ):
-            scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
-        else:
-            scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
-
-        return scopes_kwargs
-
-    def _prep_wrapped_messages(self, client_info):
-        # Precompute the wrapped methods.
-        self._wrapped_methods = {
-            self.create_endpoint: gapic_v1.method.wrap_method(
-                self.create_endpoint,
-                default_timeout=5.0,
-                client_info=client_info,
-            ),
-            self.get_endpoint: gapic_v1.method.wrap_method(
-                self.get_endpoint,
-                default_timeout=5.0,
-                client_info=client_info,
-            ),
-            self.list_endpoints: gapic_v1.method.wrap_method(
-                self.list_endpoints,
-                default_timeout=5.0,
-                client_info=client_info,
-            ),
-            self.update_endpoint: gapic_v1.method.wrap_method(
-                self.update_endpoint,
-                default_timeout=5.0,
-                client_info=client_info,
-            ),
-            self.delete_endpoint: gapic_v1.method.wrap_method(
-                self.delete_endpoint,
-                default_timeout=5.0,
-                client_info=client_info,
-            ),
-            self.deploy_model: gapic_v1.method.wrap_method(
-                self.deploy_model,
-                default_timeout=5.0,
-                client_info=client_info,
-            ),
-            self.undeploy_model: gapic_v1.method.wrap_method(
-                self.undeploy_model,
-                default_timeout=5.0,
-                client_info=client_info,
-            ),
-        }
-
-    @property
-    def operations_client(self) -> operations_v1.OperationsClient:
-        """Return the client designed to process long-running operations."""
-        raise NotImplementedError()
-
-    @property
-    def create_endpoint(self) -> Callable[
-            [endpoint_service.CreateEndpointRequest],
-            Union[
-                operations_pb2.Operation,
-                Awaitable[operations_pb2.Operation]
-            ]]:
-        raise NotImplementedError()
-
-    @property
-    def get_endpoint(self) -> Callable[
-            [endpoint_service.GetEndpointRequest],
-            Union[
-                endpoint.Endpoint,
-                Awaitable[endpoint.Endpoint]
-            ]]:
-        raise NotImplementedError()
-
-    @property
-    def list_endpoints(self) -> Callable[
-            [endpoint_service.ListEndpointsRequest],
-            Union[
-                endpoint_service.ListEndpointsResponse,
-                Awaitable[endpoint_service.ListEndpointsResponse]
-            ]]:
-        raise NotImplementedError()
-
-    @property
-    def update_endpoint(self) -> Callable[
-
[endpoint_service.UpdateEndpointRequest], - Union[ - gca_endpoint.Endpoint, - Awaitable[gca_endpoint.Endpoint] - ]]: - raise NotImplementedError() - - @property - def delete_endpoint(self) -> Callable[ - [endpoint_service.DeleteEndpointRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def deploy_model(self) -> Callable[ - [endpoint_service.DeployModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def undeploy_model(self) -> Callable[ - [endpoint_service.UndeployModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'EndpointServiceTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py deleted file mode 100644 index c1f467a4db..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py +++ /dev/null @@ -1,430 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.api_core import gapic_v1 # type: ignore -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1.types import endpoint -from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint -from google.cloud.aiplatform_v1.types import endpoint_service -from google.longrunning import operations_pb2 # type: ignore -from .base import EndpointServiceTransport, DEFAULT_CLIENT_INFO - - -class EndpointServiceGrpcTransport(EndpointServiceTransport): - """gRPC backend transport for EndpointService. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
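-
-    Illustrative explicit selection of this transport (a minimal sketch;
-    the short name ``"grpc"`` is resolved by the client)::
-
-        client = EndpointServiceClient(transport="grpc")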
- """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. 
-            credentials = False
-            # If a channel was explicitly provided, set it.
-            self._grpc_channel = channel
-            self._ssl_channel_credentials = None
-
-        else:
-            if api_mtls_endpoint:
-                host = api_mtls_endpoint
-
-                # Create SSL credentials with client_cert_source or application
-                # default SSL credentials.
-                if client_cert_source:
-                    cert, key = client_cert_source()
-                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
-                        certificate_chain=cert, private_key=key
-                    )
-                else:
-                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
-
-            else:
-                if client_cert_source_for_mtls and not ssl_channel_credentials:
-                    cert, key = client_cert_source_for_mtls()
-                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
-                        certificate_chain=cert, private_key=key
-                    )
-
-        # The base transport sets the host, credentials and scopes
-        super().__init__(
-            host=host,
-            credentials=credentials,
-            credentials_file=credentials_file,
-            scopes=scopes,
-            quota_project_id=quota_project_id,
-            client_info=client_info,
-            always_use_jwt_access=always_use_jwt_access,
-        )
-
-        if not self._grpc_channel:
-            self._grpc_channel = type(self).create_channel(
-                self._host,
-                credentials=self._credentials,
-                credentials_file=credentials_file,
-                scopes=self._scopes,
-                ssl_credentials=self._ssl_channel_credentials,
-                quota_project_id=quota_project_id,
-                options=[
-                    ("grpc.max_send_message_length", -1),
-                    ("grpc.max_receive_message_length", -1),
-                ],
-            )
-
-        # Wrap messages. This must be done after self._grpc_channel exists
-        self._prep_wrapped_messages(client_info)
-
-    @classmethod
-    def create_channel(cls,
-                       host: str = 'aiplatform.googleapis.com',
-                       credentials: ga_credentials.Credentials = None,
-                       credentials_file: str = None,
-                       scopes: Optional[Sequence[str]] = None,
-                       quota_project_id: Optional[str] = None,
-                       **kwargs) -> grpc.Channel:
-        """Create and return a gRPC channel object.
-        Args:
-            host (Optional[str]): The host for the channel to use.
-            credentials (Optional[~.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify this application to the service. If
-                none are specified, the client will attempt to ascertain
-                the credentials from the environment.
-            credentials_file (Optional[str]): A file with credentials that can
-                be loaded with :func:`google.auth.load_credentials_from_file`.
-                This argument is mutually exclusive with credentials.
-            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
-                service. These are only used when credentials are not specified and
-                are passed to :func:`google.auth.default`.
-            quota_project_id (Optional[str]): An optional project to use for billing
-                and quota.
-            kwargs (Optional[dict]): Keyword arguments, which are passed to the
-                channel creation.
-        Returns:
-            grpc.Channel: A gRPC channel object.
-
-        Raises:
-            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
-                and ``credentials_file`` are passed.
-        """
-
-        return grpc_helpers.create_channel(
-            host,
-            credentials=credentials,
-            credentials_file=credentials_file,
-            quota_project_id=quota_project_id,
-            default_scopes=cls.AUTH_SCOPES,
-            scopes=scopes,
-            default_host=cls.DEFAULT_HOST,
-            **kwargs
-        )
-
-    @property
-    def grpc_channel(self) -> grpc.Channel:
-        """Return the channel designed to connect to this service.
-        """
-        return self._grpc_channel
-
-    @property
-    def operations_client(self) -> operations_v1.OperationsClient:
-        """Create the client designed to process long-running operations.
- - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_endpoint(self) -> Callable[ - [endpoint_service.CreateEndpointRequest], - operations_pb2.Operation]: - r"""Return a callable for the create endpoint method over gRPC. - - Creates an Endpoint. - - Returns: - Callable[[~.CreateEndpointRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_endpoint' not in self._stubs: - self._stubs['create_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.EndpointService/CreateEndpoint', - request_serializer=endpoint_service.CreateEndpointRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_endpoint'] - - @property - def get_endpoint(self) -> Callable[ - [endpoint_service.GetEndpointRequest], - endpoint.Endpoint]: - r"""Return a callable for the get endpoint method over gRPC. - - Gets an Endpoint. - - Returns: - Callable[[~.GetEndpointRequest], - ~.Endpoint]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_endpoint' not in self._stubs: - self._stubs['get_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.EndpointService/GetEndpoint', - request_serializer=endpoint_service.GetEndpointRequest.serialize, - response_deserializer=endpoint.Endpoint.deserialize, - ) - return self._stubs['get_endpoint'] - - @property - def list_endpoints(self) -> Callable[ - [endpoint_service.ListEndpointsRequest], - endpoint_service.ListEndpointsResponse]: - r"""Return a callable for the list endpoints method over gRPC. - - Lists Endpoints in a Location. - - Returns: - Callable[[~.ListEndpointsRequest], - ~.ListEndpointsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_endpoints' not in self._stubs: - self._stubs['list_endpoints'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.EndpointService/ListEndpoints', - request_serializer=endpoint_service.ListEndpointsRequest.serialize, - response_deserializer=endpoint_service.ListEndpointsResponse.deserialize, - ) - return self._stubs['list_endpoints'] - - @property - def update_endpoint(self) -> Callable[ - [endpoint_service.UpdateEndpointRequest], - gca_endpoint.Endpoint]: - r"""Return a callable for the update endpoint method over gRPC. - - Updates an Endpoint. - - Returns: - Callable[[~.UpdateEndpointRequest], - ~.Endpoint]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_endpoint' not in self._stubs: - self._stubs['update_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.EndpointService/UpdateEndpoint', - request_serializer=endpoint_service.UpdateEndpointRequest.serialize, - response_deserializer=gca_endpoint.Endpoint.deserialize, - ) - return self._stubs['update_endpoint'] - - @property - def delete_endpoint(self) -> Callable[ - [endpoint_service.DeleteEndpointRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete endpoint method over gRPC. - - Deletes an Endpoint. - - Returns: - Callable[[~.DeleteEndpointRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_endpoint' not in self._stubs: - self._stubs['delete_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.EndpointService/DeleteEndpoint', - request_serializer=endpoint_service.DeleteEndpointRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_endpoint'] - - @property - def deploy_model(self) -> Callable[ - [endpoint_service.DeployModelRequest], - operations_pb2.Operation]: - r"""Return a callable for the deploy model method over gRPC. - - Deploys a Model into this Endpoint, creating a - DeployedModel within it. - - Returns: - Callable[[~.DeployModelRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'deploy_model' not in self._stubs: - self._stubs['deploy_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.EndpointService/DeployModel', - request_serializer=endpoint_service.DeployModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['deploy_model'] - - @property - def undeploy_model(self) -> Callable[ - [endpoint_service.UndeployModelRequest], - operations_pb2.Operation]: - r"""Return a callable for the undeploy model method over gRPC. - - Undeploys a Model from an Endpoint, removing a - DeployedModel from it, and freeing all resources it's - using. - - Returns: - Callable[[~.UndeployModelRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
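-        # The property lazily creates the stub and caches it in
-        # ``self._stubs``, so a repeated access such as (illustrative)
-        # ``transport.undeploy_model`` reuses the same underlying callable.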
- if 'undeploy_model' not in self._stubs: - self._stubs['undeploy_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.EndpointService/UndeployModel', - request_serializer=endpoint_service.UndeployModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['undeploy_model'] - - -__all__ = ( - 'EndpointServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py deleted file mode 100644 index 7c7d569905..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,434 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1.types import endpoint -from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint -from google.cloud.aiplatform_v1.types import endpoint_service -from google.longrunning import operations_pb2 # type: ignore -from .base import EndpointServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import EndpointServiceGrpcTransport - - -class EndpointServiceGrpcAsyncIOTransport(EndpointServiceTransport): - """gRPC AsyncIO backend transport for EndpointService. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): An optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): An optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests.
If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_endpoint(self) -> Callable[ - [endpoint_service.CreateEndpointRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create endpoint method over gRPC. - - Creates an Endpoint. 
- - Returns: - Callable[[~.CreateEndpointRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_endpoint' not in self._stubs: - self._stubs['create_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.EndpointService/CreateEndpoint', - request_serializer=endpoint_service.CreateEndpointRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_endpoint'] - - @property - def get_endpoint(self) -> Callable[ - [endpoint_service.GetEndpointRequest], - Awaitable[endpoint.Endpoint]]: - r"""Return a callable for the get endpoint method over gRPC. - - Gets an Endpoint. - - Returns: - Callable[[~.GetEndpointRequest], - Awaitable[~.Endpoint]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_endpoint' not in self._stubs: - self._stubs['get_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.EndpointService/GetEndpoint', - request_serializer=endpoint_service.GetEndpointRequest.serialize, - response_deserializer=endpoint.Endpoint.deserialize, - ) - return self._stubs['get_endpoint'] - - @property - def list_endpoints(self) -> Callable[ - [endpoint_service.ListEndpointsRequest], - Awaitable[endpoint_service.ListEndpointsResponse]]: - r"""Return a callable for the list endpoints method over gRPC. - - Lists Endpoints in a Location. - - Returns: - Callable[[~.ListEndpointsRequest], - Awaitable[~.ListEndpointsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_endpoints' not in self._stubs: - self._stubs['list_endpoints'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.EndpointService/ListEndpoints', - request_serializer=endpoint_service.ListEndpointsRequest.serialize, - response_deserializer=endpoint_service.ListEndpointsResponse.deserialize, - ) - return self._stubs['list_endpoints'] - - @property - def update_endpoint(self) -> Callable[ - [endpoint_service.UpdateEndpointRequest], - Awaitable[gca_endpoint.Endpoint]]: - r"""Return a callable for the update endpoint method over gRPC. - - Updates an Endpoint. - - Returns: - Callable[[~.UpdateEndpointRequest], - Awaitable[~.Endpoint]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
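- # (Note: stubs on this AsyncIO transport mirror the sync transport above; - # the only difference is that invoking one returns an Awaitable response.)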
- if 'update_endpoint' not in self._stubs: - self._stubs['update_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.EndpointService/UpdateEndpoint', - request_serializer=endpoint_service.UpdateEndpointRequest.serialize, - response_deserializer=gca_endpoint.Endpoint.deserialize, - ) - return self._stubs['update_endpoint'] - - @property - def delete_endpoint(self) -> Callable[ - [endpoint_service.DeleteEndpointRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete endpoint method over gRPC. - - Deletes an Endpoint. - - Returns: - Callable[[~.DeleteEndpointRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_endpoint' not in self._stubs: - self._stubs['delete_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.EndpointService/DeleteEndpoint', - request_serializer=endpoint_service.DeleteEndpointRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_endpoint'] - - @property - def deploy_model(self) -> Callable[ - [endpoint_service.DeployModelRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the deploy model method over gRPC. - - Deploys a Model into this Endpoint, creating a - DeployedModel within it. - - Returns: - Callable[[~.DeployModelRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'deploy_model' not in self._stubs: - self._stubs['deploy_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.EndpointService/DeployModel', - request_serializer=endpoint_service.DeployModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['deploy_model'] - - @property - def undeploy_model(self) -> Callable[ - [endpoint_service.UndeployModelRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the undeploy model method over gRPC. - - Undeploys a Model from an Endpoint, removing a - DeployedModel from it, and freeing all resources it's - using. - - Returns: - Callable[[~.UndeployModelRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'undeploy_model' not in self._stubs: - self._stubs['undeploy_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.EndpointService/UndeployModel', - request_serializer=endpoint_service.UndeployModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['undeploy_model'] - - -__all__ = ( - 'EndpointServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/__init__.py deleted file mode 100644 index 817e1b49e2..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import JobServiceClient -from .async_client import JobServiceAsyncClient - -__all__ = ( - 'JobServiceClient', - 'JobServiceAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/async_client.py deleted file mode 100644 index 46ce9f78fb..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/async_client.py +++ /dev/null @@ -1,1913 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
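For orientation, the EndpointService transports removed above are normally reached through the generated clients rather than used directly. The following is a minimal editorial sketch, not code from this diff; the endpoint and model names are assumed placeholders, and the machine type is likewise only an example value.

from google.cloud import aiplatform_v1


async def deploy(endpoint_name: str, model_name: str) -> aiplatform_v1.DeployModelResponse:
    # The async client rides on EndpointServiceGrpcAsyncIOTransport by default.
    client = aiplatform_v1.EndpointServiceAsyncClient()
    # deploy_model starts a long-running operation; result() awaits completion.
    lro = await client.deploy_model(
        endpoint=endpoint_name,
        deployed_model=aiplatform_v1.DeployedModel(
            model=model_name,
            dedicated_resources=aiplatform_v1.DedicatedResources(
                machine_spec=aiplatform_v1.MachineSpec(machine_type="n1-standard-2"),
                min_replica_count=1,
            ),
        ),
        traffic_split={"0": 100},
    )
    return await lro.result()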
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.job_service import pagers -from google.cloud.aiplatform_v1.types import batch_prediction_job -from google.cloud.aiplatform_v1.types import batch_prediction_job as gca_batch_prediction_job -from google.cloud.aiplatform_v1.types import completion_stats -from google.cloud.aiplatform_v1.types import custom_job -from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job -from google.cloud.aiplatform_v1.types import data_labeling_job -from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job -from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job -from google.cloud.aiplatform_v1.types import job_service -from google.cloud.aiplatform_v1.types import job_state -from google.cloud.aiplatform_v1.types import machine_resources -from google.cloud.aiplatform_v1.types import manual_batch_tuning_parameters -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.cloud.aiplatform_v1.types import study -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from google.type import money_pb2 # type: ignore -from .transports.base import JobServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport -from .client import JobServiceClient - - -class JobServiceAsyncClient: - """A service for creating and managing Vertex AI's jobs.""" - - _client: JobServiceClient - - DEFAULT_ENDPOINT = JobServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = JobServiceClient.DEFAULT_MTLS_ENDPOINT - - batch_prediction_job_path = staticmethod(JobServiceClient.batch_prediction_job_path) - parse_batch_prediction_job_path = staticmethod(JobServiceClient.parse_batch_prediction_job_path) - custom_job_path = staticmethod(JobServiceClient.custom_job_path) - parse_custom_job_path = staticmethod(JobServiceClient.parse_custom_job_path) - data_labeling_job_path = staticmethod(JobServiceClient.data_labeling_job_path) - parse_data_labeling_job_path = staticmethod(JobServiceClient.parse_data_labeling_job_path) - dataset_path = staticmethod(JobServiceClient.dataset_path) - parse_dataset_path = staticmethod(JobServiceClient.parse_dataset_path) - hyperparameter_tuning_job_path = staticmethod(JobServiceClient.hyperparameter_tuning_job_path) - parse_hyperparameter_tuning_job_path = staticmethod(JobServiceClient.parse_hyperparameter_tuning_job_path) - model_path = staticmethod(JobServiceClient.model_path) - parse_model_path = staticmethod(JobServiceClient.parse_model_path) - network_path = 
staticmethod(JobServiceClient.network_path) - parse_network_path = staticmethod(JobServiceClient.parse_network_path) - trial_path = staticmethod(JobServiceClient.trial_path) - parse_trial_path = staticmethod(JobServiceClient.parse_trial_path) - common_billing_account_path = staticmethod(JobServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(JobServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(JobServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(JobServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(JobServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(JobServiceClient.parse_common_organization_path) - common_project_path = staticmethod(JobServiceClient.common_project_path) - parse_common_project_path = staticmethod(JobServiceClient.parse_common_project_path) - common_location_path = staticmethod(JobServiceClient.common_location_path) - parse_common_location_path = staticmethod(JobServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - JobServiceAsyncClient: The constructed client. - """ - return JobServiceClient.from_service_account_info.__func__(JobServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - JobServiceAsyncClient: The constructed client. - """ - return JobServiceClient.from_service_account_file.__func__(JobServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> JobServiceTransport: - """Returns the transport used by the client instance. - - Returns: - JobServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(JobServiceClient).get_transport_class, type(JobServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, JobServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the job service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.JobServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. 
- (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. The GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if a client certificate is present; this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide a client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = JobServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_custom_job(self, - request: job_service.CreateCustomJobRequest = None, - *, - parent: str = None, - custom_job: gca_custom_job.CustomJob = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_custom_job.CustomJob: - r"""Creates a CustomJob. A created CustomJob will - immediately attempt to run. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.CreateCustomJobRequest`): - The request object. Request message for - [JobService.CreateCustomJob][google.cloud.aiplatform.v1.JobService.CreateCustomJob]. - parent (:class:`str`): - Required. The resource name of the Location to create - the CustomJob in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - custom_job (:class:`google.cloud.aiplatform_v1.types.CustomJob`): - Required. The CustomJob to create. - This corresponds to the ``custom_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.CustomJob: - Represents a job that runs custom - workloads such as a Docker container or - a Python package. A CustomJob can have - multiple worker pools and each worker - pool can have its own machine and input - spec. A CustomJob will be cleaned up - once the job enters terminal state - (failed or succeeded). - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, custom_job]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CreateCustomJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these.
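- # (Illustration of the two calling conventions enforced above, with p and cj - # as placeholder values: either - # client.create_custom_job(request=job_service.CreateCustomJobRequest(parent=p, custom_job=cj)) - # or client.create_custom_job(parent=p, custom_job=cj); passing both raises ValueError.)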
- if parent is not None: - request.parent = parent - if custom_job is not None: - request.custom_job = custom_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_custom_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_custom_job(self, - request: job_service.GetCustomJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> custom_job.CustomJob: - r"""Gets a CustomJob. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.GetCustomJobRequest`): - The request object. Request message for - [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob]. - name (:class:`str`): - Required. The name of the CustomJob resource. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.CustomJob: - Represents a job that runs custom - workloads such as a Docker container or - a Python package. A CustomJob can have - multiple worker pools and each worker - pool can have its own machine and input - spec. A CustomJob will be cleaned up - once the job enters terminal state - (failed or succeeded). - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.GetCustomJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_custom_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def list_custom_jobs(self, - request: job_service.ListCustomJobsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListCustomJobsAsyncPager: - r"""Lists CustomJobs in a Location. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.ListCustomJobsRequest`): - The request object. Request message for - [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs]. - parent (:class:`str`): - Required. The resource name of the Location to list the - CustomJobs from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.job_service.pagers.ListCustomJobsAsyncPager: - Response message for - [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.ListCustomJobsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_custom_jobs, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListCustomJobsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_custom_job(self, - request: job_service.DeleteCustomJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a CustomJob. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.DeleteCustomJobRequest`): - The request object. Request message for - [JobService.DeleteCustomJob][google.cloud.aiplatform.v1.JobService.DeleteCustomJob]. - name (:class:`str`): - Required. The name of the CustomJob resource to be - deleted. 
Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.DeleteCustomJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_custom_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def cancel_custom_job(self, - request: job_service.CancelCustomJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a CustomJob. Starts asynchronous cancellation on the - CustomJob. The server makes a best effort to cancel the job, but - success is not guaranteed. Clients can use - [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On successful - cancellation, the CustomJob is not deleted; instead it becomes a - job with a - [CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is - set to ``CANCELLED``. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.CancelCustomJobRequest`): - The request object. 
Request message for - [JobService.CancelCustomJob][google.cloud.aiplatform.v1.JobService.CancelCustomJob]. - name (:class:`str`): - Required. The name of the CustomJob to cancel. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CancelCustomJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.cancel_custom_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def create_data_labeling_job(self, - request: job_service.CreateDataLabelingJobRequest = None, - *, - parent: str = None, - data_labeling_job: gca_data_labeling_job.DataLabelingJob = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_data_labeling_job.DataLabelingJob: - r"""Creates a DataLabelingJob. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.CreateDataLabelingJobRequest`): - The request object. Request message for - [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1.JobService.CreateDataLabelingJob]. - parent (:class:`str`): - Required. The parent of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - data_labeling_job (:class:`google.cloud.aiplatform_v1.types.DataLabelingJob`): - Required. The DataLabelingJob to - create. - - This corresponds to the ``data_labeling_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.DataLabelingJob: - DataLabelingJob is used to trigger a - human labeling job on unlabeled data - from the following Dataset: - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
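- # (Usage note for the coroutines above, with JOB_NAME as a placeholder: - # delete_custom_job returns an AsyncOperation wrapping Empty, so callers run - # op = await client.delete_custom_job(name=JOB_NAME) followed by await op.result(), - # while cancel_custom_job returns None and callers poll get_custom_job until - # the job reports JobState.JOB_STATE_CANCELLED.)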
- has_flattened_params = any([parent, data_labeling_job]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CreateDataLabelingJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if data_labeling_job is not None: - request.data_labeling_job = data_labeling_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_data_labeling_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_data_labeling_job(self, - request: job_service.GetDataLabelingJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> data_labeling_job.DataLabelingJob: - r"""Gets a DataLabelingJob. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.GetDataLabelingJobRequest`): - The request object. Request message for - [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1.JobService.GetDataLabelingJob]. - name (:class:`str`): - Required. The name of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.DataLabelingJob: - DataLabelingJob is used to trigger a - human labeling job on unlabeled data - from the following Dataset: - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.GetDataLabelingJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_data_labeling_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_data_labeling_jobs(self, - request: job_service.ListDataLabelingJobsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListDataLabelingJobsAsyncPager: - r"""Lists DataLabelingJobs in a Location. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest`): - The request object. Request message for - [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. - parent (:class:`str`): - Required. The parent of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.job_service.pagers.ListDataLabelingJobsAsyncPager: - Response message for - [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.ListDataLabelingJobsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_data_labeling_jobs, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListDataLabelingJobsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_data_labeling_job(self, - request: job_service.DeleteDataLabelingJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a DataLabelingJob. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.DeleteDataLabelingJobRequest`): - The request object. 
Request message for - [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob]. - name (:class:`str`): - Required. The name of the DataLabelingJob to be deleted. - Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.DeleteDataLabelingJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_data_labeling_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def cancel_data_labeling_job(self, - request: job_service.CancelDataLabelingJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a DataLabelingJob. Success of cancellation is - not guaranteed. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.CancelDataLabelingJobRequest`): - The request object. Request message for - [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1.JobService.CancelDataLabelingJob]. - name (:class:`str`): - Required. The name of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CancelDataLabelingJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.cancel_data_labeling_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def create_hyperparameter_tuning_job(self, - request: job_service.CreateHyperparameterTuningJobRequest = None, - *, - parent: str = None, - hyperparameter_tuning_job: gca_hyperparameter_tuning_job.HyperparameterTuningJob = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_hyperparameter_tuning_job.HyperparameterTuningJob: - r"""Creates a HyperparameterTuningJob - - Args: - request (:class:`google.cloud.aiplatform_v1.types.CreateHyperparameterTuningJobRequest`): - The request object. Request message for - [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CreateHyperparameterTuningJob]. - parent (:class:`str`): - Required. The resource name of the Location to create - the HyperparameterTuningJob in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - hyperparameter_tuning_job (:class:`google.cloud.aiplatform_v1.types.HyperparameterTuningJob`): - Required. The HyperparameterTuningJob - to create. - - This corresponds to the ``hyperparameter_tuning_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.HyperparameterTuningJob: - Represents a HyperparameterTuningJob. - A HyperparameterTuningJob has a Study - specification and multiple CustomJobs - with identical CustomJob specification. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
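- # (The any([...]) check below is truthy when at least one flattened argument - # was supplied; together with a non-None request it trips the ValueError guard.)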
- has_flattened_params = any([parent, hyperparameter_tuning_job]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CreateHyperparameterTuningJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if hyperparameter_tuning_job is not None: - request.hyperparameter_tuning_job = hyperparameter_tuning_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_hyperparameter_tuning_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_hyperparameter_tuning_job(self, - request: job_service.GetHyperparameterTuningJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> hyperparameter_tuning_job.HyperparameterTuningJob: - r"""Gets a HyperparameterTuningJob - - Args: - request (:class:`google.cloud.aiplatform_v1.types.GetHyperparameterTuningJobRequest`): - The request object. Request message for - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob]. - name (:class:`str`): - Required. The name of the HyperparameterTuningJob - resource. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.HyperparameterTuningJob: - Represents a HyperparameterTuningJob. - A HyperparameterTuningJob has a Study - specification and multiple CustomJobs - with identical CustomJob specification. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.GetHyperparameterTuningJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_hyperparameter_tuning_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_hyperparameter_tuning_jobs(self, - request: job_service.ListHyperparameterTuningJobsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListHyperparameterTuningJobsAsyncPager: - r"""Lists HyperparameterTuningJobs in a Location. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest`): - The request object. Request message for - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs]. - parent (:class:`str`): - Required. The resource name of the Location to list the - HyperparameterTuningJobs from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.job_service.pagers.ListHyperparameterTuningJobsAsyncPager: - Response message for - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.ListHyperparameterTuningJobsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_hyperparameter_tuning_jobs, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListHyperparameterTuningJobsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
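Because the list call wraps its response in a ListHyperparameterTuningJobsAsyncPager, callers never page manually; a sketch, assuming reachable credentials and a placeholder parent:

    import asyncio
    from google.cloud import aiplatform_v1

    async def show_tuning_jobs() -> None:
        client = aiplatform_v1.JobServiceAsyncClient()
        parent = "projects/my-project/locations/us-central1"  # placeholder

        pager = await client.list_hyperparameter_tuning_jobs(parent=parent)
        # The pager's __aiter__ transparently requests further pages.
        async for job in pager:
            print(job.name, job.state)

    asyncio.run(show_tuning_jobs())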
- return response - - async def delete_hyperparameter_tuning_job(self, - request: job_service.DeleteHyperparameterTuningJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a HyperparameterTuningJob. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.DeleteHyperparameterTuningJobRequest`): - The request object. Request message for - [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob]. - name (:class:`str`): - Required. The name of the HyperparameterTuningJob - resource to be deleted. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.DeleteHyperparameterTuningJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_hyperparameter_tuning_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def cancel_hyperparameter_tuning_job(self, - request: job_service.CancelHyperparameterTuningJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a HyperparameterTuningJob. 
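The delete method above returns an AsyncOperation whose terminal result type is Empty; a sketch of blocking on the deletion, with a placeholder resource name:

    import asyncio
    from google.cloud import aiplatform_v1

    async def delete_tuning_job() -> None:
        client = aiplatform_v1.JobServiceAsyncClient()
        name = ("projects/my-project/locations/us-central1/"
                "hyperparameterTuningJobs/456")  # placeholder

        operation = await client.delete_hyperparameter_tuning_job(name=name)
        # Wait for the long-running operation; the payload is google.protobuf.Empty.
        await operation.result()

    asyncio.run(delete_tuning_job())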
Starts asynchronous - cancellation on the HyperparameterTuningJob. The server makes a - best effort to cancel the job, but success is not guaranteed. - Clients can use - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On successful - cancellation, the HyperparameterTuningJob is not deleted; - instead it becomes a job with a - [HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state] - is set to ``CANCELLED``. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.CancelHyperparameterTuningJobRequest`): - The request object. Request message for - [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CancelHyperparameterTuningJob]. - name (:class:`str`): - Required. The name of the HyperparameterTuningJob to - cancel. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CancelHyperparameterTuningJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.cancel_hyperparameter_tuning_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def create_batch_prediction_job(self, - request: job_service.CreateBatchPredictionJobRequest = None, - *, - parent: str = None, - batch_prediction_job: gca_batch_prediction_job.BatchPredictionJob = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_batch_prediction_job.BatchPredictionJob: - r"""Creates a BatchPredictionJob. Once created, a - BatchPredictionJob immediately attempts to start. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.CreateBatchPredictionJobRequest`): - The request object.
Request message for - [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CreateBatchPredictionJob]. - parent (:class:`str`): - Required. The resource name of the Location to create - the BatchPredictionJob in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - batch_prediction_job (:class:`google.cloud.aiplatform_v1.types.BatchPredictionJob`): - Required. The BatchPredictionJob to - create. - - This corresponds to the ``batch_prediction_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.BatchPredictionJob: - A job that uses a [Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to produce predictions - on multiple [input - instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. - If predictions for a significant portion of the - instances fail, the job may finish without attempting - predictions for all remaining instances. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, batch_prediction_job]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CreateBatchPredictionJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if batch_prediction_job is not None: - request.batch_prediction_job = batch_prediction_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_batch_prediction_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_batch_prediction_job(self, - request: job_service.GetBatchPredictionJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> batch_prediction_job.BatchPredictionJob: - r"""Gets a BatchPredictionJob. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.GetBatchPredictionJobRequest`): - The request object. Request message for - [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob]. - name (:class:`str`): - Required. The name of the BatchPredictionJob resource.
- Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.BatchPredictionJob: - A job that uses a [Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to produce predictions - on multiple [input - instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. - If predictions for a significant portion of the - instances fail, the job may finish without attempting - predictions for all remaining instances. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.GetBatchPredictionJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_batch_prediction_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_batch_prediction_jobs(self, - request: job_service.ListBatchPredictionJobsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListBatchPredictionJobsAsyncPager: - r"""Lists BatchPredictionJobs in a Location. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest`): - The request object. Request message for - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs]. - parent (:class:`str`): - Required. The resource name of the Location to list the - BatchPredictionJobs from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata.
- - Returns: - google.cloud.aiplatform_v1.services.job_service.pagers.ListBatchPredictionJobsAsyncPager: - Response message for - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.ListBatchPredictionJobsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_batch_prediction_jobs, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListBatchPredictionJobsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_batch_prediction_job(self, - request: job_service.DeleteBatchPredictionJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a BatchPredictionJob. Can only be called on - jobs that already finished. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.DeleteBatchPredictionJobRequest`): - The request object. Request message for - [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1.JobService.DeleteBatchPredictionJob]. - name (:class:`str`): - Required. The name of the BatchPredictionJob resource to - be deleted. Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. 
For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.DeleteBatchPredictionJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_batch_prediction_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def cancel_batch_prediction_job(self, - request: job_service.CancelBatchPredictionJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a BatchPredictionJob. - - Starts asynchronous cancellation on the BatchPredictionJob. The - server makes a best effort to cancel the job, but success is - not guaranteed. Clients can use - [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On a successful - cancellation, the BatchPredictionJob is not deleted; instead its - [BatchPredictionJob.state][google.cloud.aiplatform.v1.BatchPredictionJob.state] - is set to ``CANCELLED``. Any files already produced by the job - are not deleted. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.CancelBatchPredictionJobRequest`): - The request object. Request message for - [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CancelBatchPredictionJob]. - name (:class:`str`): - Required. The name of the BatchPredictionJob to cancel. - Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request.
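Since cancellation above is best-effort and the RPC returns nothing, a caller who needs confirmation re-fetches the job and inspects its state; a sketch with placeholder names:

    import asyncio
    from google.cloud import aiplatform_v1

    async def cancel_and_verify() -> None:
        client = aiplatform_v1.JobServiceAsyncClient()
        name = ("projects/my-project/locations/us-central1/"
                "batchPredictionJobs/789")  # placeholder

        await client.cancel_batch_prediction_job(name=name)
        # Cancellation is asynchronous; poll the job to observe the outcome.
        job = await client.get_batch_prediction_job(name=name)
        if job.state == aiplatform_v1.JobState.JOB_STATE_CANCELLED:
            print("cancelled:", job.name)

    asyncio.run(cancel_and_verify())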
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CancelBatchPredictionJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.cancel_batch_prediction_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "JobServiceAsyncClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/client.py deleted file mode 100644 index 76393c4891..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/client.py +++ /dev/null @@ -1,2163 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
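For orientation: the async module deleted above exports only JobServiceAsyncClient, and the versioned package root re-exports it, so either import path below should resolve to the same class (a sketch, assuming the published google-cloud-aiplatform layout):

    from google.cloud.aiplatform_v1 import JobServiceAsyncClient
    from google.cloud.aiplatform_v1.services.job_service import (
        JobServiceAsyncClient as ServiceLevelAsyncClient,
    )

    # Both names are expected to point at the single class defined above.
    assert JobServiceAsyncClient is ServiceLevelAsyncClient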
-# -from collections import OrderedDict -from distutils import util -import os -import re -from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.job_service import pagers -from google.cloud.aiplatform_v1.types import batch_prediction_job -from google.cloud.aiplatform_v1.types import batch_prediction_job as gca_batch_prediction_job -from google.cloud.aiplatform_v1.types import completion_stats -from google.cloud.aiplatform_v1.types import custom_job -from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job -from google.cloud.aiplatform_v1.types import data_labeling_job -from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job -from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job -from google.cloud.aiplatform_v1.types import job_service -from google.cloud.aiplatform_v1.types import job_state -from google.cloud.aiplatform_v1.types import machine_resources -from google.cloud.aiplatform_v1.types import manual_batch_tuning_parameters -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.cloud.aiplatform_v1.types import study -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from google.type import money_pb2 # type: ignore -from .transports.base import JobServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import JobServiceGrpcTransport -from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport - - -class JobServiceClientMeta(type): - """Metaclass for the JobService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]] - _transport_registry["grpc"] = JobServiceGrpcTransport - _transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[JobServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). 
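The metaclass above keeps an ordered transport registry, so an explicit label selects a transport class and the default is the first entry, "grpc"; a sketch against the module paths in this diff:

    from google.cloud.aiplatform_v1.services.job_service.client import JobServiceClient
    from google.cloud.aiplatform_v1.services.job_service.transports import (
        JobServiceGrpcAsyncIOTransport,
        JobServiceGrpcTransport,
    )

    # A label looks up the registry directly...
    assert JobServiceClient.get_transport_class("grpc_asyncio") is JobServiceGrpcAsyncIOTransport
    # ...and omitting it falls back to the first registered transport.
    assert JobServiceClient.get_transport_class() is JobServiceGrpcTransport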
- return next(iter(cls._transport_registry.values())) - - -class JobServiceClient(metaclass=JobServiceClientMeta): - """A service for creating and managing Vertex AI's jobs.""" - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - JobServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - JobServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> JobServiceTransport: - """Returns the transport used by the client instance. - - Returns: - JobServiceTransport: The transport used by the client - instance.
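Given the named groups in the regular expression above, the conversion rewrites only bare Google API hostnames and passes everything else through; a sketch exercising the helper (it is private, shown purely for illustration):

    from google.cloud.aiplatform_v1.services.job_service.client import JobServiceClient

    convert = JobServiceClient._get_default_mtls_endpoint
    assert convert("aiplatform.googleapis.com") == "aiplatform.mtls.googleapis.com"
    assert convert("aiplatform.sandbox.googleapis.com") == (
        "aiplatform.mtls.sandbox.googleapis.com"
    )
    # Already-mTLS and non-Google endpoints are returned unchanged.
    assert convert("aiplatform.mtls.googleapis.com") == "aiplatform.mtls.googleapis.com"
    assert convert("example.com") == "example.com"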
- """ - return self._transport - - @staticmethod - def batch_prediction_job_path(project: str,location: str,batch_prediction_job: str,) -> str: - """Returns a fully-qualified batch_prediction_job string.""" - return "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format(project=project, location=location, batch_prediction_job=batch_prediction_job, ) - - @staticmethod - def parse_batch_prediction_job_path(path: str) -> Dict[str,str]: - """Parses a batch_prediction_job path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/batchPredictionJobs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def custom_job_path(project: str,location: str,custom_job: str,) -> str: - """Returns a fully-qualified custom_job string.""" - return "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) - - @staticmethod - def parse_custom_job_path(path: str) -> Dict[str,str]: - """Parses a custom_job path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/customJobs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def data_labeling_job_path(project: str,location: str,data_labeling_job: str,) -> str: - """Returns a fully-qualified data_labeling_job string.""" - return "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}".format(project=project, location=location, data_labeling_job=data_labeling_job, ) - - @staticmethod - def parse_data_labeling_job_path(path: str) -> Dict[str,str]: - """Parses a data_labeling_job path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/dataLabelingJobs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def dataset_path(project: str,location: str,dataset: str,) -> str: - """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - - @staticmethod - def parse_dataset_path(path: str) -> Dict[str,str]: - """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def hyperparameter_tuning_job_path(project: str,location: str,hyperparameter_tuning_job: str,) -> str: - """Returns a fully-qualified hyperparameter_tuning_job string.""" - return "projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}".format(project=project, location=location, hyperparameter_tuning_job=hyperparameter_tuning_job, ) - - @staticmethod - def parse_hyperparameter_tuning_job_path(path: str) -> Dict[str,str]: - """Parses a hyperparameter_tuning_job path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/hyperparameterTuningJobs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_path(project: str,location: str,model: str,) -> str: - """Returns a fully-qualified model string.""" - return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - - @staticmethod - def parse_model_path(path: str) -> Dict[str,str]: - """Parses a model path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def 
network_path(project: str,network: str,) -> str: - """Returns a fully-qualified network string.""" - return "projects/{project}/global/networks/{network}".format(project=project, network=network, ) - - @staticmethod - def parse_network_path(path: str) -> Dict[str,str]: - """Parses a network path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/global/networks/(?P<network>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def trial_path(project: str,location: str,study: str,trial: str,) -> str: - """Returns a fully-qualified trial string.""" - return "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, ) - - @staticmethod - def parse_trial_path(path: str) -> Dict[str,str]: - """Parses a trial path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/studies/(?P<study>.+?)/trials/(?P<trial>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P<folder>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse an organization path into its component segments.""" - m = re.match(r"^organizations/(?P<organization>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, JobServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the job service client.
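The path helpers above are plain string formatters paired with parsers; a round-trip sketch with placeholder identifiers:

    from google.cloud import aiplatform_v1

    path = aiplatform_v1.JobServiceClient.custom_job_path(
        "my-project", "us-central1", "123"  # placeholders
    )
    assert path == "projects/my-project/locations/us-central1/customJobs/123"

    # parse_* inverts the formatter into the named segments.
    assert aiplatform_v1.JobServiceClient.parse_custom_job_path(path) == {
        "project": "my-project",
        "location": "us-central1",
        "custom_job": "123",
    }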
- - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, JobServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, JobServiceTransport): - # transport is a JobServiceTransport instance. 
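Per the resolution order described above, an explicit api_endpoint in client_options wins over both mTLS environment variables; a sketch (the regional hostname is a placeholder):

    from google.api_core.client_options import ClientOptions
    from google.cloud import aiplatform_v1

    # An explicit api_endpoint takes precedence; GOOGLE_API_USE_MTLS_ENDPOINT
    # is consulted only when no endpoint override is supplied.
    options = ClientOptions(api_endpoint="us-central1-aiplatform.googleapis.com")
    client = aiplatform_v1.JobServiceClient(client_options=options)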
- if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - ) - - def create_custom_job(self, - request: job_service.CreateCustomJobRequest = None, - *, - parent: str = None, - custom_job: gca_custom_job.CustomJob = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_custom_job.CustomJob: - r"""Creates a CustomJob. Once created, a CustomJob - immediately attempts to run. - - Args: - request (google.cloud.aiplatform_v1.types.CreateCustomJobRequest): - The request object. Request message for - [JobService.CreateCustomJob][google.cloud.aiplatform.v1.JobService.CreateCustomJob]. - parent (str): - Required. The resource name of the Location to create - the CustomJob in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - custom_job (google.cloud.aiplatform_v1.types.CustomJob): - Required. The CustomJob to create. - This corresponds to the ``custom_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.CustomJob: - Represents a job that runs custom - workloads such as a Docker container or - a Python package. A CustomJob can have - multiple worker pools and each worker - pool can have its own machine and input - spec. A CustomJob will be cleaned up - once the job enters terminal state - (failed or succeeded). - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, custom_job]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CreateCustomJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.CreateCustomJobRequest): - request = job_service.CreateCustomJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if custom_job is not None: - request.custom_job = custom_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling.
- rpc = self._transport._wrapped_methods[self._transport.create_custom_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_custom_job(self, - request: job_service.GetCustomJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> custom_job.CustomJob: - r"""Gets a CustomJob. - - Args: - request (google.cloud.aiplatform_v1.types.GetCustomJobRequest): - The request object. Request message for - [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob]. - name (str): - Required. The name of the CustomJob resource. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.CustomJob: - Represents a job that runs custom - workloads such as a Docker container or - a Python package. A CustomJob can have - multiple worker pools and each worker - pool can have its own machine and input - spec. A CustomJob will be cleaned up - once the job enters terminal state - (failed or succeeded). - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.GetCustomJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.GetCustomJobRequest): - request = job_service.GetCustomJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_custom_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_custom_jobs(self, - request: job_service.ListCustomJobsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListCustomJobsPager: - r"""Lists CustomJobs in a Location. 
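The synchronous pager mirrors the async one but resolves pages during plain iteration; a sketch with a placeholder parent:

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.JobServiceClient()
    parent = "projects/my-project/locations/us-central1"  # placeholder

    # __iter__ yields CustomJob messages, fetching new pages as needed;
    # `pager.pages` exposes the raw list responses instead.
    for job in client.list_custom_jobs(parent=parent):
        print(job.name, job.state)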
- - Args: - request (google.cloud.aiplatform_v1.types.ListCustomJobsRequest): - The request object. Request message for - [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs]. - parent (str): - Required. The resource name of the Location to list the - CustomJobs from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.job_service.pagers.ListCustomJobsPager: - Response message for - [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.ListCustomJobsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.ListCustomJobsRequest): - request = job_service.ListCustomJobsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_custom_jobs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListCustomJobsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_custom_job(self, - request: job_service.DeleteCustomJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a CustomJob. - - Args: - request (google.cloud.aiplatform_v1.types.DeleteCustomJobRequest): - The request object. Request message for - [JobService.DeleteCustomJob][google.cloud.aiplatform.v1.JobService.DeleteCustomJob]. - name (str): - Required. The name of the CustomJob resource to be - deleted. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.DeleteCustomJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.DeleteCustomJobRequest): - request = job_service.DeleteCustomJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_custom_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def cancel_custom_job(self, - request: job_service.CancelCustomJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a CustomJob. Starts asynchronous cancellation on the - CustomJob. The server makes a best effort to cancel the job, but - success is not guaranteed. Clients can use - [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On successful - cancellation, the CustomJob is not deleted; instead it becomes a - job with a - [CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is - set to ``CANCELLED``. - - Args: - request (google.cloud.aiplatform_v1.types.CancelCustomJobRequest): - The request object. Request message for - [JobService.CancelCustomJob][google.cloud.aiplatform.v1.JobService.CancelCustomJob]. - name (str): - Required. 
The name of the CustomJob to cancel. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CancelCustomJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.CancelCustomJobRequest): - request = job_service.CancelCustomJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.cancel_custom_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def create_data_labeling_job(self, - request: job_service.CreateDataLabelingJobRequest = None, - *, - parent: str = None, - data_labeling_job: gca_data_labeling_job.DataLabelingJob = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_data_labeling_job.DataLabelingJob: - r"""Creates a DataLabelingJob. - - Args: - request (google.cloud.aiplatform_v1.types.CreateDataLabelingJobRequest): - The request object. Request message for - [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1.JobService.CreateDataLabelingJob]. - parent (str): - Required. The parent of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - data_labeling_job (google.cloud.aiplatform_v1.types.DataLabelingJob): - Required. The DataLabelingJob to - create. - - This corresponds to the ``data_labeling_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.DataLabelingJob: - DataLabelingJob is used to trigger a - human labeling job on unlabeled data - from the following Dataset: - - """ - # Create or coerce a protobuf request object. 
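-        # The call accepts either a fully-formed request object, e.g.
-        #   client.create_data_labeling_job(
-        #       request=job_service.CreateDataLabelingJobRequest(
-        #           parent="projects/<project>/locations/<location>",
-        #           data_labeling_job=my_job))  # <project>, <location>, my_job are placeholders
-        # or the equivalent flattened keyword arguments, e.g.
-        #   client.create_data_labeling_job(parent=..., data_labeling_job=...)
-        # but never a mixture of the two; the check below enforces this.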
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, data_labeling_job]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CreateDataLabelingJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.CreateDataLabelingJobRequest): - request = job_service.CreateDataLabelingJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if data_labeling_job is not None: - request.data_labeling_job = data_labeling_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_data_labeling_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_data_labeling_job(self, - request: job_service.GetDataLabelingJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> data_labeling_job.DataLabelingJob: - r"""Gets a DataLabelingJob. - - Args: - request (google.cloud.aiplatform_v1.types.GetDataLabelingJobRequest): - The request object. Request message for - [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1.JobService.GetDataLabelingJob]. - name (str): - Required. The name of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.DataLabelingJob: - DataLabelingJob is used to trigger a - human labeling job on unlabeled data - from the following Dataset: - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.GetDataLabelingJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.GetDataLabelingJobRequest): - request = job_service.GetDataLabelingJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_data_labeling_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_data_labeling_jobs(self, - request: job_service.ListDataLabelingJobsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListDataLabelingJobsPager: - r"""Lists DataLabelingJobs in a Location. - - Args: - request (google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest): - The request object. Request message for - [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. - parent (str): - Required. The parent of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.job_service.pagers.ListDataLabelingJobsPager: - Response message for - [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.ListDataLabelingJobsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.ListDataLabelingJobsRequest): - request = job_service.ListDataLabelingJobsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_data_labeling_jobs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. 
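-        # Iterating the pager lazily re-issues the request with each page's
-        # ``next_page_token`` until the token comes back empty; the ``pages``
-        # property exposes the same traversal one response at a time.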
- response = pagers.ListDataLabelingJobsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_data_labeling_job(self, - request: job_service.DeleteDataLabelingJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a DataLabelingJob. - - Args: - request (google.cloud.aiplatform_v1.types.DeleteDataLabelingJobRequest): - The request object. Request message for - [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob]. - name (str): - Required. The name of the DataLabelingJob to be deleted. - Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.DeleteDataLabelingJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.DeleteDataLabelingJobRequest): - request = job_service.DeleteDataLabelingJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_data_labeling_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
- return response - - def cancel_data_labeling_job(self, - request: job_service.CancelDataLabelingJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a DataLabelingJob. Success of cancellation is - not guaranteed. - - Args: - request (google.cloud.aiplatform_v1.types.CancelDataLabelingJobRequest): - The request object. Request message for - [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1.JobService.CancelDataLabelingJob]. - name (str): - Required. The name of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CancelDataLabelingJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.CancelDataLabelingJobRequest): - request = job_service.CancelDataLabelingJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.cancel_data_labeling_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def create_hyperparameter_tuning_job(self, - request: job_service.CreateHyperparameterTuningJobRequest = None, - *, - parent: str = None, - hyperparameter_tuning_job: gca_hyperparameter_tuning_job.HyperparameterTuningJob = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_hyperparameter_tuning_job.HyperparameterTuningJob: - r"""Creates a HyperparameterTuningJob - - Args: - request (google.cloud.aiplatform_v1.types.CreateHyperparameterTuningJobRequest): - The request object. Request message for - [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CreateHyperparameterTuningJob]. - parent (str): - Required. The resource name of the Location to create - the HyperparameterTuningJob in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- hyperparameter_tuning_job (google.cloud.aiplatform_v1.types.HyperparameterTuningJob): - Required. The HyperparameterTuningJob - to create. - - This corresponds to the ``hyperparameter_tuning_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.HyperparameterTuningJob: - Represents a HyperparameterTuningJob. - A HyperparameterTuningJob has a Study - specification and multiple CustomJobs - with identical CustomJob specification. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, hyperparameter_tuning_job]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CreateHyperparameterTuningJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.CreateHyperparameterTuningJobRequest): - request = job_service.CreateHyperparameterTuningJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if hyperparameter_tuning_job is not None: - request.hyperparameter_tuning_job = hyperparameter_tuning_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_hyperparameter_tuning_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_hyperparameter_tuning_job(self, - request: job_service.GetHyperparameterTuningJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> hyperparameter_tuning_job.HyperparameterTuningJob: - r"""Gets a HyperparameterTuningJob - - Args: - request (google.cloud.aiplatform_v1.types.GetHyperparameterTuningJobRequest): - The request object. Request message for - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob]. - name (str): - Required. The name of the HyperparameterTuningJob - resource. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
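-
-        Example (illustrative only; assumes an initialized ``JobServiceClient``
-        named ``client``; the resource IDs below are placeholders):
-
-            job = client.get_hyperparameter_tuning_job(
-                name="projects/<project>/locations/<location>/hyperparameterTuningJobs/<job>")
-            print(job.display_name, job.state)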
- - Returns: - google.cloud.aiplatform_v1.types.HyperparameterTuningJob: - Represents a HyperparameterTuningJob. - A HyperparameterTuningJob has a Study - specification and multiple CustomJobs - with identical CustomJob specification. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.GetHyperparameterTuningJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.GetHyperparameterTuningJobRequest): - request = job_service.GetHyperparameterTuningJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_hyperparameter_tuning_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_hyperparameter_tuning_jobs(self, - request: job_service.ListHyperparameterTuningJobsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListHyperparameterTuningJobsPager: - r"""Lists HyperparameterTuningJobs in a Location. - - Args: - request (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest): - The request object. Request message for - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs]. - parent (str): - Required. The resource name of the Location to list the - HyperparameterTuningJobs from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.job_service.pagers.ListHyperparameterTuningJobsPager: - Response message for - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.ListHyperparameterTuningJobsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.ListHyperparameterTuningJobsRequest): - request = job_service.ListHyperparameterTuningJobsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_hyperparameter_tuning_jobs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListHyperparameterTuningJobsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_hyperparameter_tuning_job(self, - request: job_service.DeleteHyperparameterTuningJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a HyperparameterTuningJob. - - Args: - request (google.cloud.aiplatform_v1.types.DeleteHyperparameterTuningJobRequest): - The request object. Request message for - [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob]. - name (str): - Required. The name of the HyperparameterTuningJob - resource to be deleted. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.DeleteHyperparameterTuningJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.DeleteHyperparameterTuningJobRequest): - request = job_service.DeleteHyperparameterTuningJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_hyperparameter_tuning_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def cancel_hyperparameter_tuning_job(self, - request: job_service.CancelHyperparameterTuningJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a HyperparameterTuningJob. Starts asynchronous - cancellation on the HyperparameterTuningJob. The server makes a - best effort to cancel the job, but success is not guaranteed. - Clients can use - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On successful - cancellation, the HyperparameterTuningJob is not deleted; - instead it becomes a job with a - [HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state] - is set to ``CANCELLED``. - - Args: - request (google.cloud.aiplatform_v1.types.CancelHyperparameterTuningJobRequest): - The request object. Request message for - [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CancelHyperparameterTuningJob]. - name (str): - Required. The name of the HyperparameterTuningJob to - cancel. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. 
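-        # (Cancellation is best-effort and fire-and-forget: this RPC returns
-        # None immediately; poll get_hyperparameter_tuning_job to observe the
-        # job reaching the CANCELLED state.)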
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([name])
-        if request is not None and has_flattened_params:
-            raise ValueError('If the `request` argument is set, then none of '
-                             'the individual field arguments should be set.')
-
-        # Minor optimization to avoid making a copy if the user passes
-        # in a job_service.CancelHyperparameterTuningJobRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, job_service.CancelHyperparameterTuningJobRequest):
-            request = job_service.CancelHyperparameterTuningJobRequest(request)
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if name is not None:
-            request.name = name
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.cancel_hyperparameter_tuning_job]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("name", request.name),
-            )),
-        )
-
-        # Send the request.
-        rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-    def create_batch_prediction_job(self,
-            request: job_service.CreateBatchPredictionJobRequest = None,
-            *,
-            parent: str = None,
-            batch_prediction_job: gca_batch_prediction_job.BatchPredictionJob = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> gca_batch_prediction_job.BatchPredictionJob:
-        r"""Creates a BatchPredictionJob. Once created, the
-        BatchPredictionJob immediately attempts to start.
-
-        Args:
-            request (google.cloud.aiplatform_v1.types.CreateBatchPredictionJobRequest):
-                The request object. Request message for
-                [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CreateBatchPredictionJob].
-            parent (str):
-                Required. The resource name of the Location to create
-                the BatchPredictionJob in. Format:
-                ``projects/{project}/locations/{location}``
-
-                This corresponds to the ``parent`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            batch_prediction_job (google.cloud.aiplatform_v1.types.BatchPredictionJob):
-                Required. The BatchPredictionJob to
-                create.
-
-                This corresponds to the ``batch_prediction_job`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.cloud.aiplatform_v1.types.BatchPredictionJob:
-                A job that uses a [Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to produce predictions
-                on multiple [input
-                instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config].
-                If predictions for a significant portion of the
-                instances fail, the job may finish without attempting
-                predictions for all remaining instances.
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([parent, batch_prediction_job])
-        if request is not None and has_flattened_params:
-            raise ValueError('If the `request` argument is set, then none of '
-                             'the individual field arguments should be set.')
-
-        # Minor optimization to avoid making a copy if the user passes
-        # in a job_service.CreateBatchPredictionJobRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, job_service.CreateBatchPredictionJobRequest):
-            request = job_service.CreateBatchPredictionJobRequest(request)
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if parent is not None:
-            request.parent = parent
-        if batch_prediction_job is not None:
-            request.batch_prediction_job = batch_prediction_job
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.create_batch_prediction_job]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("parent", request.parent),
-            )),
-        )
-
-        # Send the request.
-        response = rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Done; return the response.
-        return response
-
-    def get_batch_prediction_job(self,
-            request: job_service.GetBatchPredictionJobRequest = None,
-            *,
-            name: str = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> batch_prediction_job.BatchPredictionJob:
-        r"""Gets a BatchPredictionJob.
-
-        Args:
-            request (google.cloud.aiplatform_v1.types.GetBatchPredictionJobRequest):
-                The request object. Request message for
-                [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob].
-            name (str):
-                Required. The name of the BatchPredictionJob resource.
-                Format:
-                ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}``
-
-                This corresponds to the ``name`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.cloud.aiplatform_v1.types.BatchPredictionJob:
-                A job that uses a [Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to produce predictions
-                on multiple [input
-                instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config].
-                If predictions for a significant portion of the
-                instances fail, the job may finish without attempting
-                predictions for all remaining instances.
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([name])
-        if request is not None and has_flattened_params:
-            raise ValueError('If the `request` argument is set, then none of '
-                             'the individual field arguments should be set.')
-
-        # Minor optimization to avoid making a copy if the user passes
-        # in a job_service.GetBatchPredictionJobRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
- if not isinstance(request, job_service.GetBatchPredictionJobRequest): - request = job_service.GetBatchPredictionJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_batch_prediction_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_batch_prediction_jobs(self, - request: job_service.ListBatchPredictionJobsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListBatchPredictionJobsPager: - r"""Lists BatchPredictionJobs in a Location. - - Args: - request (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest): - The request object. Request message for - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs]. - parent (str): - Required. The resource name of the Location to list the - BatchPredictionJobs from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.job_service.pagers.ListBatchPredictionJobsPager: - Response message for - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.ListBatchPredictionJobsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.ListBatchPredictionJobsRequest): - request = job_service.ListBatchPredictionJobsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_batch_prediction_jobs] - - # Certain fields should be provided within the metadata header; - # add these here. 
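-        # (to_grpc_metadata renders these key/value pairs as the
-        # ``x-goog-request-params`` header, which the backend uses to route
-        # the request to the correct location.)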
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListBatchPredictionJobsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_batch_prediction_job(self, - request: job_service.DeleteBatchPredictionJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a BatchPredictionJob. Can only be called on - jobs that already finished. - - Args: - request (google.cloud.aiplatform_v1.types.DeleteBatchPredictionJobRequest): - The request object. Request message for - [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1.JobService.DeleteBatchPredictionJob]. - name (str): - Required. The name of the BatchPredictionJob resource to - be deleted. Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.DeleteBatchPredictionJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.DeleteBatchPredictionJobRequest): - request = job_service.DeleteBatchPredictionJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_batch_prediction_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
-        response = rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Wrap the response in an operation future.
-        response = gac_operation.from_gapic(
-            response,
-            self._transport.operations_client,
-            empty_pb2.Empty,
-            metadata_type=gca_operation.DeleteOperationMetadata,
-        )
-
-        # Done; return the response.
-        return response
-
-    def cancel_batch_prediction_job(self,
-            request: job_service.CancelBatchPredictionJobRequest = None,
-            *,
-            name: str = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> None:
-        r"""Cancels a BatchPredictionJob.
-
-        Starts asynchronous cancellation on the BatchPredictionJob. The
-        server makes the best effort to cancel the job, but success is
-        not guaranteed. Clients can use
-        [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob]
-        or other methods to check whether the cancellation succeeded or
-        whether the job completed despite cancellation. On a successful
-        cancellation, the BatchPredictionJob is not deleted; instead its
-        [BatchPredictionJob.state][google.cloud.aiplatform.v1.BatchPredictionJob.state]
-        is set to ``CANCELLED``. Any files already outputted by the job
-        are not deleted.
-
-        Args:
-            request (google.cloud.aiplatform_v1.types.CancelBatchPredictionJobRequest):
-                The request object. Request message for
-                [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CancelBatchPredictionJob].
-            name (str):
-                Required. The name of the BatchPredictionJob to cancel.
-                Format:
-                ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}``
-
-                This corresponds to the ``name`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([name])
-        if request is not None and has_flattened_params:
-            raise ValueError('If the `request` argument is set, then none of '
-                             'the individual field arguments should be set.')
-
-        # Minor optimization to avoid making a copy if the user passes
-        # in a job_service.CancelBatchPredictionJobRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, job_service.CancelBatchPredictionJobRequest):
-            request = job_service.CancelBatchPredictionJobRequest(request)
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if name is not None:
-            request.name = name
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.cancel_batch_prediction_job]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("name", request.name),
-            )),
-        )
-
-        # Send the request.
- rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "JobServiceClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/pagers.py deleted file mode 100644 index 2567abe945..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/pagers.py +++ /dev/null @@ -1,510 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.cloud.aiplatform_v1.types import batch_prediction_job -from google.cloud.aiplatform_v1.types import custom_job -from google.cloud.aiplatform_v1.types import data_labeling_job -from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job -from google.cloud.aiplatform_v1.types import job_service - - -class ListCustomJobsPager: - """A pager for iterating through ``list_custom_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListCustomJobsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``custom_jobs`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListCustomJobs`` requests and continue to iterate - through the ``custom_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListCustomJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., job_service.ListCustomJobsResponse], - request: job_service.ListCustomJobsRequest, - response: job_service.ListCustomJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListCustomJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListCustomJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
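-
-        Example (illustrative only; assumes an initialized ``JobServiceClient``
-        named ``client``; callers normally obtain this pager from
-        ``client.list_custom_jobs`` rather than constructing it directly):
-
-            pager = client.list_custom_jobs(
-                parent="projects/<project>/locations/<location>")
-            for custom_job in pager:
-                print(custom_job.name)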
- """ - self._method = method - self._request = job_service.ListCustomJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[job_service.ListCustomJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[custom_job.CustomJob]: - for page in self.pages: - yield from page.custom_jobs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListCustomJobsAsyncPager: - """A pager for iterating through ``list_custom_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListCustomJobsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``custom_jobs`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListCustomJobs`` requests and continue to iterate - through the ``custom_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListCustomJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[job_service.ListCustomJobsResponse]], - request: job_service.ListCustomJobsRequest, - response: job_service.ListCustomJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListCustomJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListCustomJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = job_service.ListCustomJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[job_service.ListCustomJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[custom_job.CustomJob]: - async def async_generator(): - async for page in self.pages: - for response in page.custom_jobs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListDataLabelingJobsPager: - """A pager for iterating through ``list_data_labeling_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``data_labeling_jobs`` field. 
- - If there are more pages, the ``__iter__`` method will make additional - ``ListDataLabelingJobs`` requests and continue to iterate - through the ``data_labeling_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., job_service.ListDataLabelingJobsResponse], - request: job_service.ListDataLabelingJobsRequest, - response: job_service.ListDataLabelingJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = job_service.ListDataLabelingJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[job_service.ListDataLabelingJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[data_labeling_job.DataLabelingJob]: - for page in self.pages: - yield from page.data_labeling_jobs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListDataLabelingJobsAsyncPager: - """A pager for iterating through ``list_data_labeling_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``data_labeling_jobs`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListDataLabelingJobs`` requests and continue to iterate - through the ``data_labeling_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[job_service.ListDataLabelingJobsResponse]], - request: job_service.ListDataLabelingJobsRequest, - response: job_service.ListDataLabelingJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
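-
-        Example (illustrative only; assumes an initialized
-        ``JobServiceAsyncClient`` named ``client`` inside a coroutine):
-
-            pager = await client.list_data_labeling_jobs(
-                parent="projects/<project>/locations/<location>")
-            async for job in pager:
-                print(job.name)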
- """ - self._method = method - self._request = job_service.ListDataLabelingJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[job_service.ListDataLabelingJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[data_labeling_job.DataLabelingJob]: - async def async_generator(): - async for page in self.pages: - for response in page.data_labeling_jobs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListHyperparameterTuningJobsPager: - """A pager for iterating through ``list_hyperparameter_tuning_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``hyperparameter_tuning_jobs`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListHyperparameterTuningJobs`` requests and continue to iterate - through the ``hyperparameter_tuning_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., job_service.ListHyperparameterTuningJobsResponse], - request: job_service.ListHyperparameterTuningJobsRequest, - response: job_service.ListHyperparameterTuningJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = job_service.ListHyperparameterTuningJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[job_service.ListHyperparameterTuningJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[hyperparameter_tuning_job.HyperparameterTuningJob]: - for page in self.pages: - yield from page.hyperparameter_tuning_jobs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListHyperparameterTuningJobsAsyncPager: - """A pager for iterating through ``list_hyperparameter_tuning_jobs`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``hyperparameter_tuning_jobs`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListHyperparameterTuningJobs`` requests and continue to iterate - through the ``hyperparameter_tuning_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[job_service.ListHyperparameterTuningJobsResponse]], - request: job_service.ListHyperparameterTuningJobsRequest, - response: job_service.ListHyperparameterTuningJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = job_service.ListHyperparameterTuningJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[job_service.ListHyperparameterTuningJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[hyperparameter_tuning_job.HyperparameterTuningJob]: - async def async_generator(): - async for page in self.pages: - for response in page.hyperparameter_tuning_jobs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListBatchPredictionJobsPager: - """A pager for iterating through ``list_batch_prediction_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``batch_prediction_jobs`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListBatchPredictionJobs`` requests and continue to iterate - through the ``batch_prediction_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., job_service.ListBatchPredictionJobsResponse], - request: job_service.ListBatchPredictionJobsRequest, - response: job_service.ListBatchPredictionJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. 
- - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = job_service.ListBatchPredictionJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[job_service.ListBatchPredictionJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[batch_prediction_job.BatchPredictionJob]: - for page in self.pages: - yield from page.batch_prediction_jobs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListBatchPredictionJobsAsyncPager: - """A pager for iterating through ``list_batch_prediction_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``batch_prediction_jobs`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListBatchPredictionJobs`` requests and continue to iterate - through the ``batch_prediction_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[job_service.ListBatchPredictionJobsResponse]], - request: job_service.ListBatchPredictionJobsRequest, - response: job_service.ListBatchPredictionJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = job_service.ListBatchPredictionJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[job_service.ListBatchPredictionJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[batch_prediction_job.BatchPredictionJob]: - async def async_generator(): - async for page in self.pages: - for response in page.batch_prediction_jobs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/__init__.py deleted file mode 100644 index 13c5f7ade5..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import JobServiceTransport -from .grpc import JobServiceGrpcTransport -from .grpc_asyncio import JobServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]] -_transport_registry['grpc'] = JobServiceGrpcTransport -_transport_registry['grpc_asyncio'] = JobServiceGrpcAsyncIOTransport - -__all__ = ( - 'JobServiceTransport', - 'JobServiceGrpcTransport', - 'JobServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/base.py deleted file mode 100644 index 0f419ef18b..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/base.py +++ /dev/null @@ -1,450 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1.types import batch_prediction_job -from google.cloud.aiplatform_v1.types import batch_prediction_job as gca_batch_prediction_job -from google.cloud.aiplatform_v1.types import custom_job -from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job -from google.cloud.aiplatform_v1.types import data_labeling_job -from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job -from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job -from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job -from google.cloud.aiplatform_v1.types import job_service -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class JobServiceTransport(abc.ABC): - """Abstract transport class for JobService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. 
- Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # TODO(busunkim): This method is in the base transport - # to avoid duplicating code across the transport classes. These functions - # should be deleted once the minimum required version of google-auth is increased. - - # TODO: Remove this function once google-auth >= 1.25.0 is required - @classmethod - def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: - """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" - - scopes_kwargs = {} - - if _GOOGLE_AUTH_VERSION and ( - packaging.version.parse(_GOOGLE_AUTH_VERSION) - >= packaging.version.parse("1.25.0") - ): - scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} - else: - scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} - - return scopes_kwargs - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods.
- self._wrapped_methods = { - self.create_custom_job: gapic_v1.method.wrap_method( - self.create_custom_job, - default_timeout=5.0, - client_info=client_info, - ), - self.get_custom_job: gapic_v1.method.wrap_method( - self.get_custom_job, - default_timeout=5.0, - client_info=client_info, - ), - self.list_custom_jobs: gapic_v1.method.wrap_method( - self.list_custom_jobs, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_custom_job: gapic_v1.method.wrap_method( - self.delete_custom_job, - default_timeout=5.0, - client_info=client_info, - ), - self.cancel_custom_job: gapic_v1.method.wrap_method( - self.cancel_custom_job, - default_timeout=5.0, - client_info=client_info, - ), - self.create_data_labeling_job: gapic_v1.method.wrap_method( - self.create_data_labeling_job, - default_timeout=5.0, - client_info=client_info, - ), - self.get_data_labeling_job: gapic_v1.method.wrap_method( - self.get_data_labeling_job, - default_timeout=5.0, - client_info=client_info, - ), - self.list_data_labeling_jobs: gapic_v1.method.wrap_method( - self.list_data_labeling_jobs, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_data_labeling_job: gapic_v1.method.wrap_method( - self.delete_data_labeling_job, - default_timeout=5.0, - client_info=client_info, - ), - self.cancel_data_labeling_job: gapic_v1.method.wrap_method( - self.cancel_data_labeling_job, - default_timeout=5.0, - client_info=client_info, - ), - self.create_hyperparameter_tuning_job: gapic_v1.method.wrap_method( - self.create_hyperparameter_tuning_job, - default_timeout=5.0, - client_info=client_info, - ), - self.get_hyperparameter_tuning_job: gapic_v1.method.wrap_method( - self.get_hyperparameter_tuning_job, - default_timeout=5.0, - client_info=client_info, - ), - self.list_hyperparameter_tuning_jobs: gapic_v1.method.wrap_method( - self.list_hyperparameter_tuning_jobs, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_hyperparameter_tuning_job: gapic_v1.method.wrap_method( - self.delete_hyperparameter_tuning_job, - default_timeout=5.0, - client_info=client_info, - ), - self.cancel_hyperparameter_tuning_job: gapic_v1.method.wrap_method( - self.cancel_hyperparameter_tuning_job, - default_timeout=5.0, - client_info=client_info, - ), - self.create_batch_prediction_job: gapic_v1.method.wrap_method( - self.create_batch_prediction_job, - default_timeout=5.0, - client_info=client_info, - ), - self.get_batch_prediction_job: gapic_v1.method.wrap_method( - self.get_batch_prediction_job, - default_timeout=5.0, - client_info=client_info, - ), - self.list_batch_prediction_jobs: gapic_v1.method.wrap_method( - self.list_batch_prediction_jobs, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_batch_prediction_job: gapic_v1.method.wrap_method( - self.delete_batch_prediction_job, - default_timeout=5.0, - client_info=client_info, - ), - self.cancel_batch_prediction_job: gapic_v1.method.wrap_method( - self.cancel_batch_prediction_job, - default_timeout=5.0, - client_info=client_info, - ), - } - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_custom_job(self) -> Callable[ - [job_service.CreateCustomJobRequest], - Union[ - gca_custom_job.CustomJob, - Awaitable[gca_custom_job.CustomJob] - ]]: - raise NotImplementedError() - - @property - def get_custom_job(self) -> Callable[ - [job_service.GetCustomJobRequest], - Union[ - custom_job.CustomJob, - 
Awaitable[custom_job.CustomJob] - ]]: - raise NotImplementedError() - - @property - def list_custom_jobs(self) -> Callable[ - [job_service.ListCustomJobsRequest], - Union[ - job_service.ListCustomJobsResponse, - Awaitable[job_service.ListCustomJobsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_custom_job(self) -> Callable[ - [job_service.DeleteCustomJobRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def cancel_custom_job(self) -> Callable[ - [job_service.CancelCustomJobRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def create_data_labeling_job(self) -> Callable[ - [job_service.CreateDataLabelingJobRequest], - Union[ - gca_data_labeling_job.DataLabelingJob, - Awaitable[gca_data_labeling_job.DataLabelingJob] - ]]: - raise NotImplementedError() - - @property - def get_data_labeling_job(self) -> Callable[ - [job_service.GetDataLabelingJobRequest], - Union[ - data_labeling_job.DataLabelingJob, - Awaitable[data_labeling_job.DataLabelingJob] - ]]: - raise NotImplementedError() - - @property - def list_data_labeling_jobs(self) -> Callable[ - [job_service.ListDataLabelingJobsRequest], - Union[ - job_service.ListDataLabelingJobsResponse, - Awaitable[job_service.ListDataLabelingJobsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_data_labeling_job(self) -> Callable[ - [job_service.DeleteDataLabelingJobRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def cancel_data_labeling_job(self) -> Callable[ - [job_service.CancelDataLabelingJobRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def create_hyperparameter_tuning_job(self) -> Callable[ - [job_service.CreateHyperparameterTuningJobRequest], - Union[ - gca_hyperparameter_tuning_job.HyperparameterTuningJob, - Awaitable[gca_hyperparameter_tuning_job.HyperparameterTuningJob] - ]]: - raise NotImplementedError() - - @property - def get_hyperparameter_tuning_job(self) -> Callable[ - [job_service.GetHyperparameterTuningJobRequest], - Union[ - hyperparameter_tuning_job.HyperparameterTuningJob, - Awaitable[hyperparameter_tuning_job.HyperparameterTuningJob] - ]]: - raise NotImplementedError() - - @property - def list_hyperparameter_tuning_jobs(self) -> Callable[ - [job_service.ListHyperparameterTuningJobsRequest], - Union[ - job_service.ListHyperparameterTuningJobsResponse, - Awaitable[job_service.ListHyperparameterTuningJobsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_hyperparameter_tuning_job(self) -> Callable[ - [job_service.DeleteHyperparameterTuningJobRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def cancel_hyperparameter_tuning_job(self) -> Callable[ - [job_service.CancelHyperparameterTuningJobRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def create_batch_prediction_job(self) -> Callable[ - [job_service.CreateBatchPredictionJobRequest], - Union[ - gca_batch_prediction_job.BatchPredictionJob, - Awaitable[gca_batch_prediction_job.BatchPredictionJob] - ]]: - raise NotImplementedError() - - @property - def get_batch_prediction_job(self) -> Callable[ - [job_service.GetBatchPredictionJobRequest], - Union[ - 
batch_prediction_job.BatchPredictionJob, - Awaitable[batch_prediction_job.BatchPredictionJob] - ]]: - raise NotImplementedError() - - @property - def list_batch_prediction_jobs(self) -> Callable[ - [job_service.ListBatchPredictionJobsRequest], - Union[ - job_service.ListBatchPredictionJobsResponse, - Awaitable[job_service.ListBatchPredictionJobsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_batch_prediction_job(self) -> Callable[ - [job_service.DeleteBatchPredictionJobRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def cancel_batch_prediction_job(self) -> Callable[ - [job_service.CancelBatchPredictionJobRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'JobServiceTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py deleted file mode 100644 index 7543a064cb..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py +++ /dev/null @@ -1,818 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.api_core import gapic_v1 # type: ignore -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1.types import batch_prediction_job -from google.cloud.aiplatform_v1.types import batch_prediction_job as gca_batch_prediction_job -from google.cloud.aiplatform_v1.types import custom_job -from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job -from google.cloud.aiplatform_v1.types import data_labeling_job -from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job -from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job -from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job -from google.cloud.aiplatform_v1.types import job_service -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import JobServiceTransport, DEFAULT_CLIENT_INFO - - -class JobServiceGrpcTransport(JobServiceTransport): - """gRPC backend transport for JobService. - - A service for creating and managing Vertex AI's jobs. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. 
- - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed.
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_custom_job(self) -> Callable[ - [job_service.CreateCustomJobRequest], - gca_custom_job.CustomJob]: - r"""Return a callable for the create custom job method over gRPC. - - Creates a CustomJob. A created CustomJob right away - will be attempted to be run. - - Returns: - Callable[[~.CreateCustomJobRequest], - ~.CustomJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_custom_job' not in self._stubs: - self._stubs['create_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CreateCustomJob', - request_serializer=job_service.CreateCustomJobRequest.serialize, - response_deserializer=gca_custom_job.CustomJob.deserialize, - ) - return self._stubs['create_custom_job'] - - @property - def get_custom_job(self) -> Callable[ - [job_service.GetCustomJobRequest], - custom_job.CustomJob]: - r"""Return a callable for the get custom job method over gRPC. - - Gets a CustomJob. - - Returns: - Callable[[~.GetCustomJobRequest], - ~.CustomJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_custom_job' not in self._stubs: - self._stubs['get_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/GetCustomJob', - request_serializer=job_service.GetCustomJobRequest.serialize, - response_deserializer=custom_job.CustomJob.deserialize, - ) - return self._stubs['get_custom_job'] - - @property - def list_custom_jobs(self) -> Callable[ - [job_service.ListCustomJobsRequest], - job_service.ListCustomJobsResponse]: - r"""Return a callable for the list custom jobs method over gRPC. - - Lists CustomJobs in a Location. - - Returns: - Callable[[~.ListCustomJobsRequest], - ~.ListCustomJobsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_custom_jobs' not in self._stubs: - self._stubs['list_custom_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/ListCustomJobs', - request_serializer=job_service.ListCustomJobsRequest.serialize, - response_deserializer=job_service.ListCustomJobsResponse.deserialize, - ) - return self._stubs['list_custom_jobs'] - - @property - def delete_custom_job(self) -> Callable[ - [job_service.DeleteCustomJobRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete custom job method over gRPC. - - Deletes a CustomJob. - - Returns: - Callable[[~.DeleteCustomJobRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_custom_job' not in self._stubs: - self._stubs['delete_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/DeleteCustomJob', - request_serializer=job_service.DeleteCustomJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_custom_job'] - - @property - def cancel_custom_job(self) -> Callable[ - [job_service.CancelCustomJobRequest], - empty_pb2.Empty]: - r"""Return a callable for the cancel custom job method over gRPC. - - Cancels a CustomJob. Starts asynchronous cancellation on the - CustomJob. The server makes a best effort to cancel the job, but - success is not guaranteed. Clients can use - [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On successful - cancellation, the CustomJob is not deleted; instead it becomes a - job with a - [CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is - set to ``CANCELLED``. - - Returns: - Callable[[~.CancelCustomJobRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_custom_job' not in self._stubs: - self._stubs['cancel_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CancelCustomJob', - request_serializer=job_service.CancelCustomJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_custom_job'] - - @property - def create_data_labeling_job(self) -> Callable[ - [job_service.CreateDataLabelingJobRequest], - gca_data_labeling_job.DataLabelingJob]: - r"""Return a callable for the create data labeling job method over gRPC. - - Creates a DataLabelingJob. - - Returns: - Callable[[~.CreateDataLabelingJobRequest], - ~.DataLabelingJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_data_labeling_job' not in self._stubs: - self._stubs['create_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CreateDataLabelingJob', - request_serializer=job_service.CreateDataLabelingJobRequest.serialize, - response_deserializer=gca_data_labeling_job.DataLabelingJob.deserialize, - ) - return self._stubs['create_data_labeling_job'] - - @property - def get_data_labeling_job(self) -> Callable[ - [job_service.GetDataLabelingJobRequest], - data_labeling_job.DataLabelingJob]: - r"""Return a callable for the get data labeling job method over gRPC. - - Gets a DataLabelingJob. - - Returns: - Callable[[~.GetDataLabelingJobRequest], - ~.DataLabelingJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_data_labeling_job' not in self._stubs: - self._stubs['get_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/GetDataLabelingJob', - request_serializer=job_service.GetDataLabelingJobRequest.serialize, - response_deserializer=data_labeling_job.DataLabelingJob.deserialize, - ) - return self._stubs['get_data_labeling_job'] - - @property - def list_data_labeling_jobs(self) -> Callable[ - [job_service.ListDataLabelingJobsRequest], - job_service.ListDataLabelingJobsResponse]: - r"""Return a callable for the list data labeling jobs method over gRPC. - - Lists DataLabelingJobs in a Location. - - Returns: - Callable[[~.ListDataLabelingJobsRequest], - ~.ListDataLabelingJobsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_data_labeling_jobs' not in self._stubs: - self._stubs['list_data_labeling_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/ListDataLabelingJobs', - request_serializer=job_service.ListDataLabelingJobsRequest.serialize, - response_deserializer=job_service.ListDataLabelingJobsResponse.deserialize, - ) - return self._stubs['list_data_labeling_jobs'] - - @property - def delete_data_labeling_job(self) -> Callable[ - [job_service.DeleteDataLabelingJobRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete data labeling job method over gRPC. - - Deletes a DataLabelingJob. - - Returns: - Callable[[~.DeleteDataLabelingJobRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_data_labeling_job' not in self._stubs: - self._stubs['delete_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/DeleteDataLabelingJob', - request_serializer=job_service.DeleteDataLabelingJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_data_labeling_job'] - - @property - def cancel_data_labeling_job(self) -> Callable[ - [job_service.CancelDataLabelingJobRequest], - empty_pb2.Empty]: - r"""Return a callable for the cancel data labeling job method over gRPC. - - Cancels a DataLabelingJob. Success of cancellation is - not guaranteed. - - Returns: - Callable[[~.CancelDataLabelingJobRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_data_labeling_job' not in self._stubs: - self._stubs['cancel_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CancelDataLabelingJob', - request_serializer=job_service.CancelDataLabelingJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_data_labeling_job'] - - @property - def create_hyperparameter_tuning_job(self) -> Callable[ - [job_service.CreateHyperparameterTuningJobRequest], - gca_hyperparameter_tuning_job.HyperparameterTuningJob]: - r"""Return a callable for the create hyperparameter tuning - job method over gRPC. - - Creates a HyperparameterTuningJob - - Returns: - Callable[[~.CreateHyperparameterTuningJobRequest], - ~.HyperparameterTuningJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_hyperparameter_tuning_job' not in self._stubs: - self._stubs['create_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CreateHyperparameterTuningJob', - request_serializer=job_service.CreateHyperparameterTuningJobRequest.serialize, - response_deserializer=gca_hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, - ) - return self._stubs['create_hyperparameter_tuning_job'] - - @property - def get_hyperparameter_tuning_job(self) -> Callable[ - [job_service.GetHyperparameterTuningJobRequest], - hyperparameter_tuning_job.HyperparameterTuningJob]: - r"""Return a callable for the get hyperparameter tuning job method over gRPC. - - Gets a HyperparameterTuningJob - - Returns: - Callable[[~.GetHyperparameterTuningJobRequest], - ~.HyperparameterTuningJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_hyperparameter_tuning_job' not in self._stubs: - self._stubs['get_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/GetHyperparameterTuningJob', - request_serializer=job_service.GetHyperparameterTuningJobRequest.serialize, - response_deserializer=hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, - ) - return self._stubs['get_hyperparameter_tuning_job'] - - @property - def list_hyperparameter_tuning_jobs(self) -> Callable[ - [job_service.ListHyperparameterTuningJobsRequest], - job_service.ListHyperparameterTuningJobsResponse]: - r"""Return a callable for the list hyperparameter tuning - jobs method over gRPC. - - Lists HyperparameterTuningJobs in a Location. - - Returns: - Callable[[~.ListHyperparameterTuningJobsRequest], - ~.ListHyperparameterTuningJobsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_hyperparameter_tuning_jobs' not in self._stubs: - self._stubs['list_hyperparameter_tuning_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/ListHyperparameterTuningJobs', - request_serializer=job_service.ListHyperparameterTuningJobsRequest.serialize, - response_deserializer=job_service.ListHyperparameterTuningJobsResponse.deserialize, - ) - return self._stubs['list_hyperparameter_tuning_jobs'] - - @property - def delete_hyperparameter_tuning_job(self) -> Callable[ - [job_service.DeleteHyperparameterTuningJobRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete hyperparameter tuning - job method over gRPC. - - Deletes a HyperparameterTuningJob. - - Returns: - Callable[[~.DeleteHyperparameterTuningJobRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_hyperparameter_tuning_job' not in self._stubs: - self._stubs['delete_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/DeleteHyperparameterTuningJob', - request_serializer=job_service.DeleteHyperparameterTuningJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_hyperparameter_tuning_job'] - - @property - def cancel_hyperparameter_tuning_job(self) -> Callable[ - [job_service.CancelHyperparameterTuningJobRequest], - empty_pb2.Empty]: - r"""Return a callable for the cancel hyperparameter tuning - job method over gRPC. - - Cancels a HyperparameterTuningJob. Starts asynchronous - cancellation on the HyperparameterTuningJob. The server makes a - best effort to cancel the job, but success is not guaranteed. - Clients can use - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. 
On successful - cancellation, the HyperparameterTuningJob is not deleted; - instead it becomes a job with a - [HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state] - is set to ``CANCELLED``. - - Returns: - Callable[[~.CancelHyperparameterTuningJobRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_hyperparameter_tuning_job' not in self._stubs: - self._stubs['cancel_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CancelHyperparameterTuningJob', - request_serializer=job_service.CancelHyperparameterTuningJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_hyperparameter_tuning_job'] - - @property - def create_batch_prediction_job(self) -> Callable[ - [job_service.CreateBatchPredictionJobRequest], - gca_batch_prediction_job.BatchPredictionJob]: - r"""Return a callable for the create batch prediction job method over gRPC. - - Creates a BatchPredictionJob. A BatchPredictionJob - once created will right away be attempted to start. - - Returns: - Callable[[~.CreateBatchPredictionJobRequest], - ~.BatchPredictionJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_batch_prediction_job' not in self._stubs: - self._stubs['create_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CreateBatchPredictionJob', - request_serializer=job_service.CreateBatchPredictionJobRequest.serialize, - response_deserializer=gca_batch_prediction_job.BatchPredictionJob.deserialize, - ) - return self._stubs['create_batch_prediction_job'] - - @property - def get_batch_prediction_job(self) -> Callable[ - [job_service.GetBatchPredictionJobRequest], - batch_prediction_job.BatchPredictionJob]: - r"""Return a callable for the get batch prediction job method over gRPC. - - Gets a BatchPredictionJob - - Returns: - Callable[[~.GetBatchPredictionJobRequest], - ~.BatchPredictionJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_batch_prediction_job' not in self._stubs: - self._stubs['get_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/GetBatchPredictionJob', - request_serializer=job_service.GetBatchPredictionJobRequest.serialize, - response_deserializer=batch_prediction_job.BatchPredictionJob.deserialize, - ) - return self._stubs['get_batch_prediction_job'] - - @property - def list_batch_prediction_jobs(self) -> Callable[ - [job_service.ListBatchPredictionJobsRequest], - job_service.ListBatchPredictionJobsResponse]: - r"""Return a callable for the list batch prediction jobs method over gRPC. - - Lists BatchPredictionJobs in a Location. - - Returns: - Callable[[~.ListBatchPredictionJobsRequest], - ~.ListBatchPredictionJobsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_batch_prediction_jobs' not in self._stubs: - self._stubs['list_batch_prediction_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/ListBatchPredictionJobs', - request_serializer=job_service.ListBatchPredictionJobsRequest.serialize, - response_deserializer=job_service.ListBatchPredictionJobsResponse.deserialize, - ) - return self._stubs['list_batch_prediction_jobs'] - - @property - def delete_batch_prediction_job(self) -> Callable[ - [job_service.DeleteBatchPredictionJobRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete batch prediction job method over gRPC. - - Deletes a BatchPredictionJob. Can only be called on - jobs that already finished. - - Returns: - Callable[[~.DeleteBatchPredictionJobRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_batch_prediction_job' not in self._stubs: - self._stubs['delete_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/DeleteBatchPredictionJob', - request_serializer=job_service.DeleteBatchPredictionJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_batch_prediction_job'] - - @property - def cancel_batch_prediction_job(self) -> Callable[ - [job_service.CancelBatchPredictionJobRequest], - empty_pb2.Empty]: - r"""Return a callable for the cancel batch prediction job method over gRPC. - - Cancels a BatchPredictionJob. - - Starts asynchronous cancellation on the BatchPredictionJob. The - server makes the best effort to cancel the job, but success is - not guaranteed. Clients can use - [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On a successful - cancellation, the BatchPredictionJob is not deleted; instead its - [BatchPredictionJob.state][google.cloud.aiplatform.v1.BatchPredictionJob.state] - is set to ``CANCELLED``. Any files already outputted by the job - are not deleted. - - Returns: - Callable[[~.CancelBatchPredictionJobRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server.
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_batch_prediction_job' not in self._stubs: - self._stubs['cancel_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CancelBatchPredictionJob', - request_serializer=job_service.CancelBatchPredictionJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_batch_prediction_job'] - - -__all__ = ( - 'JobServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py deleted file mode 100644 index e684252c6b..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,822 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1.types import batch_prediction_job -from google.cloud.aiplatform_v1.types import batch_prediction_job as gca_batch_prediction_job -from google.cloud.aiplatform_v1.types import custom_job -from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job -from google.cloud.aiplatform_v1.types import data_labeling_job -from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job -from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job -from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job -from google.cloud.aiplatform_v1.types import job_service -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import JobServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import JobServiceGrpcTransport - - -class JobServiceGrpcAsyncIOTransport(JobServiceTransport): - """gRPC AsyncIO backend transport for JobService. - - A service for creating and managing Vertex AI's jobs. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. 
A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. 
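# NOTE: an illustrative sketch, not part of the generated file. When the
# channel options chosen above (e.g. unlimited message sizes) are not what a
# caller wants, a pre-built channel can be supplied instead; per the
# constructor docstring, credentials and mTLS settings are then ignored.
# Assumes application default credentials are available.
from google.cloud.aiplatform_v1.services.job_service.transports import (
    JobServiceGrpcAsyncIOTransport,
)

def make_transport_with_custom_channel() -> JobServiceGrpcAsyncIOTransport:
    channel = JobServiceGrpcAsyncIOTransport.create_channel(
        'aiplatform.googleapis.com',
        # Extra kwargs flow through to channel creation; cap receive size
        # at 64 MiB instead of the default unlimited (-1) used above.
        options=[('grpc.max_receive_message_length', 64 * 1024 * 1024)],
    )
    return JobServiceGrpcAsyncIOTransport(channel=channel)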
- return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_custom_job(self) -> Callable[ - [job_service.CreateCustomJobRequest], - Awaitable[gca_custom_job.CustomJob]]: - r"""Return a callable for the create custom job method over gRPC. - - Creates a CustomJob. A created CustomJob will - immediately be attempted to run. - - Returns: - Callable[[~.CreateCustomJobRequest], - Awaitable[~.CustomJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_custom_job' not in self._stubs: - self._stubs['create_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CreateCustomJob', - request_serializer=job_service.CreateCustomJobRequest.serialize, - response_deserializer=gca_custom_job.CustomJob.deserialize, - ) - return self._stubs['create_custom_job'] - - @property - def get_custom_job(self) -> Callable[ - [job_service.GetCustomJobRequest], - Awaitable[custom_job.CustomJob]]: - r"""Return a callable for the get custom job method over gRPC. - - Gets a CustomJob. - - Returns: - Callable[[~.GetCustomJobRequest], - Awaitable[~.CustomJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_custom_job' not in self._stubs: - self._stubs['get_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/GetCustomJob', - request_serializer=job_service.GetCustomJobRequest.serialize, - response_deserializer=custom_job.CustomJob.deserialize, - ) - return self._stubs['get_custom_job'] - - @property - def list_custom_jobs(self) -> Callable[ - [job_service.ListCustomJobsRequest], - Awaitable[job_service.ListCustomJobsResponse]]: - r"""Return a callable for the list custom jobs method over gRPC. - - Lists CustomJobs in a Location. - - Returns: - Callable[[~.ListCustomJobsRequest], - Awaitable[~.ListCustomJobsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each.
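# NOTE: an illustrative sketch, not part of the generated file. Unlike the
# synchronous transport, every callable returned by the properties above
# yields an Awaitable; the project and location below are placeholders.
import asyncio

from google.cloud.aiplatform_v1.services.job_service.transports import (
    JobServiceGrpcAsyncIOTransport,
)
from google.cloud.aiplatform_v1.types import job_service as job_service_types

async def list_custom_jobs_example() -> None:
    transport = JobServiceGrpcAsyncIOTransport()
    request = job_service_types.ListCustomJobsRequest(
        parent='projects/my-project/locations/us-central1',  # placeholder
    )
    response = await transport.list_custom_jobs(request)  # awaitable RPC
    print(response)

# asyncio.run(list_custom_jobs_example())  # requires default credentials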
- if 'list_custom_jobs' not in self._stubs: - self._stubs['list_custom_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/ListCustomJobs', - request_serializer=job_service.ListCustomJobsRequest.serialize, - response_deserializer=job_service.ListCustomJobsResponse.deserialize, - ) - return self._stubs['list_custom_jobs'] - - @property - def delete_custom_job(self) -> Callable[ - [job_service.DeleteCustomJobRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete custom job method over gRPC. - - Deletes a CustomJob. - - Returns: - Callable[[~.DeleteCustomJobRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_custom_job' not in self._stubs: - self._stubs['delete_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/DeleteCustomJob', - request_serializer=job_service.DeleteCustomJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_custom_job'] - - @property - def cancel_custom_job(self) -> Callable[ - [job_service.CancelCustomJobRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the cancel custom job method over gRPC. - - Cancels a CustomJob. Starts asynchronous cancellation on the - CustomJob. The server makes a best effort to cancel the job, but - success is not guaranteed. Clients can use - [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On successful - cancellation, the CustomJob is not deleted; instead it becomes a - job with a - [CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is - set to ``CANCELLED``. - - Returns: - Callable[[~.CancelCustomJobRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_custom_job' not in self._stubs: - self._stubs['cancel_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CancelCustomJob', - request_serializer=job_service.CancelCustomJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_custom_job'] - - @property - def create_data_labeling_job(self) -> Callable[ - [job_service.CreateDataLabelingJobRequest], - Awaitable[gca_data_labeling_job.DataLabelingJob]]: - r"""Return a callable for the create data labeling job method over gRPC. - - Creates a DataLabelingJob. - - Returns: - Callable[[~.CreateDataLabelingJobRequest], - Awaitable[~.DataLabelingJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
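# NOTE: an illustrative sketch, not part of the generated file. Because
# cancellation is asynchronous and best-effort (see the cancel_custom_job
# docstring above), callers typically poll the job afterwards; the resource
# name below is a placeholder.
from google.cloud.aiplatform_v1.services.job_service import JobServiceAsyncClient
from google.cloud.aiplatform_v1.types import JobState

async def cancel_and_check(name: str) -> bool:
    client = JobServiceAsyncClient()
    await client.cancel_custom_job(name=name)
    job = await client.get_custom_job(name=name)
    # On successful cancellation the job ends up CANCELLED with error code 1.
    return job.state == JobState.JOB_STATE_CANCELLED

# e.g. await cancel_and_check(
#     'projects/my-project/locations/us-central1/customJobs/123')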
- if 'create_data_labeling_job' not in self._stubs: - self._stubs['create_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CreateDataLabelingJob', - request_serializer=job_service.CreateDataLabelingJobRequest.serialize, - response_deserializer=gca_data_labeling_job.DataLabelingJob.deserialize, - ) - return self._stubs['create_data_labeling_job'] - - @property - def get_data_labeling_job(self) -> Callable[ - [job_service.GetDataLabelingJobRequest], - Awaitable[data_labeling_job.DataLabelingJob]]: - r"""Return a callable for the get data labeling job method over gRPC. - - Gets a DataLabelingJob. - - Returns: - Callable[[~.GetDataLabelingJobRequest], - Awaitable[~.DataLabelingJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_data_labeling_job' not in self._stubs: - self._stubs['get_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/GetDataLabelingJob', - request_serializer=job_service.GetDataLabelingJobRequest.serialize, - response_deserializer=data_labeling_job.DataLabelingJob.deserialize, - ) - return self._stubs['get_data_labeling_job'] - - @property - def list_data_labeling_jobs(self) -> Callable[ - [job_service.ListDataLabelingJobsRequest], - Awaitable[job_service.ListDataLabelingJobsResponse]]: - r"""Return a callable for the list data labeling jobs method over gRPC. - - Lists DataLabelingJobs in a Location. - - Returns: - Callable[[~.ListDataLabelingJobsRequest], - Awaitable[~.ListDataLabelingJobsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_data_labeling_jobs' not in self._stubs: - self._stubs['list_data_labeling_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/ListDataLabelingJobs', - request_serializer=job_service.ListDataLabelingJobsRequest.serialize, - response_deserializer=job_service.ListDataLabelingJobsResponse.deserialize, - ) - return self._stubs['list_data_labeling_jobs'] - - @property - def delete_data_labeling_job(self) -> Callable[ - [job_service.DeleteDataLabelingJobRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete data labeling job method over gRPC. - - Deletes a DataLabelingJob. - - Returns: - Callable[[~.DeleteDataLabelingJobRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_data_labeling_job' not in self._stubs: - self._stubs['delete_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/DeleteDataLabelingJob', - request_serializer=job_service.DeleteDataLabelingJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_data_labeling_job'] - - @property - def cancel_data_labeling_job(self) -> Callable[ - [job_service.CancelDataLabelingJobRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the cancel data labeling job method over gRPC. - - Cancels a DataLabelingJob. Success of cancellation is - not guaranteed. - - Returns: - Callable[[~.CancelDataLabelingJobRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_data_labeling_job' not in self._stubs: - self._stubs['cancel_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CancelDataLabelingJob', - request_serializer=job_service.CancelDataLabelingJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_data_labeling_job'] - - @property - def create_hyperparameter_tuning_job(self) -> Callable[ - [job_service.CreateHyperparameterTuningJobRequest], - Awaitable[gca_hyperparameter_tuning_job.HyperparameterTuningJob]]: - r"""Return a callable for the create hyperparameter tuning - job method over gRPC. - - Creates a HyperparameterTuningJob. - - Returns: - Callable[[~.CreateHyperparameterTuningJobRequest], - Awaitable[~.HyperparameterTuningJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_hyperparameter_tuning_job' not in self._stubs: - self._stubs['create_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CreateHyperparameterTuningJob', - request_serializer=job_service.CreateHyperparameterTuningJobRequest.serialize, - response_deserializer=gca_hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, - ) - return self._stubs['create_hyperparameter_tuning_job'] - - @property - def get_hyperparameter_tuning_job(self) -> Callable[ - [job_service.GetHyperparameterTuningJobRequest], - Awaitable[hyperparameter_tuning_job.HyperparameterTuningJob]]: - r"""Return a callable for the get hyperparameter tuning job method over gRPC. - - Gets a HyperparameterTuningJob. - - Returns: - Callable[[~.GetHyperparameterTuningJobRequest], - Awaitable[~.HyperparameterTuningJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each.
- if 'get_hyperparameter_tuning_job' not in self._stubs: - self._stubs['get_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/GetHyperparameterTuningJob', - request_serializer=job_service.GetHyperparameterTuningJobRequest.serialize, - response_deserializer=hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, - ) - return self._stubs['get_hyperparameter_tuning_job'] - - @property - def list_hyperparameter_tuning_jobs(self) -> Callable[ - [job_service.ListHyperparameterTuningJobsRequest], - Awaitable[job_service.ListHyperparameterTuningJobsResponse]]: - r"""Return a callable for the list hyperparameter tuning - jobs method over gRPC. - - Lists HyperparameterTuningJobs in a Location. - - Returns: - Callable[[~.ListHyperparameterTuningJobsRequest], - Awaitable[~.ListHyperparameterTuningJobsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_hyperparameter_tuning_jobs' not in self._stubs: - self._stubs['list_hyperparameter_tuning_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/ListHyperparameterTuningJobs', - request_serializer=job_service.ListHyperparameterTuningJobsRequest.serialize, - response_deserializer=job_service.ListHyperparameterTuningJobsResponse.deserialize, - ) - return self._stubs['list_hyperparameter_tuning_jobs'] - - @property - def delete_hyperparameter_tuning_job(self) -> Callable[ - [job_service.DeleteHyperparameterTuningJobRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete hyperparameter tuning - job method over gRPC. - - Deletes a HyperparameterTuningJob. - - Returns: - Callable[[~.DeleteHyperparameterTuningJobRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_hyperparameter_tuning_job' not in self._stubs: - self._stubs['delete_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/DeleteHyperparameterTuningJob', - request_serializer=job_service.DeleteHyperparameterTuningJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_hyperparameter_tuning_job'] - - @property - def cancel_hyperparameter_tuning_job(self) -> Callable[ - [job_service.CancelHyperparameterTuningJobRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the cancel hyperparameter tuning - job method over gRPC. - - Cancels a HyperparameterTuningJob. Starts asynchronous - cancellation on the HyperparameterTuningJob. The server makes a - best effort to cancel the job, but success is not guaranteed. - Clients can use - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. 
On successful - cancellation, the HyperparameterTuningJob is not deleted; - instead it becomes a job with a - [HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state] - is set to ``CANCELLED``. - - Returns: - Callable[[~.CancelHyperparameterTuningJobRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_hyperparameter_tuning_job' not in self._stubs: - self._stubs['cancel_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CancelHyperparameterTuningJob', - request_serializer=job_service.CancelHyperparameterTuningJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_hyperparameter_tuning_job'] - - @property - def create_batch_prediction_job(self) -> Callable[ - [job_service.CreateBatchPredictionJobRequest], - Awaitable[gca_batch_prediction_job.BatchPredictionJob]]: - r"""Return a callable for the create batch prediction job method over gRPC. - - Creates a BatchPredictionJob. A BatchPredictionJob, - once created, will immediately be attempted to start. - - Returns: - Callable[[~.CreateBatchPredictionJobRequest], - Awaitable[~.BatchPredictionJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_batch_prediction_job' not in self._stubs: - self._stubs['create_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CreateBatchPredictionJob', - request_serializer=job_service.CreateBatchPredictionJobRequest.serialize, - response_deserializer=gca_batch_prediction_job.BatchPredictionJob.deserialize, - ) - return self._stubs['create_batch_prediction_job'] - - @property - def get_batch_prediction_job(self) -> Callable[ - [job_service.GetBatchPredictionJobRequest], - Awaitable[batch_prediction_job.BatchPredictionJob]]: - r"""Return a callable for the get batch prediction job method over gRPC. - - Gets a BatchPredictionJob. - - Returns: - Callable[[~.GetBatchPredictionJobRequest], - Awaitable[~.BatchPredictionJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each.
- if 'get_batch_prediction_job' not in self._stubs: - self._stubs['get_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/GetBatchPredictionJob', - request_serializer=job_service.GetBatchPredictionJobRequest.serialize, - response_deserializer=batch_prediction_job.BatchPredictionJob.deserialize, - ) - return self._stubs['get_batch_prediction_job'] - - @property - def list_batch_prediction_jobs(self) -> Callable[ - [job_service.ListBatchPredictionJobsRequest], - Awaitable[job_service.ListBatchPredictionJobsResponse]]: - r"""Return a callable for the list batch prediction jobs method over gRPC. - - Lists BatchPredictionJobs in a Location. - - Returns: - Callable[[~.ListBatchPredictionJobsRequest], - Awaitable[~.ListBatchPredictionJobsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_batch_prediction_jobs' not in self._stubs: - self._stubs['list_batch_prediction_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/ListBatchPredictionJobs', - request_serializer=job_service.ListBatchPredictionJobsRequest.serialize, - response_deserializer=job_service.ListBatchPredictionJobsResponse.deserialize, - ) - return self._stubs['list_batch_prediction_jobs'] - - @property - def delete_batch_prediction_job(self) -> Callable[ - [job_service.DeleteBatchPredictionJobRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete batch prediction job method over gRPC. - - Deletes a BatchPredictionJob. Can only be called on - jobs that have already finished. - - Returns: - Callable[[~.DeleteBatchPredictionJobRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_batch_prediction_job' not in self._stubs: - self._stubs['delete_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/DeleteBatchPredictionJob', - request_serializer=job_service.DeleteBatchPredictionJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_batch_prediction_job'] - - @property - def cancel_batch_prediction_job(self) -> Callable[ - [job_service.CancelBatchPredictionJobRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the cancel batch prediction job method over gRPC. - - Cancels a BatchPredictionJob. - - Starts asynchronous cancellation on the BatchPredictionJob. The - server makes the best effort to cancel the job, but success is - not guaranteed. Clients can use - [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On a successful - cancellation, the BatchPredictionJob is not deleted; instead its - [BatchPredictionJob.state][google.cloud.aiplatform.v1.BatchPredictionJob.state] - is set to ``CANCELLED``. Any files already outputted by the job - are not deleted.
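# NOTE: an illustrative sketch, not part of the generated file. As the
# docstring above spells out, a cancelled BatchPredictionJob is not deleted
# and its partial output files survive; only its state changes. The resource
# name is a placeholder; `client` is assumed to be a JobServiceAsyncClient.
from google.cloud.aiplatform_v1.types import JobState

async def cancel_batch_prediction(client, name: str) -> None:
    await client.cancel_batch_prediction_job(name=name)
    job = await client.get_batch_prediction_job(name=name)
    if job.state == JobState.JOB_STATE_CANCELLED:
        print('cancelled; any files already written remain in place')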
- - Returns: - Callable[[~.CancelBatchPredictionJobRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_batch_prediction_job' not in self._stubs: - self._stubs['cancel_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CancelBatchPredictionJob', - request_serializer=job_service.CancelBatchPredictionJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_batch_prediction_job'] - - -__all__ = ( - 'JobServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/__init__.py deleted file mode 100644 index b32b10b1d7..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import MigrationServiceClient -from .async_client import MigrationServiceAsyncClient - -__all__ = ( - 'MigrationServiceClient', - 'MigrationServiceAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/async_client.py deleted file mode 100644 index c2d497872e..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/async_client.py +++ /dev/null @@ -1,376 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.migration_service import pagers -from google.cloud.aiplatform_v1.types import migratable_resource -from google.cloud.aiplatform_v1.types import migration_service -from .transports.base import MigrationServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import MigrationServiceGrpcAsyncIOTransport -from .client import MigrationServiceClient - - -class MigrationServiceAsyncClient: - """A service that migrates resources from automl.googleapis.com, - datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. - """ - - _client: MigrationServiceClient - - DEFAULT_ENDPOINT = MigrationServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = MigrationServiceClient.DEFAULT_MTLS_ENDPOINT - - annotated_dataset_path = staticmethod(MigrationServiceClient.annotated_dataset_path) - parse_annotated_dataset_path = staticmethod(MigrationServiceClient.parse_annotated_dataset_path) - dataset_path = staticmethod(MigrationServiceClient.dataset_path) - parse_dataset_path = staticmethod(MigrationServiceClient.parse_dataset_path) - dataset_path = staticmethod(MigrationServiceClient.dataset_path) - parse_dataset_path = staticmethod(MigrationServiceClient.parse_dataset_path) - dataset_path = staticmethod(MigrationServiceClient.dataset_path) - parse_dataset_path = staticmethod(MigrationServiceClient.parse_dataset_path) - model_path = staticmethod(MigrationServiceClient.model_path) - parse_model_path = staticmethod(MigrationServiceClient.parse_model_path) - model_path = staticmethod(MigrationServiceClient.model_path) - parse_model_path = staticmethod(MigrationServiceClient.parse_model_path) - version_path = staticmethod(MigrationServiceClient.version_path) - parse_version_path = staticmethod(MigrationServiceClient.parse_version_path) - common_billing_account_path = staticmethod(MigrationServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(MigrationServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(MigrationServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(MigrationServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(MigrationServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(MigrationServiceClient.parse_common_organization_path) - common_project_path = staticmethod(MigrationServiceClient.common_project_path) - parse_common_project_path = staticmethod(MigrationServiceClient.parse_common_project_path) - common_location_path = staticmethod(MigrationServiceClient.common_location_path) - parse_common_location_path = staticmethod(MigrationServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. 
- - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - MigrationServiceAsyncClient: The constructed client. - """ - return MigrationServiceClient.from_service_account_info.__func__(MigrationServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - MigrationServiceAsyncClient: The constructed client. - """ - return MigrationServiceClient.from_service_account_file.__func__(MigrationServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> MigrationServiceTransport: - """Returns the transport used by the client instance. - - Returns: - MigrationServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(MigrationServiceClient).get_transport_class, type(MigrationServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, MigrationServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the migration service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.MigrationServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. 
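# NOTE: an illustrative sketch, not part of the generated file, showing the
# constructor described above with an explicit endpoint; per the docstring,
# an explicit api_endpoint takes precedence over GOOGLE_API_USE_MTLS_ENDPOINT
# auto-switching. The regional hostname is an assumption for the example.
from google.api_core.client_options import ClientOptions

def make_migration_client() -> 'MigrationServiceAsyncClient':
    return MigrationServiceAsyncClient(
        client_options=ClientOptions(
            api_endpoint='us-central1-aiplatform.googleapis.com',
        ),
    )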
- """ - self._client = MigrationServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def search_migratable_resources(self, - request: migration_service.SearchMigratableResourcesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.SearchMigratableResourcesAsyncPager: - r"""Searches all of the resources in - automl.googleapis.com, datalabeling.googleapis.com and - ml.googleapis.com that can be migrated to Vertex AI's - given location. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest`): - The request object. Request message for - [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources]. - parent (:class:`str`): - Required. The location that the migratable resources - should be searched from. It's the Vertex AI location - that the resources can be migrated to, not the - resources' original location. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.migration_service.pagers.SearchMigratableResourcesAsyncPager: - Response message for - [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = migration_service.SearchMigratableResourcesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.search_migratable_resources, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.SearchMigratableResourcesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def batch_migrate_resources(self, - request: migration_service.BatchMigrateResourcesRequest = None, - *, - parent: str = None, - migrate_resource_requests: Sequence[migration_service.MigrateResourceRequest] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Batch migrates resources from ml.googleapis.com, - automl.googleapis.com, and datalabeling.googleapis.com - to Vertex AI. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.BatchMigrateResourcesRequest`): - The request object. Request message for - [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources]. - parent (:class:`str`): - Required. The location the migrated resources will - live in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - migrate_resource_requests (:class:`Sequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest]`): - Required. The request messages - specifying the resources to migrate. - They must be in the same location as the - destination. Up to 50 resources can be - migrated in one batch. - - This corresponds to the ``migrate_resource_requests`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.BatchMigrateResourcesResponse` - Response message for - [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, migrate_resource_requests]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = migration_service.BatchMigrateResourcesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if migrate_resource_requests: - request.migrate_resource_requests.extend(migrate_resource_requests) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.batch_migrate_resources, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future.
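# NOTE: an illustrative sketch, not part of the generated file. The
# AsyncOperation produced below resolves, via `await operation.result()`, to
# a BatchMigrateResourcesResponse; `requests` stands in for a list of up to
# 50 MigrateResourceRequest messages.
async def migrate(client, parent: str, requests) -> None:
    operation = await client.batch_migrate_resources(
        parent=parent,
        migrate_resource_requests=requests,
    )
    response = await operation.result()  # waits for the LRO to complete
    print(response)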
- response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - migration_service.BatchMigrateResourcesResponse, - metadata_type=migration_service.BatchMigrateResourcesOperationMetadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "MigrationServiceAsyncClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/client.py deleted file mode 100644 index 0c288329ab..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/client.py +++ /dev/null @@ -1,617 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from distutils import util -import os -import re -from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.migration_service import pagers -from google.cloud.aiplatform_v1.types import migratable_resource -from google.cloud.aiplatform_v1.types import migration_service -from .transports.base import MigrationServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import MigrationServiceGrpcTransport -from .transports.grpc_asyncio import MigrationServiceGrpcAsyncIOTransport - - -class MigrationServiceClientMeta(type): - """Metaclass for the MigrationService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[MigrationServiceTransport]] - _transport_registry["grpc"] = MigrationServiceGrpcTransport - _transport_registry["grpc_asyncio"] = MigrationServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[MigrationServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. 
If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class MigrationServiceClient(metaclass=MigrationServiceClientMeta): - """A service that migrates resources from automl.googleapis.com, - datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - MigrationServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - MigrationServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> MigrationServiceTransport: - """Returns the transport used by the client instance. - - Returns: - MigrationServiceTransport: The transport used by the client - instance.
- """ - return self._transport - - @staticmethod - def annotated_dataset_path(project: str,dataset: str,annotated_dataset: str,) -> str: - """Returns a fully-qualified annotated_dataset string.""" - return "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}".format(project=project, dataset=dataset, annotated_dataset=annotated_dataset, ) - - @staticmethod - def parse_annotated_dataset_path(path: str) -> Dict[str,str]: - """Parses an annotated_dataset path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)/annotatedDatasets/(?P<annotated_dataset>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def dataset_path(project: str,location: str,dataset: str,) -> str: - """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - - @staticmethod - def parse_dataset_path(path: str) -> Dict[str,str]: - """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def dataset_path(project: str,location: str,dataset: str,) -> str: - """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - - @staticmethod - def parse_dataset_path(path: str) -> Dict[str,str]: - """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def dataset_path(project: str,dataset: str,) -> str: - """Returns a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format(project=project, dataset=dataset, ) - - @staticmethod - def parse_dataset_path(path: str) -> Dict[str,str]: - """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_path(project: str,location: str,model: str,) -> str: - """Returns a fully-qualified model string.""" - return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - - @staticmethod - def parse_model_path(path: str) -> Dict[str,str]: - """Parses a model path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_path(project: str,location: str,model: str,) -> str: - """Returns a fully-qualified model string.""" - return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - - @staticmethod - def parse_model_path(path: str) -> Dict[str,str]: - """Parses a model path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def version_path(project: str,model: str,version: str,) -> str: - """Returns a fully-qualified version string.""" - return "projects/{project}/models/{model}/versions/{version}".format(project=project, model=model, version=version, ) - - @staticmethod - def parse_version_path(path: str) -> Dict[str,str]: - """Parses a version path into its component segments.""" - m =
re.match(r"^projects/(?P<project>.+?)/models/(?P<model>.+?)/versions/(?P<version>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P<folder>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse an organization path into its component segments.""" - m = re.match(r"^organizations/(?P<organization>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, MigrationServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the migration service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, MigrationServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client.
GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, MigrationServiceTransport): - # transport is a MigrationServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - ) - - def search_migratable_resources(self, - request: migration_service.SearchMigratableResourcesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.SearchMigratableResourcesPager: - r"""Searches all of the resources in - automl.googleapis.com, datalabeling.googleapis.com and - ml.googleapis.com that can be migrated to Vertex AI's - given location. - - Args: - request (google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest): - The request object. Request message for - [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources]. - parent (str): - Required. The location that the migratable resources - should be searched from. It's the Vertex AI location - that the resources can be migrated to, not the - resources' original location. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.migration_service.pagers.SearchMigratableResourcesPager: - Response message for - [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a migration_service.SearchMigratableResourcesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, migration_service.SearchMigratableResourcesRequest): - request = migration_service.SearchMigratableResourcesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.search_migratable_resources] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
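        # For orientation, a minimal usage sketch of this paginated method
        # (the project and location are hypothetical, not taken from the patch):
        #
        #   client = MigrationServiceClient()
        #   pager = client.search_migratable_resources(
        #       parent="projects/my-project/locations/us-central1",
        #   )
        #   for resource in pager:
        #       ...  # each item is a MigratableResource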
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.SearchMigratableResourcesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def batch_migrate_resources(self, - request: migration_service.BatchMigrateResourcesRequest = None, - *, - parent: str = None, - migrate_resource_requests: Sequence[migration_service.MigrateResourceRequest] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Batch migrates resources from ml.googleapis.com, - automl.googleapis.com, and datalabeling.googleapis.com - to Vertex AI. - - Args: - request (google.cloud.aiplatform_v1.types.BatchMigrateResourcesRequest): - The request object. Request message for - [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources]. - parent (str): - Required. The location where the migrated resources - will live. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - migrate_resource_requests (Sequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest]): - Required. The request messages - specifying the resources to migrate. - They must be in the same location as the - destination. Up to 50 resources can be - migrated in one batch. - - This corresponds to the ``migrate_resource_requests`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.BatchMigrateResourcesResponse` - Response message for - [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, migrate_resource_requests]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a migration_service.BatchMigrateResourcesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, migration_service.BatchMigrateResourcesRequest): - request = migration_service.BatchMigrateResourcesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these.
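        # A hedged sketch of driving the long-running operation this method
        # returns (hypothetical values; `requests` holds MigrateResourceRequest
        # messages, at most 50 per batch as documented above):
        #
        #   op = client.batch_migrate_resources(
        #       parent="projects/my-project/locations/us-central1",
        #       migrate_resource_requests=requests,
        #   )
        #   response = op.result()  # blocks; a BatchMigrateResourcesResponse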
- if parent is not None: - request.parent = parent - if migrate_resource_requests is not None: - request.migrate_resource_requests = migrate_resource_requests - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.batch_migrate_resources] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - migration_service.BatchMigrateResourcesResponse, - metadata_type=migration_service.BatchMigrateResourcesOperationMetadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "MigrationServiceClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/pagers.py deleted file mode 100644 index b173c77431..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/pagers.py +++ /dev/null @@ -1,141 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.cloud.aiplatform_v1.types import migratable_resource -from google.cloud.aiplatform_v1.types import migration_service - - -class SearchMigratableResourcesPager: - """A pager for iterating through ``search_migratable_resources`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``migratable_resources`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``SearchMigratableResources`` requests and continue to iterate - through the ``migratable_resources`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., migration_service.SearchMigratableResourcesResponse], - request: migration_service.SearchMigratableResourcesRequest, - response: migration_service.SearchMigratableResourcesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = migration_service.SearchMigratableResourcesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[migration_service.SearchMigratableResourcesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[migratable_resource.MigratableResource]: - for page in self.pages: - yield from page.migratable_resources - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class SearchMigratableResourcesAsyncPager: - """A pager for iterating through ``search_migratable_resources`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``migratable_resources`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``SearchMigratableResources`` requests and continue to iterate - through the ``migratable_resources`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[migration_service.SearchMigratableResourcesResponse]], - request: migration_service.SearchMigratableResourcesRequest, - response: migration_service.SearchMigratableResourcesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = migration_service.SearchMigratableResourcesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[migration_service.SearchMigratableResourcesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[migratable_resource.MigratableResource]: - async def async_generator(): - async for page in self.pages: - for response in page.migratable_resources: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/__init__.py deleted file mode 100644 index 8f036c410e..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import MigrationServiceTransport -from .grpc import MigrationServiceGrpcTransport -from .grpc_asyncio import MigrationServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[MigrationServiceTransport]] -_transport_registry['grpc'] = MigrationServiceGrpcTransport -_transport_registry['grpc_asyncio'] = MigrationServiceGrpcAsyncIOTransport - -__all__ = ( - 'MigrationServiceTransport', - 'MigrationServiceGrpcTransport', - 'MigrationServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/base.py deleted file mode 100644 index 054992b551..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/base.py +++ /dev/null @@ -1,189 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1.types import migration_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class MigrationServiceTransport(abc.ABC): - """Abstract transport class for MigrationService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. 
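        # In summary, the branches below resolve credentials in this order:
        #   1. an explicit `credentials` object (mutually exclusive with
        #      `credentials_file`),
        #   2. `credentials_file`, loaded via google.auth.load_credentials_from_file(),
        #   3. application default credentials via google.auth.default().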
- if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # TODO(busunkim): This method is in the base transport - # to avoid duplicating code across the transport classes. These functions - # should be deleted once the minimum required version of google-auth is increased. - - # TODO: Remove this function once google-auth >= 1.25.0 is required - @classmethod - def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: - """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" - - scopes_kwargs = {} - - if _GOOGLE_AUTH_VERSION and ( - packaging.version.parse(_GOOGLE_AUTH_VERSION) - >= packaging.version.parse("1.25.0") - ): - scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} - else: - scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} - - return scopes_kwargs - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.search_migratable_resources: gapic_v1.method.wrap_method( - self.search_migratable_resources, - default_timeout=None, - client_info=client_info, - ), - self.batch_migrate_resources: gapic_v1.method.wrap_method( - self.batch_migrate_resources, - default_timeout=None, - client_info=client_info, - ), - } - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def search_migratable_resources(self) -> Callable[ - [migration_service.SearchMigratableResourcesRequest], - Union[ - migration_service.SearchMigratableResourcesResponse, - Awaitable[migration_service.SearchMigratableResourcesResponse] - ]]: - raise NotImplementedError() - - @property - def batch_migrate_resources(self) -> Callable[ - [migration_service.BatchMigrateResourcesRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'MigrationServiceTransport', -)
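As an aside on the abstract base class above: concrete transports call _prep_wrapped_messages() during construction, after which each RPC is reachable through _wrapped_methods with the retry/timeout policy applied. A minimal sketch, assuming application default credentials are available (values are hypothetical):

    transport = MigrationServiceGrpcTransport(host="aiplatform.googleapis.com")
    wrapped = transport._wrapped_methods[transport.search_migratable_resources]
    # `wrapped` invokes the gRPC stub with the default retry, timeout and
    # client-info metadata configured in _prep_wrapped_messages().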
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py deleted file mode 100644 index f1eaa10238..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py +++ /dev/null @@ -1,303 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.api_core import gapic_v1 # type: ignore -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1.types import migration_service -from google.longrunning import operations_pb2 # type: ignore -from .base import MigrationServiceTransport, DEFAULT_CLIENT_INFO - - -class MigrationServiceGrpcTransport(MigrationServiceTransport): - """gRPC backend transport for MigrationService. - - A service that migrates resources from automl.googleapis.com, - datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format.
It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. 
- Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): An optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def search_migratable_resources(self) -> Callable[ - [migration_service.SearchMigratableResourcesRequest], - migration_service.SearchMigratableResourcesResponse]: - r"""Return a callable for the search migratable resources method over gRPC. - - Searches all of the resources in - automl.googleapis.com, datalabeling.googleapis.com and - ml.googleapis.com that can be migrated to Vertex AI's - given location. - - Returns: - Callable[[~.SearchMigratableResourcesRequest], - ~.SearchMigratableResourcesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'search_migratable_resources' not in self._stubs: - self._stubs['search_migratable_resources'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MigrationService/SearchMigratableResources', - request_serializer=migration_service.SearchMigratableResourcesRequest.serialize, - response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize, - ) - return self._stubs['search_migratable_resources'] - - @property - def batch_migrate_resources(self) -> Callable[ - [migration_service.BatchMigrateResourcesRequest], - operations_pb2.Operation]: - r"""Return a callable for the batch migrate resources method over gRPC. - - Batch migrates resources from ml.googleapis.com, - automl.googleapis.com, and datalabeling.googleapis.com - to Vertex AI.
- - Returns: - Callable[[~.BatchMigrateResourcesRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'batch_migrate_resources' not in self._stubs: - self._stubs['batch_migrate_resources'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MigrationService/BatchMigrateResources', - request_serializer=migration_service.BatchMigrateResourcesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['batch_migrate_resources'] - - -__all__ = ( - 'MigrationServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py deleted file mode 100644 index 15acd530c6..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,307 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1.types import migration_service -from google.longrunning import operations_pb2 # type: ignore -from .base import MigrationServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import MigrationServiceGrpcTransport - - -class MigrationServiceGrpcAsyncIOTransport(MigrationServiceTransport): - """gRPC AsyncIO backend transport for MigrationService. - - A service that migrates resources from automl.googleapis.com, - datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. 
- Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): An optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): An optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """
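        # As the branches below show: when an explicit `channel` is supplied,
        # the credential arguments are ignored and no mTLS set-up is performed;
        # otherwise a channel is created from the resolved credentials and,
        # if configured, client certificates.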
- self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one.
- if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def search_migratable_resources(self) -> Callable[ - [migration_service.SearchMigratableResourcesRequest], - Awaitable[migration_service.SearchMigratableResourcesResponse]]: - r"""Return a callable for the search migratable resources method over gRPC. - - Searches all of the resources in - automl.googleapis.com, datalabeling.googleapis.com and - ml.googleapis.com that can be migrated to Vertex AI's - given location. - - Returns: - Callable[[~.SearchMigratableResourcesRequest], - Awaitable[~.SearchMigratableResourcesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'search_migratable_resources' not in self._stubs: - self._stubs['search_migratable_resources'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MigrationService/SearchMigratableResources', - request_serializer=migration_service.SearchMigratableResourcesRequest.serialize, - response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize, - ) - return self._stubs['search_migratable_resources'] - - @property - def batch_migrate_resources(self) -> Callable[ - [migration_service.BatchMigrateResourcesRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the batch migrate resources method over gRPC. - - Batch migrates resources from ml.googleapis.com, - automl.googleapis.com, and datalabeling.googleapis.com - to Vertex AI. - - Returns: - Callable[[~.BatchMigrateResourcesRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'batch_migrate_resources' not in self._stubs: - self._stubs['batch_migrate_resources'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MigrationService/BatchMigrateResources', - request_serializer=migration_service.BatchMigrateResourcesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['batch_migrate_resources'] - - -__all__ = ( - 'MigrationServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/__init__.py deleted file mode 100644 index 5c4d570d15..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import ModelServiceClient -from .async_client import ModelServiceAsyncClient - -__all__ = ( - 'ModelServiceClient', - 'ModelServiceAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/async_client.py deleted file mode 100644 index f4ca7cf4c8..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/async_client.py +++ /dev/null @@ -1,1059 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.model_service import pagers -from google.cloud.aiplatform_v1.types import deployed_model_ref -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import model -from google.cloud.aiplatform_v1.types import model as gca_model -from google.cloud.aiplatform_v1.types import model_evaluation -from google.cloud.aiplatform_v1.types import model_evaluation_slice -from google.cloud.aiplatform_v1.types import model_service -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import ModelServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport -from .client import ModelServiceClient - - -class ModelServiceAsyncClient: - """A service for managing Vertex AI's machine learning Models.""" - - _client: ModelServiceClient - - DEFAULT_ENDPOINT = ModelServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = ModelServiceClient.DEFAULT_MTLS_ENDPOINT - - endpoint_path = staticmethod(ModelServiceClient.endpoint_path) - parse_endpoint_path = staticmethod(ModelServiceClient.parse_endpoint_path) - model_path = staticmethod(ModelServiceClient.model_path) - parse_model_path = staticmethod(ModelServiceClient.parse_model_path) - model_evaluation_path = staticmethod(ModelServiceClient.model_evaluation_path) - parse_model_evaluation_path = 
staticmethod(ModelServiceClient.parse_model_evaluation_path) - model_evaluation_slice_path = staticmethod(ModelServiceClient.model_evaluation_slice_path) - parse_model_evaluation_slice_path = staticmethod(ModelServiceClient.parse_model_evaluation_slice_path) - training_pipeline_path = staticmethod(ModelServiceClient.training_pipeline_path) - parse_training_pipeline_path = staticmethod(ModelServiceClient.parse_training_pipeline_path) - common_billing_account_path = staticmethod(ModelServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(ModelServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(ModelServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(ModelServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(ModelServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(ModelServiceClient.parse_common_organization_path) - common_project_path = staticmethod(ModelServiceClient.common_project_path) - parse_common_project_path = staticmethod(ModelServiceClient.parse_common_project_path) - common_location_path = staticmethod(ModelServiceClient.common_location_path) - parse_common_location_path = staticmethod(ModelServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ModelServiceAsyncClient: The constructed client. - """ - return ModelServiceClient.from_service_account_info.__func__(ModelServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ModelServiceAsyncClient: The constructed client. - """ - return ModelServiceClient.from_service_account_file.__func__(ModelServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> ModelServiceTransport: - """Returns the transport used by the client instance. - - Returns: - ModelServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(ModelServiceClient).get_transport_class, type(ModelServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, ModelServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the model service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.ModelServiceTransport]): The - transport to use. 
If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = ModelServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def upload_model(self, - request: model_service.UploadModelRequest = None, - *, - parent: str = None, - model: gca_model.Model = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Uploads a Model artifact into Vertex AI. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.UploadModelRequest`): - The request object. Request message for - [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel]. - parent (:class:`str`): - Required. The resource name of the Location into which - to upload the Model. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - model (:class:`google.cloud.aiplatform_v1.types.Model`): - Required. The Model to create. - This corresponds to the ``model`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.UploadModelResponse` - Response message of - [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel] - operation. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, model]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.UploadModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
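# A sketch of driving the flattened-argument upload_model flow handled here.
# Illustrative only; the parent and display name are placeholders and default
# application credentials are assumed.
from google.cloud.aiplatform_v1 import ModelServiceAsyncClient
from google.cloud.aiplatform_v1.types import Model

async def upload_model_example() -> None:
    client = ModelServiceAsyncClient()
    operation = await client.upload_model(
        parent="projects/my-project/locations/us-central1",
        model=Model(display_name="my-model"),
    )
    response = await operation.result()  # resolves to UploadModelResponse
    print(response.model)  # resource name of the uploaded Model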
- if parent is not None: - request.parent = parent - if model is not None: - request.model = model - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.upload_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - model_service.UploadModelResponse, - metadata_type=model_service.UploadModelOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_model(self, - request: model_service.GetModelRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model.Model: - r"""Gets a Model. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.GetModelRequest`): - The request object. Request message for - [ModelService.GetModel][google.cloud.aiplatform.v1.ModelService.GetModel]. - name (:class:`str`): - Required. The name of the Model resource. Format: - ``projects/{project}/locations/{location}/models/{model}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Model: - A trained machine learning Model. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.GetModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_models(self, - request: model_service.ListModelsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelsAsyncPager: - r"""Lists Models in a Location. 
- - Args: - request (:class:`google.cloud.aiplatform_v1.types.ListModelsRequest`): - The request object. Request message for - [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels]. - parent (:class:`str`): - Required. The resource name of the Location to list the - Models from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.model_service.pagers.ListModelsAsyncPager: - Response message for - [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.ListModelsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_models, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListModelsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_model(self, - request: model_service.UpdateModelRequest = None, - *, - model: gca_model.Model = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_model.Model: - r"""Updates a Model. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.UpdateModelRequest`): - The request object. Request message for - [ModelService.UpdateModel][google.cloud.aiplatform.v1.ModelService.UpdateModel]. - model (:class:`google.cloud.aiplatform_v1.types.Model`): - Required. The Model which replaces - the resource on the server. - - This corresponds to the ``model`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The update mask applies to the resource. For - the ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. 
- - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Model: - A trained machine learning Model. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([model, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.UpdateModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if model is not None: - request.model = model - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("model.name", request.model.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_model(self, - request: model_service.DeleteModelRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a Model. - Note: Model can only be deleted if there are no - DeployedModels created from it. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.DeleteModelRequest`): - The request object. Request message for - [ModelService.DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel]. - name (:class:`str`): - Required. The name of the Model resource to be deleted. - Format: - ``projects/{project}/locations/{location}/models/{model}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. 
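# A sketch of the delete flow implemented below. Illustrative only; per the
# docstring above, deletion is rejected while any DeployedModel still
# references the Model.
from google.cloud.aiplatform_v1 import ModelServiceAsyncClient

async def delete_model_example(name: str) -> None:
    client = ModelServiceAsyncClient()
    operation = await client.delete_model(name=name)
    await operation.result()  # resolves to google.protobuf.Empty on success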
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.DeleteModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def export_model(self, - request: model_service.ExportModelRequest = None, - *, - name: str = None, - output_config: model_service.ExportModelRequest.OutputConfig = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Exports a trained, exportable, Model to a location specified by - the user. A Model is considered to be exportable if it has at - least one [supported export - format][google.cloud.aiplatform.v1.Model.supported_export_formats]. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.ExportModelRequest`): - The request object. Request message for - [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel]. - name (:class:`str`): - Required. The resource name of the Model to export. - Format: - ``projects/{project}/locations/{location}/models/{model}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - output_config (:class:`google.cloud.aiplatform_v1.types.ExportModelRequest.OutputConfig`): - Required. The desired output location - and configuration. - - This corresponds to the ``output_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.ExportModelResponse` - Response message of - [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel] - operation. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
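# A sketch of calling export_model with the flattened arguments validated
# below. Illustrative only; the export format id here is an assumption and
# must be one of the Model's supported export formats.
from google.cloud.aiplatform_v1 import ModelServiceAsyncClient
from google.cloud.aiplatform_v1.types import ExportModelRequest

async def export_model_example(name: str) -> None:
    client = ModelServiceAsyncClient()
    config = ExportModelRequest.OutputConfig(
        export_format_id="tf-saved-model",  # assumed id; model-dependent
    )
    operation = await client.export_model(name=name, output_config=config)
    await operation.result()  # resolves to ExportModelResponse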
- has_flattened_params = any([name, output_config]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.ExportModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if output_config is not None: - request.output_config = output_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.export_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - model_service.ExportModelResponse, - metadata_type=model_service.ExportModelOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_model_evaluation(self, - request: model_service.GetModelEvaluationRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model_evaluation.ModelEvaluation: - r"""Gets a ModelEvaluation. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.GetModelEvaluationRequest`): - The request object. Request message for - [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1.ModelService.GetModelEvaluation]. - name (:class:`str`): - Required. The name of the ModelEvaluation resource. - Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.ModelEvaluation: - A collection of metrics calculated by - comparing Model's predictions on all of - the test data against annotations from - the test data. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.GetModelEvaluationRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
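# A sketch pairing get_model_evaluation with the generated path helper.
# Illustrative only; every path segment below is a placeholder.
from google.cloud.aiplatform_v1 import ModelServiceAsyncClient

async def get_evaluation_example() -> None:
    client = ModelServiceAsyncClient()
    name = ModelServiceAsyncClient.model_evaluation_path(
        "my-project", "us-central1", "123", "456"
    )
    evaluation = await client.get_model_evaluation(name=name)
    print(evaluation.metrics)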
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_model_evaluation, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_model_evaluations(self, - request: model_service.ListModelEvaluationsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelEvaluationsAsyncPager: - r"""Lists ModelEvaluations in a Model. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest`): - The request object. Request message for - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]. - parent (:class:`str`): - Required. The resource name of the Model to list the - ModelEvaluations from. Format: - ``projects/{project}/locations/{location}/models/{model}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationsAsyncPager: - Response message for - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.ListModelEvaluationsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_model_evaluations, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListModelEvaluationsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
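# A sketch of consuming the async pager constructed just above. Illustrative
# only; the parent Model name is a placeholder.
from google.cloud.aiplatform_v1 import ModelServiceAsyncClient

async def list_evaluations_example(model_name: str) -> None:
    client = ModelServiceAsyncClient()
    pager = await client.list_model_evaluations(parent=model_name)
    async for evaluation in pager:  # additional pages resolve automatically
        print(evaluation.name)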
- return response - - async def get_model_evaluation_slice(self, - request: model_service.GetModelEvaluationSliceRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model_evaluation_slice.ModelEvaluationSlice: - r"""Gets a ModelEvaluationSlice. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.GetModelEvaluationSliceRequest`): - The request object. Request message for - [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1.ModelService.GetModelEvaluationSlice]. - name (:class:`str`): - Required. The name of the ModelEvaluationSlice resource. - Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.ModelEvaluationSlice: - A collection of metrics calculated by - comparing Model's predictions on a slice - of the test data against ground truth - annotations. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.GetModelEvaluationSliceRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_model_evaluation_slice, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_model_evaluation_slices(self, - request: model_service.ListModelEvaluationSlicesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelEvaluationSlicesAsyncPager: - r"""Lists ModelEvaluationSlices in a ModelEvaluation. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest`): - The request object. Request message for - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]. - parent (:class:`str`): - Required. The resource name of the ModelEvaluation to - list the ModelEvaluationSlices from. 
Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationSlicesAsyncPager: - Response message for - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.ListModelEvaluationSlicesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_model_evaluation_slices, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListModelEvaluationSlicesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "ModelServiceAsyncClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/client.py deleted file mode 100644 index 80401b2ec2..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/client.py +++ /dev/null @@ -1,1282 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
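# A sketch contrasting the synchronous client defined in this file with the
# async client above. Illustrative only; the parent value is a placeholder
# and default application credentials are assumed.
from google.cloud.aiplatform_v1 import ModelServiceClient

def list_models_example() -> None:
    client = ModelServiceClient()  # blocking calls over a gRPC transport
    parent = "projects/my-project/locations/us-central1"
    for model in client.list_models(parent=parent):  # plain iteration, no await
        print(model.display_name)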
-# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from distutils import util -import os -import re -from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.model_service import pagers -from google.cloud.aiplatform_v1.types import deployed_model_ref -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import model -from google.cloud.aiplatform_v1.types import model as gca_model -from google.cloud.aiplatform_v1.types import model_evaluation -from google.cloud.aiplatform_v1.types import model_evaluation_slice -from google.cloud.aiplatform_v1.types import model_service -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import ModelServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import ModelServiceGrpcTransport -from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport - - -class ModelServiceClientMeta(type): - """Metaclass for the ModelService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[ModelServiceTransport]] - _transport_registry["grpc"] = ModelServiceGrpcTransport - _transport_registry["grpc_asyncio"] = ModelServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[ModelServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class ModelServiceClient(metaclass=ModelServiceClientMeta): - """A service for managing Vertex AI's machine learning Models.""" - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. 
-        Returns:
-            str: converted mTLS api endpoint.
-        """
-        if not api_endpoint:
-            return api_endpoint
-
-        mtls_endpoint_re = re.compile(
-            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
-        )
-
-        m = mtls_endpoint_re.match(api_endpoint)
-        name, mtls, sandbox, googledomain = m.groups()
-        if mtls or not googledomain:
-            return api_endpoint
-
-        if sandbox:
-            return api_endpoint.replace(
-                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
-            )
-
-        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
-
-    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
-    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
-        DEFAULT_ENDPOINT
-    )
-
-    @classmethod
-    def from_service_account_info(cls, info: dict, *args, **kwargs):
-        """Creates an instance of this client using the provided credentials
-            info.
-
-        Args:
-            info (dict): The service account private key info.
-            args: Additional arguments to pass to the constructor.
-            kwargs: Additional arguments to pass to the constructor.
-
-        Returns:
-            ModelServiceClient: The constructed client.
-        """
-        credentials = service_account.Credentials.from_service_account_info(info)
-        kwargs["credentials"] = credentials
-        return cls(*args, **kwargs)
-
-    @classmethod
-    def from_service_account_file(cls, filename: str, *args, **kwargs):
-        """Creates an instance of this client using the provided credentials
-            file.
-
-        Args:
-            filename (str): The path to the service account private key json
-                file.
-            args: Additional arguments to pass to the constructor.
-            kwargs: Additional arguments to pass to the constructor.
-
-        Returns:
-            ModelServiceClient: The constructed client.
-        """
-        credentials = service_account.Credentials.from_service_account_file(
-            filename)
-        kwargs["credentials"] = credentials
-        return cls(*args, **kwargs)
-
-    from_service_account_json = from_service_account_file
-
-    @property
-    def transport(self) -> ModelServiceTransport:
-        """Returns the transport used by the client instance.
-
-        Returns:
-            ModelServiceTransport: The transport used by the client
-                instance.
- """ - return self._transport - - @staticmethod - def endpoint_path(project: str,location: str,endpoint: str,) -> str: - """Returns a fully-qualified endpoint string.""" - return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - - @staticmethod - def parse_endpoint_path(path: str) -> Dict[str,str]: - """Parses a endpoint path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_path(project: str,location: str,model: str,) -> str: - """Returns a fully-qualified model string.""" - return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - - @staticmethod - def parse_model_path(path: str) -> Dict[str,str]: - """Parses a model path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_evaluation_path(project: str,location: str,model: str,evaluation: str,) -> str: - """Returns a fully-qualified model_evaluation string.""" - return "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}".format(project=project, location=location, model=model, evaluation=evaluation, ) - - @staticmethod - def parse_model_evaluation_path(path: str) -> Dict[str,str]: - """Parses a model_evaluation path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)/evaluations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_evaluation_slice_path(project: str,location: str,model: str,evaluation: str,slice: str,) -> str: - """Returns a fully-qualified model_evaluation_slice string.""" - return "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}".format(project=project, location=location, model=model, evaluation=evaluation, slice=slice, ) - - @staticmethod - def parse_model_evaluation_slice_path(path: str) -> Dict[str,str]: - """Parses a model_evaluation_slice path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)/evaluations/(?P.+?)/slices/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def training_pipeline_path(project: str,location: str,training_pipeline: str,) -> str: - """Returns a fully-qualified training_pipeline string.""" - return "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, ) - - @staticmethod - def parse_training_pipeline_path(path: str) -> Dict[str,str]: - """Parses a training_pipeline path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/trainingPipelines/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified 
folder string."""
-        return "folders/{folder}".format(folder=folder, )
-
-    @staticmethod
-    def parse_common_folder_path(path: str) -> Dict[str,str]:
-        """Parse a folder path into its component segments."""
-        m = re.match(r"^folders/(?P<folder>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_organization_path(organization: str, ) -> str:
-        """Returns a fully-qualified organization string."""
-        return "organizations/{organization}".format(organization=organization, )
-
-    @staticmethod
-    def parse_common_organization_path(path: str) -> Dict[str,str]:
-        """Parse a organization path into its component segments."""
-        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_project_path(project: str, ) -> str:
-        """Returns a fully-qualified project string."""
-        return "projects/{project}".format(project=project, )
-
-    @staticmethod
-    def parse_common_project_path(path: str) -> Dict[str,str]:
-        """Parse a project path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_location_path(project: str, location: str, ) -> str:
-        """Returns a fully-qualified location string."""
-        return "projects/{project}/locations/{location}".format(project=project, location=location, )
-
-    @staticmethod
-    def parse_common_location_path(path: str) -> Dict[str,str]:
-        """Parse a location path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    def __init__(self, *,
-            credentials: Optional[ga_credentials.Credentials] = None,
-            transport: Union[str, ModelServiceTransport, None] = None,
-            client_options: Optional[client_options_lib.ClientOptions] = None,
-            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
-            ) -> None:
-        """Instantiates the model service client.
-
-        Args:
-            credentials (Optional[google.auth.credentials.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify the application to the service; if none
-                are specified, the client will attempt to ascertain the
-                credentials from the environment.
-            transport (Union[str, ModelServiceTransport]): The
-                transport to use. If set to None, a transport is chosen
-                automatically.
-            client_options (google.api_core.client_options.ClientOptions): Custom options for the
-                client. It won't take effect if a ``transport`` instance is provided.
-                (1) The ``api_endpoint`` property can be used to override the
-                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
-                environment variable can also be used to override the endpoint:
-                "always" (always use the default mTLS endpoint), "never" (always
-                use the default regular endpoint) and "auto" (auto switch to the
-                default mTLS endpoint if client certificate is present, this is
-                the default value). However, the ``api_endpoint`` property takes
-                precedence if provided.
-                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
-                is "true", then the ``client_cert_source`` property can be used
-                to provide client certificate for mutual TLS transport. If
-                not provided, the default SSL client certificate will be used if
-                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
-                set, no client certificate will be used.
-            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
-                The client info used to send a user-agent string along with
-                API requests.
If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, ModelServiceTransport): - # transport is a ModelServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - ) - - def upload_model(self, - request: model_service.UploadModelRequest = None, - *, - parent: str = None, - model: gca_model.Model = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Uploads a Model artifact into Vertex AI. - - Args: - request (google.cloud.aiplatform_v1.types.UploadModelRequest): - The request object. Request message for - [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel]. - parent (str): - Required. The resource name of the Location into which - to upload the Model. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - model (google.cloud.aiplatform_v1.types.Model): - Required. The Model to create. 
- This corresponds to the ``model`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.UploadModelResponse` - Response message of - [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel] - operation. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, model]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.UploadModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.UploadModelRequest): - request = model_service.UploadModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if model is not None: - request.model = model - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.upload_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - model_service.UploadModelResponse, - metadata_type=model_service.UploadModelOperationMetadata, - ) - - # Done; return the response. - return response - - def get_model(self, - request: model_service.GetModelRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model.Model: - r"""Gets a Model. - - Args: - request (google.cloud.aiplatform_v1.types.GetModelRequest): - The request object. Request message for - [ModelService.GetModel][google.cloud.aiplatform.v1.ModelService.GetModel]. - name (str): - Required. The name of the Model resource. Format: - ``projects/{project}/locations/{location}/models/{model}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Model: - A trained machine learning Model. - """ - # Create or coerce a protobuf request object. 
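# A sketch of the flattened get_model call handled below, combining the
# from_service_account_file constructor and model_path helper defined earlier
# in this file. Illustrative only; the key file path and path segments are
# placeholders.
from google.cloud.aiplatform_v1 import ModelServiceClient

def get_model_example() -> None:
    client = ModelServiceClient.from_service_account_file("service-account.json")
    name = ModelServiceClient.model_path("my-project", "us-central1", "123")
    model = client.get_model(name=name)
    print(model.display_name)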
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.GetModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.GetModelRequest): - request = model_service.GetModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_models(self, - request: model_service.ListModelsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelsPager: - r"""Lists Models in a Location. - - Args: - request (google.cloud.aiplatform_v1.types.ListModelsRequest): - The request object. Request message for - [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels]. - parent (str): - Required. The resource name of the Location to list the - Models from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.model_service.pagers.ListModelsPager: - Response message for - [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.ListModelsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.ListModelsRequest): - request = model_service.ListModelsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
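# A sketch of the request-object form that the isinstance check above
# accepts. Illustrative only; all values are placeholders.
from google.cloud.aiplatform_v1 import ModelServiceClient
from google.cloud.aiplatform_v1.types import ListModelsRequest

def list_models_request_example() -> None:
    client = ModelServiceClient()
    request = ListModelsRequest(
        parent="projects/my-project/locations/us-central1",
        page_size=5,  # the pager still resolves further pages transparently
    )
    for model in client.list_models(request=request):
        print(model.name)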
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_models] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListModelsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_model(self, - request: model_service.UpdateModelRequest = None, - *, - model: gca_model.Model = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_model.Model: - r"""Updates a Model. - - Args: - request (google.cloud.aiplatform_v1.types.UpdateModelRequest): - The request object. Request message for - [ModelService.UpdateModel][google.cloud.aiplatform.v1.ModelService.UpdateModel]. - model (google.cloud.aiplatform_v1.types.Model): - Required. The Model which replaces - the resource on the server. - - This corresponds to the ``model`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the resource. For - the ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Model: - A trained machine learning Model. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([model, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.UpdateModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.UpdateModelRequest): - request = model_service.UpdateModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if model is not None: - request.model = model - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_model] - - # Certain fields should be provided within the metadata header; - # add these here. 
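The sanity check repeated in each method above makes the request object and the flattened fields mutually exclusive; passing both raises ValueError. The two spellings below are equivalent (the parent is a placeholder):

from google.cloud import aiplatform_v1
from google.cloud.aiplatform_v1.types import model_service

client = aiplatform_v1.ModelServiceClient()
parent = "projects/my-project/locations/us-central1"  # placeholder

pager = client.list_models(parent=parent)  # flattened field
pager = client.list_models(
    request=model_service.ListModelsRequest(parent=parent, page_size=50)
)  # full request object
for m in pager:  # the pager resolves further pages transparently
    print(m.name)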
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("model.name", request.model.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_model(self, - request: model_service.DeleteModelRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a Model. - Note: Model can only be deleted if there are no - DeployedModels created from it. - - Args: - request (google.cloud.aiplatform_v1.types.DeleteModelRequest): - The request object. Request message for - [ModelService.DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel]. - name (str): - Required. The name of the Model resource to be deleted. - Format: - ``projects/{project}/locations/{location}/models/{model}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.DeleteModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.DeleteModelRequest): - request = model_service.DeleteModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
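update_model applies only the field paths named in update_mask, and delete_model above returns an operation that resolves to Empty. A sketch of a masked rename followed by deletion, with placeholder names:

from google.cloud import aiplatform_v1
from google.protobuf import field_mask_pb2

client = aiplatform_v1.ModelServiceClient()
model = client.get_model(
    name="projects/my-project/locations/us-central1/models/1234567890"  # placeholder
)
model.display_name = "renamed-model"
updated = client.update_model(
    model=model,
    update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),  # only this field changes
)
client.delete_model(name=updated.name).result()  # waits for deletion to finish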
- return response - - def export_model(self, - request: model_service.ExportModelRequest = None, - *, - name: str = None, - output_config: model_service.ExportModelRequest.OutputConfig = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Exports a trained, exportable, Model to a location specified by - the user. A Model is considered to be exportable if it has at - least one [supported export - format][google.cloud.aiplatform.v1.Model.supported_export_formats]. - - Args: - request (google.cloud.aiplatform_v1.types.ExportModelRequest): - The request object. Request message for - [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel]. - name (str): - Required. The resource name of the Model to export. - Format: - ``projects/{project}/locations/{location}/models/{model}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - output_config (google.cloud.aiplatform_v1.types.ExportModelRequest.OutputConfig): - Required. The desired output location - and configuration. - - This corresponds to the ``output_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.ExportModelResponse` - Response message of - [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel] - operation. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, output_config]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.ExportModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.ExportModelRequest): - request = model_service.ExportModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if output_config is not None: - request.output_config = output_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.export_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
- response = gac_operation.from_gapic( - response, - self._transport.operations_client, - model_service.ExportModelResponse, - metadata_type=model_service.ExportModelOperationMetadata, - ) - - # Done; return the response. - return response - - def get_model_evaluation(self, - request: model_service.GetModelEvaluationRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model_evaluation.ModelEvaluation: - r"""Gets a ModelEvaluation. - - Args: - request (google.cloud.aiplatform_v1.types.GetModelEvaluationRequest): - The request object. Request message for - [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1.ModelService.GetModelEvaluation]. - name (str): - Required. The name of the ModelEvaluation resource. - Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.ModelEvaluation: - A collection of metrics calculated by - comparing Model's predictions on all of - the test data against annotations from - the test data. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.GetModelEvaluationRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.GetModelEvaluationRequest): - request = model_service.GetModelEvaluationRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_model_evaluation] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_model_evaluations(self, - request: model_service.ListModelEvaluationsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelEvaluationsPager: - r"""Lists ModelEvaluations in a Model. - - Args: - request (google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest): - The request object. Request message for - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]. - parent (str): - Required. 
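export_model above is likewise long-running; its result() resolves to an ExportModelResponse. A sketch exporting artifacts to Cloud Storage; the export format id, bucket, and resource names are placeholders, and the format must be one of the Model's supported_export_formats:

from google.cloud import aiplatform_v1

client = aiplatform_v1.ModelServiceClient()
output_config = aiplatform_v1.ExportModelRequest.OutputConfig(
    export_format_id="custom-trained",  # assumed format id
    artifact_destination=aiplatform_v1.GcsDestination(
        output_uri_prefix="gs://my-bucket/exports/"  # placeholder
    ),
)
operation = client.export_model(
    name="projects/my-project/locations/us-central1/models/1234567890",  # placeholder
    output_config=output_config,
)
operation.result()  # blocks until the export completes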
The resource name of the Model to list the - ModelEvaluations from. Format: - ``projects/{project}/locations/{location}/models/{model}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationsPager: - Response message for - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.ListModelEvaluationsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.ListModelEvaluationsRequest): - request = model_service.ListModelEvaluationsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_model_evaluations] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListModelEvaluationsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_model_evaluation_slice(self, - request: model_service.GetModelEvaluationSliceRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model_evaluation_slice.ModelEvaluationSlice: - r"""Gets a ModelEvaluationSlice. - - Args: - request (google.cloud.aiplatform_v1.types.GetModelEvaluationSliceRequest): - The request object. Request message for - [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1.ModelService.GetModelEvaluationSlice]. - name (str): - Required. The name of the ModelEvaluationSlice resource. - Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.ModelEvaluationSlice: - A collection of metrics calculated by - comparing Model's predictions on a slice - of the test data against ground truth - annotations. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.GetModelEvaluationSliceRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.GetModelEvaluationSliceRequest): - request = model_service.GetModelEvaluationSliceRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_model_evaluation_slice] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_model_evaluation_slices(self, - request: model_service.ListModelEvaluationSlicesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelEvaluationSlicesPager: - r"""Lists ModelEvaluationSlices in a ModelEvaluation. - - Args: - request (google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest): - The request object. Request message for - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]. - parent (str): - Required. The resource name of the ModelEvaluation to - list the ModelEvaluationSlices from. Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationSlicesPager: - Response message for - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
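The evaluation methods compose naturally: list a Model's ModelEvaluations, then the ModelEvaluationSlices under each. A sketch with placeholder names:

from google.cloud import aiplatform_v1

client = aiplatform_v1.ModelServiceClient()
model_name = "projects/my-project/locations/us-central1/models/1234567890"  # placeholder

for evaluation in client.list_model_evaluations(parent=model_name):
    print(evaluation.name, evaluation.metrics_schema_uri)
    for sl in client.list_model_evaluation_slices(parent=evaluation.name):
        print("  slice:", sl.slice.dimension, sl.slice.value)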
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.ListModelEvaluationSlicesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.ListModelEvaluationSlicesRequest): - request = model_service.ListModelEvaluationSlicesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_model_evaluation_slices] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListModelEvaluationSlicesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "ModelServiceClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/pagers.py deleted file mode 100644 index 908b0fe9ac..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/pagers.py +++ /dev/null @@ -1,387 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.cloud.aiplatform_v1.types import model -from google.cloud.aiplatform_v1.types import model_evaluation -from google.cloud.aiplatform_v1.types import model_evaluation_slice -from google.cloud.aiplatform_v1.types import model_service - - -class ListModelsPager: - """A pager for iterating through ``list_models`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListModelsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``models`` field. 
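Besides plain iteration, the pager exposes the raw responses through its pages property, which is useful when per-page details such as next_page_token matter. A sketch with a placeholder parent:

from google.cloud import aiplatform_v1

client = aiplatform_v1.ModelServiceClient()
pager = client.list_models(
    parent="projects/my-project/locations/us-central1"  # placeholder
)
for page in pager.pages:  # one ListModelsResponse per iteration
    print(len(page.models), "models on this page")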
- - If there are more pages, the ``__iter__`` method will make additional - ``ListModels`` requests and continue to iterate - through the ``models`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListModelsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., model_service.ListModelsResponse], - request: model_service.ListModelsRequest, - response: model_service.ListModelsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListModelsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListModelsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = model_service.ListModelsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[model_service.ListModelsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[model.Model]: - for page in self.pages: - yield from page.models - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelsAsyncPager: - """A pager for iterating through ``list_models`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListModelsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``models`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListModels`` requests and continue to iterate - through the ``models`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListModelsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[model_service.ListModelsResponse]], - request: model_service.ListModelsRequest, - response: model_service.ListModelsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListModelsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListModelsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = model_service.ListModelsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[model_service.ListModelsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[model.Model]: - async def async_generator(): - async for page in self.pages: - for response in page.models: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelEvaluationsPager: - """A pager for iterating through ``list_model_evaluations`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``model_evaluations`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListModelEvaluations`` requests and continue to iterate - through the ``model_evaluations`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., model_service.ListModelEvaluationsResponse], - request: model_service.ListModelEvaluationsRequest, - response: model_service.ListModelEvaluationsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = model_service.ListModelEvaluationsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[model_service.ListModelEvaluationsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[model_evaluation.ModelEvaluation]: - for page in self.pages: - yield from page.model_evaluations - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelEvaluationsAsyncPager: - """A pager for iterating through ``list_model_evaluations`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``model_evaluations`` field. 
- - If there are more pages, the ``__aiter__`` method will make additional - ``ListModelEvaluations`` requests and continue to iterate - through the ``model_evaluations`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[model_service.ListModelEvaluationsResponse]], - request: model_service.ListModelEvaluationsRequest, - response: model_service.ListModelEvaluationsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = model_service.ListModelEvaluationsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[model_service.ListModelEvaluationsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[model_evaluation.ModelEvaluation]: - async def async_generator(): - async for page in self.pages: - for response in page.model_evaluations: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelEvaluationSlicesPager: - """A pager for iterating through ``list_model_evaluation_slices`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``model_evaluation_slices`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListModelEvaluationSlices`` requests and continue to iterate - through the ``model_evaluation_slices`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., model_service.ListModelEvaluationSlicesResponse], - request: model_service.ListModelEvaluationSlicesRequest, - response: model_service.ListModelEvaluationSlicesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse): - The initial response object. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = model_service.ListModelEvaluationSlicesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[model_service.ListModelEvaluationSlicesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[model_evaluation_slice.ModelEvaluationSlice]: - for page in self.pages: - yield from page.model_evaluation_slices - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelEvaluationSlicesAsyncPager: - """A pager for iterating through ``list_model_evaluation_slices`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``model_evaluation_slices`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListModelEvaluationSlices`` requests and continue to iterate - through the ``model_evaluation_slices`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[model_service.ListModelEvaluationSlicesResponse]], - request: model_service.ListModelEvaluationSlicesRequest, - response: model_service.ListModelEvaluationSlicesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = model_service.ListModelEvaluationSlicesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[model_service.ListModelEvaluationSlicesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[model_evaluation_slice.ModelEvaluationSlice]: - async def async_generator(): - async for page in self.pages: - for response in page.model_evaluation_slices: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/__init__.py deleted file mode 100644 index 0f09224d3c..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import ModelServiceTransport -from .grpc import ModelServiceGrpcTransport -from .grpc_asyncio import ModelServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[ModelServiceTransport]] -_transport_registry['grpc'] = ModelServiceGrpcTransport -_transport_registry['grpc_asyncio'] = ModelServiceGrpcAsyncIOTransport - -__all__ = ( - 'ModelServiceTransport', - 'ModelServiceGrpcTransport', - 'ModelServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/base.py deleted file mode 100644 index 96d0c92e92..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/base.py +++ /dev/null @@ -1,305 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1.types import model -from google.cloud.aiplatform_v1.types import model as gca_model -from google.cloud.aiplatform_v1.types import model_evaluation -from google.cloud.aiplatform_v1.types import model_evaluation_slice -from google.cloud.aiplatform_v1.types import model_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class ModelServiceTransport(abc.ABC): - """Abstract transport class for ModelService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. 
- self._scopes = scopes
-
- # If no credentials are provided, then determine the appropriate
- # defaults.
- if credentials and credentials_file:
- raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
-
- if credentials_file is not None:
- credentials, _ = google.auth.load_credentials_from_file(
- credentials_file,
- **scopes_kwargs,
- quota_project_id=quota_project_id
- )
-
- elif credentials is None:
- credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
-
- # If the credentials are service account credentials, then always try to use self signed JWT.
- if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
- credentials = credentials.with_always_use_jwt_access(True)
-
- # Save the credentials.
- self._credentials = credentials
-
- # TODO(busunkim): This method is in the base transport
- # to avoid duplicating code across the transport classes. These functions
- # should be deleted once the minimum required version of google-auth is increased.
-
- # TODO: Remove this function once google-auth >= 1.25.0 is required
- @classmethod
- def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
- """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
-
- scopes_kwargs = {}
-
- if _GOOGLE_AUTH_VERSION and (
- packaging.version.parse(_GOOGLE_AUTH_VERSION)
- >= packaging.version.parse("1.25.0")
- ):
- scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
- else:
- scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
-
- return scopes_kwargs
-
- def _prep_wrapped_messages(self, client_info):
- # Precompute the wrapped methods.
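As the checks above enforce, credentials and credentials_file are mutually exclusive; either can be handed to the client, which forwards it to the transport. A sketch using an explicit service-account key, with a placeholder path:

from google.cloud import aiplatform_v1
from google.oauth2 import service_account

creds = service_account.Credentials.from_service_account_file(
    "/path/to/key.json"  # placeholder
)
client = aiplatform_v1.ModelServiceClient(credentials=creds)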
- self._wrapped_methods = { - self.upload_model: gapic_v1.method.wrap_method( - self.upload_model, - default_timeout=5.0, - client_info=client_info, - ), - self.get_model: gapic_v1.method.wrap_method( - self.get_model, - default_timeout=5.0, - client_info=client_info, - ), - self.list_models: gapic_v1.method.wrap_method( - self.list_models, - default_timeout=5.0, - client_info=client_info, - ), - self.update_model: gapic_v1.method.wrap_method( - self.update_model, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_model: gapic_v1.method.wrap_method( - self.delete_model, - default_timeout=5.0, - client_info=client_info, - ), - self.export_model: gapic_v1.method.wrap_method( - self.export_model, - default_timeout=5.0, - client_info=client_info, - ), - self.get_model_evaluation: gapic_v1.method.wrap_method( - self.get_model_evaluation, - default_timeout=5.0, - client_info=client_info, - ), - self.list_model_evaluations: gapic_v1.method.wrap_method( - self.list_model_evaluations, - default_timeout=5.0, - client_info=client_info, - ), - self.get_model_evaluation_slice: gapic_v1.method.wrap_method( - self.get_model_evaluation_slice, - default_timeout=5.0, - client_info=client_info, - ), - self.list_model_evaluation_slices: gapic_v1.method.wrap_method( - self.list_model_evaluation_slices, - default_timeout=5.0, - client_info=client_info, - ), - } - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def upload_model(self) -> Callable[ - [model_service.UploadModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_model(self) -> Callable[ - [model_service.GetModelRequest], - Union[ - model.Model, - Awaitable[model.Model] - ]]: - raise NotImplementedError() - - @property - def list_models(self) -> Callable[ - [model_service.ListModelsRequest], - Union[ - model_service.ListModelsResponse, - Awaitable[model_service.ListModelsResponse] - ]]: - raise NotImplementedError() - - @property - def update_model(self) -> Callable[ - [model_service.UpdateModelRequest], - Union[ - gca_model.Model, - Awaitable[gca_model.Model] - ]]: - raise NotImplementedError() - - @property - def delete_model(self) -> Callable[ - [model_service.DeleteModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def export_model(self) -> Callable[ - [model_service.ExportModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_model_evaluation(self) -> Callable[ - [model_service.GetModelEvaluationRequest], - Union[ - model_evaluation.ModelEvaluation, - Awaitable[model_evaluation.ModelEvaluation] - ]]: - raise NotImplementedError() - - @property - def list_model_evaluations(self) -> Callable[ - [model_service.ListModelEvaluationsRequest], - Union[ - model_service.ListModelEvaluationsResponse, - Awaitable[model_service.ListModelEvaluationsResponse] - ]]: - raise NotImplementedError() - - @property - def get_model_evaluation_slice(self) -> Callable[ - [model_service.GetModelEvaluationSliceRequest], - Union[ - model_evaluation_slice.ModelEvaluationSlice, - Awaitable[model_evaluation_slice.ModelEvaluationSlice] - ]]: - raise NotImplementedError() - - @property - def list_model_evaluation_slices(self) -> Callable[ - 
[model_service.ListModelEvaluationSlicesRequest], - Union[ - model_service.ListModelEvaluationSlicesResponse, - Awaitable[model_service.ListModelEvaluationSlicesResponse] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'ModelServiceTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py deleted file mode 100644 index c3aa1b5a38..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py +++ /dev/null @@ -1,514 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.api_core import gapic_v1 # type: ignore -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1.types import model -from google.cloud.aiplatform_v1.types import model as gca_model -from google.cloud.aiplatform_v1.types import model_evaluation -from google.cloud.aiplatform_v1.types import model_evaluation_slice -from google.cloud.aiplatform_v1.types import model_service -from google.longrunning import operations_pb2 # type: ignore -from .base import ModelServiceTransport, DEFAULT_CLIENT_INFO - - -class ModelServiceGrpcTransport(ModelServiceTransport): - """gRPC backend transport for ModelService. - - A service for managing Vertex AI's machine learning Models. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These
- credentials identify the application to the service; if none
- are specified, the client will attempt to ascertain the
- credentials from the environment.
- This argument is ignored if ``channel`` is provided.
- credentials_file (Optional[str]): A file with credentials that can
- be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is ignored if ``channel`` is provided.
- scopes (Optional[Sequence[str]]): A list of scopes. This argument is
- ignored if ``channel`` is provided.
- channel (Optional[grpc.Channel]): A ``Channel`` instance through
- which to make calls.
- api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
- If provided, it overrides the ``host`` argument and tries to create
- a mutual TLS channel with client SSL credentials from
- ``client_cert_source`` or application default SSL credentials.
- client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
- Deprecated. A callback to provide client SSL certificate bytes and
- private key bytes, both in PEM format. It is ignored if
- ``api_mtls_endpoint`` is None.
- ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
- for grpc channel. It is ignored if ``channel`` is provided.
- client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
- A callback to provide client certificate bytes and private key bytes,
- both in PEM format. It is used to configure mutual TLS channel. It is
- ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
- quota_project_id (Optional[str]): An optional project to use for billing
- and quota.
- client_info (google.api_core.gapic_v1.client_info.ClientInfo):
- The client info used to send a user-agent string along with
- API requests. If ``None``, then default info will be used.
- Generally, you only need to set this if you're developing
- your own client library.
- always_use_jwt_access (Optional[bool]): Whether self signed JWT should
- be used for service account credentials.
-
- Raises:
- google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
- creation failed for any reason.
- google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
- and ``credentials_file`` are passed.
- """
- self._grpc_channel = None
- self._ssl_channel_credentials = ssl_channel_credentials
- self._stubs: Dict[str, Callable] = {}
- self._operations_client = None
-
- if api_mtls_endpoint:
- warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
- if client_cert_source:
- warnings.warn("client_cert_source is deprecated", DeprecationWarning)
-
- if channel:
- # Ignore credentials if a channel was passed.
- credentials = False
- # If a channel was explicitly provided, set it.
- self._grpc_channel = channel
- self._ssl_channel_credentials = None
-
- else:
- if api_mtls_endpoint:
- host = api_mtls_endpoint
-
- # Create SSL credentials with client_cert_source or application
- # default SSL credentials.
- if client_cert_source:
- cert, key = client_cert_source()
- self._ssl_channel_credentials = grpc.ssl_channel_credentials(
- certificate_chain=cert, private_key=key
- )
- else:
- self._ssl_channel_credentials = SslCredentials().ssl_credentials
-
- else:
- if client_cert_source_for_mtls and not ssl_channel_credentials:
- cert, key = client_cert_source_for_mtls()
- self._ssl_channel_credentials = grpc.ssl_channel_credentials(
- certificate_chain=cert, private_key=key
- )
-
- # The base transport sets the host, credentials and scopes
- super().__init__(
- host=host,
- credentials=credentials,
- credentials_file=credentials_file,
- scopes=scopes,
- quota_project_id=quota_project_id,
- client_info=client_info,
- always_use_jwt_access=always_use_jwt_access,
- )
-
- if not self._grpc_channel:
- self._grpc_channel = type(self).create_channel(
- self._host,
- credentials=self._credentials,
- credentials_file=credentials_file,
- scopes=self._scopes,
- ssl_credentials=self._ssl_channel_credentials,
- quota_project_id=quota_project_id,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
- # Wrap messages. This must be done after self._grpc_channel exists
- self._prep_wrapped_messages(client_info)
-
- @classmethod
- def create_channel(cls,
- host: str = 'aiplatform.googleapis.com',
- credentials: ga_credentials.Credentials = None,
- credentials_file: str = None,
- scopes: Optional[Sequence[str]] = None,
- quota_project_id: Optional[str] = None,
- **kwargs) -> grpc.Channel:
- """Create and return a gRPC channel object.
- Args:
- host (Optional[str]): The host for the channel to use.
- credentials (Optional[~.Credentials]): The
- authorization credentials to attach to requests. These
- credentials identify this application to the service. If
- none are specified, the client will attempt to ascertain
- the credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
- be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is mutually exclusive with credentials.
- scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
- service. These are only used when credentials are not specified and
- are passed to :func:`google.auth.default`.
- quota_project_id (Optional[str]): An optional project to use for billing
- and quota.
- kwargs (Optional[dict]): Keyword arguments, which are passed to the
- channel creation.
- Returns:
- grpc.Channel: A gRPC channel object.
-
- Raises:
- google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
- and ``credentials_file`` are passed.
- """
-
- return grpc_helpers.create_channel(
- host,
- credentials=credentials,
- credentials_file=credentials_file,
- quota_project_id=quota_project_id,
- default_scopes=cls.AUTH_SCOPES,
- scopes=scopes,
- default_host=cls.DEFAULT_HOST,
- **kwargs
- )
-
- @property
- def grpc_channel(self) -> grpc.Channel:
- """Return the channel designed to connect to this service.
- """
- return self._grpc_channel
-
- @property
- def operations_client(self) -> operations_v1.OperationsClient:
- """Create the client designed to process long-running operations.
-
- This property caches on the instance; repeated calls return the same
- client.
- """
- # Sanity check: Only create a new client if we do not already have one.
- if self._operations_client is None:
- self._operations_client = operations_v1.OperationsClient(
- self.grpc_channel
- )
-
- # Return the client from cache.
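create_channel above can also be driven directly, building the channel up front and handing it to the transport; credentials are then attached at channel creation. A sketch assuming Application Default Credentials; the regional endpoint is an assumption:

import google.auth
from google.cloud import aiplatform_v1
from google.cloud.aiplatform_v1.services.model_service.transports import (
    ModelServiceGrpcTransport,
)

credentials, _ = google.auth.default()
channel = ModelServiceGrpcTransport.create_channel(
    "us-central1-aiplatform.googleapis.com:443",  # assumed regional endpoint
    credentials=credentials,
)
client = aiplatform_v1.ModelServiceClient(
    transport=ModelServiceGrpcTransport(channel=channel)
)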
- return self._operations_client - - @property - def upload_model(self) -> Callable[ - [model_service.UploadModelRequest], - operations_pb2.Operation]: - r"""Return a callable for the upload model method over gRPC. - - Uploads a Model artifact into Vertex AI. - - Returns: - Callable[[~.UploadModelRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'upload_model' not in self._stubs: - self._stubs['upload_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/UploadModel', - request_serializer=model_service.UploadModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['upload_model'] - - @property - def get_model(self) -> Callable[ - [model_service.GetModelRequest], - model.Model]: - r"""Return a callable for the get model method over gRPC. - - Gets a Model. - - Returns: - Callable[[~.GetModelRequest], - ~.Model]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_model' not in self._stubs: - self._stubs['get_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/GetModel', - request_serializer=model_service.GetModelRequest.serialize, - response_deserializer=model.Model.deserialize, - ) - return self._stubs['get_model'] - - @property - def list_models(self) -> Callable[ - [model_service.ListModelsRequest], - model_service.ListModelsResponse]: - r"""Return a callable for the list models method over gRPC. - - Lists Models in a Location. - - Returns: - Callable[[~.ListModelsRequest], - ~.ListModelsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_models' not in self._stubs: - self._stubs['list_models'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/ListModels', - request_serializer=model_service.ListModelsRequest.serialize, - response_deserializer=model_service.ListModelsResponse.deserialize, - ) - return self._stubs['list_models'] - - @property - def update_model(self) -> Callable[ - [model_service.UpdateModelRequest], - gca_model.Model]: - r"""Return a callable for the update model method over gRPC. - - Updates a Model. - - Returns: - Callable[[~.UpdateModelRequest], - ~.Model]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
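- # [Editorial sketch of the caching pattern, assuming a live channel]
- # Each property builds its stub once and then serves it from
- # ``self._stubs``, so repeated attribute access is cheap:
- #
- #   rpc = transport.update_model          # first access creates the stub
- #   assert rpc is transport.update_model  # later accesses hit the cache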
- if 'update_model' not in self._stubs:
- self._stubs['update_model'] = self.grpc_channel.unary_unary(
- '/google.cloud.aiplatform.v1.ModelService/UpdateModel',
- request_serializer=model_service.UpdateModelRequest.serialize,
- response_deserializer=gca_model.Model.deserialize,
- )
- return self._stubs['update_model']
-
- @property
- def delete_model(self) -> Callable[
- [model_service.DeleteModelRequest],
- operations_pb2.Operation]:
- r"""Return a callable for the delete model method over gRPC.
-
- Deletes a Model.
- Note: a Model can only be deleted if there are no
- DeployedModels created from it.
-
- Returns:
- Callable[[~.DeleteModelRequest],
- ~.Operation]:
- A function that, when called, will call the underlying RPC
- on the server.
- """
- # Generate a "stub function" on-the-fly which will actually make
- # the request.
- # gRPC handles serialization and deserialization, so we just need
- # to pass in the functions for each.
- if 'delete_model' not in self._stubs:
- self._stubs['delete_model'] = self.grpc_channel.unary_unary(
- '/google.cloud.aiplatform.v1.ModelService/DeleteModel',
- request_serializer=model_service.DeleteModelRequest.serialize,
- response_deserializer=operations_pb2.Operation.FromString,
- )
- return self._stubs['delete_model']
-
- @property
- def export_model(self) -> Callable[
- [model_service.ExportModelRequest],
- operations_pb2.Operation]:
- r"""Return a callable for the export model method over gRPC.
-
- Exports a trained, exportable Model to a location specified by
- the user. A Model is considered to be exportable if it has at
- least one [supported export
- format][google.cloud.aiplatform.v1.Model.supported_export_formats].
-
- Returns:
- Callable[[~.ExportModelRequest],
- ~.Operation]:
- A function that, when called, will call the underlying RPC
- on the server.
- """
- # Generate a "stub function" on-the-fly which will actually make
- # the request.
- # gRPC handles serialization and deserialization, so we just need
- # to pass in the functions for each.
- if 'export_model' not in self._stubs:
- self._stubs['export_model'] = self.grpc_channel.unary_unary(
- '/google.cloud.aiplatform.v1.ModelService/ExportModel',
- request_serializer=model_service.ExportModelRequest.serialize,
- response_deserializer=operations_pb2.Operation.FromString,
- )
- return self._stubs['export_model']
-
- @property
- def get_model_evaluation(self) -> Callable[
- [model_service.GetModelEvaluationRequest],
- model_evaluation.ModelEvaluation]:
- r"""Return a callable for the get model evaluation method over gRPC.
-
- Gets a ModelEvaluation.
-
- Returns:
- Callable[[~.GetModelEvaluationRequest],
- ~.ModelEvaluation]:
- A function that, when called, will call the underlying RPC
- on the server.
- """
- # Generate a "stub function" on-the-fly which will actually make
- # the request.
- # gRPC handles serialization and deserialization, so we just need
- # to pass in the functions for each.
- if 'get_model_evaluation' not in self._stubs:
- self._stubs['get_model_evaluation'] = self.grpc_channel.unary_unary(
- '/google.cloud.aiplatform.v1.ModelService/GetModelEvaluation',
- request_serializer=model_service.GetModelEvaluationRequest.serialize,
- response_deserializer=model_evaluation.ModelEvaluation.deserialize,
- )
- return self._stubs['get_model_evaluation']
-
- @property
- def list_model_evaluations(self) -> Callable[
- [model_service.ListModelEvaluationsRequest],
- model_service.ListModelEvaluationsResponse]:
- r"""Return a callable for the list model evaluations method over gRPC.
- - Lists ModelEvaluations in a Model. - - Returns: - Callable[[~.ListModelEvaluationsRequest], - ~.ListModelEvaluationsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_model_evaluations' not in self._stubs: - self._stubs['list_model_evaluations'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/ListModelEvaluations', - request_serializer=model_service.ListModelEvaluationsRequest.serialize, - response_deserializer=model_service.ListModelEvaluationsResponse.deserialize, - ) - return self._stubs['list_model_evaluations'] - - @property - def get_model_evaluation_slice(self) -> Callable[ - [model_service.GetModelEvaluationSliceRequest], - model_evaluation_slice.ModelEvaluationSlice]: - r"""Return a callable for the get model evaluation slice method over gRPC. - - Gets a ModelEvaluationSlice. - - Returns: - Callable[[~.GetModelEvaluationSliceRequest], - ~.ModelEvaluationSlice]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_model_evaluation_slice' not in self._stubs: - self._stubs['get_model_evaluation_slice'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/GetModelEvaluationSlice', - request_serializer=model_service.GetModelEvaluationSliceRequest.serialize, - response_deserializer=model_evaluation_slice.ModelEvaluationSlice.deserialize, - ) - return self._stubs['get_model_evaluation_slice'] - - @property - def list_model_evaluation_slices(self) -> Callable[ - [model_service.ListModelEvaluationSlicesRequest], - model_service.ListModelEvaluationSlicesResponse]: - r"""Return a callable for the list model evaluation slices method over gRPC. - - Lists ModelEvaluationSlices in a ModelEvaluation. - - Returns: - Callable[[~.ListModelEvaluationSlicesRequest], - ~.ListModelEvaluationSlicesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_model_evaluation_slices' not in self._stubs: - self._stubs['list_model_evaluation_slices'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/ListModelEvaluationSlices', - request_serializer=model_service.ListModelEvaluationSlicesRequest.serialize, - response_deserializer=model_service.ListModelEvaluationSlicesResponse.deserialize, - ) - return self._stubs['list_model_evaluation_slices'] - - -__all__ = ( - 'ModelServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py deleted file mode 100644 index 429ad907b1..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,518 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1.types import model -from google.cloud.aiplatform_v1.types import model as gca_model -from google.cloud.aiplatform_v1.types import model_evaluation -from google.cloud.aiplatform_v1.types import model_evaluation_slice -from google.cloud.aiplatform_v1.types import model_service -from google.longrunning import operations_pb2 # type: ignore -from .base import ModelServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import ModelServiceGrpcTransport - - -class ModelServiceGrpcAsyncIOTransport(ModelServiceTransport): - """gRPC AsyncIO backend transport for ModelService. - - A service for managing Vertex AI's machine learning Models. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. 
These
- credentials identify this application to the service. If
- none are specified, the client will attempt to ascertain
- the credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
- be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is ignored if ``channel`` is provided.
- scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
- service. These are only used when credentials are not specified and
- are passed to :func:`google.auth.default`.
- quota_project_id (Optional[str]): An optional project to use for billing
- and quota.
- kwargs (Optional[dict]): Keyword arguments, which are passed to the
- channel creation.
- Returns:
- aio.Channel: A gRPC AsyncIO channel object.
- """
-
- return grpc_helpers_async.create_channel(
- host,
- credentials=credentials,
- credentials_file=credentials_file,
- quota_project_id=quota_project_id,
- default_scopes=cls.AUTH_SCOPES,
- scopes=scopes,
- default_host=cls.DEFAULT_HOST,
- **kwargs
- )
-
- def __init__(self, *,
- host: str = 'aiplatform.googleapis.com',
- credentials: ga_credentials.Credentials = None,
- credentials_file: Optional[str] = None,
- scopes: Optional[Sequence[str]] = None,
- channel: aio.Channel = None,
- api_mtls_endpoint: str = None,
- client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
- ssl_channel_credentials: grpc.ChannelCredentials = None,
- client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
- quota_project_id=None,
- client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
- always_use_jwt_access: Optional[bool] = False,
- ) -> None:
- """Instantiate the transport.
-
- Args:
- host (Optional[str]):
- The hostname to connect to.
- credentials (Optional[google.auth.credentials.Credentials]): The
- authorization credentials to attach to requests. These
- credentials identify the application to the service; if none
- are specified, the client will attempt to ascertain the
- credentials from the environment.
- This argument is ignored if ``channel`` is provided.
- credentials_file (Optional[str]): A file with credentials that can
- be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is ignored if ``channel`` is provided.
- scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
- service. These are only used when credentials are not specified and
- are passed to :func:`google.auth.default`.
- channel (Optional[aio.Channel]): A ``Channel`` instance through
- which to make calls.
- api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
- If provided, it overrides the ``host`` argument and tries to create
- a mutual TLS channel with client SSL credentials from
- ``client_cert_source`` or application default SSL credentials.
- client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
- Deprecated. A callback to provide client SSL certificate bytes and
- private key bytes, both in PEM format. It is ignored if
- ``api_mtls_endpoint`` is None.
- ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
- for grpc channel. It is ignored if ``channel`` is provided.
- client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
- A callback to provide client certificate bytes and private key bytes,
- both in PEM format. It is used to configure mutual TLS channel. It is
- ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
- quota_project_id (Optional[str]): An optional project to use for billing
- and quota.
- client_info (google.api_core.gapic_v1.client_info.ClientInfo):
- The client info used to send a user-agent string along with
- API requests. If ``None``, then default info will be used.
- Generally, you only need to set this if you're developing
- your own client library.
- always_use_jwt_access (Optional[bool]): Whether self-signed JWT should
- be used for service account credentials.
-
- Raises:
- google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
- creation failed for any reason.
- google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
- and ``credentials_file`` are passed.
- """
- self._grpc_channel = None
- self._ssl_channel_credentials = ssl_channel_credentials
- self._stubs: Dict[str, Callable] = {}
- self._operations_client = None
-
- if api_mtls_endpoint:
- warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
- if client_cert_source:
- warnings.warn("client_cert_source is deprecated", DeprecationWarning)
-
- if channel:
- # Ignore credentials if a channel was passed.
- credentials = False
- # If a channel was explicitly provided, set it.
- self._grpc_channel = channel
- self._ssl_channel_credentials = None
- else:
- if api_mtls_endpoint:
- host = api_mtls_endpoint
-
- # Create SSL credentials with client_cert_source or application
- # default SSL credentials.
- if client_cert_source:
- cert, key = client_cert_source()
- self._ssl_channel_credentials = grpc.ssl_channel_credentials(
- certificate_chain=cert, private_key=key
- )
- else:
- self._ssl_channel_credentials = SslCredentials().ssl_credentials
-
- else:
- if client_cert_source_for_mtls and not ssl_channel_credentials:
- cert, key = client_cert_source_for_mtls()
- self._ssl_channel_credentials = grpc.ssl_channel_credentials(
- certificate_chain=cert, private_key=key
- )
-
- # The base transport sets the host, credentials and scopes
- super().__init__(
- host=host,
- credentials=credentials,
- credentials_file=credentials_file,
- scopes=scopes,
- quota_project_id=quota_project_id,
- client_info=client_info,
- always_use_jwt_access=always_use_jwt_access,
- )
-
- if not self._grpc_channel:
- self._grpc_channel = type(self).create_channel(
- self._host,
- credentials=self._credentials,
- credentials_file=credentials_file,
- scopes=self._scopes,
- ssl_credentials=self._ssl_channel_credentials,
- quota_project_id=quota_project_id,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
-
- # Wrap messages. This must be done after self._grpc_channel exists
- self._prep_wrapped_messages(client_info)
-
- @property
- def grpc_channel(self) -> aio.Channel:
- """Create the channel designed to connect to this service.
-
- This property caches on the instance; repeated calls return
- the same channel.
- """
- # Return the channel from cache.
- return self._grpc_channel
-
- @property
- def operations_client(self) -> operations_v1.OperationsAsyncClient:
- """Create the client designed to process long-running operations.
-
- This property caches on the instance; repeated calls return the same
- client.
- """
- # Sanity check: Only create a new client if we do not already have one.
- if self._operations_client is None:
- self._operations_client = operations_v1.OperationsAsyncClient(
- self.grpc_channel
- )
-
- # Return the client from cache.
- return self._operations_client - - @property - def upload_model(self) -> Callable[ - [model_service.UploadModelRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the upload model method over gRPC. - - Uploads a Model artifact into Vertex AI. - - Returns: - Callable[[~.UploadModelRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'upload_model' not in self._stubs: - self._stubs['upload_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/UploadModel', - request_serializer=model_service.UploadModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['upload_model'] - - @property - def get_model(self) -> Callable[ - [model_service.GetModelRequest], - Awaitable[model.Model]]: - r"""Return a callable for the get model method over gRPC. - - Gets a Model. - - Returns: - Callable[[~.GetModelRequest], - Awaitable[~.Model]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_model' not in self._stubs: - self._stubs['get_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/GetModel', - request_serializer=model_service.GetModelRequest.serialize, - response_deserializer=model.Model.deserialize, - ) - return self._stubs['get_model'] - - @property - def list_models(self) -> Callable[ - [model_service.ListModelsRequest], - Awaitable[model_service.ListModelsResponse]]: - r"""Return a callable for the list models method over gRPC. - - Lists Models in a Location. - - Returns: - Callable[[~.ListModelsRequest], - Awaitable[~.ListModelsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_models' not in self._stubs: - self._stubs['list_models'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/ListModels', - request_serializer=model_service.ListModelsRequest.serialize, - response_deserializer=model_service.ListModelsResponse.deserialize, - ) - return self._stubs['list_models'] - - @property - def update_model(self) -> Callable[ - [model_service.UpdateModelRequest], - Awaitable[gca_model.Model]]: - r"""Return a callable for the update model method over gRPC. - - Updates a Model. - - Returns: - Callable[[~.UpdateModelRequest], - Awaitable[~.Model]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_model' not in self._stubs:
- self._stubs['update_model'] = self.grpc_channel.unary_unary(
- '/google.cloud.aiplatform.v1.ModelService/UpdateModel',
- request_serializer=model_service.UpdateModelRequest.serialize,
- response_deserializer=gca_model.Model.deserialize,
- )
- return self._stubs['update_model']
-
- @property
- def delete_model(self) -> Callable[
- [model_service.DeleteModelRequest],
- Awaitable[operations_pb2.Operation]]:
- r"""Return a callable for the delete model method over gRPC.
-
- Deletes a Model.
- Note: a Model can only be deleted if there are no
- DeployedModels created from it.
-
- Returns:
- Callable[[~.DeleteModelRequest],
- Awaitable[~.Operation]]:
- A function that, when called, will call the underlying RPC
- on the server.
- """
- # Generate a "stub function" on-the-fly which will actually make
- # the request.
- # gRPC handles serialization and deserialization, so we just need
- # to pass in the functions for each.
- if 'delete_model' not in self._stubs:
- self._stubs['delete_model'] = self.grpc_channel.unary_unary(
- '/google.cloud.aiplatform.v1.ModelService/DeleteModel',
- request_serializer=model_service.DeleteModelRequest.serialize,
- response_deserializer=operations_pb2.Operation.FromString,
- )
- return self._stubs['delete_model']
-
- @property
- def export_model(self) -> Callable[
- [model_service.ExportModelRequest],
- Awaitable[operations_pb2.Operation]]:
- r"""Return a callable for the export model method over gRPC.
-
- Exports a trained, exportable Model to a location specified by
- the user. A Model is considered to be exportable if it has at
- least one [supported export
- format][google.cloud.aiplatform.v1.Model.supported_export_formats].
-
- Returns:
- Callable[[~.ExportModelRequest],
- Awaitable[~.Operation]]:
- A function that, when called, will call the underlying RPC
- on the server.
- """
- # Generate a "stub function" on-the-fly which will actually make
- # the request.
- # gRPC handles serialization and deserialization, so we just need
- # to pass in the functions for each.
- if 'export_model' not in self._stubs:
- self._stubs['export_model'] = self.grpc_channel.unary_unary(
- '/google.cloud.aiplatform.v1.ModelService/ExportModel',
- request_serializer=model_service.ExportModelRequest.serialize,
- response_deserializer=operations_pb2.Operation.FromString,
- )
- return self._stubs['export_model']
-
- @property
- def get_model_evaluation(self) -> Callable[
- [model_service.GetModelEvaluationRequest],
- Awaitable[model_evaluation.ModelEvaluation]]:
- r"""Return a callable for the get model evaluation method over gRPC.
-
- Gets a ModelEvaluation.
-
- Returns:
- Callable[[~.GetModelEvaluationRequest],
- Awaitable[~.ModelEvaluation]]:
- A function that, when called, will call the underlying RPC
- on the server.
- """
- # Generate a "stub function" on-the-fly which will actually make
- # the request.
- # gRPC handles serialization and deserialization, so we just need
- # to pass in the functions for each.
- if 'get_model_evaluation' not in self._stubs: - self._stubs['get_model_evaluation'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/GetModelEvaluation', - request_serializer=model_service.GetModelEvaluationRequest.serialize, - response_deserializer=model_evaluation.ModelEvaluation.deserialize, - ) - return self._stubs['get_model_evaluation'] - - @property - def list_model_evaluations(self) -> Callable[ - [model_service.ListModelEvaluationsRequest], - Awaitable[model_service.ListModelEvaluationsResponse]]: - r"""Return a callable for the list model evaluations method over gRPC. - - Lists ModelEvaluations in a Model. - - Returns: - Callable[[~.ListModelEvaluationsRequest], - Awaitable[~.ListModelEvaluationsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_model_evaluations' not in self._stubs: - self._stubs['list_model_evaluations'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/ListModelEvaluations', - request_serializer=model_service.ListModelEvaluationsRequest.serialize, - response_deserializer=model_service.ListModelEvaluationsResponse.deserialize, - ) - return self._stubs['list_model_evaluations'] - - @property - def get_model_evaluation_slice(self) -> Callable[ - [model_service.GetModelEvaluationSliceRequest], - Awaitable[model_evaluation_slice.ModelEvaluationSlice]]: - r"""Return a callable for the get model evaluation slice method over gRPC. - - Gets a ModelEvaluationSlice. - - Returns: - Callable[[~.GetModelEvaluationSliceRequest], - Awaitable[~.ModelEvaluationSlice]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_model_evaluation_slice' not in self._stubs: - self._stubs['get_model_evaluation_slice'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/GetModelEvaluationSlice', - request_serializer=model_service.GetModelEvaluationSliceRequest.serialize, - response_deserializer=model_evaluation_slice.ModelEvaluationSlice.deserialize, - ) - return self._stubs['get_model_evaluation_slice'] - - @property - def list_model_evaluation_slices(self) -> Callable[ - [model_service.ListModelEvaluationSlicesRequest], - Awaitable[model_service.ListModelEvaluationSlicesResponse]]: - r"""Return a callable for the list model evaluation slices method over gRPC. - - Lists ModelEvaluationSlices in a ModelEvaluation. - - Returns: - Callable[[~.ListModelEvaluationSlicesRequest], - Awaitable[~.ListModelEvaluationSlicesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_model_evaluation_slices' not in self._stubs: - self._stubs['list_model_evaluation_slices'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/ListModelEvaluationSlices', - request_serializer=model_service.ListModelEvaluationSlicesRequest.serialize, - response_deserializer=model_service.ListModelEvaluationSlicesResponse.deserialize, - ) - return self._stubs['list_model_evaluation_slices'] - - -__all__ = ( - 'ModelServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/__init__.py deleted file mode 100644 index 539616023d..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import PipelineServiceClient -from .async_client import PipelineServiceAsyncClient - -__all__ = ( - 'PipelineServiceClient', - 'PipelineServiceAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py deleted file mode 100644 index afe7a6b8e0..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py +++ /dev/null @@ -1,1069 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.pipeline_service import pagers -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import model -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.cloud.aiplatform_v1.types import pipeline_job -from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job -from google.cloud.aiplatform_v1.types import pipeline_service -from google.cloud.aiplatform_v1.types import pipeline_state -from google.cloud.aiplatform_v1.types import training_pipeline -from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from .transports.base import PipelineServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import PipelineServiceGrpcAsyncIOTransport -from .client import PipelineServiceClient - - -class PipelineServiceAsyncClient: - """A service for creating and managing Vertex AI's pipelines. This - includes both ``TrainingPipeline`` resources (used for AutoML and - custom training) and ``PipelineJob`` resources (used for Vertex - Pipelines). 
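-
-    Example (an illustrative sketch; assumes Application Default Credentials
-    and a hypothetical resource name)::
-
-        client = PipelineServiceAsyncClient()
-        pipeline = await client.get_training_pipeline(
-            name="projects/my-project/locations/us-central1/trainingPipelines/123",
-        )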
- """ - - _client: PipelineServiceClient - - DEFAULT_ENDPOINT = PipelineServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = PipelineServiceClient.DEFAULT_MTLS_ENDPOINT - - artifact_path = staticmethod(PipelineServiceClient.artifact_path) - parse_artifact_path = staticmethod(PipelineServiceClient.parse_artifact_path) - context_path = staticmethod(PipelineServiceClient.context_path) - parse_context_path = staticmethod(PipelineServiceClient.parse_context_path) - custom_job_path = staticmethod(PipelineServiceClient.custom_job_path) - parse_custom_job_path = staticmethod(PipelineServiceClient.parse_custom_job_path) - endpoint_path = staticmethod(PipelineServiceClient.endpoint_path) - parse_endpoint_path = staticmethod(PipelineServiceClient.parse_endpoint_path) - execution_path = staticmethod(PipelineServiceClient.execution_path) - parse_execution_path = staticmethod(PipelineServiceClient.parse_execution_path) - model_path = staticmethod(PipelineServiceClient.model_path) - parse_model_path = staticmethod(PipelineServiceClient.parse_model_path) - network_path = staticmethod(PipelineServiceClient.network_path) - parse_network_path = staticmethod(PipelineServiceClient.parse_network_path) - pipeline_job_path = staticmethod(PipelineServiceClient.pipeline_job_path) - parse_pipeline_job_path = staticmethod(PipelineServiceClient.parse_pipeline_job_path) - training_pipeline_path = staticmethod(PipelineServiceClient.training_pipeline_path) - parse_training_pipeline_path = staticmethod(PipelineServiceClient.parse_training_pipeline_path) - common_billing_account_path = staticmethod(PipelineServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(PipelineServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(PipelineServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(PipelineServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(PipelineServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(PipelineServiceClient.parse_common_organization_path) - common_project_path = staticmethod(PipelineServiceClient.common_project_path) - parse_common_project_path = staticmethod(PipelineServiceClient.parse_common_project_path) - common_location_path = staticmethod(PipelineServiceClient.common_location_path) - parse_common_location_path = staticmethod(PipelineServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PipelineServiceAsyncClient: The constructed client. - """ - return PipelineServiceClient.from_service_account_info.__func__(PipelineServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PipelineServiceAsyncClient: The constructed client. 
- """ - return PipelineServiceClient.from_service_account_file.__func__(PipelineServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> PipelineServiceTransport: - """Returns the transport used by the client instance. - - Returns: - PipelineServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(PipelineServiceClient).get_transport_class, type(PipelineServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, PipelineServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the pipeline service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.PipelineServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = PipelineServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_training_pipeline(self, - request: pipeline_service.CreateTrainingPipelineRequest = None, - *, - parent: str = None, - training_pipeline: gca_training_pipeline.TrainingPipeline = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_training_pipeline.TrainingPipeline: - r"""Creates a TrainingPipeline. A created - TrainingPipeline right away will be attempted to be run. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.CreateTrainingPipelineRequest`): - The request object. Request message for - [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CreateTrainingPipeline]. - parent (:class:`str`): - Required. The resource name of the Location to create - the TrainingPipeline in. 
Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - training_pipeline (:class:`google.cloud.aiplatform_v1.types.TrainingPipeline`): - Required. The TrainingPipeline to - create. - - This corresponds to the ``training_pipeline`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.TrainingPipeline: - The TrainingPipeline orchestrates tasks associated with training a Model. It - always executes the training task, and optionally may - also export data from Vertex AI's Dataset which - becomes the training input, - [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] - the Model to Vertex AI, and evaluate the Model. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, training_pipeline]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.CreateTrainingPipelineRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if training_pipeline is not None: - request.training_pipeline = training_pipeline - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_training_pipeline, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_training_pipeline(self, - request: pipeline_service.GetTrainingPipelineRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> training_pipeline.TrainingPipeline: - r"""Gets a TrainingPipeline. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.GetTrainingPipelineRequest`): - The request object. Request message for - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline]. - name (:class:`str`): - Required. The name of the TrainingPipeline resource. - Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1.types.TrainingPipeline: - The TrainingPipeline orchestrates tasks associated with training a Model. It - always executes the training task, and optionally may - also export data from Vertex AI's Dataset which - becomes the training input, - [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] - the Model to Vertex AI, and evaluate the Model. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.GetTrainingPipelineRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_training_pipeline, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_training_pipelines(self, - request: pipeline_service.ListTrainingPipelinesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTrainingPipelinesAsyncPager: - r"""Lists TrainingPipelines in a Location. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest`): - The request object. Request message for - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines]. - parent (:class:`str`): - Required. The resource name of the Location to list the - TrainingPipelines from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListTrainingPipelinesAsyncPager: - Response message for - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
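- # [Editorial illustration] ``request`` and the flattened ``parent``
- # argument are mutually exclusive; a call such as
- #
- #   await client.list_training_pipelines(
- #       request=pipeline_service.ListTrainingPipelinesRequest(parent=p),
- #       parent=p,
- #   )
- #
- # raises ValueError, as enforced just below.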
- has_flattened_params = any([parent])
- if request is not None and has_flattened_params:
- raise ValueError("If the `request` argument is set, then none of "
- "the individual field arguments should be set.")
-
- request = pipeline_service.ListTrainingPipelinesRequest(request)
-
- # If we have keyword arguments corresponding to fields on the
- # request, apply these.
- if parent is not None:
- request.parent = parent
-
- # Wrap the RPC method; this adds retry and timeout information,
- # and friendly error handling.
- rpc = gapic_v1.method_async.wrap_method(
- self._client._transport.list_training_pipelines,
- default_timeout=5.0,
- client_info=DEFAULT_CLIENT_INFO,
- )
-
- # Certain fields should be provided within the metadata header;
- # add these here.
- metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata((
- ("parent", request.parent),
- )),
- )
-
- # Send the request.
- response = await rpc(
- request,
- retry=retry,
- timeout=timeout,
- metadata=metadata,
- )
-
- # This method is paged; wrap the response in a pager, which provides
- # an `__aiter__` convenience method.
- response = pagers.ListTrainingPipelinesAsyncPager(
- method=rpc,
- request=request,
- response=response,
- metadata=metadata,
- )
-
- # Done; return the response.
- return response
-
- async def delete_training_pipeline(self,
- request: pipeline_service.DeleteTrainingPipelineRequest = None,
- *,
- name: str = None,
- retry: retries.Retry = gapic_v1.method.DEFAULT,
- timeout: float = None,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> operation_async.AsyncOperation:
- r"""Deletes a TrainingPipeline.
-
- Args:
- request (:class:`google.cloud.aiplatform_v1.types.DeleteTrainingPipelineRequest`):
- The request object. Request message for
- [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.DeleteTrainingPipeline].
- name (:class:`str`):
- Required. The name of the TrainingPipeline resource to
- be deleted. Format:
- ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}``
-
- This corresponds to the ``name`` field
- on the ``request`` instance; if ``request`` is provided, this
- should not be set.
- retry (google.api_core.retry.Retry): Designation of what errors, if any,
- should be retried.
- timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be
- sent along with the request as metadata.
-
- Returns:
- google.api_core.operation_async.AsyncOperation:
- An object representing a long-running operation.
-
- The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty`, a generic empty message that you can re-use to avoid defining duplicated
- empty messages in your APIs. A typical example is to
- use it as the request or the response type of an API
- method. For instance:
-
- service Foo {
- rpc Bar(google.protobuf.Empty) returns
- (google.protobuf.Empty);
-
- }
-
- The JSON representation for Empty is an empty JSON
- object {}.
-
- """
- # Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have
- # gotten any keyword arguments that map to the request.
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.DeleteTrainingPipelineRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_training_pipeline, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def cancel_training_pipeline(self, - request: pipeline_service.CancelTrainingPipelineRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a TrainingPipeline. Starts asynchronous cancellation on - the TrainingPipeline. The server makes a best effort to cancel - the pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. On - successful cancellation, the TrainingPipeline is not deleted; - instead it becomes a pipeline with a - [TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state] - is set to ``CANCELLED``. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.CancelTrainingPipelineRequest`): - The request object. Request message for - [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CancelTrainingPipeline]. - name (:class:`str`): - Required. The name of the TrainingPipeline to cancel. - Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.CancelTrainingPipelineRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.cancel_training_pipeline, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def create_pipeline_job(self, - request: pipeline_service.CreatePipelineJobRequest = None, - *, - parent: str = None, - pipeline_job: gca_pipeline_job.PipelineJob = None, - pipeline_job_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_pipeline_job.PipelineJob: - r"""Creates a PipelineJob. A PipelineJob will run - immediately when created. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.CreatePipelineJobRequest`): - The request object. Request message for - [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1.PipelineService.CreatePipelineJob]. - parent (:class:`str`): - Required. The resource name of the Location to create - the PipelineJob in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - pipeline_job (:class:`google.cloud.aiplatform_v1.types.PipelineJob`): - Required. The PipelineJob to create. - This corresponds to the ``pipeline_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - pipeline_job_id (:class:`str`): - The ID to use for the PipelineJob, which will become the - final component of the PipelineJob name. If not - provided, an ID will be automatically generated. - - This value should be less than 128 characters, and valid - characters are /[a-z][0-9]-/. - - This corresponds to the ``pipeline_job_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.PipelineJob: - An instance of a machine learning - PipelineJob. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
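- # [Editorial sketch; resource names and the job are hypothetical]
- # A flattened call would look like:
- #
- #   job = await client.create_pipeline_job(
- #       parent="projects/my-project/locations/us-central1",
- #       pipeline_job=gca_pipeline_job.PipelineJob(display_name="demo"),
- #       pipeline_job_id="demo-run-01",  # fits the documented /[a-z][0-9]-/ charset
- #   )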
- has_flattened_params = any([parent, pipeline_job, pipeline_job_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.CreatePipelineJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if pipeline_job is not None: - request.pipeline_job = pipeline_job - if pipeline_job_id is not None: - request.pipeline_job_id = pipeline_job_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_pipeline_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_pipeline_job(self, - request: pipeline_service.GetPipelineJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pipeline_job.PipelineJob: - r"""Gets a PipelineJob. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.GetPipelineJobRequest`): - The request object. Request message for - [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob]. - name (:class:`str`): - Required. The name of the PipelineJob resource. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.PipelineJob: - An instance of a machine learning - PipelineJob. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.GetPipelineJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_pipeline_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
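[Editorial sketch] The ``has_flattened_params`` guard above recurs in every method of this surface: callers pass either a populated request object or the flattened keyword arguments, never both. A minimal sketch of the two calling styles against the async client (the project, location, and job IDs are placeholders, and default application credentials are assumed):

    import asyncio

    from google.cloud import aiplatform_v1


    async def main():
        client = aiplatform_v1.PipelineServiceAsyncClient()
        name = "projects/my-project/locations/us-central1/pipelineJobs/my-job"

        # Style 1: flattened keyword arguments.
        job = await client.get_pipeline_job(name=name)

        # Style 2: an explicit request object.
        request = aiplatform_v1.GetPipelineJobRequest(name=name)
        job = await client.get_pipeline_job(request=request)

        # Passing both `request=` and `name=` raises ValueError before
        # any RPC is sent.
        print(job.display_name)

    asyncio.run(main())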
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_pipeline_jobs(self, - request: pipeline_service.ListPipelineJobsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListPipelineJobsAsyncPager: - r"""Lists PipelineJobs in a Location. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.ListPipelineJobsRequest`): - The request object. Request message for - [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs]. - parent (:class:`str`): - Required. The resource name of the Location to list the - PipelineJobs from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListPipelineJobsAsyncPager: - Response message for - [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.ListPipelineJobsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_pipeline_jobs, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListPipelineJobsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_pipeline_job(self, - request: pipeline_service.DeletePipelineJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a PipelineJob. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.DeletePipelineJobRequest`): - The request object. 
Request message for - [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1.PipelineService.DeletePipelineJob]. - name (:class:`str`): - Required. The name of the PipelineJob resource to be - deleted. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.DeletePipelineJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_pipeline_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def cancel_pipeline_job(self, - request: pipeline_service.CancelPipelineJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a PipelineJob. Starts asynchronous cancellation on the - PipelineJob. The server makes a best effort to cancel the - pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. 
On - successful cancellation, the PipelineJob is not deleted; instead - it becomes a pipeline with a - [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] - is set to ``CANCELLED``. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.CancelPipelineJobRequest`): - The request object. Request message for - [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1.PipelineService.CancelPipelineJob]. - name (:class:`str`): - Required. The name of the PipelineJob to cancel. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.CancelPipelineJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.cancel_pipeline_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "PipelineServiceAsyncClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/client.py deleted file mode 100644 index 1fa4015ac6..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/client.py +++ /dev/null @@ -1,1328 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from distutils import util -import os -import re -from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.pipeline_service import pagers -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import model -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.cloud.aiplatform_v1.types import pipeline_job -from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job -from google.cloud.aiplatform_v1.types import pipeline_service -from google.cloud.aiplatform_v1.types import pipeline_state -from google.cloud.aiplatform_v1.types import training_pipeline -from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from .transports.base import PipelineServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import PipelineServiceGrpcTransport -from .transports.grpc_asyncio import PipelineServiceGrpcAsyncIOTransport - - -class PipelineServiceClientMeta(type): - """Metaclass for the PipelineService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[PipelineServiceTransport]] - _transport_registry["grpc"] = PipelineServiceGrpcTransport - _transport_registry["grpc_asyncio"] = PipelineServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[PipelineServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class PipelineServiceClient(metaclass=PipelineServiceClientMeta): - """A service for creating and managing Vertex AI's pipelines. This - includes both ``TrainingPipeline`` resources (used for AutoML and - custom training) and ``PipelineJob`` resources (used for Vertex - Pipelines). 
- """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PipelineServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PipelineServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> PipelineServiceTransport: - """Returns the transport used by the client instance. - - Returns: - PipelineServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def artifact_path(project: str,location: str,metadata_store: str,artifact: str,) -> str: - """Returns a fully-qualified artifact string.""" - return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(project=project, location=location, metadata_store=metadata_store, artifact=artifact, ) - - @staticmethod - def parse_artifact_path(path: str) -> Dict[str,str]: - """Parses a artifact path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/artifacts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def context_path(project: str,location: str,metadata_store: str,context: str,) -> str: - """Returns a fully-qualified context string.""" - return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(project=project, location=location, metadata_store=metadata_store, context=context, ) - - @staticmethod - def parse_context_path(path: str) -> Dict[str,str]: - """Parses a context path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/contexts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def custom_job_path(project: str,location: str,custom_job: str,) -> str: - """Returns a fully-qualified custom_job string.""" - return "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) - - @staticmethod - def parse_custom_job_path(path: str) -> Dict[str,str]: - """Parses a custom_job path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/customJobs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def endpoint_path(project: str,location: str,endpoint: str,) -> str: - """Returns a fully-qualified endpoint string.""" - return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - - @staticmethod - def parse_endpoint_path(path: str) -> Dict[str,str]: - """Parses a endpoint path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def execution_path(project: str,location: str,metadata_store: str,execution: str,) -> str: - """Returns a fully-qualified execution string.""" - return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(project=project, location=location, metadata_store=metadata_store, execution=execution, ) - - @staticmethod - def parse_execution_path(path: str) -> Dict[str,str]: - """Parses a execution path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/executions/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_path(project: str,location: str,model: str,) -> str: - """Returns a fully-qualified model string.""" - return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - - @staticmethod - def parse_model_path(path: str) -> Dict[str,str]: - """Parses a model path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def network_path(project: str,network: str,) -> 
str: - """Returns a fully-qualified network string.""" - return "projects/{project}/global/networks/{network}".format(project=project, network=network, ) - - @staticmethod - def parse_network_path(path: str) -> Dict[str,str]: - """Parses a network path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/global/networks/(?P<network>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def pipeline_job_path(project: str,location: str,pipeline_job: str,) -> str: - """Returns a fully-qualified pipeline_job string.""" - return "projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}".format(project=project, location=location, pipeline_job=pipeline_job, ) - - @staticmethod - def parse_pipeline_job_path(path: str) -> Dict[str,str]: - """Parses a pipeline_job path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/pipelineJobs/(?P<pipeline_job>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def training_pipeline_path(project: str,location: str,training_pipeline: str,) -> str: - """Returns a fully-qualified training_pipeline string.""" - return "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, ) - - @staticmethod - def parse_training_pipeline_path(path: str) -> Dict[str,str]: - """Parses a training_pipeline path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/trainingPipelines/(?P<training_pipeline>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P<folder>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse an organization path into its component segments.""" - m = re.match(r"^organizations/(?P<organization>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) ->
Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, PipelineServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the pipeline service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, PipelineServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use.
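[Editorial sketch] The endpoint selection that follows honors two inputs: an explicit ``client_options.api_endpoint``, which always wins, and the ``GOOGLE_API_USE_MTLS_ENDPOINT`` environment variable ("never"/"always"/"auto"). A sketch of both overrides; the regional endpoint shown is an assumption and a placeholder:

    import os

    from google.api_core import client_options
    from google.cloud import aiplatform_v1

    # With no explicit endpoint, this variable decides between
    # DEFAULT_ENDPOINT and DEFAULT_MTLS_ENDPOINT ("auto" is the default).
    os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "never"

    # An explicit api_endpoint takes precedence over the environment,
    # e.g. to pin a regional endpoint.
    options = client_options.ClientOptions(
        api_endpoint="us-central1-aiplatform.googleapis.com",
    )
    client = aiplatform_v1.PipelineServiceClient(client_options=options)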
- if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, PipelineServiceTransport): - # transport is a PipelineServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - ) - - def create_training_pipeline(self, - request: pipeline_service.CreateTrainingPipelineRequest = None, - *, - parent: str = None, - training_pipeline: gca_training_pipeline.TrainingPipeline = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_training_pipeline.TrainingPipeline: - r"""Creates a TrainingPipeline. A created - TrainingPipeline right away will be attempted to be run. - - Args: - request (google.cloud.aiplatform_v1.types.CreateTrainingPipelineRequest): - The request object. Request message for - [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CreateTrainingPipeline]. - parent (str): - Required. The resource name of the Location to create - the TrainingPipeline in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - training_pipeline (google.cloud.aiplatform_v1.types.TrainingPipeline): - Required. The TrainingPipeline to - create. - - This corresponds to the ``training_pipeline`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.TrainingPipeline: - The TrainingPipeline orchestrates tasks associated with training a Model. It - always executes the training task, and optionally may - also export data from Vertex AI's Dataset which - becomes the training input, - [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] - the Model to Vertex AI, and evaluate the Model. - - """ - # Create or coerce a protobuf request object. 
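[Editorial sketch] For reference, a sketch of the synchronous call documented above. The display name and parent are placeholders, and the task-specific fields a real TrainingPipeline requires (``training_task_definition``, ``training_task_inputs``, and so on) are elided, so this is a shape illustration rather than a runnable training job:

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.PipelineServiceClient()

    parent = client.common_location_path("my-project", "us-central1")
    training_pipeline = aiplatform_v1.TrainingPipeline(
        display_name="my-training-pipeline",
        # training_task_definition / training_task_inputs elided; they
        # depend on the training task being run.
    )

    pipeline = client.create_training_pipeline(
        parent=parent,
        training_pipeline=training_pipeline,
    )
    print(pipeline.name, pipeline.state)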
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, training_pipeline]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.CreateTrainingPipelineRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.CreateTrainingPipelineRequest): - request = pipeline_service.CreateTrainingPipelineRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if training_pipeline is not None: - request.training_pipeline = training_pipeline - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_training_pipeline] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_training_pipeline(self, - request: pipeline_service.GetTrainingPipelineRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> training_pipeline.TrainingPipeline: - r"""Gets a TrainingPipeline. - - Args: - request (google.cloud.aiplatform_v1.types.GetTrainingPipelineRequest): - The request object. Request message for - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline]. - name (str): - Required. The name of the TrainingPipeline resource. - Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.TrainingPipeline: - The TrainingPipeline orchestrates tasks associated with training a Model. It - always executes the training task, and optionally may - also export data from Vertex AI's Dataset which - becomes the training input, - [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] - the Model to Vertex AI, and evaluate the Model. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.GetTrainingPipelineRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.GetTrainingPipelineRequest): - request = pipeline_service.GetTrainingPipelineRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_training_pipeline] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_training_pipelines(self, - request: pipeline_service.ListTrainingPipelinesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTrainingPipelinesPager: - r"""Lists TrainingPipelines in a Location. - - Args: - request (google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest): - The request object. Request message for - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines]. - parent (str): - Required. The resource name of the Location to list the - TrainingPipelines from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListTrainingPipelinesPager: - Response message for - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.ListTrainingPipelinesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.ListTrainingPipelinesRequest): - request = pipeline_service.ListTrainingPipelinesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_training_pipelines] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListTrainingPipelinesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_training_pipeline(self, - request: pipeline_service.DeleteTrainingPipelineRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a TrainingPipeline. - - Args: - request (google.cloud.aiplatform_v1.types.DeleteTrainingPipelineRequest): - The request object. Request message for - [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.DeleteTrainingPipeline]. - name (str): - Required. The name of the TrainingPipeline resource to - be deleted. Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.DeleteTrainingPipelineRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.DeleteTrainingPipelineRequest): - request = pipeline_service.DeleteTrainingPipelineRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_training_pipeline] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
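[Editorial sketch] The pager wrapping shown above means callers iterate results directly and never touch page tokens. A minimal sketch, with a placeholder parent:

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.PipelineServiceClient()

    pager = client.list_training_pipelines(
        parent="projects/my-project/locations/us-central1",
    )
    # Iteration issues follow-up ListTrainingPipelines calls as it
    # crosses page boundaries.
    for training_pipeline in pager:
        print(training_pipeline.name, training_pipeline.state)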
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def cancel_training_pipeline(self, - request: pipeline_service.CancelTrainingPipelineRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a TrainingPipeline. Starts asynchronous cancellation on - the TrainingPipeline. The server makes a best effort to cancel - the pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. On - successful cancellation, the TrainingPipeline is not deleted; - instead it becomes a pipeline with a - [TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state] - is set to ``CANCELLED``. - - Args: - request (google.cloud.aiplatform_v1.types.CancelTrainingPipelineRequest): - The request object. Request message for - [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CancelTrainingPipeline]. - name (str): - Required. The name of the TrainingPipeline to cancel. - Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.CancelTrainingPipelineRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.CancelTrainingPipelineRequest): - request = pipeline_service.CancelTrainingPipelineRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.cancel_training_pipeline] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
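[Editorial sketch] Because delete_training_pipeline returns an operation future (the ``from_gapic`` wrap above), callers typically block on it; the result type is ``Empty``, so only completion or an exception is interesting. A sketch with placeholder identifiers:

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.PipelineServiceClient()

    operation = client.delete_training_pipeline(
        name=client.training_pipeline_path(
            "my-project", "us-central1", "my-pipeline"
        ),
    )
    # Blocks until the server-side deletion finishes (or raises).
    operation.result(timeout=300)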
- rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def create_pipeline_job(self, - request: pipeline_service.CreatePipelineJobRequest = None, - *, - parent: str = None, - pipeline_job: gca_pipeline_job.PipelineJob = None, - pipeline_job_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_pipeline_job.PipelineJob: - r"""Creates a PipelineJob. A PipelineJob will run - immediately when created. - - Args: - request (google.cloud.aiplatform_v1.types.CreatePipelineJobRequest): - The request object. Request message for - [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1.PipelineService.CreatePipelineJob]. - parent (str): - Required. The resource name of the Location to create - the PipelineJob in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - pipeline_job (google.cloud.aiplatform_v1.types.PipelineJob): - Required. The PipelineJob to create. - This corresponds to the ``pipeline_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - pipeline_job_id (str): - The ID to use for the PipelineJob, which will become the - final component of the PipelineJob name. If not - provided, an ID will be automatically generated. - - This value should be less than 128 characters, and valid - characters are /[a-z][0-9]-/. - - This corresponds to the ``pipeline_job_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.PipelineJob: - An instance of a machine learning - PipelineJob. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, pipeline_job, pipeline_job_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.CreatePipelineJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.CreatePipelineJobRequest): - request = pipeline_service.CreatePipelineJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if pipeline_job is not None: - request.pipeline_job = pipeline_job - if pipeline_job_id is not None: - request.pipeline_job_id = pipeline_job_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_pipeline_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
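[Editorial sketch] A sketch of the create_pipeline_job call documented above, with a caller-chosen job ID that satisfies the stated constraints (lowercase letters, digits, and hyphens, under 128 characters); the pipeline spec itself is elided as a placeholder:

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.PipelineServiceClient()

    pipeline_job = aiplatform_v1.PipelineJob(
        display_name="my-pipeline-job",
        # pipeline_spec / runtime_config elided; they depend on the
        # pipeline being run.
    )

    job = client.create_pipeline_job(
        parent="projects/my-project/locations/us-central1",
        pipeline_job=pipeline_job,
        pipeline_job_id="my-run-20210714",  # optional; generated if omitted
    )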
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_pipeline_job(self, - request: pipeline_service.GetPipelineJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pipeline_job.PipelineJob: - r"""Gets a PipelineJob. - - Args: - request (google.cloud.aiplatform_v1.types.GetPipelineJobRequest): - The request object. Request message for - [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob]. - name (str): - Required. The name of the PipelineJob resource. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.PipelineJob: - An instance of a machine learning - PipelineJob. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.GetPipelineJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.GetPipelineJobRequest): - request = pipeline_service.GetPipelineJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_pipeline_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_pipeline_jobs(self, - request: pipeline_service.ListPipelineJobsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListPipelineJobsPager: - r"""Lists PipelineJobs in a Location. - - Args: - request (google.cloud.aiplatform_v1.types.ListPipelineJobsRequest): - The request object. Request message for - [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs]. - parent (str): - Required. The resource name of the Location to list the - PipelineJobs from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListPipelineJobsPager: - Response message for - [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.ListPipelineJobsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.ListPipelineJobsRequest): - request = pipeline_service.ListPipelineJobsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_pipeline_jobs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListPipelineJobsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_pipeline_job(self, - request: pipeline_service.DeletePipelineJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a PipelineJob. - - Args: - request (google.cloud.aiplatform_v1.types.DeletePipelineJobRequest): - The request object. Request message for - [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1.PipelineService.DeletePipelineJob]. - name (str): - Required. The name of the PipelineJob resource to be - deleted. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
- - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.DeletePipelineJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.DeletePipelineJobRequest): - request = pipeline_service.DeletePipelineJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_pipeline_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def cancel_pipeline_job(self, - request: pipeline_service.CancelPipelineJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a PipelineJob. Starts asynchronous cancellation on the - PipelineJob. The server makes a best effort to cancel the - pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. On - successful cancellation, the PipelineJob is not deleted; instead - it becomes a pipeline with a - [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] - is set to ``CANCELLED``. - - Args: - request (google.cloud.aiplatform_v1.types.CancelPipelineJobRequest): - The request object. Request message for - [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1.PipelineService.CancelPipelineJob]. - name (str): - Required. The name of the PipelineJob to cancel. 
Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.CancelPipelineJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.CancelPipelineJobRequest): - request = pipeline_service.CancelPipelineJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.cancel_pipeline_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "PipelineServiceClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py deleted file mode 100644 index faa4e19346..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py +++ /dev/null @@ -1,264 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.cloud.aiplatform_v1.types import pipeline_job -from google.cloud.aiplatform_v1.types import pipeline_service -from google.cloud.aiplatform_v1.types import training_pipeline - - -class ListTrainingPipelinesPager: - """A pager for iterating through ``list_training_pipelines`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``training_pipelines`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListTrainingPipelines`` requests and continue to iterate - through the ``training_pipelines`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., pipeline_service.ListTrainingPipelinesResponse], - request: pipeline_service.ListTrainingPipelinesRequest, - response: pipeline_service.ListTrainingPipelinesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = pipeline_service.ListTrainingPipelinesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[pipeline_service.ListTrainingPipelinesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[training_pipeline.TrainingPipeline]: - for page in self.pages: - yield from page.training_pipelines - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTrainingPipelinesAsyncPager: - """A pager for iterating through ``list_training_pipelines`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``training_pipelines`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListTrainingPipelines`` requests and continue to iterate - through the ``training_pipelines`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[pipeline_service.ListTrainingPipelinesResponse]], - request: pipeline_service.ListTrainingPipelinesRequest, - response: pipeline_service.ListTrainingPipelinesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest): - The initial request object. 
- response (google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = pipeline_service.ListTrainingPipelinesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[pipeline_service.ListTrainingPipelinesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[training_pipeline.TrainingPipeline]: - async def async_generator(): - async for page in self.pages: - for response in page.training_pipelines: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListPipelineJobsPager: - """A pager for iterating through ``list_pipeline_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListPipelineJobsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``pipeline_jobs`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListPipelineJobs`` requests and continue to iterate - through the ``pipeline_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListPipelineJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., pipeline_service.ListPipelineJobsResponse], - request: pipeline_service.ListPipelineJobsRequest, - response: pipeline_service.ListPipelineJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListPipelineJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListPipelineJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = pipeline_service.ListPipelineJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[pipeline_service.ListPipelineJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[pipeline_job.PipelineJob]: - for page in self.pages: - yield from page.pipeline_jobs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListPipelineJobsAsyncPager: - """A pager for iterating through ``list_pipeline_jobs`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListPipelineJobsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``pipeline_jobs`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListPipelineJobs`` requests and continue to iterate - through the ``pipeline_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListPipelineJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[pipeline_service.ListPipelineJobsResponse]], - request: pipeline_service.ListPipelineJobsRequest, - response: pipeline_service.ListPipelineJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListPipelineJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListPipelineJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = pipeline_service.ListPipelineJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[pipeline_service.ListPipelineJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[pipeline_job.PipelineJob]: - async def async_generator(): - async for page in self.pages: - for response in page.pipeline_jobs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/__init__.py deleted file mode 100644 index 77051d8254..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import PipelineServiceTransport -from .grpc import PipelineServiceGrpcTransport -from .grpc_asyncio import PipelineServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. 
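# The registry assembled below maps a transport label to its class so the
# generated client can resolve an implementation by name. A minimal,
# self-contained sketch of that lookup pattern, under the assumption that
# the helper is named get_transport_class (mirroring the generated client
# metaclass) and with object stand-ins in place of the real transport classes:
#
#     from collections import OrderedDict
#
#     _registry = OrderedDict()
#     _registry['grpc'] = object
#     _registry['grpc_asyncio'] = object
#
#     def get_transport_class(label=None):
#         # Resolve by label, or fall back to the first registered transport.
#         if label:
#             return _registry[label]
#         return next(iter(_registry.values()))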
-_transport_registry = OrderedDict() # type: Dict[str, Type[PipelineServiceTransport]] -_transport_registry['grpc'] = PipelineServiceGrpcTransport -_transport_registry['grpc_asyncio'] = PipelineServiceGrpcAsyncIOTransport - -__all__ = ( - 'PipelineServiceTransport', - 'PipelineServiceGrpcTransport', - 'PipelineServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py deleted file mode 100644 index f2f3cc392e..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py +++ /dev/null @@ -1,306 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1.types import pipeline_job -from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job -from google.cloud.aiplatform_v1.types import pipeline_service -from google.cloud.aiplatform_v1.types import training_pipeline -from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class PipelineServiceTransport(abc.ABC): - """Abstract transport class for PipelineService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: 
Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials is service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # TODO(busunkim): This method is in the base transport - # to avoid duplicating code across the transport classes. These functions - # should be deleted once the minimum required versions of google-auth is increased. - - # TODO: Remove this function once google-auth >= 1.25.0 is required - @classmethod - def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: - """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" - - scopes_kwargs = {} - - if _GOOGLE_AUTH_VERSION and ( - packaging.version.parse(_GOOGLE_AUTH_VERSION) - >= packaging.version.parse("1.25.0") - ): - scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} - else: - scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} - - return scopes_kwargs - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
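# Each entry in the dictionary below passes the raw transport callable
# through gapic_v1.method.wrap_method, which layers the default timeout,
# any retry policy, and client-info metadata onto every invocation. A hedged
# sketch with a stand-in RPC, assuming google-api-core is installed:
#
#     from google.api_core import gapic_v1
#
#     def echo(request, *, timeout=None, metadata=()):
#         return request  # stand-in for a real RPC callable
#
#     wrapped = gapic_v1.method.wrap_method(echo, default_timeout=5.0)
#     wrapped("ping")  # invoked with timeout=5.0 unless the caller overrides it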
- self._wrapped_methods = { - self.create_training_pipeline: gapic_v1.method.wrap_method( - self.create_training_pipeline, - default_timeout=5.0, - client_info=client_info, - ), - self.get_training_pipeline: gapic_v1.method.wrap_method( - self.get_training_pipeline, - default_timeout=5.0, - client_info=client_info, - ), - self.list_training_pipelines: gapic_v1.method.wrap_method( - self.list_training_pipelines, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_training_pipeline: gapic_v1.method.wrap_method( - self.delete_training_pipeline, - default_timeout=5.0, - client_info=client_info, - ), - self.cancel_training_pipeline: gapic_v1.method.wrap_method( - self.cancel_training_pipeline, - default_timeout=5.0, - client_info=client_info, - ), - self.create_pipeline_job: gapic_v1.method.wrap_method( - self.create_pipeline_job, - default_timeout=None, - client_info=client_info, - ), - self.get_pipeline_job: gapic_v1.method.wrap_method( - self.get_pipeline_job, - default_timeout=None, - client_info=client_info, - ), - self.list_pipeline_jobs: gapic_v1.method.wrap_method( - self.list_pipeline_jobs, - default_timeout=None, - client_info=client_info, - ), - self.delete_pipeline_job: gapic_v1.method.wrap_method( - self.delete_pipeline_job, - default_timeout=None, - client_info=client_info, - ), - self.cancel_pipeline_job: gapic_v1.method.wrap_method( - self.cancel_pipeline_job, - default_timeout=None, - client_info=client_info, - ), - } - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_training_pipeline(self) -> Callable[ - [pipeline_service.CreateTrainingPipelineRequest], - Union[ - gca_training_pipeline.TrainingPipeline, - Awaitable[gca_training_pipeline.TrainingPipeline] - ]]: - raise NotImplementedError() - - @property - def get_training_pipeline(self) -> Callable[ - [pipeline_service.GetTrainingPipelineRequest], - Union[ - training_pipeline.TrainingPipeline, - Awaitable[training_pipeline.TrainingPipeline] - ]]: - raise NotImplementedError() - - @property - def list_training_pipelines(self) -> Callable[ - [pipeline_service.ListTrainingPipelinesRequest], - Union[ - pipeline_service.ListTrainingPipelinesResponse, - Awaitable[pipeline_service.ListTrainingPipelinesResponse] - ]]: - raise NotImplementedError() - - @property - def delete_training_pipeline(self) -> Callable[ - [pipeline_service.DeleteTrainingPipelineRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def cancel_training_pipeline(self) -> Callable[ - [pipeline_service.CancelTrainingPipelineRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def create_pipeline_job(self) -> Callable[ - [pipeline_service.CreatePipelineJobRequest], - Union[ - gca_pipeline_job.PipelineJob, - Awaitable[gca_pipeline_job.PipelineJob] - ]]: - raise NotImplementedError() - - @property - def get_pipeline_job(self) -> Callable[ - [pipeline_service.GetPipelineJobRequest], - Union[ - pipeline_job.PipelineJob, - Awaitable[pipeline_job.PipelineJob] - ]]: - raise NotImplementedError() - - @property - def list_pipeline_jobs(self) -> Callable[ - [pipeline_service.ListPipelineJobsRequest], - Union[ - pipeline_service.ListPipelineJobsResponse, - Awaitable[pipeline_service.ListPipelineJobsResponse] - ]]: - raise NotImplementedError() - - @property - 
def delete_pipeline_job(self) -> Callable[ - [pipeline_service.DeletePipelineJobRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def cancel_pipeline_job(self) -> Callable[ - [pipeline_service.CancelPipelineJobRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'PipelineServiceTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py deleted file mode 100644 index 10bae61fa1..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py +++ /dev/null @@ -1,539 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.api_core import gapic_v1 # type: ignore -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1.types import pipeline_job -from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job -from google.cloud.aiplatform_v1.types import pipeline_service -from google.cloud.aiplatform_v1.types import training_pipeline -from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import PipelineServiceTransport, DEFAULT_CLIENT_INFO - - -class PipelineServiceGrpcTransport(PipelineServiceTransport): - """gRPC backend transport for PipelineService. - - A service for creating and managing Vertex AI's pipelines. This - includes both ``TrainingPipeline`` resources (used for AutoML and - custom training) and ``PipelineJob`` resources (used for Vertex - Pipelines). - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed.
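# A caller-supplied channel wins outright: credentials are dropped because
# the channel is assumed to carry its own authentication. A sketch of that
# path, using the PipelineServiceGrpcTransport defined in this file (the
# insecure localhost endpoint is illustrative only; real channels are secure):
#
#     import grpc
#
#     channel = grpc.insecure_channel("localhost:8080")
#     transport = PipelineServiceGrpcTransport(channel=channel)
#     assert transport.grpc_channel is channel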
- credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): An optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations.
- - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_training_pipeline(self) -> Callable[ - [pipeline_service.CreateTrainingPipelineRequest], - gca_training_pipeline.TrainingPipeline]: - r"""Return a callable for the create training pipeline method over gRPC. - - Creates a TrainingPipeline. A created - TrainingPipeline right away will be attempted to be run. - - Returns: - Callable[[~.CreateTrainingPipelineRequest], - ~.TrainingPipeline]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_training_pipeline' not in self._stubs: - self._stubs['create_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/CreateTrainingPipeline', - request_serializer=pipeline_service.CreateTrainingPipelineRequest.serialize, - response_deserializer=gca_training_pipeline.TrainingPipeline.deserialize, - ) - return self._stubs['create_training_pipeline'] - - @property - def get_training_pipeline(self) -> Callable[ - [pipeline_service.GetTrainingPipelineRequest], - training_pipeline.TrainingPipeline]: - r"""Return a callable for the get training pipeline method over gRPC. - - Gets a TrainingPipeline. - - Returns: - Callable[[~.GetTrainingPipelineRequest], - ~.TrainingPipeline]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_training_pipeline' not in self._stubs: - self._stubs['get_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/GetTrainingPipeline', - request_serializer=pipeline_service.GetTrainingPipelineRequest.serialize, - response_deserializer=training_pipeline.TrainingPipeline.deserialize, - ) - return self._stubs['get_training_pipeline'] - - @property - def list_training_pipelines(self) -> Callable[ - [pipeline_service.ListTrainingPipelinesRequest], - pipeline_service.ListTrainingPipelinesResponse]: - r"""Return a callable for the list training pipelines method over gRPC. - - Lists TrainingPipelines in a Location. - - Returns: - Callable[[~.ListTrainingPipelinesRequest], - ~.ListTrainingPipelinesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
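# Every stub property in this class uses the same memoization idiom seen in
# the surrounding code: build the unary-unary callable once via
# grpc_channel.unary_unary, cache it in self._stubs, and return the cached
# entry on later accesses. A generic sketch of the idiom (the RPC path and
# message types are placeholders, not real API names):
#
#     if 'rpc_name' not in self._stubs:
#         self._stubs['rpc_name'] = self.grpc_channel.unary_unary(
#             '/example.Service/RpcName',
#             request_serializer=RequestType.serialize,
#             response_deserializer=ResponseType.deserialize,
#         )
#     return self._stubs['rpc_name']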
- if 'list_training_pipelines' not in self._stubs: - self._stubs['list_training_pipelines'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/ListTrainingPipelines', - request_serializer=pipeline_service.ListTrainingPipelinesRequest.serialize, - response_deserializer=pipeline_service.ListTrainingPipelinesResponse.deserialize, - ) - return self._stubs['list_training_pipelines'] - - @property - def delete_training_pipeline(self) -> Callable[ - [pipeline_service.DeleteTrainingPipelineRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete training pipeline method over gRPC. - - Deletes a TrainingPipeline. - - Returns: - Callable[[~.DeleteTrainingPipelineRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_training_pipeline' not in self._stubs: - self._stubs['delete_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/DeleteTrainingPipeline', - request_serializer=pipeline_service.DeleteTrainingPipelineRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_training_pipeline'] - - @property - def cancel_training_pipeline(self) -> Callable[ - [pipeline_service.CancelTrainingPipelineRequest], - empty_pb2.Empty]: - r"""Return a callable for the cancel training pipeline method over gRPC. - - Cancels a TrainingPipeline. Starts asynchronous cancellation on - the TrainingPipeline. The server makes a best effort to cancel - the pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. On - successful cancellation, the TrainingPipeline is not deleted; - instead it becomes a pipeline with a - [TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state] - is set to ``CANCELLED``. - - Returns: - Callable[[~.CancelTrainingPipelineRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_training_pipeline' not in self._stubs: - self._stubs['cancel_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/CancelTrainingPipeline', - request_serializer=pipeline_service.CancelTrainingPipelineRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_training_pipeline'] - - @property - def create_pipeline_job(self) -> Callable[ - [pipeline_service.CreatePipelineJobRequest], - gca_pipeline_job.PipelineJob]: - r"""Return a callable for the create pipeline job method over gRPC. - - Creates a PipelineJob. A PipelineJob will run - immediately when created. 
- - Returns: - Callable[[~.CreatePipelineJobRequest], - ~.PipelineJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_pipeline_job' not in self._stubs: - self._stubs['create_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/CreatePipelineJob', - request_serializer=pipeline_service.CreatePipelineJobRequest.serialize, - response_deserializer=gca_pipeline_job.PipelineJob.deserialize, - ) - return self._stubs['create_pipeline_job'] - - @property - def get_pipeline_job(self) -> Callable[ - [pipeline_service.GetPipelineJobRequest], - pipeline_job.PipelineJob]: - r"""Return a callable for the get pipeline job method over gRPC. - - Gets a PipelineJob. - - Returns: - Callable[[~.GetPipelineJobRequest], - ~.PipelineJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_pipeline_job' not in self._stubs: - self._stubs['get_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/GetPipelineJob', - request_serializer=pipeline_service.GetPipelineJobRequest.serialize, - response_deserializer=pipeline_job.PipelineJob.deserialize, - ) - return self._stubs['get_pipeline_job'] - - @property - def list_pipeline_jobs(self) -> Callable[ - [pipeline_service.ListPipelineJobsRequest], - pipeline_service.ListPipelineJobsResponse]: - r"""Return a callable for the list pipeline jobs method over gRPC. - - Lists PipelineJobs in a Location. - - Returns: - Callable[[~.ListPipelineJobsRequest], - ~.ListPipelineJobsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_pipeline_jobs' not in self._stubs: - self._stubs['list_pipeline_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/ListPipelineJobs', - request_serializer=pipeline_service.ListPipelineJobsRequest.serialize, - response_deserializer=pipeline_service.ListPipelineJobsResponse.deserialize, - ) - return self._stubs['list_pipeline_jobs'] - - @property - def delete_pipeline_job(self) -> Callable[ - [pipeline_service.DeletePipelineJobRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete pipeline job method over gRPC. - - Deletes a PipelineJob. - - Returns: - Callable[[~.DeletePipelineJobRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
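# delete_pipeline_job returns a long-running operation rather than a direct
# result; the client wraps it in an operation future. A hedged usage sketch,
# assuming the PipelineServiceClient from this package (the project, location,
# and job names are placeholders):
#
#     client = PipelineServiceClient()
#     op = client.delete_pipeline_job(
#         name="projects/my-project/locations/us-central1/pipelineJobs/my-job"
#     )
#     op.result(timeout=300)  # blocks; returns Empty on success, raises on error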
- if 'delete_pipeline_job' not in self._stubs: - self._stubs['delete_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/DeletePipelineJob', - request_serializer=pipeline_service.DeletePipelineJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_pipeline_job'] - - @property - def cancel_pipeline_job(self) -> Callable[ - [pipeline_service.CancelPipelineJobRequest], - empty_pb2.Empty]: - r"""Return a callable for the cancel pipeline job method over gRPC. - - Cancels a PipelineJob. Starts asynchronous cancellation on the - PipelineJob. The server makes a best effort to cancel the - pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. On - successful cancellation, the PipelineJob is not deleted; instead - it becomes a pipeline with a - [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] - is set to ``CANCELLED``. - - Returns: - Callable[[~.CancelPipelineJobRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_pipeline_job' not in self._stubs: - self._stubs['cancel_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/CancelPipelineJob', - request_serializer=pipeline_service.CancelPipelineJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_pipeline_job'] - - -__all__ = ( - 'PipelineServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py deleted file mode 100644 index db6ff3625b..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,543 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1.types import pipeline_job -from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job -from google.cloud.aiplatform_v1.types import pipeline_service -from google.cloud.aiplatform_v1.types import training_pipeline -from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import PipelineServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import PipelineServiceGrpcTransport - - -class PipelineServiceGrpcAsyncIOTransport(PipelineServiceTransport): - """gRPC AsyncIO backend transport for PipelineService. - - A service for creating and managing Vertex AI's pipelines. This - includes both ``TrainingPipeline`` resources (used for AutoML and - custom training) and ``PipelineJob`` resources (used for Vertex - Pipelines). - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): An optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object.
- """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): An optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed.
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_training_pipeline(self) -> Callable[ - [pipeline_service.CreateTrainingPipelineRequest], - Awaitable[gca_training_pipeline.TrainingPipeline]]: - r"""Return a callable for the create training pipeline method over gRPC. - - Creates a TrainingPipeline. A created - TrainingPipeline right away will be attempted to be run. - - Returns: - Callable[[~.CreateTrainingPipelineRequest], - Awaitable[~.TrainingPipeline]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_training_pipeline' not in self._stubs: - self._stubs['create_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/CreateTrainingPipeline', - request_serializer=pipeline_service.CreateTrainingPipelineRequest.serialize, - response_deserializer=gca_training_pipeline.TrainingPipeline.deserialize, - ) - return self._stubs['create_training_pipeline'] - - @property - def get_training_pipeline(self) -> Callable[ - [pipeline_service.GetTrainingPipelineRequest], - Awaitable[training_pipeline.TrainingPipeline]]: - r"""Return a callable for the get training pipeline method over gRPC. - - Gets a TrainingPipeline. - - Returns: - Callable[[~.GetTrainingPipelineRequest], - Awaitable[~.TrainingPipeline]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_training_pipeline' not in self._stubs: - self._stubs['get_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/GetTrainingPipeline', - request_serializer=pipeline_service.GetTrainingPipelineRequest.serialize, - response_deserializer=training_pipeline.TrainingPipeline.deserialize, - ) - return self._stubs['get_training_pipeline'] - - @property - def list_training_pipelines(self) -> Callable[ - [pipeline_service.ListTrainingPipelinesRequest], - Awaitable[pipeline_service.ListTrainingPipelinesResponse]]: - r"""Return a callable for the list training pipelines method over gRPC. - - Lists TrainingPipelines in a Location. - - Returns: - Callable[[~.ListTrainingPipelinesRequest], - Awaitable[~.ListTrainingPipelinesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_training_pipelines' not in self._stubs: - self._stubs['list_training_pipelines'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/ListTrainingPipelines', - request_serializer=pipeline_service.ListTrainingPipelinesRequest.serialize, - response_deserializer=pipeline_service.ListTrainingPipelinesResponse.deserialize, - ) - return self._stubs['list_training_pipelines'] - - @property - def delete_training_pipeline(self) -> Callable[ - [pipeline_service.DeleteTrainingPipelineRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete training pipeline method over gRPC. - - Deletes a TrainingPipeline. - - Returns: - Callable[[~.DeleteTrainingPipelineRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_training_pipeline' not in self._stubs: - self._stubs['delete_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/DeleteTrainingPipeline', - request_serializer=pipeline_service.DeleteTrainingPipelineRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_training_pipeline'] - - @property - def cancel_training_pipeline(self) -> Callable[ - [pipeline_service.CancelTrainingPipelineRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the cancel training pipeline method over gRPC. - - Cancels a TrainingPipeline. Starts asynchronous cancellation on - the TrainingPipeline. The server makes a best effort to cancel - the pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. On - successful cancellation, the TrainingPipeline is not deleted; - instead it becomes a pipeline with a - [TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state] - is set to ``CANCELLED``. - - Returns: - Callable[[~.CancelTrainingPipelineRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_training_pipeline' not in self._stubs: - self._stubs['cancel_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/CancelTrainingPipeline', - request_serializer=pipeline_service.CancelTrainingPipelineRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_training_pipeline'] - - @property - def create_pipeline_job(self) -> Callable[ - [pipeline_service.CreatePipelineJobRequest], - Awaitable[gca_pipeline_job.PipelineJob]]: - r"""Return a callable for the create pipeline job method over gRPC. - - Creates a PipelineJob. A PipelineJob will run - immediately when created. - - Returns: - Callable[[~.CreatePipelineJobRequest], - Awaitable[~.PipelineJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_pipeline_job' not in self._stubs: - self._stubs['create_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/CreatePipelineJob', - request_serializer=pipeline_service.CreatePipelineJobRequest.serialize, - response_deserializer=gca_pipeline_job.PipelineJob.deserialize, - ) - return self._stubs['create_pipeline_job'] - - @property - def get_pipeline_job(self) -> Callable[ - [pipeline_service.GetPipelineJobRequest], - Awaitable[pipeline_job.PipelineJob]]: - r"""Return a callable for the get pipeline job method over gRPC. - - Gets a PipelineJob. 
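As the ``cancel_training_pipeline`` docstring above spells out, cancellation is asynchronous and best-effort: the RPC returns ``Empty``, and callers confirm the outcome by re-reading the pipeline. A hedged sketch against the async client (the resource name is a placeholder, and a real caller would poll in a loop)::

    from google.cloud.aiplatform_v1 import PipelineServiceAsyncClient
    from google.cloud.aiplatform_v1.types import pipeline_state

    async def cancel_and_confirm(name: str) -> None:
        client = PipelineServiceAsyncClient()
        await client.cancel_training_pipeline(name=name)
        pipeline = await client.get_training_pipeline(name=name)
        if pipeline.state == pipeline_state.PipelineState.PIPELINE_STATE_CANCELLED:
            # Per the docstring, error.code is 1 (Code.CANCELLED) here.
            print("cancelled:", pipeline.error.code)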
- - Returns: - Callable[[~.GetPipelineJobRequest], - Awaitable[~.PipelineJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_pipeline_job' not in self._stubs: - self._stubs['get_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/GetPipelineJob', - request_serializer=pipeline_service.GetPipelineJobRequest.serialize, - response_deserializer=pipeline_job.PipelineJob.deserialize, - ) - return self._stubs['get_pipeline_job'] - - @property - def list_pipeline_jobs(self) -> Callable[ - [pipeline_service.ListPipelineJobsRequest], - Awaitable[pipeline_service.ListPipelineJobsResponse]]: - r"""Return a callable for the list pipeline jobs method over gRPC. - - Lists PipelineJobs in a Location. - - Returns: - Callable[[~.ListPipelineJobsRequest], - Awaitable[~.ListPipelineJobsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_pipeline_jobs' not in self._stubs: - self._stubs['list_pipeline_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/ListPipelineJobs', - request_serializer=pipeline_service.ListPipelineJobsRequest.serialize, - response_deserializer=pipeline_service.ListPipelineJobsResponse.deserialize, - ) - return self._stubs['list_pipeline_jobs'] - - @property - def delete_pipeline_job(self) -> Callable[ - [pipeline_service.DeletePipelineJobRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete pipeline job method over gRPC. - - Deletes a PipelineJob. - - Returns: - Callable[[~.DeletePipelineJobRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_pipeline_job' not in self._stubs: - self._stubs['delete_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/DeletePipelineJob', - request_serializer=pipeline_service.DeletePipelineJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_pipeline_job'] - - @property - def cancel_pipeline_job(self) -> Callable[ - [pipeline_service.CancelPipelineJobRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the cancel pipeline job method over gRPC. - - Cancels a PipelineJob. Starts asynchronous cancellation on the - PipelineJob. The server makes a best effort to cancel the - pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. 
On - successful cancellation, the PipelineJob is not deleted; instead - it becomes a pipeline with a - [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] - is set to ``CANCELLED``. - - Returns: - Callable[[~.CancelPipelineJobRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_pipeline_job' not in self._stubs: - self._stubs['cancel_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/CancelPipelineJob', - request_serializer=pipeline_service.CancelPipelineJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_pipeline_job'] - - -__all__ = ( - 'PipelineServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/__init__.py deleted file mode 100644 index 13c5d11c66..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import PredictionServiceClient -from .async_client import PredictionServiceAsyncClient - -__all__ = ( - 'PredictionServiceClient', - 'PredictionServiceAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/async_client.py deleted file mode 100644 index 39d2b4fb93..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/async_client.py +++ /dev/null @@ -1,272 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
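The module deleted below implemented the async prediction client. For orientation, a minimal sketch of the flattened calling convention it exposed; the endpoint name and instance payload are placeholders, and supplying ``request`` together with flattened fields raises ``ValueError``::

    from google.cloud.aiplatform_v1 import PredictionServiceAsyncClient
    from google.protobuf import struct_pb2

    async def run_prediction() -> None:
        client = PredictionServiceAsyncClient()
        # A single instance; real payloads must match the deployed
        # model's instance schema.
        instance = struct_pb2.Value(string_value="example input")
        response = await client.predict(
            endpoint="projects/my-proj/locations/us-central1/endpoints/123",
            instances=[instance],
        )
        print(response.predictions)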
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1.types import prediction_service -from google.protobuf import struct_pb2 # type: ignore -from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport -from .client import PredictionServiceClient - - -class PredictionServiceAsyncClient: - """A service for online predictions and explanations.""" - - _client: PredictionServiceClient - - DEFAULT_ENDPOINT = PredictionServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = PredictionServiceClient.DEFAULT_MTLS_ENDPOINT - - endpoint_path = staticmethod(PredictionServiceClient.endpoint_path) - parse_endpoint_path = staticmethod(PredictionServiceClient.parse_endpoint_path) - common_billing_account_path = staticmethod(PredictionServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(PredictionServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(PredictionServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(PredictionServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(PredictionServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(PredictionServiceClient.parse_common_organization_path) - common_project_path = staticmethod(PredictionServiceClient.common_project_path) - parse_common_project_path = staticmethod(PredictionServiceClient.parse_common_project_path) - common_location_path = staticmethod(PredictionServiceClient.common_location_path) - parse_common_location_path = staticmethod(PredictionServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PredictionServiceAsyncClient: The constructed client. - """ - return PredictionServiceClient.from_service_account_info.__func__(PredictionServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PredictionServiceAsyncClient: The constructed client. - """ - return PredictionServiceClient.from_service_account_file.__func__(PredictionServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> PredictionServiceTransport: - """Returns the transport used by the client instance. 
- - Returns: - PredictionServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(PredictionServiceClient).get_transport_class, type(PredictionServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, PredictionServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the prediction service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.PredictionServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = PredictionServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def predict(self, - request: prediction_service.PredictRequest = None, - *, - endpoint: str = None, - parameters: struct_pb2.Value = None, - instances: Sequence[struct_pb2.Value] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> prediction_service.PredictResponse: - r"""Perform an online prediction. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.PredictRequest`): - The request object. Request message for - [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. - endpoint (:class:`str`): - Required. The name of the Endpoint requested to serve - the prediction. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - parameters (:class:`google.protobuf.struct_pb2.Value`): - The parameters that govern the prediction. 
The schema of - the parameters may be specified via Endpoint's - DeployedModels' [Model's - ][google.cloud.aiplatform.v1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. - - This corresponds to the ``parameters`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - instances (:class:`Sequence[google.protobuf.struct_pb2.Value]`): - Required. The instances that are the input to the - prediction call. A DeployedModel may have an upper limit - on the number of instances it supports per request, and - when it is exceeded the prediction call errors in case - of AutoML Models, or, in case of customer created - Models, the behaviour is as documented by that Model. - The schema of any single instance may be specified via - Endpoint's DeployedModels' - [Model's][google.cloud.aiplatform.v1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri]. - - This corresponds to the ``instances`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.PredictResponse: - Response message for - [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, parameters, instances]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = prediction_service.PredictRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if parameters is not None: - request.parameters = parameters - if instances: - request.instances.extend(instances) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.predict, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "PredictionServiceAsyncClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/client.py deleted file mode 100644 index 70658eb86c..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/client.py +++ /dev/null @@ -1,459 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from distutils import util -import os -import re -from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1.types import prediction_service -from google.protobuf import struct_pb2 # type: ignore -from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import PredictionServiceGrpcTransport -from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport - - -class PredictionServiceClientMeta(type): - """Metaclass for the PredictionService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[PredictionServiceTransport]] - _transport_registry["grpc"] = PredictionServiceGrpcTransport - _transport_registry["grpc_asyncio"] = PredictionServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[PredictionServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). 
-        return next(iter(cls._transport_registry.values()))
-
-
-class PredictionServiceClient(metaclass=PredictionServiceClientMeta):
-    """A service for online predictions and explanations."""
-
-    @staticmethod
-    def _get_default_mtls_endpoint(api_endpoint):
-        """Converts api endpoint to mTLS endpoint.
-
-        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
-        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
-        Args:
-            api_endpoint (Optional[str]): the api endpoint to convert.
-        Returns:
-            str: converted mTLS api endpoint.
-        """
-        if not api_endpoint:
-            return api_endpoint
-
-        mtls_endpoint_re = re.compile(
-            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
-        )
-
-        m = mtls_endpoint_re.match(api_endpoint)
-        name, mtls, sandbox, googledomain = m.groups()
-        if mtls or not googledomain:
-            return api_endpoint
-
-        if sandbox:
-            return api_endpoint.replace(
-                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
-            )
-
-        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
-
-    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
-    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
-        DEFAULT_ENDPOINT
-    )
-
-    @classmethod
-    def from_service_account_info(cls, info: dict, *args, **kwargs):
-        """Creates an instance of this client using the provided credentials
-            info.
-
-        Args:
-            info (dict): The service account private key info.
-            args: Additional arguments to pass to the constructor.
-            kwargs: Additional arguments to pass to the constructor.
-
-        Returns:
-            PredictionServiceClient: The constructed client.
-        """
-        credentials = service_account.Credentials.from_service_account_info(info)
-        kwargs["credentials"] = credentials
-        return cls(*args, **kwargs)
-
-    @classmethod
-    def from_service_account_file(cls, filename: str, *args, **kwargs):
-        """Creates an instance of this client using the provided credentials
-            file.
-
-        Args:
-            filename (str): The path to the service account private key json
-                file.
-            args: Additional arguments to pass to the constructor.
-            kwargs: Additional arguments to pass to the constructor.
-
-        Returns:
-            PredictionServiceClient: The constructed client.
-        """
-        credentials = service_account.Credentials.from_service_account_file(
-            filename)
-        kwargs["credentials"] = credentials
-        return cls(*args, **kwargs)
-
-    from_service_account_json = from_service_account_file
-
-    @property
-    def transport(self) -> PredictionServiceTransport:
-        """Returns the transport used by the client instance.
-
-        Returns:
-            PredictionServiceTransport: The transport used by the client
-                instance.
- """ - return self._transport - - @staticmethod - def endpoint_path(project: str,location: str,endpoint: str,) -> str: - """Returns a fully-qualified endpoint string.""" - return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - - @staticmethod - def parse_endpoint_path(path: str) -> Dict[str,str]: - """Parses a endpoint path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, PredictionServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the prediction service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, PredictionServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. 
- client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, PredictionServiceTransport): - # transport is a PredictionServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - ) - - def predict(self, - request: prediction_service.PredictRequest = None, - *, - endpoint: str = None, - parameters: struct_pb2.Value = None, - instances: Sequence[struct_pb2.Value] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> prediction_service.PredictResponse: - r"""Perform an online prediction. - - Args: - request (google.cloud.aiplatform_v1.types.PredictRequest): - The request object. Request message for - [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. - endpoint (str): - Required. The name of the Endpoint requested to serve - the prediction. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - parameters (google.protobuf.struct_pb2.Value): - The parameters that govern the prediction. The schema of - the parameters may be specified via Endpoint's - DeployedModels' [Model's - ][google.cloud.aiplatform.v1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. - - This corresponds to the ``parameters`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - instances (Sequence[google.protobuf.struct_pb2.Value]): - Required. The instances that are the input to the - prediction call. A DeployedModel may have an upper limit - on the number of instances it supports per request, and - when it is exceeded the prediction call errors in case - of AutoML Models, or, in case of customer created - Models, the behaviour is as documented by that Model. - The schema of any single instance may be specified via - Endpoint's DeployedModels' - [Model's][google.cloud.aiplatform.v1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri]. - - This corresponds to the ``instances`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.PredictResponse: - Response message for - [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([endpoint, parameters, instances]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a prediction_service.PredictRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, prediction_service.PredictRequest): - request = prediction_service.PredictRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if parameters is not None: - request.parameters = parameters - if instances is not None: - request.instances = instances - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.predict] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "PredictionServiceClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/__init__.py deleted file mode 100644 index d747de2ce9..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import PredictionServiceTransport -from .grpc import PredictionServiceGrpcTransport -from .grpc_asyncio import PredictionServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. 
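This registry mirrors the one on ``PredictionServiceClientMeta`` shown earlier: a string label selects a transport class, and omitting the label falls back to the first registered entry. A short sketch of how the labels resolve::

    transport_cls = PredictionServiceClient.get_transport_class("grpc_asyncio")
    assert transport_cls is PredictionServiceGrpcAsyncIOTransport
    # With no label, the first entry in the OrderedDict ("grpc") is used.
    assert PredictionServiceClient.get_transport_class() is PredictionServiceGrpcTransport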
-_transport_registry = OrderedDict() # type: Dict[str, Type[PredictionServiceTransport]] -_transport_registry['grpc'] = PredictionServiceGrpcTransport -_transport_registry['grpc_asyncio'] = PredictionServiceGrpcAsyncIOTransport - -__all__ = ( - 'PredictionServiceTransport', - 'PredictionServiceGrpcTransport', - 'PredictionServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py deleted file mode 100644 index c40011876e..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py +++ /dev/null @@ -1,168 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1.types import prediction_service - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class PredictionServiceTransport(abc.ABC): - """Abstract transport class for PredictionService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials is service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # TODO(busunkim): This method is in the base transport - # to avoid duplicating code across the transport classes. These functions - # should be deleted once the minimum required versions of google-auth is increased. - - # TODO: Remove this function once google-auth >= 1.25.0 is required - @classmethod - def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: - """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" - - scopes_kwargs = {} - - if _GOOGLE_AUTH_VERSION and ( - packaging.version.parse(_GOOGLE_AUTH_VERSION) - >= packaging.version.parse("1.25.0") - ): - scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} - else: - scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} - - return scopes_kwargs - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
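``_get_scopes_kwargs`` above exists only to bridge google-auth releases: the ``default_scopes`` keyword it forwards was added in google-auth 1.25.0, so older versions receive plain ``scopes`` instead. The gate reduces to a version comparison::

    import packaging.version

    def supports_default_scopes(auth_version: str) -> bool:
        # default_scopes= is accepted by google.auth.default() and
        # google.auth.load_credentials_from_file() from 1.25.0 onward.
        return (packaging.version.parse(auth_version)
                >= packaging.version.parse("1.25.0"))

    assert supports_default_scopes("1.26.0")
    assert not supports_default_scopes("1.24.1")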
- self._wrapped_methods = { - self.predict: gapic_v1.method.wrap_method( - self.predict, - default_timeout=5.0, - client_info=client_info, - ), - } - - @property - def predict(self) -> Callable[ - [prediction_service.PredictRequest], - Union[ - prediction_service.PredictResponse, - Awaitable[prediction_service.PredictResponse] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'PredictionServiceTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py deleted file mode 100644 index 29f8097bb6..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py +++ /dev/null @@ -1,252 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers # type: ignore -from google.api_core import gapic_v1 # type: ignore -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1.types import prediction_service -from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO - - -class PredictionServiceGrpcTransport(PredictionServiceTransport): - """gRPC backend transport for PredictionService. - - A service for online predictions and explanations. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. 
-            credentials_file (Optional[str]): A file with credentials that can
-                be loaded with :func:`google.auth.load_credentials_from_file`.
-                This argument is ignored if ``channel`` is provided.
-            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
-                ignored if ``channel`` is provided.
-            channel (Optional[grpc.Channel]): A ``Channel`` instance through
-                which to make calls.
-            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
-                If provided, it overrides the ``host`` argument and tries to create
-                a mutual TLS channel with client SSL credentials from
-                ``client_cert_source`` or application default SSL credentials.
-            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
-                Deprecated. A callback to provide client SSL certificate bytes and
-                private key bytes, both in PEM format. It is ignored if
-                ``api_mtls_endpoint`` is None.
-            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
-                for grpc channel. It is ignored if ``channel`` is provided.
-            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
-                A callback to provide client certificate bytes and private key bytes,
-                both in PEM format. It is used to configure mutual TLS channel. It is
-                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
-            quota_project_id (Optional[str]): An optional project to use for billing
-                and quota.
-            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
-                The client info used to send a user-agent string along with
-                API requests. If ``None``, then default info will be used.
-                Generally, you only need to set this if you're developing
-                your own client library.
-            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
-                be used for service account credentials.
-
-        Raises:
-            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
-                creation failed for any reason.
-            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
-                and ``credentials_file`` are passed.
-        """
-        self._grpc_channel = None
-        self._ssl_channel_credentials = ssl_channel_credentials
-        self._stubs: Dict[str, Callable] = {}
-
-        if api_mtls_endpoint:
-            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
-        if client_cert_source:
-            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
-
-        if channel:
-            # Ignore credentials if a channel was passed.
-            credentials = False
-            # If a channel was explicitly provided, set it.
-            self._grpc_channel = channel
-            self._ssl_channel_credentials = None
-
-        else:
-            if api_mtls_endpoint:
-                host = api_mtls_endpoint
-
-            # Create SSL credentials with client_cert_source or application
-            # default SSL credentials.
-                if client_cert_source:
-                    cert, key = client_cert_source()
-                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
-                        certificate_chain=cert, private_key=key
-                    )
-                else:
-                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
-
-            else:
-                if client_cert_source_for_mtls and not ssl_channel_credentials:
-                    cert, key = client_cert_source_for_mtls()
-                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
-                        certificate_chain=cert, private_key=key
-                    )
-
-        # The base transport sets the host, credentials and scopes
-        super().__init__(
-            host=host,
-            credentials=credentials,
-            credentials_file=credentials_file,
-            scopes=scopes,
-            quota_project_id=quota_project_id,
-            client_info=client_info,
-            always_use_jwt_access=always_use_jwt_access,
-        )
-
-        if not self._grpc_channel:
-            self._grpc_channel = type(self).create_channel(
-                self._host,
-                credentials=self._credentials,
-                credentials_file=credentials_file,
-                scopes=self._scopes,
-                ssl_credentials=self._ssl_channel_credentials,
-                quota_project_id=quota_project_id,
-                options=[
-                    ("grpc.max_send_message_length", -1),
-                    ("grpc.max_receive_message_length", -1),
-                ],
-            )
-
-        # Wrap messages. This must be done after self._grpc_channel exists
-        self._prep_wrapped_messages(client_info)
-
-    @classmethod
-    def create_channel(cls,
-                       host: str = 'aiplatform.googleapis.com',
-                       credentials: ga_credentials.Credentials = None,
-                       credentials_file: str = None,
-                       scopes: Optional[Sequence[str]] = None,
-                       quota_project_id: Optional[str] = None,
-                       **kwargs) -> grpc.Channel:
-        """Create and return a gRPC channel object.
-        Args:
-            host (Optional[str]): The host for the channel to use.
-            credentials (Optional[~.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify this application to the service. If
-                none are specified, the client will attempt to ascertain
-                the credentials from the environment.
-            credentials_file (Optional[str]): A file with credentials that can
-                be loaded with :func:`google.auth.load_credentials_from_file`.
-                This argument is mutually exclusive with credentials.
-            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
-                service. These are only used when credentials are not specified and
-                are passed to :func:`google.auth.default`.
-            quota_project_id (Optional[str]): An optional project to use for billing
-                and quota.
-            kwargs (Optional[dict]): Keyword arguments, which are passed to the
-                channel creation.
-        Returns:
-            grpc.Channel: A gRPC channel object.
-
-        Raises:
-            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
-                and ``credentials_file`` are passed.
-        """
-
-        return grpc_helpers.create_channel(
-            host,
-            credentials=credentials,
-            credentials_file=credentials_file,
-            quota_project_id=quota_project_id,
-            default_scopes=cls.AUTH_SCOPES,
-            scopes=scopes,
-            default_host=cls.DEFAULT_HOST,
-            **kwargs
-        )
-
-    @property
-    def grpc_channel(self) -> grpc.Channel:
-        """Return the channel designed to connect to this service.
-        """
-        return self._grpc_channel
-
-    @property
-    def predict(self) -> Callable[
-            [prediction_service.PredictRequest],
-            prediction_service.PredictResponse]:
-        r"""Return a callable for the predict method over gRPC.
-
-        Perform an online prediction.
-
-        Returns:
-            Callable[[~.PredictRequest],
-                    ~.PredictResponse]:
-                A function that, when called, will call the underlying RPC
-                on the server.
-        """
-        # Generate a "stub function" on-the-fly which will actually make
-        # the request.
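
The transport just shown can also be driven by hand. A minimal usage sketch, assuming Application Default Credentials are configured; the project and endpoint IDs are hypothetical placeholders:

    from google.cloud.aiplatform_v1.services.prediction_service.transports.grpc import (
        PredictionServiceGrpcTransport,
    )
    from google.cloud.aiplatform_v1.types import prediction_service

    # Default host and Application Default Credentials; create_channel() is
    # invoked internally with the scopes defined on the base transport.
    transport = PredictionServiceGrpcTransport()
    request = prediction_service.PredictRequest(
        endpoint="projects/my-project/locations/us-central1/endpoints/1234567890",
    )
    response = transport.predict(request)  # blocking unary-unary call
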
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'predict' not in self._stubs: - self._stubs['predict'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PredictionService/Predict', - request_serializer=prediction_service.PredictRequest.serialize, - response_deserializer=prediction_service.PredictResponse.deserialize, - ) - return self._stubs['predict'] - - -__all__ = ( - 'PredictionServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py deleted file mode 100644 index 3b2163fe26..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,256 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1.types import prediction_service -from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import PredictionServiceGrpcTransport - - -class PredictionServiceGrpcAsyncIOTransport(PredictionServiceTransport): - """gRPC AsyncIO backend transport for PredictionService. - - A service for online predictions and explanations. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. 
-                This argument is mutually exclusive with credentials.
-            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
-                service. These are only used when credentials are not specified and
-                are passed to :func:`google.auth.default`.
-            quota_project_id (Optional[str]): An optional project to use for billing
-                and quota.
-            kwargs (Optional[dict]): Keyword arguments, which are passed to the
-                channel creation.
-        Returns:
-            aio.Channel: A gRPC AsyncIO channel object.
-        """
-
-        return grpc_helpers_async.create_channel(
-            host,
-            credentials=credentials,
-            credentials_file=credentials_file,
-            quota_project_id=quota_project_id,
-            default_scopes=cls.AUTH_SCOPES,
-            scopes=scopes,
-            default_host=cls.DEFAULT_HOST,
-            **kwargs
-        )
-
-    def __init__(self, *,
-            host: str = 'aiplatform.googleapis.com',
-            credentials: ga_credentials.Credentials = None,
-            credentials_file: Optional[str] = None,
-            scopes: Optional[Sequence[str]] = None,
-            channel: aio.Channel = None,
-            api_mtls_endpoint: str = None,
-            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
-            ssl_channel_credentials: grpc.ChannelCredentials = None,
-            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
-            quota_project_id: Optional[str] = None,
-            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
-            always_use_jwt_access: Optional[bool] = False,
-            ) -> None:
-        """Instantiate the transport.
-
-        Args:
-            host (Optional[str]):
-                 The hostname to connect to.
-            credentials (Optional[google.auth.credentials.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify the application to the service; if none
-                are specified, the client will attempt to ascertain the
-                credentials from the environment.
-                This argument is ignored if ``channel`` is provided.
-            credentials_file (Optional[str]): A file with credentials that can
-                be loaded with :func:`google.auth.load_credentials_from_file`.
-                This argument is ignored if ``channel`` is provided.
-            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
-                service. These are only used when credentials are not specified and
-                are passed to :func:`google.auth.default`.
-            channel (Optional[aio.Channel]): A ``Channel`` instance through
-                which to make calls.
-            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
-                If provided, it overrides the ``host`` argument and tries to create
-                a mutual TLS channel with client SSL credentials from
-                ``client_cert_source`` or application default SSL credentials.
-            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
-                Deprecated. A callback to provide client SSL certificate bytes and
-                private key bytes, both in PEM format. It is ignored if
-                ``api_mtls_endpoint`` is None.
-            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
-                for the grpc channel. It is ignored if ``channel`` is provided.
-            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
-                A callback to provide client certificate bytes and private key bytes,
-                both in PEM format. It is used to configure a mutual TLS channel. It is
-                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
-            quota_project_id (Optional[str]): An optional project to use for billing
-                and quota.
-            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
-                The client info used to send a user-agent string along with
-                API requests. If ``None``, then default info will be used.
-                Generally, you only need to set this if you're developing
-                your own client library.
-            always_use_jwt_access (Optional[bool]): Whether self-signed JWTs should
-                be used for service account credentials.
-
-        Raises:
-            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
-                creation failed for any reason.
-            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
-                and ``credentials_file`` are passed.
-        """
-        self._grpc_channel = None
-        self._ssl_channel_credentials = ssl_channel_credentials
-        self._stubs: Dict[str, Callable] = {}
-
-        if api_mtls_endpoint:
-            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
-        if client_cert_source:
-            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
-
-        if channel:
-            # Ignore credentials if a channel was passed.
-            credentials = False
-            # If a channel was explicitly provided, set it.
-            self._grpc_channel = channel
-            self._ssl_channel_credentials = None
-        else:
-            if api_mtls_endpoint:
-                host = api_mtls_endpoint
-
-                # Create SSL credentials with client_cert_source or application
-                # default SSL credentials.
-                if client_cert_source:
-                    cert, key = client_cert_source()
-                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
-                        certificate_chain=cert, private_key=key
-                    )
-                else:
-                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
-
-            else:
-                if client_cert_source_for_mtls and not ssl_channel_credentials:
-                    cert, key = client_cert_source_for_mtls()
-                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
-                        certificate_chain=cert, private_key=key
-                    )
-
-        # The base transport sets the host, credentials and scopes
-        super().__init__(
-            host=host,
-            credentials=credentials,
-            credentials_file=credentials_file,
-            scopes=scopes,
-            quota_project_id=quota_project_id,
-            client_info=client_info,
-            always_use_jwt_access=always_use_jwt_access,
-        )
-
-        if not self._grpc_channel:
-            self._grpc_channel = type(self).create_channel(
-                self._host,
-                credentials=self._credentials,
-                credentials_file=credentials_file,
-                scopes=self._scopes,
-                ssl_credentials=self._ssl_channel_credentials,
-                quota_project_id=quota_project_id,
-                options=[
-                    ("grpc.max_send_message_length", -1),
-                    ("grpc.max_receive_message_length", -1),
-                ],
-            )
-
-        # Wrap messages. This must be done after self._grpc_channel exists
-        self._prep_wrapped_messages(client_info)
-
-    @property
-    def grpc_channel(self) -> aio.Channel:
-        """Return the channel designed to connect to this service.
-
-        This property caches on the instance; repeated calls return
-        the same channel.
-        """
-        # Return the channel from cache.
-        return self._grpc_channel
-
-    @property
-    def predict(self) -> Callable[
-            [prediction_service.PredictRequest],
-            Awaitable[prediction_service.PredictResponse]]:
-        r"""Return a callable for the predict method over gRPC.
-
-        Perform an online prediction.
-
-        Returns:
-            Callable[[~.PredictRequest],
-                    Awaitable[~.PredictResponse]]:
-                A function that, when called, will call the underlying RPC
-                on the server.
-        """
-        # Generate a "stub function" on-the-fly which will actually make
-        # the request.
-        # gRPC handles serialization and deserialization, so we just need
-        # to pass in the functions for each.
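
The AsyncIO variant exposes the same surface but returns awaitables. A minimal sketch under the same assumptions (Application Default Credentials; placeholder endpoint name):

    import asyncio

    from google.cloud.aiplatform_v1.services.prediction_service.transports.grpc_asyncio import (
        PredictionServiceGrpcAsyncIOTransport,
    )
    from google.cloud.aiplatform_v1.types import prediction_service

    async def main():
        # Same constructor surface as the sync transport, but predict()
        # returns an awaitable resolving to a PredictResponse.
        transport = PredictionServiceGrpcAsyncIOTransport()
        request = prediction_service.PredictRequest(
            endpoint="projects/my-project/locations/us-central1/endpoints/1234567890",
        )
        response = await transport.predict(request)

    asyncio.run(main())
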
- if 'predict' not in self._stubs: - self._stubs['predict'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PredictionService/Predict', - request_serializer=prediction_service.PredictRequest.serialize, - response_deserializer=prediction_service.PredictResponse.deserialize, - ) - return self._stubs['predict'] - - -__all__ = ( - 'PredictionServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/__init__.py deleted file mode 100644 index 04af59e5fa..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import SpecialistPoolServiceClient -from .async_client import SpecialistPoolServiceAsyncClient - -__all__ = ( - 'SpecialistPoolServiceClient', - 'SpecialistPoolServiceAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py deleted file mode 100644 index b622e7c910..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py +++ /dev/null @@ -1,650 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.specialist_pool_service import pagers -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.cloud.aiplatform_v1.types import specialist_pool -from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool -from google.cloud.aiplatform_v1.types import specialist_pool_service -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from .transports.base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport -from .client import SpecialistPoolServiceClient - - -class SpecialistPoolServiceAsyncClient: - """A service for creating and managing Customer SpecialistPools. - When customers start Data Labeling jobs, they can reuse/create - Specialist Pools to bring their own Specialists to label the - data. Customers can add/remove Managers for the Specialist Pool - on Cloud console, then Managers will get email notifications to - manage Specialists and tasks on CrowdCompute console. - """ - - _client: SpecialistPoolServiceClient - - DEFAULT_ENDPOINT = SpecialistPoolServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = SpecialistPoolServiceClient.DEFAULT_MTLS_ENDPOINT - - specialist_pool_path = staticmethod(SpecialistPoolServiceClient.specialist_pool_path) - parse_specialist_pool_path = staticmethod(SpecialistPoolServiceClient.parse_specialist_pool_path) - common_billing_account_path = staticmethod(SpecialistPoolServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(SpecialistPoolServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(SpecialistPoolServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(SpecialistPoolServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(SpecialistPoolServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(SpecialistPoolServiceClient.parse_common_organization_path) - common_project_path = staticmethod(SpecialistPoolServiceClient.common_project_path) - parse_common_project_path = staticmethod(SpecialistPoolServiceClient.parse_common_project_path) - common_location_path = staticmethod(SpecialistPoolServiceClient.common_location_path) - parse_common_location_path = staticmethod(SpecialistPoolServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. 
- - Returns: - SpecialistPoolServiceAsyncClient: The constructed client. - """ - return SpecialistPoolServiceClient.from_service_account_info.__func__(SpecialistPoolServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - SpecialistPoolServiceAsyncClient: The constructed client. - """ - return SpecialistPoolServiceClient.from_service_account_file.__func__(SpecialistPoolServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> SpecialistPoolServiceTransport: - """Returns the transport used by the client instance. - - Returns: - SpecialistPoolServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(SpecialistPoolServiceClient).get_transport_class, type(SpecialistPoolServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, SpecialistPoolServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the specialist pool service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.SpecialistPoolServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. 
- """ - self._client = SpecialistPoolServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_specialist_pool(self, - request: specialist_pool_service.CreateSpecialistPoolRequest = None, - *, - parent: str = None, - specialist_pool: gca_specialist_pool.SpecialistPool = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a SpecialistPool. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.CreateSpecialistPoolRequest`): - The request object. Request message for - [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool]. - parent (:class:`str`): - Required. The parent Project name for the new - SpecialistPool. The form is - ``projects/{project}/locations/{location}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - specialist_pool (:class:`google.cloud.aiplatform_v1.types.SpecialistPool`): - Required. The SpecialistPool to - create. - - This corresponds to the ``specialist_pool`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data - labeling jobs. It includes a group of specialist - managers who are responsible for managing the - labelers in this pool as well as customers' data - labeling jobs associated with this pool. Customers - create specialist pool as well as start data labeling - jobs on Cloud, managers and labelers work with the - jobs using CrowdCompute console. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, specialist_pool]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = specialist_pool_service.CreateSpecialistPoolRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if specialist_pool is not None: - request.specialist_pool = specialist_pool - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_specialist_pool, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_specialist_pool.SpecialistPool, - metadata_type=specialist_pool_service.CreateSpecialistPoolOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_specialist_pool(self, - request: specialist_pool_service.GetSpecialistPoolRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> specialist_pool.SpecialistPool: - r"""Gets a SpecialistPool. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.GetSpecialistPoolRequest`): - The request object. Request message for - [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.GetSpecialistPool]. - name (:class:`str`): - Required. The name of the SpecialistPool resource. The - form is - ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.SpecialistPool: - SpecialistPool represents customers' - own workforce to work on their data - labeling jobs. It includes a group of - specialist managers who are responsible - for managing the labelers in this pool - as well as customers' data labeling jobs - associated with this pool. - Customers create specialist pool as well - as start data labeling jobs on Cloud, - managers and labelers work with the jobs - using CrowdCompute console. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = specialist_pool_service.GetSpecialistPoolRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_specialist_pool, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
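
Because create_specialist_pool returns an operation future, callers await the RPC and then await the future's result. A sketch, assuming Application Default Credentials; project, location, and display name are placeholders:

    import asyncio

    from google.cloud import aiplatform_v1

    async def main():
        client = aiplatform_v1.SpecialistPoolServiceAsyncClient()
        op = await client.create_specialist_pool(
            parent="projects/my-project/locations/us-central1",
            specialist_pool=aiplatform_v1.SpecialistPool(display_name="my-pool"),
        )
        pool = await op.result()  # resolves to the created SpecialistPool

    asyncio.run(main())
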
- return response - - async def list_specialist_pools(self, - request: specialist_pool_service.ListSpecialistPoolsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListSpecialistPoolsAsyncPager: - r"""Lists SpecialistPools in a Location. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest`): - The request object. Request message for - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools]. - parent (:class:`str`): - Required. The name of the SpecialistPool's parent - resource. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.specialist_pool_service.pagers.ListSpecialistPoolsAsyncPager: - Response message for - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = specialist_pool_service.ListSpecialistPoolsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_specialist_pools, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListSpecialistPoolsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_specialist_pool(self, - request: specialist_pool_service.DeleteSpecialistPoolRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a SpecialistPool as well as all Specialists - in the pool. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.DeleteSpecialistPoolRequest`): - The request object. 
Request message for - [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.DeleteSpecialistPool]. - name (:class:`str`): - Required. The resource name of the SpecialistPool to - delete. Format: - ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = specialist_pool_service.DeleteSpecialistPoolRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_specialist_pool, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def update_specialist_pool(self, - request: specialist_pool_service.UpdateSpecialistPoolRequest = None, - *, - specialist_pool: gca_specialist_pool.SpecialistPool = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Updates a SpecialistPool. - - Args: - request (:class:`google.cloud.aiplatform_v1.types.UpdateSpecialistPoolRequest`): - The request object. Request message for - [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool]. - specialist_pool (:class:`google.cloud.aiplatform_v1.types.SpecialistPool`): - Required. The SpecialistPool which - replaces the resource on the server. 
- - This corresponds to the ``specialist_pool`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The update mask applies to - the resource. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data - labeling jobs. It includes a group of specialist - managers who are responsible for managing the - labelers in this pool as well as customers' data - labeling jobs associated with this pool. Customers - create specialist pool as well as start data labeling - jobs on Cloud, managers and labelers work with the - jobs using CrowdCompute console. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([specialist_pool, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = specialist_pool_service.UpdateSpecialistPoolRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if specialist_pool is not None: - request.specialist_pool = specialist_pool - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_specialist_pool, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("specialist_pool.name", request.specialist_pool.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_specialist_pool.SpecialistPool, - metadata_type=specialist_pool_service.UpdateSpecialistPoolOperationMetadata, - ) - - # Done; return the response. 
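
update_specialist_pool follows the same long-running pattern; the field mask restricts which fields the server overwrites. A sketch with placeholder resource names:

    from google.protobuf import field_mask_pb2

    from google.cloud import aiplatform_v1

    async def rename_pool(client: aiplatform_v1.SpecialistPoolServiceAsyncClient):
        op = await client.update_specialist_pool(
            specialist_pool=aiplatform_v1.SpecialistPool(
                name="projects/my-project/locations/us-central1/specialistPools/123",
                display_name="renamed-pool",
            ),
            # Only display_name is touched; other fields keep server values.
            update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
        )
        return await op.result()
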
- return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "SpecialistPoolServiceAsyncClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py deleted file mode 100644 index 609815e067..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py +++ /dev/null @@ -1,837 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from distutils import util -import os -import re -from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.specialist_pool_service import pagers -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.cloud.aiplatform_v1.types import specialist_pool -from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool -from google.cloud.aiplatform_v1.types import specialist_pool_service -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from .transports.base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import SpecialistPoolServiceGrpcTransport -from .transports.grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport - - -class SpecialistPoolServiceClientMeta(type): - """Metaclass for the SpecialistPoolService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. 
- """ - _transport_registry = OrderedDict() # type: Dict[str, Type[SpecialistPoolServiceTransport]] - _transport_registry["grpc"] = SpecialistPoolServiceGrpcTransport - _transport_registry["grpc_asyncio"] = SpecialistPoolServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[SpecialistPoolServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class SpecialistPoolServiceClient(metaclass=SpecialistPoolServiceClientMeta): - """A service for creating and managing Customer SpecialistPools. - When customers start Data Labeling jobs, they can reuse/create - Specialist Pools to bring their own Specialists to label the - data. Customers can add/remove Managers for the Specialist Pool - on Cloud console, then Managers will get email notifications to - manage Specialists and tasks on CrowdCompute console. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - SpecialistPoolServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - SpecialistPoolServiceClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> SpecialistPoolServiceTransport: - """Returns the transport used by the client instance. - - Returns: - SpecialistPoolServiceTransport: The transport used by the client - instance. - """ - return self._transport - - @staticmethod - def specialist_pool_path(project: str,location: str,specialist_pool: str,) -> str: - """Returns a fully-qualified specialist_pool string.""" - return "projects/{project}/locations/{location}/specialistPools/{specialist_pool}".format(project=project, location=location, specialist_pool=specialist_pool, ) - - @staticmethod - def parse_specialist_pool_path(path: str) -> Dict[str,str]: - """Parses a specialist_pool path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/specialistPools/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, SpecialistPoolServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the specialist pool 
service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, SpecialistPoolServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, SpecialistPoolServiceTransport): - # transport is a SpecialistPoolServiceTransport instance. 
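
The endpoint resolution above can be bypassed entirely by pinning an explicit api_endpoint; for Vertex AI the regional form is typical. A sketch, where the region is a placeholder:

    from google.api_core.client_options import ClientOptions

    from google.cloud import aiplatform_v1

    # An explicit api_endpoint takes precedence over the
    # GOOGLE_API_USE_MTLS_ENDPOINT-driven selection shown above.
    client = aiplatform_v1.SpecialistPoolServiceClient(
        client_options=ClientOptions(api_endpoint="us-central1-aiplatform.googleapis.com"),
    )
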
- if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - ) - - def create_specialist_pool(self, - request: specialist_pool_service.CreateSpecialistPoolRequest = None, - *, - parent: str = None, - specialist_pool: gca_specialist_pool.SpecialistPool = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates a SpecialistPool. - - Args: - request (google.cloud.aiplatform_v1.types.CreateSpecialistPoolRequest): - The request object. Request message for - [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool]. - parent (str): - Required. The parent Project name for the new - SpecialistPool. The form is - ``projects/{project}/locations/{location}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - specialist_pool (google.cloud.aiplatform_v1.types.SpecialistPool): - Required. The SpecialistPool to - create. - - This corresponds to the ``specialist_pool`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data - labeling jobs. It includes a group of specialist - managers who are responsible for managing the - labelers in this pool as well as customers' data - labeling jobs associated with this pool. Customers - create specialist pool as well as start data labeling - jobs on Cloud, managers and labelers work with the - jobs using CrowdCompute console. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, specialist_pool]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a specialist_pool_service.CreateSpecialistPoolRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
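
The request-object and flattened-argument calling styles are mutually exclusive, as the sanity check above enforces. A sketch of both conventions (resource names are placeholders):

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.SpecialistPoolServiceClient()
    parent = "projects/my-project/locations/us-central1"
    pool = aiplatform_v1.SpecialistPool(display_name="my-pool")

    # Either pass a request object...
    op = client.create_specialist_pool(
        request=aiplatform_v1.CreateSpecialistPoolRequest(
            parent=parent, specialist_pool=pool,
        )
    )
    # ...or flattened fields; combining both raises ValueError.
    # op = client.create_specialist_pool(parent=parent, specialist_pool=pool)
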
- if not isinstance(request, specialist_pool_service.CreateSpecialistPoolRequest): - request = specialist_pool_service.CreateSpecialistPoolRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if specialist_pool is not None: - request.specialist_pool = specialist_pool - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_specialist_pool] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_specialist_pool.SpecialistPool, - metadata_type=specialist_pool_service.CreateSpecialistPoolOperationMetadata, - ) - - # Done; return the response. - return response - - def get_specialist_pool(self, - request: specialist_pool_service.GetSpecialistPoolRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> specialist_pool.SpecialistPool: - r"""Gets a SpecialistPool. - - Args: - request (google.cloud.aiplatform_v1.types.GetSpecialistPoolRequest): - The request object. Request message for - [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.GetSpecialistPool]. - name (str): - Required. The name of the SpecialistPool resource. The - form is - ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.SpecialistPool: - SpecialistPool represents customers' - own workforce to work on their data - labeling jobs. It includes a group of - specialist managers who are responsible - for managing the labelers in this pool - as well as customers' data labeling jobs - associated with this pool. - Customers create specialist pool as well - as start data labeling jobs on Cloud, - managers and labelers work with the jobs - using CrowdCompute console. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a specialist_pool_service.GetSpecialistPoolRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
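
The path helpers shown earlier pair naturally with get_specialist_pool. A sketch where the project, location, and pool IDs are placeholders:

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.SpecialistPoolServiceClient()
    name = client.specialist_pool_path("my-project", "us-central1", "1234567890")
    pool = client.get_specialist_pool(name=name)
    # parse_specialist_pool_path inverts the helper, e.g.
    # {'project': 'my-project', 'location': 'us-central1', 'specialist_pool': '1234567890'}
    segments = client.parse_specialist_pool_path(pool.name)
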
- if not isinstance(request, specialist_pool_service.GetSpecialistPoolRequest): - request = specialist_pool_service.GetSpecialistPoolRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_specialist_pool] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_specialist_pools(self, - request: specialist_pool_service.ListSpecialistPoolsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListSpecialistPoolsPager: - r"""Lists SpecialistPools in a Location. - - Args: - request (google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest): - The request object. Request message for - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools]. - parent (str): - Required. The name of the SpecialistPool's parent - resource. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.specialist_pool_service.pagers.ListSpecialistPoolsPager: - Response message for - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a specialist_pool_service.ListSpecialistPoolsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, specialist_pool_service.ListSpecialistPoolsRequest): - request = specialist_pool_service.ListSpecialistPoolsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_specialist_pools] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListSpecialistPoolsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_specialist_pool(self, - request: specialist_pool_service.DeleteSpecialistPoolRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a SpecialistPool as well as all Specialists - in the pool. - - Args: - request (google.cloud.aiplatform_v1.types.DeleteSpecialistPoolRequest): - The request object. Request message for - [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.DeleteSpecialistPool]. - name (str): - Required. The resource name of the SpecialistPool to - delete. Format: - ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a specialist_pool_service.DeleteSpecialistPoolRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, specialist_pool_service.DeleteSpecialistPoolRequest): - request = specialist_pool_service.DeleteSpecialistPoolRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_specialist_pool] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
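The ListSpecialistPoolsPager wrapped around the response above hides page_token bookkeeping from callers. A sketch of both iteration styles, assuming a constructed client and a hypothetical parent (use a fresh pager per style, since a pager is consumed as it iterates):

    parent = "projects/my-project/locations/us-central1"  # hypothetical

    # Item-wise: additional ListSpecialistPools requests are issued on demand.
    for pool in client.list_specialist_pools(parent=parent):
        print(pool.name)

    # Page-wise: inspect the raw ListSpecialistPoolsResponse messages instead.
    for page in client.list_specialist_pools(parent=parent).pages:
        print(len(page.specialist_pools))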
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def update_specialist_pool(self, - request: specialist_pool_service.UpdateSpecialistPoolRequest = None, - *, - specialist_pool: gca_specialist_pool.SpecialistPool = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Updates a SpecialistPool. - - Args: - request (google.cloud.aiplatform_v1.types.UpdateSpecialistPoolRequest): - The request object. Request message for - [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool]. - specialist_pool (google.cloud.aiplatform_v1.types.SpecialistPool): - Required. The SpecialistPool which - replaces the resource on the server. - - This corresponds to the ``specialist_pool`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to - the resource. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data - labeling jobs. It includes a group of specialist - managers who are responsible for managing the - labelers in this pool as well as customers' data - labeling jobs associated with this pool. Customers - create specialist pool as well as start data labeling - jobs on Cloud, managers and labelers work with the - jobs using CrowdCompute console. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([specialist_pool, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a specialist_pool_service.UpdateSpecialistPoolRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, specialist_pool_service.UpdateSpecialistPoolRequest): - request = specialist_pool_service.UpdateSpecialistPoolRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if specialist_pool is not None: - request.specialist_pool = specialist_pool - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.update_specialist_pool] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("specialist_pool.name", request.specialist_pool.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_specialist_pool.SpecialistPool, - metadata_type=specialist_pool_service.UpdateSpecialistPoolOperationMetadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "SpecialistPoolServiceClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/pagers.py deleted file mode 100644 index 8123ab4e42..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/pagers.py +++ /dev/null @@ -1,141 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.cloud.aiplatform_v1.types import specialist_pool -from google.cloud.aiplatform_v1.types import specialist_pool_service - - -class ListSpecialistPoolsPager: - """A pager for iterating through ``list_specialist_pools`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``specialist_pools`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListSpecialistPools`` requests and continue to iterate - through the ``specialist_pools`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., specialist_pool_service.ListSpecialistPoolsResponse], - request: specialist_pool_service.ListSpecialistPoolsRequest, - response: specialist_pool_service.ListSpecialistPoolsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. 
- request (google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = specialist_pool_service.ListSpecialistPoolsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[specialist_pool_service.ListSpecialistPoolsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[specialist_pool.SpecialistPool]: - for page in self.pages: - yield from page.specialist_pools - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListSpecialistPoolsAsyncPager: - """A pager for iterating through ``list_specialist_pools`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``specialist_pools`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListSpecialistPools`` requests and continue to iterate - through the ``specialist_pools`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[specialist_pool_service.ListSpecialistPoolsResponse]], - request: specialist_pool_service.ListSpecialistPoolsRequest, - response: specialist_pool_service.ListSpecialistPoolsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = specialist_pool_service.ListSpecialistPoolsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[specialist_pool_service.ListSpecialistPoolsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[specialist_pool.SpecialistPool]: - async def async_generator(): - async for page in self.pages: - for response in page.specialist_pools: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/__init__.py deleted file mode 100644 index ba8c9d7eb5..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import SpecialistPoolServiceTransport -from .grpc import SpecialistPoolServiceGrpcTransport -from .grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[SpecialistPoolServiceTransport]] -_transport_registry['grpc'] = SpecialistPoolServiceGrpcTransport -_transport_registry['grpc_asyncio'] = SpecialistPoolServiceGrpcAsyncIOTransport - -__all__ = ( - 'SpecialistPoolServiceTransport', - 'SpecialistPoolServiceGrpcTransport', - 'SpecialistPoolServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py deleted file mode 100644 index 25948e8039..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py +++ /dev/null @@ -1,232 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1.types import specialist_pool -from google.cloud.aiplatform_v1.types import specialist_pool_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class SpecialistPoolServiceTransport(abc.ABC): - """Abstract transport class for SpecialistPoolService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. 
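In user code, the defaults resolved below mean a service-account key file can be supplied through client_options instead of constructing credentials by hand; a minimal sketch, assuming a hypothetical key path:

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.SpecialistPoolServiceClient(
        client_options={"credentials_file": "/path/to/service-account.json"},  # hypothetical path
    )

With no credential arguments at all, the chain below falls through to Application Default Credentials via google.auth.default().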
-        if credentials and credentials_file:
-            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
-
-        if credentials_file is not None:
-            credentials, _ = google.auth.load_credentials_from_file(
-                                credentials_file,
-                                **scopes_kwargs,
-                                quota_project_id=quota_project_id
-                            )
-
-        elif credentials is None:
-            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
-
-        # If the credentials are service account credentials, then always try to use self signed JWT.
-        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
-            credentials = credentials.with_always_use_jwt_access(True)
-
-        # Save the credentials.
-        self._credentials = credentials
-
-    # TODO(busunkim): This method is in the base transport
-    # to avoid duplicating code across the transport classes. These functions
-    # should be deleted once the minimum required version of google-auth is increased.
-
-    # TODO: Remove this function once google-auth >= 1.25.0 is required
-    @classmethod
-    def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
-        """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
-
-        scopes_kwargs = {}
-
-        if _GOOGLE_AUTH_VERSION and (
-            packaging.version.parse(_GOOGLE_AUTH_VERSION)
-            >= packaging.version.parse("1.25.0")
-        ):
-            scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
-        else:
-            scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
-
-        return scopes_kwargs
-
-    def _prep_wrapped_messages(self, client_info):
-        # Precompute the wrapped methods.
-        self._wrapped_methods = {
-            self.create_specialist_pool: gapic_v1.method.wrap_method(
-                self.create_specialist_pool,
-                default_timeout=5.0,
-                client_info=client_info,
-            ),
-            self.get_specialist_pool: gapic_v1.method.wrap_method(
-                self.get_specialist_pool,
-                default_timeout=5.0,
-                client_info=client_info,
-            ),
-            self.list_specialist_pools: gapic_v1.method.wrap_method(
-                self.list_specialist_pools,
-                default_timeout=5.0,
-                client_info=client_info,
-            ),
-            self.delete_specialist_pool: gapic_v1.method.wrap_method(
-                self.delete_specialist_pool,
-                default_timeout=5.0,
-                client_info=client_info,
-            ),
-            self.update_specialist_pool: gapic_v1.method.wrap_method(
-                self.update_specialist_pool,
-                default_timeout=5.0,
-                client_info=client_info,
-            ),
-        }
-
-    @property
-    def operations_client(self) -> operations_v1.OperationsClient:
-        """Return the client designed to process long-running operations."""
-        raise NotImplementedError()
-
-    @property
-    def create_specialist_pool(self) -> Callable[
-            [specialist_pool_service.CreateSpecialistPoolRequest],
-            Union[
-                operations_pb2.Operation,
-                Awaitable[operations_pb2.Operation]
-            ]]:
-        raise NotImplementedError()
-
-    @property
-    def get_specialist_pool(self) -> Callable[
-            [specialist_pool_service.GetSpecialistPoolRequest],
-            Union[
-                specialist_pool.SpecialistPool,
-                Awaitable[specialist_pool.SpecialistPool]
-            ]]:
-        raise NotImplementedError()
-
-    @property
-    def list_specialist_pools(self) -> Callable[
-            [specialist_pool_service.ListSpecialistPoolsRequest],
-            Union[
-                specialist_pool_service.ListSpecialistPoolsResponse,
-                Awaitable[specialist_pool_service.ListSpecialistPoolsResponse]
-            ]]:
-        raise NotImplementedError()
-
-    @property
-    def delete_specialist_pool(self) -> Callable[
-            [specialist_pool_service.DeleteSpecialistPoolRequest],
-            Union[
-
operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def update_specialist_pool(self) -> Callable[ - [specialist_pool_service.UpdateSpecialistPoolRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'SpecialistPoolServiceTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py deleted file mode 100644 index 799a4a3e8d..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py +++ /dev/null @@ -1,382 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.api_core import gapic_v1 # type: ignore -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1.types import specialist_pool -from google.cloud.aiplatform_v1.types import specialist_pool_service -from google.longrunning import operations_pb2 # type: ignore -from .base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO - - -class SpecialistPoolServiceGrpcTransport(SpecialistPoolServiceTransport): - """gRPC backend transport for SpecialistPoolService. - - A service for creating and managing Customer SpecialistPools. - When customers start Data Labeling jobs, they can reuse/create - Specialist Pools to bring their own Specialists to label the - data. Customers can add/remove Managers for the Specialist Pool - on Cloud console, then Managers will get email notifications to - manage Specialists and tasks on CrowdCompute console. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. 
-            credentials = False
-            # If a channel was explicitly provided, set it.
-            self._grpc_channel = channel
-            self._ssl_channel_credentials = None
-
-        else:
-            if api_mtls_endpoint:
-                host = api_mtls_endpoint
-
-                # Create SSL credentials with client_cert_source or application
-                # default SSL credentials.
-                if client_cert_source:
-                    cert, key = client_cert_source()
-                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
-                        certificate_chain=cert, private_key=key
-                    )
-                else:
-                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
-
-            else:
-                if client_cert_source_for_mtls and not ssl_channel_credentials:
-                    cert, key = client_cert_source_for_mtls()
-                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
-                        certificate_chain=cert, private_key=key
-                    )
-
-        # The base transport sets the host, credentials and scopes
-        super().__init__(
-            host=host,
-            credentials=credentials,
-            credentials_file=credentials_file,
-            scopes=scopes,
-            quota_project_id=quota_project_id,
-            client_info=client_info,
-            always_use_jwt_access=always_use_jwt_access,
-        )
-
-        if not self._grpc_channel:
-            self._grpc_channel = type(self).create_channel(
-                self._host,
-                credentials=self._credentials,
-                credentials_file=credentials_file,
-                scopes=self._scopes,
-                ssl_credentials=self._ssl_channel_credentials,
-                quota_project_id=quota_project_id,
-                options=[
-                    ("grpc.max_send_message_length", -1),
-                    ("grpc.max_receive_message_length", -1),
-                ],
-            )
-
-        # Wrap messages. This must be done after self._grpc_channel exists
-        self._prep_wrapped_messages(client_info)
-
-    @classmethod
-    def create_channel(cls,
-                       host: str = 'aiplatform.googleapis.com',
-                       credentials: ga_credentials.Credentials = None,
-                       credentials_file: str = None,
-                       scopes: Optional[Sequence[str]] = None,
-                       quota_project_id: Optional[str] = None,
-                       **kwargs) -> grpc.Channel:
-        """Create and return a gRPC channel object.
-        Args:
-            host (Optional[str]): The host for the channel to use.
-            credentials (Optional[~.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify this application to the service. If
-                none are specified, the client will attempt to ascertain
-                the credentials from the environment.
-            credentials_file (Optional[str]): A file with credentials that can
-                be loaded with :func:`google.auth.load_credentials_from_file`.
-                This argument is mutually exclusive with credentials.
-            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
-                service. These are only used when credentials are not specified and
-                are passed to :func:`google.auth.default`.
-            quota_project_id (Optional[str]): An optional project to use for billing
-                and quota.
-            kwargs (Optional[dict]): Keyword arguments, which are passed to the
-                channel creation.
-        Returns:
-            grpc.Channel: A gRPC channel object.
-
-        Raises:
-            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
-                and ``credentials_file`` are passed.
-        """
-
-        return grpc_helpers.create_channel(
-            host,
-            credentials=credentials,
-            credentials_file=credentials_file,
-            quota_project_id=quota_project_id,
-            default_scopes=cls.AUTH_SCOPES,
-            scopes=scopes,
-            default_host=cls.DEFAULT_HOST,
-            **kwargs
-        )
-
-    @property
-    def grpc_channel(self) -> grpc.Channel:
-        """Return the channel designed to connect to this service.
-        """
-        return self._grpc_channel
-
-    @property
-    def operations_client(self) -> operations_v1.OperationsClient:
-        """Create the client designed to process long-running operations.
- - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_specialist_pool(self) -> Callable[ - [specialist_pool_service.CreateSpecialistPoolRequest], - operations_pb2.Operation]: - r"""Return a callable for the create specialist pool method over gRPC. - - Creates a SpecialistPool. - - Returns: - Callable[[~.CreateSpecialistPoolRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_specialist_pool' not in self._stubs: - self._stubs['create_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.SpecialistPoolService/CreateSpecialistPool', - request_serializer=specialist_pool_service.CreateSpecialistPoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_specialist_pool'] - - @property - def get_specialist_pool(self) -> Callable[ - [specialist_pool_service.GetSpecialistPoolRequest], - specialist_pool.SpecialistPool]: - r"""Return a callable for the get specialist pool method over gRPC. - - Gets a SpecialistPool. - - Returns: - Callable[[~.GetSpecialistPoolRequest], - ~.SpecialistPool]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_specialist_pool' not in self._stubs: - self._stubs['get_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.SpecialistPoolService/GetSpecialistPool', - request_serializer=specialist_pool_service.GetSpecialistPoolRequest.serialize, - response_deserializer=specialist_pool.SpecialistPool.deserialize, - ) - return self._stubs['get_specialist_pool'] - - @property - def list_specialist_pools(self) -> Callable[ - [specialist_pool_service.ListSpecialistPoolsRequest], - specialist_pool_service.ListSpecialistPoolsResponse]: - r"""Return a callable for the list specialist pools method over gRPC. - - Lists SpecialistPools in a Location. - - Returns: - Callable[[~.ListSpecialistPoolsRequest], - ~.ListSpecialistPoolsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
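Each RPC property in this transport creates its gRPC stub on first access and caches it in self._stubs, so every later call reuses one serialized-method handle per channel. The shape of that pattern, reduced to a generic sketch (names here are illustrative, not from this module):

    class LazyStubTransport:
        def __init__(self, channel):
            self._channel = channel
            self._stubs = {}

        @property
        def echo(self):
            # Created once, on first property access; reused afterwards.
            if "echo" not in self._stubs:
                self._stubs["echo"] = self._channel.unary_unary("/example.Service/Echo")
            return self._stubs["echo"]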
- if 'list_specialist_pools' not in self._stubs: - self._stubs['list_specialist_pools'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.SpecialistPoolService/ListSpecialistPools', - request_serializer=specialist_pool_service.ListSpecialistPoolsRequest.serialize, - response_deserializer=specialist_pool_service.ListSpecialistPoolsResponse.deserialize, - ) - return self._stubs['list_specialist_pools'] - - @property - def delete_specialist_pool(self) -> Callable[ - [specialist_pool_service.DeleteSpecialistPoolRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete specialist pool method over gRPC. - - Deletes a SpecialistPool as well as all Specialists - in the pool. - - Returns: - Callable[[~.DeleteSpecialistPoolRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_specialist_pool' not in self._stubs: - self._stubs['delete_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.SpecialistPoolService/DeleteSpecialistPool', - request_serializer=specialist_pool_service.DeleteSpecialistPoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_specialist_pool'] - - @property - def update_specialist_pool(self) -> Callable[ - [specialist_pool_service.UpdateSpecialistPoolRequest], - operations_pb2.Operation]: - r"""Return a callable for the update specialist pool method over gRPC. - - Updates a SpecialistPool. - - Returns: - Callable[[~.UpdateSpecialistPoolRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_specialist_pool' not in self._stubs: - self._stubs['update_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.SpecialistPoolService/UpdateSpecialistPool', - request_serializer=specialist_pool_service.UpdateSpecialistPoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_specialist_pool'] - - -__all__ = ( - 'SpecialistPoolServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py deleted file mode 100644 index 8073b23e02..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,386 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-#
-import warnings
-from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
-
-from google.api_core import gapic_v1                   # type: ignore
-from google.api_core import grpc_helpers_async         # type: ignore
-from google.api_core import operations_v1              # type: ignore
-from google.auth import credentials as ga_credentials   # type: ignore
-from google.auth.transport.grpc import SslCredentials  # type: ignore
-import packaging.version
-
-import grpc                        # type: ignore
-from grpc.experimental import aio  # type: ignore
-
-from google.cloud.aiplatform_v1.types import specialist_pool
-from google.cloud.aiplatform_v1.types import specialist_pool_service
-from google.longrunning import operations_pb2 # type: ignore
-from .base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO
-from .grpc import SpecialistPoolServiceGrpcTransport
-
-
-class SpecialistPoolServiceGrpcAsyncIOTransport(SpecialistPoolServiceTransport):
-    """gRPC AsyncIO backend transport for SpecialistPoolService.
-
-    A service for creating and managing Customer SpecialistPools.
-    When customers start Data Labeling jobs, they can reuse/create
-    Specialist Pools to bring their own Specialists to label the
-    data. Customers can add/remove Managers for the Specialist Pool
-    on Cloud console, then Managers will get email notifications to
-    manage Specialists and tasks on CrowdCompute console.
-
-    This class defines the same methods as the primary client, so the
-    primary client can load the underlying transport implementation
-    and call it.
-
-    It sends protocol buffers over the wire using gRPC (which is built on
-    top of HTTP/2); the ``grpcio`` package must be installed.
-    """
-
-    _grpc_channel: aio.Channel
-    _stubs: Dict[str, Callable] = {}
-
-    @classmethod
-    def create_channel(cls,
-                       host: str = 'aiplatform.googleapis.com',
-                       credentials: ga_credentials.Credentials = None,
-                       credentials_file: Optional[str] = None,
-                       scopes: Optional[Sequence[str]] = None,
-                       quota_project_id: Optional[str] = None,
-                       **kwargs) -> aio.Channel:
-        """Create and return a gRPC AsyncIO channel object.
-        Args:
-            host (Optional[str]): The host for the channel to use.
-            credentials (Optional[~.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify this application to the service. If
-                none are specified, the client will attempt to ascertain
-                the credentials from the environment.
-            credentials_file (Optional[str]): A file with credentials that can
-                be loaded with :func:`google.auth.load_credentials_from_file`.
-                This argument is mutually exclusive with credentials.
-            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
-                service. These are only used when credentials are not specified and
-                are passed to :func:`google.auth.default`.
-            quota_project_id (Optional[str]): An optional project to use for billing
-                and quota.
-            kwargs (Optional[dict]): Keyword arguments, which are passed to the
-                channel creation.
-        Returns:
-            aio.Channel: A gRPC AsyncIO channel object.
- """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_specialist_pool(self) -> Callable[ - [specialist_pool_service.CreateSpecialistPoolRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create specialist pool method over gRPC. - - Creates a SpecialistPool. - - Returns: - Callable[[~.CreateSpecialistPoolRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_specialist_pool' not in self._stubs: - self._stubs['create_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.SpecialistPoolService/CreateSpecialistPool', - request_serializer=specialist_pool_service.CreateSpecialistPoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_specialist_pool'] - - @property - def get_specialist_pool(self) -> Callable[ - [specialist_pool_service.GetSpecialistPoolRequest], - Awaitable[specialist_pool.SpecialistPool]]: - r"""Return a callable for the get specialist pool method over gRPC. - - Gets a SpecialistPool. - - Returns: - Callable[[~.GetSpecialistPoolRequest], - Awaitable[~.SpecialistPool]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_specialist_pool' not in self._stubs: - self._stubs['get_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.SpecialistPoolService/GetSpecialistPool', - request_serializer=specialist_pool_service.GetSpecialistPoolRequest.serialize, - response_deserializer=specialist_pool.SpecialistPool.deserialize, - ) - return self._stubs['get_specialist_pool'] - - @property - def list_specialist_pools(self) -> Callable[ - [specialist_pool_service.ListSpecialistPoolsRequest], - Awaitable[specialist_pool_service.ListSpecialistPoolsResponse]]: - r"""Return a callable for the list specialist pools method over gRPC. - - Lists SpecialistPools in a Location. - - Returns: - Callable[[~.ListSpecialistPoolsRequest], - Awaitable[~.ListSpecialistPoolsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_specialist_pools' not in self._stubs: - self._stubs['list_specialist_pools'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.SpecialistPoolService/ListSpecialistPools', - request_serializer=specialist_pool_service.ListSpecialistPoolsRequest.serialize, - response_deserializer=specialist_pool_service.ListSpecialistPoolsResponse.deserialize, - ) - return self._stubs['list_specialist_pools'] - - @property - def delete_specialist_pool(self) -> Callable[ - [specialist_pool_service.DeleteSpecialistPoolRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete specialist pool method over gRPC. - - Deletes a SpecialistPool as well as all Specialists - in the pool. - - Returns: - Callable[[~.DeleteSpecialistPoolRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_specialist_pool' not in self._stubs: - self._stubs['delete_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.SpecialistPoolService/DeleteSpecialistPool', - request_serializer=specialist_pool_service.DeleteSpecialistPoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_specialist_pool'] - - @property - def update_specialist_pool(self) -> Callable[ - [specialist_pool_service.UpdateSpecialistPoolRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the update specialist pool method over gRPC. - - Updates a SpecialistPool. - - Returns: - Callable[[~.UpdateSpecialistPoolRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_specialist_pool' not in self._stubs: - self._stubs['update_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.SpecialistPoolService/UpdateSpecialistPool', - request_serializer=specialist_pool_service.UpdateSpecialistPoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_specialist_pool'] - - -__all__ = ( - 'SpecialistPoolServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/__init__.py deleted file mode 100644 index 22169ac9a9..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/__init__.py +++ /dev/null @@ -1,429 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .annotation import ( - Annotation, -) -from .annotation_spec import ( - AnnotationSpec, -) -from .artifact import ( - Artifact, -) -from .batch_prediction_job import ( - BatchPredictionJob, -) -from .completion_stats import ( - CompletionStats, -) -from .context import ( - Context, -) -from .custom_job import ( - ContainerSpec, - CustomJob, - CustomJobSpec, - PythonPackageSpec, - Scheduling, - WorkerPoolSpec, -) -from .data_item import ( - DataItem, -) -from .data_labeling_job import ( - ActiveLearningConfig, - DataLabelingJob, - SampleConfig, - TrainingConfig, -) -from .dataset import ( - Dataset, - ExportDataConfig, - ImportDataConfig, -) -from .dataset_service import ( - CreateDatasetOperationMetadata, - CreateDatasetRequest, - DeleteDatasetRequest, - ExportDataOperationMetadata, - ExportDataRequest, - ExportDataResponse, - GetAnnotationSpecRequest, - GetDatasetRequest, - ImportDataOperationMetadata, - ImportDataRequest, - ImportDataResponse, - ListAnnotationsRequest, - ListAnnotationsResponse, - ListDataItemsRequest, - ListDataItemsResponse, - ListDatasetsRequest, - ListDatasetsResponse, - UpdateDatasetRequest, -) -from .deployed_model_ref import ( - DeployedModelRef, -) -from .encryption_spec import ( - EncryptionSpec, -) -from .endpoint import ( - DeployedModel, - Endpoint, -) -from .endpoint_service import ( - CreateEndpointOperationMetadata, - CreateEndpointRequest, - DeleteEndpointRequest, - DeployModelOperationMetadata, - DeployModelRequest, - DeployModelResponse, - GetEndpointRequest, - ListEndpointsRequest, - ListEndpointsResponse, - UndeployModelOperationMetadata, - UndeployModelRequest, - UndeployModelResponse, - UpdateEndpointRequest, -) -from .env_var import ( - EnvVar, -) -from .execution import ( - Execution, -) -from .hyperparameter_tuning_job import ( - HyperparameterTuningJob, -) -from .io import ( - BigQueryDestination, - BigQuerySource, - ContainerRegistryDestination, - GcsDestination, - GcsSource, -) -from .job_service import ( - CancelBatchPredictionJobRequest, - CancelCustomJobRequest, - CancelDataLabelingJobRequest, - CancelHyperparameterTuningJobRequest, - CreateBatchPredictionJobRequest, - CreateCustomJobRequest, - CreateDataLabelingJobRequest, - CreateHyperparameterTuningJobRequest, - DeleteBatchPredictionJobRequest, - DeleteCustomJobRequest, - DeleteDataLabelingJobRequest, - DeleteHyperparameterTuningJobRequest, - GetBatchPredictionJobRequest, - GetCustomJobRequest, - GetDataLabelingJobRequest, - GetHyperparameterTuningJobRequest, - ListBatchPredictionJobsRequest, - ListBatchPredictionJobsResponse, - ListCustomJobsRequest, - ListCustomJobsResponse, - ListDataLabelingJobsRequest, - ListDataLabelingJobsResponse, - ListHyperparameterTuningJobsRequest, - ListHyperparameterTuningJobsResponse, -) -from .machine_resources import ( - AutomaticResources, - AutoscalingMetricSpec, - BatchDedicatedResources, - DedicatedResources, - DiskSpec, - MachineSpec, - ResourcesConsumed, -) -from .manual_batch_tuning_parameters import ( - ManualBatchTuningParameters, -) -from .migratable_resource import ( - MigratableResource, -) -from .migration_service import ( - BatchMigrateResourcesOperationMetadata, - BatchMigrateResourcesRequest, - BatchMigrateResourcesResponse, - MigrateResourceRequest, - MigrateResourceResponse, - SearchMigratableResourcesRequest, - SearchMigratableResourcesResponse, -) -from .model import ( - Model, - ModelContainerSpec, - Port, - PredictSchemata, -) -from .model_evaluation import ( - ModelEvaluation, -) -from .model_evaluation_slice import ( - 
ModelEvaluationSlice, -) -from .model_service import ( - DeleteModelRequest, - ExportModelOperationMetadata, - ExportModelRequest, - ExportModelResponse, - GetModelEvaluationRequest, - GetModelEvaluationSliceRequest, - GetModelRequest, - ListModelEvaluationSlicesRequest, - ListModelEvaluationSlicesResponse, - ListModelEvaluationsRequest, - ListModelEvaluationsResponse, - ListModelsRequest, - ListModelsResponse, - UpdateModelRequest, - UploadModelOperationMetadata, - UploadModelRequest, - UploadModelResponse, -) -from .operation import ( - DeleteOperationMetadata, - GenericOperationMetadata, -) -from .pipeline_job import ( - PipelineJob, - PipelineJobDetail, - PipelineTaskDetail, - PipelineTaskExecutorDetail, -) -from .pipeline_service import ( - CancelPipelineJobRequest, - CancelTrainingPipelineRequest, - CreatePipelineJobRequest, - CreateTrainingPipelineRequest, - DeletePipelineJobRequest, - DeleteTrainingPipelineRequest, - GetPipelineJobRequest, - GetTrainingPipelineRequest, - ListPipelineJobsRequest, - ListPipelineJobsResponse, - ListTrainingPipelinesRequest, - ListTrainingPipelinesResponse, -) -from .prediction_service import ( - PredictRequest, - PredictResponse, -) -from .specialist_pool import ( - SpecialistPool, -) -from .specialist_pool_service import ( - CreateSpecialistPoolOperationMetadata, - CreateSpecialistPoolRequest, - DeleteSpecialistPoolRequest, - GetSpecialistPoolRequest, - ListSpecialistPoolsRequest, - ListSpecialistPoolsResponse, - UpdateSpecialistPoolOperationMetadata, - UpdateSpecialistPoolRequest, -) -from .study import ( - Measurement, - StudySpec, - Trial, -) -from .training_pipeline import ( - FilterSplit, - FractionSplit, - InputDataConfig, - PredefinedSplit, - TimestampSplit, - TrainingPipeline, -) -from .user_action_reference import ( - UserActionReference, -) -from .value import ( - Value, -) - -__all__ = ( - 'AcceleratorType', - 'Annotation', - 'AnnotationSpec', - 'Artifact', - 'BatchPredictionJob', - 'CompletionStats', - 'Context', - 'ContainerSpec', - 'CustomJob', - 'CustomJobSpec', - 'PythonPackageSpec', - 'Scheduling', - 'WorkerPoolSpec', - 'DataItem', - 'ActiveLearningConfig', - 'DataLabelingJob', - 'SampleConfig', - 'TrainingConfig', - 'Dataset', - 'ExportDataConfig', - 'ImportDataConfig', - 'CreateDatasetOperationMetadata', - 'CreateDatasetRequest', - 'DeleteDatasetRequest', - 'ExportDataOperationMetadata', - 'ExportDataRequest', - 'ExportDataResponse', - 'GetAnnotationSpecRequest', - 'GetDatasetRequest', - 'ImportDataOperationMetadata', - 'ImportDataRequest', - 'ImportDataResponse', - 'ListAnnotationsRequest', - 'ListAnnotationsResponse', - 'ListDataItemsRequest', - 'ListDataItemsResponse', - 'ListDatasetsRequest', - 'ListDatasetsResponse', - 'UpdateDatasetRequest', - 'DeployedModelRef', - 'EncryptionSpec', - 'DeployedModel', - 'Endpoint', - 'CreateEndpointOperationMetadata', - 'CreateEndpointRequest', - 'DeleteEndpointRequest', - 'DeployModelOperationMetadata', - 'DeployModelRequest', - 'DeployModelResponse', - 'GetEndpointRequest', - 'ListEndpointsRequest', - 'ListEndpointsResponse', - 'UndeployModelOperationMetadata', - 'UndeployModelRequest', - 'UndeployModelResponse', - 'UpdateEndpointRequest', - 'EnvVar', - 'Execution', - 'HyperparameterTuningJob', - 'BigQueryDestination', - 'BigQuerySource', - 'ContainerRegistryDestination', - 'GcsDestination', - 'GcsSource', - 'CancelBatchPredictionJobRequest', - 'CancelCustomJobRequest', - 'CancelDataLabelingJobRequest', - 'CancelHyperparameterTuningJobRequest', - 'CreateBatchPredictionJobRequest', - 
'CreateCustomJobRequest', - 'CreateDataLabelingJobRequest', - 'CreateHyperparameterTuningJobRequest', - 'DeleteBatchPredictionJobRequest', - 'DeleteCustomJobRequest', - 'DeleteDataLabelingJobRequest', - 'DeleteHyperparameterTuningJobRequest', - 'GetBatchPredictionJobRequest', - 'GetCustomJobRequest', - 'GetDataLabelingJobRequest', - 'GetHyperparameterTuningJobRequest', - 'ListBatchPredictionJobsRequest', - 'ListBatchPredictionJobsResponse', - 'ListCustomJobsRequest', - 'ListCustomJobsResponse', - 'ListDataLabelingJobsRequest', - 'ListDataLabelingJobsResponse', - 'ListHyperparameterTuningJobsRequest', - 'ListHyperparameterTuningJobsResponse', - 'JobState', - 'AutomaticResources', - 'AutoscalingMetricSpec', - 'BatchDedicatedResources', - 'DedicatedResources', - 'DiskSpec', - 'MachineSpec', - 'ResourcesConsumed', - 'ManualBatchTuningParameters', - 'MigratableResource', - 'BatchMigrateResourcesOperationMetadata', - 'BatchMigrateResourcesRequest', - 'BatchMigrateResourcesResponse', - 'MigrateResourceRequest', - 'MigrateResourceResponse', - 'SearchMigratableResourcesRequest', - 'SearchMigratableResourcesResponse', - 'Model', - 'ModelContainerSpec', - 'Port', - 'PredictSchemata', - 'ModelEvaluation', - 'ModelEvaluationSlice', - 'DeleteModelRequest', - 'ExportModelOperationMetadata', - 'ExportModelRequest', - 'ExportModelResponse', - 'GetModelEvaluationRequest', - 'GetModelEvaluationSliceRequest', - 'GetModelRequest', - 'ListModelEvaluationSlicesRequest', - 'ListModelEvaluationSlicesResponse', - 'ListModelEvaluationsRequest', - 'ListModelEvaluationsResponse', - 'ListModelsRequest', - 'ListModelsResponse', - 'UpdateModelRequest', - 'UploadModelOperationMetadata', - 'UploadModelRequest', - 'UploadModelResponse', - 'DeleteOperationMetadata', - 'GenericOperationMetadata', - 'PipelineJob', - 'PipelineJobDetail', - 'PipelineTaskDetail', - 'PipelineTaskExecutorDetail', - 'CancelPipelineJobRequest', - 'CancelTrainingPipelineRequest', - 'CreatePipelineJobRequest', - 'CreateTrainingPipelineRequest', - 'DeletePipelineJobRequest', - 'DeleteTrainingPipelineRequest', - 'GetPipelineJobRequest', - 'GetTrainingPipelineRequest', - 'ListPipelineJobsRequest', - 'ListPipelineJobsResponse', - 'ListTrainingPipelinesRequest', - 'ListTrainingPipelinesResponse', - 'PipelineState', - 'PredictRequest', - 'PredictResponse', - 'SpecialistPool', - 'CreateSpecialistPoolOperationMetadata', - 'CreateSpecialistPoolRequest', - 'DeleteSpecialistPoolRequest', - 'GetSpecialistPoolRequest', - 'ListSpecialistPoolsRequest', - 'ListSpecialistPoolsResponse', - 'UpdateSpecialistPoolOperationMetadata', - 'UpdateSpecialistPoolRequest', - 'Measurement', - 'StudySpec', - 'Trial', - 'FilterSplit', - 'FractionSplit', - 'InputDataConfig', - 'PredefinedSplit', - 'TimestampSplit', - 'TrainingPipeline', - 'UserActionReference', - 'Value', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/accelerator_type.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/accelerator_type.py deleted file mode 100644 index 2c89d89653..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/accelerator_type.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'AcceleratorType', - }, -) - - -class AcceleratorType(proto.Enum): - r"""Represents a hardware accelerator type.""" - ACCELERATOR_TYPE_UNSPECIFIED = 0 - NVIDIA_TESLA_K80 = 1 - NVIDIA_TESLA_P100 = 2 - NVIDIA_TESLA_V100 = 3 - NVIDIA_TESLA_P4 = 4 - NVIDIA_TESLA_T4 = 5 - NVIDIA_TESLA_A100 = 8 - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation.py deleted file mode 100644 index 342d33e976..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation.py +++ /dev/null @@ -1,129 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import user_action_reference -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'Annotation', - }, -) - - -class Annotation(proto.Message): - r"""Used to assign a specific AnnotationSpec to a particular area - of a DataItem, or to the whole DataItem. - - Attributes: - name (str): - Output only. Resource name of the Annotation. - payload_schema_uri (str): - Required. Google Cloud Storage URI that points to a YAML file - describing - [payload][google.cloud.aiplatform.v1.Annotation.payload]. - The schema is defined as an `OpenAPI 3.0.2 Schema - Object `__. - The schema files that can be used here are found in - gs://google-cloud-aiplatform/schema/dataset/annotation/, - note that the chosen schema must be consistent with the - parent Dataset's - [metadata][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri]. - payload (google.protobuf.struct_pb2.Value): - Required. The schema of the payload can be found in - [payload_schema][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Annotation - was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Annotation - was last updated. - etag (str): - Optional. Used to perform consistent read-modify-write - updates. If not set, a blind - "overwrite" update happens. - annotation_source (google.cloud.aiplatform_v1.types.UserActionReference): - Output only. The source of the Annotation.
- labels (Sequence[google.cloud.aiplatform_v1.types.Annotation.LabelsEntry]): - Optional. The labels with user-defined metadata to organize - your Annotations. - - Label keys and values can be no longer than 64 characters - (Unicode codepoints), can only contain lowercase letters, - numeric characters, underscores and dashes. International - characters are allowed. No more than 64 user labels can be - associated with one Annotation (system labels are excluded). - - See https://goo.gl/xmQnxf for more information and examples - of labels. System reserved label keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. Following - system labels exist for each Annotation: - - - "aiplatform.googleapis.com/annotation_set_name": - optional, name of the UI's annotation set this Annotation - belongs to. If not set, the Annotation is not visible in - the UI. - - - "aiplatform.googleapis.com/payload_schema": output only, - its value is the - [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri] - title. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - payload_schema_uri = proto.Field( - proto.STRING, - number=2, - ) - payload = proto.Field( - proto.MESSAGE, - number=3, - message=struct_pb2.Value, - ) - create_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - etag = proto.Field( - proto.STRING, - number=8, - ) - annotation_source = proto.Field( - proto.MESSAGE, - number=5, - message=user_action_reference.UserActionReference, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=6, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation_spec.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation_spec.py deleted file mode 100644 index 950abfe6c4..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation_spec.py +++ /dev/null @@ -1,78 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'AnnotationSpec', - }, -) - - -class AnnotationSpec(proto.Message): - r"""Identifies a concept with which DataItems may be - annotated. - - Attributes: - name (str): - Output only. Resource name of the - AnnotationSpec. - display_name (str): - Required. The user-defined name of the - AnnotationSpec. The name can be up to 128 - characters long and can consist of any UTF-8 - characters. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - AnnotationSpec was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this AnnotationSpec - was last updated. - etag (str): - Optional.
Used to perform consistent read-modify-write updates. If not set, a blind - "overwrite" update happens. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - create_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - etag = proto.Field( - proto.STRING, - number=5, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/batch_prediction_job.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/batch_prediction_job.py deleted file mode 100644 index abeb95f208..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/batch_prediction_job.py +++ /dev/null @@ -1,407 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import completion_stats as gca_completion_stats -from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1.types import io -from google.cloud.aiplatform_v1.types import job_state -from google.cloud.aiplatform_v1.types import machine_resources -from google.cloud.aiplatform_v1.types import manual_batch_tuning_parameters as gca_manual_batch_tuning_parameters -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'BatchPredictionJob', - }, -) - - -class BatchPredictionJob(proto.Message): - r"""A job that uses a - [Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to - produce predictions on multiple [input - instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. - If predictions for a significant portion of the instances fail, the - job may finish without attempting predictions for all remaining - instances. - - Attributes: - name (str): - Output only. Resource name of the - BatchPredictionJob. - display_name (str): - Required. The user-defined name of this - BatchPredictionJob. - model (str): - Required. The name of the Model that produces - the predictions via this job, must share the - same ancestor Location. Starting this job has no - impact on any existing deployments of the Model - and their resources. - input_config (google.cloud.aiplatform_v1.types.BatchPredictionJob.InputConfig): - Required. Input configuration of the instances on which - predictions are performed. The schema of any single instance - may be specified via the - [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri].
- model_parameters (google.protobuf.struct_pb2.Value): - The parameters that govern the predictions. The schema of - the parameters may be specified via the - [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. - output_config (google.cloud.aiplatform_v1.types.BatchPredictionJob.OutputConfig): - Required. The configuration specifying where output - predictions should be written. The schema of any single - prediction may be specified as a concatenation of - [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] - and - [prediction_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.prediction_schema_uri]. - dedicated_resources (google.cloud.aiplatform_v1.types.BatchDedicatedResources): - The config of resources used by the Model during the batch - prediction. If the Model - [supports][google.cloud.aiplatform.v1.Model.supported_deployment_resources_types] - DEDICATED_RESOURCES, this config may be provided (and the job - will use these resources); if the Model doesn't support - AUTOMATIC_RESOURCES, this config must be provided. - manual_batch_tuning_parameters (google.cloud.aiplatform_v1.types.ManualBatchTuningParameters): - Immutable. Parameters configuring the batch behavior. - Currently only applicable when - [dedicated_resources][google.cloud.aiplatform.v1.BatchPredictionJob.dedicated_resources] - are used (in other cases Vertex AI does the tuning itself). - output_info (google.cloud.aiplatform_v1.types.BatchPredictionJob.OutputInfo): - Output only. Information further describing - the output of this job. - state (google.cloud.aiplatform_v1.types.JobState): - Output only. The detailed state of the job. - error (google.rpc.status_pb2.Status): - Output only. Only populated when the job's state is - JOB_STATE_FAILED or JOB_STATE_CANCELLED. - partial_failures (Sequence[google.rpc.status_pb2.Status]): - Output only. Partial failures encountered. - For example, single files that can't be read. - This field never exceeds 20 entries. - Status details fields contain standard GCP error - details. - resources_consumed (google.cloud.aiplatform_v1.types.ResourcesConsumed): - Output only. Information about resources that - had been consumed by this job. Provided in real - time on a best-effort basis, as well as a final - value once the job completes. - - Note: This field currently may not be populated - for batch predictions that use AutoML Models. - completion_stats (google.cloud.aiplatform_v1.types.CompletionStats): - Output only. Statistics on completed and - failed prediction instances. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the BatchPredictionJob - was created. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the BatchPredictionJob for the first - time entered the ``JOB_STATE_RUNNING`` state. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the BatchPredictionJob entered any of - the following states: ``JOB_STATE_SUCCEEDED``, - ``JOB_STATE_FAILED``, ``JOB_STATE_CANCELLED``. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the BatchPredictionJob - was most recently updated.
- labels (Sequence[google.cloud.aiplatform_v1.types.BatchPredictionJob.LabelsEntry]): - The labels with user-defined metadata to - organize BatchPredictionJobs. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): - Customer-managed encryption key options for a - BatchPredictionJob. If this is set, then all - resources created by the BatchPredictionJob will - be encrypted with the provided encryption key. - """ - - class InputConfig(proto.Message): - r"""Configures the input to - [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. - See - [Model.supported_input_storage_formats][google.cloud.aiplatform.v1.Model.supported_input_storage_formats] - for Model's supported input formats, and how instances should be - expressed via any of them. - - Attributes: - gcs_source (google.cloud.aiplatform_v1.types.GcsSource): - The Cloud Storage location for the input - instances. - bigquery_source (google.cloud.aiplatform_v1.types.BigQuerySource): - The BigQuery location of the input table. - The schema of the table should be in the format - described by the given context OpenAPI Schema, - if one is provided. The table may contain - additional columns that are not described by the - schema, and they will be ignored. - instances_format (str): - Required. The format in which instances are given, must be - one of the - [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model] - [supported_input_storage_formats][google.cloud.aiplatform.v1.Model.supported_input_storage_formats]. - """ - - gcs_source = proto.Field( - proto.MESSAGE, - number=2, - oneof='source', - message=io.GcsSource, - ) - bigquery_source = proto.Field( - proto.MESSAGE, - number=3, - oneof='source', - message=io.BigQuerySource, - ) - instances_format = proto.Field( - proto.STRING, - number=1, - ) - - class OutputConfig(proto.Message): - r"""Configures the output of - [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. - See - [Model.supported_output_storage_formats][google.cloud.aiplatform.v1.Model.supported_output_storage_formats] - for supported output formats, and how predictions are expressed via - any of them. - - Attributes: - gcs_destination (google.cloud.aiplatform_v1.types.GcsDestination): - The Cloud Storage location of the directory where the output - is to be written to. In the given directory a new directory - is created. Its name is - ``prediction--``, where - timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. - Inside of it files ``predictions_0001.``, - ``predictions_0002.``, ..., - ``predictions_N.`` are created where - ```` depends on chosen - [predictions_format][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.predictions_format], - and N may equal 0001 and depends on the total number of - successfully predicted instances. If the Model has both - [instance][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] - and - [prediction][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri] - schemata defined then each such file contains predictions as - per the - [predictions_format][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.predictions_format]. 
- If prediction for any instance failed (partially or - completely), then an additional ``errors_0001.``, - ``errors_0002.``,..., ``errors_N.`` - files are created (N depends on total number of failed - predictions). These files contain the failed instances, as - per their schema, followed by an additional ``error`` field - which as value has [google.rpc.Status][google.rpc.Status] - containing only ``code`` and ``message`` fields. - bigquery_destination (google.cloud.aiplatform_v1.types.BigQueryDestination): - The BigQuery project or dataset location where the output is - to be written to. If project is provided, a new dataset is - created with name - ``prediction__`` where - is made BigQuery-dataset-name compatible (for example, most - special characters become underscores), and timestamp is in - YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the - dataset two tables will be created, ``predictions``, and - ``errors``. If the Model has both - [instance][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] - and - [prediction][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri] - schemata defined then the tables have columns as follows: - The ``predictions`` table contains instances for which the - prediction succeeded, it has columns as per a concatenation - of the Model's instance and prediction schemata. The - ``errors`` table contains rows for which the prediction has - failed, it has instance columns, as per the instance schema, - followed by a single "errors" column, which as values has - [google.rpc.Status][google.rpc.Status] represented as a - STRUCT, and containing only ``code`` and ``message``. - predictions_format (str): - Required. The format in which Vertex AI gives the - predictions, must be one of the - [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model] - [supported_output_storage_formats][google.cloud.aiplatform.v1.Model.supported_output_storage_formats]. - """ - - gcs_destination = proto.Field( - proto.MESSAGE, - number=2, - oneof='destination', - message=io.GcsDestination, - ) - bigquery_destination = proto.Field( - proto.MESSAGE, - number=3, - oneof='destination', - message=io.BigQueryDestination, - ) - predictions_format = proto.Field( - proto.STRING, - number=1, - ) - - class OutputInfo(proto.Message): - r"""Further describes this job's output. Supplements - [output_config][google.cloud.aiplatform.v1.BatchPredictionJob.output_config]. - - Attributes: - gcs_output_directory (str): - Output only. The full path of the Cloud - Storage directory created, into which the - prediction output is written. - bigquery_output_dataset (str): - Output only. The path of the BigQuery dataset created, in - ``bq://projectId.bqDatasetId`` format, into which the - prediction output is written. - bigquery_output_table (str): - Output only. The name of the BigQuery table created, in - ``predictions_`` format, into which the - prediction output is written. Can be used by UI to generate - the BigQuery output path, for example. 
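The InputConfig, OutputConfig, and OutputInfo messages above nest inside a single BatchPredictionJob. A minimal sketch, assuming the released google-cloud-aiplatform package; the model resource name, bucket, and formats are placeholders:

    from google.cloud import aiplatform_v1

    # JSONL instances read from Cloud Storage; predictions written back to
    # Cloud Storage under a generated "prediction-..." directory.
    job = aiplatform_v1.BatchPredictionJob(
        display_name="my-batch-job",
        model="projects/my-project/locations/us-central1/models/123",
        input_config=aiplatform_v1.BatchPredictionJob.InputConfig(
            instances_format="jsonl",
            gcs_source=aiplatform_v1.GcsSource(
                uris=["gs://my-bucket/input.jsonl"]
            ),
        ),
        output_config=aiplatform_v1.BatchPredictionJob.OutputConfig(
            predictions_format="jsonl",
            gcs_destination=aiplatform_v1.GcsDestination(
                output_uri_prefix="gs://my-bucket/output/"
            ),
        ),
    )

The gcs_source/bigquery_source and gcs_destination/bigquery_destination pairs are proto oneofs, so setting one clears the other.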
- """ - - gcs_output_directory = proto.Field( - proto.STRING, - number=1, - oneof='output_location', - ) - bigquery_output_dataset = proto.Field( - proto.STRING, - number=2, - oneof='output_location', - ) - bigquery_output_table = proto.Field( - proto.STRING, - number=4, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - model = proto.Field( - proto.STRING, - number=3, - ) - input_config = proto.Field( - proto.MESSAGE, - number=4, - message=InputConfig, - ) - model_parameters = proto.Field( - proto.MESSAGE, - number=5, - message=struct_pb2.Value, - ) - output_config = proto.Field( - proto.MESSAGE, - number=6, - message=OutputConfig, - ) - dedicated_resources = proto.Field( - proto.MESSAGE, - number=7, - message=machine_resources.BatchDedicatedResources, - ) - manual_batch_tuning_parameters = proto.Field( - proto.MESSAGE, - number=8, - message=gca_manual_batch_tuning_parameters.ManualBatchTuningParameters, - ) - output_info = proto.Field( - proto.MESSAGE, - number=9, - message=OutputInfo, - ) - state = proto.Field( - proto.ENUM, - number=10, - enum=job_state.JobState, - ) - error = proto.Field( - proto.MESSAGE, - number=11, - message=status_pb2.Status, - ) - partial_failures = proto.RepeatedField( - proto.MESSAGE, - number=12, - message=status_pb2.Status, - ) - resources_consumed = proto.Field( - proto.MESSAGE, - number=13, - message=machine_resources.ResourcesConsumed, - ) - completion_stats = proto.Field( - proto.MESSAGE, - number=14, - message=gca_completion_stats.CompletionStats, - ) - create_time = proto.Field( - proto.MESSAGE, - number=15, - message=timestamp_pb2.Timestamp, - ) - start_time = proto.Field( - proto.MESSAGE, - number=16, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=17, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=18, - message=timestamp_pb2.Timestamp, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=19, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=24, - message=gca_encryption_spec.EncryptionSpec, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/completion_stats.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/completion_stats.py deleted file mode 100644 index 289efbc59b..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/completion_stats.py +++ /dev/null @@ -1,63 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'CompletionStats', - }, -) - - -class CompletionStats(proto.Message): - r"""Success and error statistics of processing multiple entities - (for example, DataItems or structured data rows) in batch. - - Attributes: - successful_count (int): - Output only. 
The number of entities that had - been processed successfully. - failed_count (int): - Output only. The number of entities for which - any error was encountered. - incomplete_count (int): - Output only. In cases when enough errors are - encountered, a job, pipeline, or operation may be - failed as a whole. Below is the number of - entities for which the processing had not been - finished (either in successful or failed state). - Set to -1 if the number is unknown (for example, - the operation failed before the total entity - number could be collected). - """ - - successful_count = proto.Field( - proto.INT64, - number=1, - ) - failed_count = proto.Field( - proto.INT64, - number=2, - ) - incomplete_count = proto.Field( - proto.INT64, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/custom_job.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/custom_job.py deleted file mode 100644 index a7fa52d537..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/custom_job.py +++ /dev/null @@ -1,391 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1.types import env_var -from google.cloud.aiplatform_v1.types import io -from google.cloud.aiplatform_v1.types import job_state -from google.cloud.aiplatform_v1.types import machine_resources -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'CustomJob', - 'CustomJobSpec', - 'WorkerPoolSpec', - 'ContainerSpec', - 'PythonPackageSpec', - 'Scheduling', - }, -) - - -class CustomJob(proto.Message): - r"""Represents a job that runs custom workloads such as a Docker - container or a Python package. A CustomJob can have multiple - worker pools and each worker pool can have its own machine and - input spec. A CustomJob will be cleaned up once the job enters a - terminal state (failed or succeeded). - - Attributes: - name (str): - Output only. Resource name of a CustomJob. - display_name (str): - Required. The display name of the CustomJob. - The name can be up to 128 characters long and - can consist of any UTF-8 characters. - job_spec (google.cloud.aiplatform_v1.types.CustomJobSpec): - Required. Job spec. - state (google.cloud.aiplatform_v1.types.JobState): - Output only. The detailed state of the job. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the CustomJob was - created. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the CustomJob for the first time - entered the ``JOB_STATE_RUNNING`` state. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only.
Time when the CustomJob entered any of the - following states: ``JOB_STATE_SUCCEEDED``, - ``JOB_STATE_FAILED``, ``JOB_STATE_CANCELLED``. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the CustomJob was most - recently updated. - error (google.rpc.status_pb2.Status): - Output only. Only populated when job's state is - ``JOB_STATE_FAILED`` or ``JOB_STATE_CANCELLED``. - labels (Sequence[google.cloud.aiplatform_v1.types.CustomJob.LabelsEntry]): - The labels with user-defined metadata to - organize CustomJobs. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): - Customer-managed encryption key options for a - CustomJob. If this is set, then all resources - created by the CustomJob will be encrypted with - the provided encryption key. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - job_spec = proto.Field( - proto.MESSAGE, - number=4, - message='CustomJobSpec', - ) - state = proto.Field( - proto.ENUM, - number=5, - enum=job_state.JobState, - ) - create_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - start_time = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=8, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=9, - message=timestamp_pb2.Timestamp, - ) - error = proto.Field( - proto.MESSAGE, - number=10, - message=status_pb2.Status, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=11, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=12, - message=gca_encryption_spec.EncryptionSpec, - ) - - -class CustomJobSpec(proto.Message): - r"""Represents the spec of a CustomJob. - Attributes: - worker_pool_specs (Sequence[google.cloud.aiplatform_v1.types.WorkerPoolSpec]): - Required. The spec of the worker pools - including machine type and Docker image. All - worker pools except the first one are optional - and can be skipped by providing an empty value. - scheduling (google.cloud.aiplatform_v1.types.Scheduling): - Scheduling options for a CustomJob. - service_account (str): - Specifies the service account for workload run-as account. - Users submitting jobs must have act-as permission on this - run-as account. If unspecified, the `AI Platform Custom Code - Service - Agent `__ - for the CustomJob's project is used. - network (str): - The full name of the Compute Engine - `network `__ - to which the Job should be peered. For example, - ``projects/12345/global/networks/myVPC``. - `Format `__ - is of the form - ``projects/{project}/global/networks/{network}``. Where - {project} is a project number, as in ``12345``, and - {network} is a network name. - - Private services access must already be configured for the - network. If left unspecified, the job is not peered with any - network. - base_output_directory (google.cloud.aiplatform_v1.types.GcsDestination): - The Cloud Storage location to store the output of this - CustomJob or HyperparameterTuningJob. 
For - HyperparameterTuningJob, the baseOutputDirectory of each - child CustomJob backing a Trial is set to a subdirectory of - name [id][google.cloud.aiplatform.v1.Trial.id] under its - parent HyperparameterTuningJob's baseOutputDirectory. - - The following Vertex AI environment variables will be passed - to containers or python modules when this field is set: - - For CustomJob: - - - AIP_MODEL_DIR = ``/model/`` - - AIP_CHECKPOINT_DIR = - ``/checkpoints/`` - - AIP_TENSORBOARD_LOG_DIR = - ``/logs/`` - - For CustomJob backing a Trial of HyperparameterTuningJob: - - - AIP_MODEL_DIR = - ``//model/`` - - AIP_CHECKPOINT_DIR = - ``//checkpoints/`` - - AIP_TENSORBOARD_LOG_DIR = - ``//logs/`` - """ - - worker_pool_specs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='WorkerPoolSpec', - ) - scheduling = proto.Field( - proto.MESSAGE, - number=3, - message='Scheduling', - ) - service_account = proto.Field( - proto.STRING, - number=4, - ) - network = proto.Field( - proto.STRING, - number=5, - ) - base_output_directory = proto.Field( - proto.MESSAGE, - number=6, - message=io.GcsDestination, - ) - - -class WorkerPoolSpec(proto.Message): - r"""Represents the spec of a worker pool in a job. - Attributes: - container_spec (google.cloud.aiplatform_v1.types.ContainerSpec): - The custom container task. - python_package_spec (google.cloud.aiplatform_v1.types.PythonPackageSpec): - The Python packaged task. - machine_spec (google.cloud.aiplatform_v1.types.MachineSpec): - Optional. Immutable. The specification of a - single machine. - replica_count (int): - Optional. The number of worker replicas to - use for this worker pool. - disk_spec (google.cloud.aiplatform_v1.types.DiskSpec): - Disk spec. - """ - - container_spec = proto.Field( - proto.MESSAGE, - number=6, - oneof='task', - message='ContainerSpec', - ) - python_package_spec = proto.Field( - proto.MESSAGE, - number=7, - oneof='task', - message='PythonPackageSpec', - ) - machine_spec = proto.Field( - proto.MESSAGE, - number=1, - message=machine_resources.MachineSpec, - ) - replica_count = proto.Field( - proto.INT64, - number=2, - ) - disk_spec = proto.Field( - proto.MESSAGE, - number=5, - message=machine_resources.DiskSpec, - ) - - -class ContainerSpec(proto.Message): - r"""The spec of a Container. - Attributes: - image_uri (str): - Required. The URI of a container image in the - Container Registry that is to be run on each - worker replica. - command (Sequence[str]): - The command to be invoked when the container - is started. It overrides the entrypoint - instruction in Dockerfile when provided. - args (Sequence[str]): - The arguments to be passed when starting the - container. - env (Sequence[google.cloud.aiplatform_v1.types.EnvVar]): - Environment variables to be passed to the - container. - """ - - image_uri = proto.Field( - proto.STRING, - number=1, - ) - command = proto.RepeatedField( - proto.STRING, - number=2, - ) - args = proto.RepeatedField( - proto.STRING, - number=3, - ) - env = proto.RepeatedField( - proto.MESSAGE, - number=4, - message=env_var.EnvVar, - ) - - -class PythonPackageSpec(proto.Message): - r"""The spec of a Python packaged code. - Attributes: - executor_image_uri (str): - Required. The URI of a container image in Artifact Registry - that will run the provided Python package. Vertex AI - provides a wide range of executor images with pre-installed - packages to meet users' various use cases. See the list of - `pre-built containers for - training `__. - You must use an image from this list. 
- package_uris (Sequence[str]): - Required. The Google Cloud Storage location - of the Python package files which are the - training program and its dependent packages. The - maximum number of package URIs is 100. - python_module (str): - Required. The Python module name to run after - installing the packages. - args (Sequence[str]): - Command line arguments to be passed to the - Python task. - env (Sequence[google.cloud.aiplatform_v1.types.EnvVar]): - Environment variables to be passed to the - python module. - """ - - executor_image_uri = proto.Field( - proto.STRING, - number=1, - ) - package_uris = proto.RepeatedField( - proto.STRING, - number=2, - ) - python_module = proto.Field( - proto.STRING, - number=3, - ) - args = proto.RepeatedField( - proto.STRING, - number=4, - ) - env = proto.RepeatedField( - proto.MESSAGE, - number=5, - message=env_var.EnvVar, - ) - - -class Scheduling(proto.Message): - r"""All parameters related to queuing and scheduling of custom - jobs. - - Attributes: - timeout (google.protobuf.duration_pb2.Duration): - The maximum job running time. The default is - 7 days. - restart_job_on_worker_restart (bool): - Restarts the entire CustomJob if a worker - gets restarted. This feature can be used by - distributed training jobs that are not resilient - to workers leaving and joining a job. - """ - - timeout = proto.Field( - proto.MESSAGE, - number=1, - message=duration_pb2.Duration, - ) - restart_job_on_worker_restart = proto.Field( - proto.BOOL, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_item.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_item.py deleted file mode 100644 index 0ec4a5901e..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_item.py +++ /dev/null @@ -1,101 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'DataItem', - }, -) - - -class DataItem(proto.Message): - r"""A piece of data in a Dataset. Could be an image, a video, a - document or plain text. - - Attributes: - name (str): - Output only. The resource name of the - DataItem. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this DataItem was - created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this DataItem was - last updated. - labels (Sequence[google.cloud.aiplatform_v1.types.DataItem.LabelsEntry]): - Optional. The labels with user-defined - metadata to organize your DataItems. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. 
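The CustomJob, CustomJobSpec, WorkerPoolSpec, and ContainerSpec messages above nest as follows. A minimal sketch, assuming the released google-cloud-aiplatform package; the image URI, bucket, and machine type are placeholders:

    from google.cloud import aiplatform_v1

    # One worker pool running a custom training container, with job output
    # rooted at a Cloud Storage directory (see base_output_directory above).
    job = aiplatform_v1.CustomJob(
        display_name="my-custom-job",
        job_spec=aiplatform_v1.CustomJobSpec(
            worker_pool_specs=[
                aiplatform_v1.WorkerPoolSpec(
                    machine_spec=aiplatform_v1.MachineSpec(
                        machine_type="n1-standard-4"
                    ),
                    replica_count=1,
                    container_spec=aiplatform_v1.ContainerSpec(
                        image_uri="gcr.io/my-project/trainer:latest",
                        args=["--epochs", "10"],
                    ),
                )
            ],
            base_output_directory=aiplatform_v1.GcsDestination(
                output_uri_prefix="gs://my-bucket/job-output/"
            ),
        ),
    )

Within a WorkerPoolSpec, container_spec and python_package_spec form the "task" oneof, so each pool carries exactly one of the two.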
No more than 64 user labels can be - associated with one DataItem (system labels are - excluded). - - See https://goo.gl/xmQnxf for more information - and examples of labels. System reserved label - keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. - payload (google.protobuf.struct_pb2.Value): - Required. The data that the DataItem represents (for - example, an image or a text snippet). The schema of the - payload is stored in the parent Dataset's [metadata - schema's][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] - dataItemSchemaUri field. - etag (str): - Optional. Used to perform consistent read-modify-write - updates. If not set, a blind - "overwrite" update happens. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - create_time = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=3, - ) - payload = proto.Field( - proto.MESSAGE, - number=4, - message=struct_pb2.Value, - ) - etag = proto.Field( - proto.STRING, - number=7, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_labeling_job.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_labeling_job.py deleted file mode 100644 index f072ecc502..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_labeling_job.py +++ /dev/null @@ -1,332 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1.types import job_state -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from google.type import money_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'DataLabelingJob', - 'ActiveLearningConfig', - 'SampleConfig', - 'TrainingConfig', - }, -) - - -class DataLabelingJob(proto.Message): - r"""DataLabelingJob is used to trigger a human labeling job on - unlabeled data from the following Dataset: - - Attributes: - name (str): - Output only. Resource name of the - DataLabelingJob. - display_name (str): - Required. The user-defined name of the - DataLabelingJob. The name can be up to 128 - characters long and can consist of any UTF-8 - characters. - datasets (Sequence[str]): - Required. Dataset resource names. Right now we only support - labeling from a single Dataset. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - annotation_labels (Sequence[google.cloud.aiplatform_v1.types.DataLabelingJob.AnnotationLabelsEntry]): - Labels to assign to annotations generated by - this DataLabelingJob.
- Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. See https://goo.gl/xmQnxf for more - information and examples of labels. System - reserved label keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. - labeler_count (int): - Required. Number of labelers to work on each - DataItem. - instruction_uri (str): - Required. The Google Cloud Storage location - of the instruction pdf. This pdf is shared with - labelers, and provides a detailed description of - how to label DataItems in Datasets. - inputs_schema_uri (str): - Required. Points to a YAML file stored on - Google Cloud Storage describing the config for a - specific type of DataLabelingJob. The schema - files that can be used here are found in the - https://storage.googleapis.com/google-cloud-aiplatform - bucket in the - /schema/datalabelingjob/inputs/ folder. - inputs (google.protobuf.struct_pb2.Value): - Required. Input config parameters for the - DataLabelingJob. - state (google.cloud.aiplatform_v1.types.JobState): - Output only. The detailed state of the job. - labeling_progress (int): - Output only. Current labeling job progress percentage scaled - in interval [0, 100], indicating the percentage of DataItems - that have been finished. - current_spend (google.type.money_pb2.Money): - Output only. Estimated cost (in US dollars) - that the DataLabelingJob has incurred to date. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - DataLabelingJob was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - DataLabelingJob was updated most recently. - error (google.rpc.status_pb2.Status): - Output only. DataLabelingJob errors. It is only populated - when the job's state is ``JOB_STATE_FAILED`` or - ``JOB_STATE_CANCELLED``. - labels (Sequence[google.cloud.aiplatform_v1.types.DataLabelingJob.LabelsEntry]): - The labels with user-defined metadata to organize your - DataLabelingJobs. - - Label keys and values can be no longer than 64 characters - (Unicode codepoints), can only contain lowercase letters, - numeric characters, underscores and dashes. International - characters are allowed. - - See https://goo.gl/xmQnxf for more information and examples - of labels. System reserved label keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. Following - system labels exist for each DataLabelingJob: - - - "aiplatform.googleapis.com/schema": output only, its - value is the - [inputs_schema][google.cloud.aiplatform.v1.DataLabelingJob.inputs_schema_uri]'s - title. - specialist_pools (Sequence[str]): - The SpecialistPools' resource names - associated with this job. - encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): - Customer-managed encryption key spec for a - DataLabelingJob. If set, this DataLabelingJob - will be secured by this key. - Note: Annotations created in the DataLabelingJob - are associated with the EncryptionSpec of the - Dataset they are exported to. - active_learning_config (google.cloud.aiplatform_v1.types.ActiveLearningConfig): - Parameters that configure the active learning - pipeline. Active learning will label the data - incrementally via several iterations. For every - iteration, it will select a batch of data based - on the sampling strategy.
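A minimal sketch of the DataLabelingJob message described above, assuming the released google-cloud-aiplatform package; the dataset resource name, bucket, and inputs schema URI are placeholders, and the required ``inputs`` Value is omitted for brevity:

    from google.cloud import aiplatform_v1

    job = aiplatform_v1.DataLabelingJob(
        display_name="label-images",
        datasets=["projects/my-project/locations/us-central1/datasets/456"],
        labeler_count=1,
        instruction_uri="gs://my-bucket/instructions.pdf",
        inputs_schema_uri=(
            "gs://google-cloud-aiplatform/schema/datalabelingjob/inputs/"
            "image_classification_1.0.0.yaml"  # placeholder schema
        ),
        annotation_labels={
            # Groups the generated Annotations into a named annotation set.
            "aiplatform.googleapis.com/annotation_set_name": "my-annotation-set"
        },
    )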
- """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - datasets = proto.RepeatedField( - proto.STRING, - number=3, - ) - annotation_labels = proto.MapField( - proto.STRING, - proto.STRING, - number=12, - ) - labeler_count = proto.Field( - proto.INT32, - number=4, - ) - instruction_uri = proto.Field( - proto.STRING, - number=5, - ) - inputs_schema_uri = proto.Field( - proto.STRING, - number=6, - ) - inputs = proto.Field( - proto.MESSAGE, - number=7, - message=struct_pb2.Value, - ) - state = proto.Field( - proto.ENUM, - number=8, - enum=job_state.JobState, - ) - labeling_progress = proto.Field( - proto.INT32, - number=13, - ) - current_spend = proto.Field( - proto.MESSAGE, - number=14, - message=money_pb2.Money, - ) - create_time = proto.Field( - proto.MESSAGE, - number=9, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=10, - message=timestamp_pb2.Timestamp, - ) - error = proto.Field( - proto.MESSAGE, - number=22, - message=status_pb2.Status, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=11, - ) - specialist_pools = proto.RepeatedField( - proto.STRING, - number=16, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=20, - message=gca_encryption_spec.EncryptionSpec, - ) - active_learning_config = proto.Field( - proto.MESSAGE, - number=21, - message='ActiveLearningConfig', - ) - - -class ActiveLearningConfig(proto.Message): - r"""Parameters that configure the active learning pipeline. - Active learning will label the data incrementally by several - iterations. For every iteration, it will select a batch of data - based on the sampling strategy. - - Attributes: - max_data_item_count (int): - Max number of human labeled DataItems. - max_data_item_percentage (int): - Max percent of total DataItems for human - labeling. - sample_config (google.cloud.aiplatform_v1.types.SampleConfig): - Active learning data sampling config. For - every active learning labeling iteration, it - will select a batch of data based on the - sampling strategy. - training_config (google.cloud.aiplatform_v1.types.TrainingConfig): - CMLE training config. For every active - learning labeling iteration, system will train a - machine learning model on CMLE. The trained - model will be used by data sampling algorithm to - select DataItems. - """ - - max_data_item_count = proto.Field( - proto.INT64, - number=1, - oneof='human_labeling_budget', - ) - max_data_item_percentage = proto.Field( - proto.INT32, - number=2, - oneof='human_labeling_budget', - ) - sample_config = proto.Field( - proto.MESSAGE, - number=3, - message='SampleConfig', - ) - training_config = proto.Field( - proto.MESSAGE, - number=4, - message='TrainingConfig', - ) - - -class SampleConfig(proto.Message): - r"""Active learning data sampling config. For every active - learning labeling iteration, it will select a batch of data - based on the sampling strategy. - - Attributes: - initial_batch_sample_percentage (int): - The percentage of data needed to be labeled - in the first batch. - following_batch_sample_percentage (int): - The percentage of data needed to be labeled - in each following batch (except the first - batch). - sample_strategy (google.cloud.aiplatform_v1.types.SampleConfig.SampleStrategy): - Field to choose sampling strategy. Sampling - strategy will decide which data should be - selected for human labeling in every batch. 
- """ - class SampleStrategy(proto.Enum): - r"""Sample strategy decides which subset of DataItems should be - selected for human labeling in every batch. - """ - SAMPLE_STRATEGY_UNSPECIFIED = 0 - UNCERTAINTY = 1 - - initial_batch_sample_percentage = proto.Field( - proto.INT32, - number=1, - oneof='initial_batch_sample_size', - ) - following_batch_sample_percentage = proto.Field( - proto.INT32, - number=3, - oneof='following_batch_sample_size', - ) - sample_strategy = proto.Field( - proto.ENUM, - number=5, - enum=SampleStrategy, - ) - - -class TrainingConfig(proto.Message): - r"""CMLE training config. For every active learning labeling - iteration, system will train a machine learning model on CMLE. - The trained model will be used by data sampling algorithm to - select DataItems. - - Attributes: - timeout_training_milli_hours (int): - The timeout hours for the CMLE training job, - expressed in milli hours i.e. 1,000 value in - this field means 1 hour. - """ - - timeout_training_milli_hours = proto.Field( - proto.INT64, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset.py deleted file mode 100644 index 11c8574659..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset.py +++ /dev/null @@ -1,220 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1.types import io -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'Dataset', - 'ImportDataConfig', - 'ExportDataConfig', - }, -) - - -class Dataset(proto.Message): - r"""A collection of DataItems and Annotations on them. - Attributes: - name (str): - Output only. The resource name of the - Dataset. - display_name (str): - Required. The user-defined name of the - Dataset. The name can be up to 128 characters - long and can be consist of any UTF-8 characters. - metadata_schema_uri (str): - Required. Points to a YAML file stored on - Google Cloud Storage describing additional - information about the Dataset. The schema is - defined as an OpenAPI 3.0.2 Schema Object. The - schema files that can be used here are found in - gs://google-cloud- - aiplatform/schema/dataset/metadata/. - metadata (google.protobuf.struct_pb2.Value): - Required. Additional information about the - Dataset. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Dataset was - created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Dataset was - last updated. - etag (str): - Used to perform consistent read-modify-write - updates. 
If not set, a blind "overwrite" update - happens. - labels (Sequence[google.cloud.aiplatform_v1.types.Dataset.LabelsEntry]): - The labels with user-defined metadata to organize your - Datasets. - - Label keys and values can be no longer than 64 characters - (Unicode codepoints), can only contain lowercase letters, - numeric characters, underscores and dashes. International - characters are allowed. No more than 64 user labels can be - associated with one Dataset (System labels are excluded). - - See https://goo.gl/xmQnxf for more information and examples - of labels. System reserved label keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. Following - system labels exist for each Dataset: - - - "aiplatform.googleapis.com/dataset_metadata_schema": - output only, its value is the - [metadata_schema's][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] - title. - encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): - Customer-managed encryption key spec for a - Dataset. If set, this Dataset and all sub- - resources of this Dataset will be secured by - this key. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - metadata_schema_uri = proto.Field( - proto.STRING, - number=3, - ) - metadata = proto.Field( - proto.MESSAGE, - number=8, - message=struct_pb2.Value, - ) - create_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - etag = proto.Field( - proto.STRING, - number=6, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=7, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=11, - message=gca_encryption_spec.EncryptionSpec, - ) - - -class ImportDataConfig(proto.Message): - r"""Describes the location from where we import data into a - Dataset, together with the labels that will be applied to the - DataItems and the Annotations. - - Attributes: - gcs_source (google.cloud.aiplatform_v1.types.GcsSource): - The Google Cloud Storage location for the - input content. - data_item_labels (Sequence[google.cloud.aiplatform_v1.types.ImportDataConfig.DataItemLabelsEntry]): - Labels that will be applied to newly imported DataItems. If - an identical DataItem as one being imported already exists - in the Dataset, then these labels will be appended to these - of the already existing one, and if labels with identical - key is imported before, the old label value will be - overwritten. If two DataItems are identical in the same - import data operation, the labels will be combined and if - key collision happens in this case, one of the values will - be picked randomly. Two DataItems are considered identical - if their content bytes are identical (e.g. image bytes or - pdf bytes). These labels will be overridden by Annotation - labels specified inside index file referenced by - [import_schema_uri][google.cloud.aiplatform.v1.ImportDataConfig.import_schema_uri], - e.g. jsonl file. - import_schema_uri (str): - Required. Points to a YAML file stored on Google Cloud - Storage describing the import format. Validation will be - done against the schema. The schema is defined as an - `OpenAPI 3.0.2 Schema - Object `__. 
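[Editor's sketch: gcs_source fills the `source` oneof and data_item_labels is a plain string map. URIs are placeholders; the schema path follows the documented gs://google-cloud-aiplatform bucket layout but the exact file name is assumed.]

from google.cloud import aiplatform_v1

import_config = aiplatform_v1.ImportDataConfig(
    gcs_source=aiplatform_v1.GcsSource(
        uris=["gs://my-bucket/import/*.jsonl"]  # wildcards are allowed
    ),
    data_item_labels={"batch": "2021-07"},
    import_schema_uri=(
        "gs://google-cloud-aiplatform/schema/dataset/ioformat/"
        "image_classification_single_label_io_format_1.0.0.yaml"  # assumed
    ),
)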
- """ - - gcs_source = proto.Field( - proto.MESSAGE, - number=1, - oneof='source', - message=io.GcsSource, - ) - data_item_labels = proto.MapField( - proto.STRING, - proto.STRING, - number=2, - ) - import_schema_uri = proto.Field( - proto.STRING, - number=4, - ) - - -class ExportDataConfig(proto.Message): - r"""Describes what part of the Dataset is to be exported, the - destination of the export and how to export. - - Attributes: - gcs_destination (google.cloud.aiplatform_v1.types.GcsDestination): - The Google Cloud Storage location where the output is to be - written to. In the given directory a new directory will be - created with name: - ``export-data--`` - where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 - format. All export output will be written into that - directory. Inside that directory, annotations with the same - schema will be grouped into sub directories which are named - with the corresponding annotations' schema title. Inside - these sub directories, a schema.yaml will be created to - describe the output format. - annotations_filter (str): - A filter on Annotations of the Dataset. Only Annotations on - to-be-exported DataItems(specified by [data_items_filter][]) - that match this filter will be exported. The filter syntax - is the same as in - [ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]. - """ - - gcs_destination = proto.Field( - proto.MESSAGE, - number=1, - oneof='destination', - message=io.GcsDestination, - ) - annotations_filter = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset_service.py deleted file mode 100644 index bcd7ca90bf..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset_service.py +++ /dev/null @@ -1,542 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import annotation -from google.cloud.aiplatform_v1.types import data_item -from google.cloud.aiplatform_v1.types import dataset as gca_dataset -from google.cloud.aiplatform_v1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'CreateDatasetRequest', - 'CreateDatasetOperationMetadata', - 'GetDatasetRequest', - 'UpdateDatasetRequest', - 'ListDatasetsRequest', - 'ListDatasetsResponse', - 'DeleteDatasetRequest', - 'ImportDataRequest', - 'ImportDataResponse', - 'ImportDataOperationMetadata', - 'ExportDataRequest', - 'ExportDataResponse', - 'ExportDataOperationMetadata', - 'ListDataItemsRequest', - 'ListDataItemsResponse', - 'GetAnnotationSpecRequest', - 'ListAnnotationsRequest', - 'ListAnnotationsResponse', - }, -) - - -class CreateDatasetRequest(proto.Message): - r"""Request message for - [DatasetService.CreateDataset][google.cloud.aiplatform.v1.DatasetService.CreateDataset]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - Dataset in. Format: - ``projects/{project}/locations/{location}`` - dataset (google.cloud.aiplatform_v1.types.Dataset): - Required. The Dataset to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - dataset = proto.Field( - proto.MESSAGE, - number=2, - message=gca_dataset.Dataset, - ) - - -class CreateDatasetOperationMetadata(proto.Message): - r"""Runtime operation information for - [DatasetService.CreateDataset][google.cloud.aiplatform.v1.DatasetService.CreateDataset]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The operation generic information. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class GetDatasetRequest(proto.Message): - r"""Request message for - [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset]. - - Attributes: - name (str): - Required. The name of the Dataset resource. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class UpdateDatasetRequest(proto.Message): - r"""Request message for - [DatasetService.UpdateDataset][google.cloud.aiplatform.v1.DatasetService.UpdateDataset]. - - Attributes: - dataset (google.cloud.aiplatform_v1.types.Dataset): - Required. The Dataset which replaces the - resource on the server. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the resource. For the - ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - Updatable fields: - - - ``display_name`` - - ``description`` - - ``labels`` - """ - - dataset = proto.Field( - proto.MESSAGE, - number=1, - message=gca_dataset.Dataset, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class ListDatasetsRequest(proto.Message): - r"""Request message for - [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. - - Attributes: - parent (str): - Required. The name of the Dataset's parent resource. 
Format: - ``projects/{project}/locations/{location}`` - filter (str): - An expression for filtering the results of the request. For - field names both snake_case and camelCase are supported. - - - ``display_name``: supports = and != - - ``metadata_schema_uri``: supports = and != - - ``labels`` supports general map functions that is: - - - ``labels.key=value`` - key:value equality - - \`labels.key:\* or labels:key - key existence - - A key including a space must be quoted. - ``labels."a key"``. - - Some examples: - - - ``displayName="myDisplayName"`` - - ``labels.myKey="myValue"`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - order_by (str): - A comma-separated list of fields to order by, sorted in - ascending order. Use "desc" after a field name for - descending. Supported fields: - - - ``display_name`` - - ``create_time`` - - ``update_time`` - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - order_by = proto.Field( - proto.STRING, - number=6, - ) - - -class ListDatasetsResponse(proto.Message): - r"""Response message for - [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. - - Attributes: - datasets (Sequence[google.cloud.aiplatform_v1.types.Dataset]): - A list of Datasets that matches the specified - filter in the request. - next_page_token (str): - The standard List next-page token. - """ - - @property - def raw_page(self): - return self - - datasets = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_dataset.Dataset, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteDatasetRequest(proto.Message): - r"""Request message for - [DatasetService.DeleteDataset][google.cloud.aiplatform.v1.DatasetService.DeleteDataset]. - - Attributes: - name (str): - Required. The resource name of the Dataset to delete. - Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ImportDataRequest(proto.Message): - r"""Request message for - [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. - - Attributes: - name (str): - Required. The name of the Dataset resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - import_configs (Sequence[google.cloud.aiplatform_v1.types.ImportDataConfig]): - Required. The desired input locations. The - contents of all input locations will be imported - in one batch. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - import_configs = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=gca_dataset.ImportDataConfig, - ) - - -class ImportDataResponse(proto.Message): - r"""Response message for - [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. - """ - - -class ImportDataOperationMetadata(proto.Message): - r"""Runtime operation information for - [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The common part of the operation metadata. 
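[Editor's sketch: driving the import with the generated client, reusing the import_config sketched earlier. Assumes application-default credentials; the dataset name is a placeholder. import_data returns a long-running operation.]

from google.cloud import aiplatform_v1

client = aiplatform_v1.DatasetServiceClient()
operation = client.import_data(
    name="projects/my-project/locations/us-central1/datasets/456",
    import_configs=[import_config],  # all configs are imported in one batch
)
operation.result()  # block until the LRO finishes; raises on failure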
- """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class ExportDataRequest(proto.Message): - r"""Request message for - [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. - - Attributes: - name (str): - Required. The name of the Dataset resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - export_config (google.cloud.aiplatform_v1.types.ExportDataConfig): - Required. The desired output location. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - export_config = proto.Field( - proto.MESSAGE, - number=2, - message=gca_dataset.ExportDataConfig, - ) - - -class ExportDataResponse(proto.Message): - r"""Response message for - [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. - - Attributes: - exported_files (Sequence[str]): - All of the files that are exported in this - export operation. - """ - - exported_files = proto.RepeatedField( - proto.STRING, - number=1, - ) - - -class ExportDataOperationMetadata(proto.Message): - r"""Runtime operation information for - [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The common part of the operation metadata. - gcs_output_directory (str): - A Google Cloud Storage directory which path - ends with '/'. The exported data is stored in - the directory. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - gcs_output_directory = proto.Field( - proto.STRING, - number=2, - ) - - -class ListDataItemsRequest(proto.Message): - r"""Request message for - [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. - - Attributes: - parent (str): - Required. The resource name of the Dataset to list DataItems - from. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - filter (str): - The standard list filter. - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - order_by (str): - A comma-separated list of fields to order by, - sorted in ascending order. Use "desc" after a - field name for descending. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - order_by = proto.Field( - proto.STRING, - number=6, - ) - - -class ListDataItemsResponse(proto.Message): - r"""Response message for - [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. - - Attributes: - data_items (Sequence[google.cloud.aiplatform_v1.types.DataItem]): - A list of DataItems that matches the - specified filter in the request. - next_page_token (str): - The standard List next-page token. 
- """ - - @property - def raw_page(self): - return self - - data_items = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=data_item.DataItem, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class GetAnnotationSpecRequest(proto.Message): - r"""Request message for - [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1.DatasetService.GetAnnotationSpec]. - - Attributes: - name (str): - Required. The name of the AnnotationSpec resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}`` - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class ListAnnotationsRequest(proto.Message): - r"""Request message for - [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]. - - Attributes: - parent (str): - Required. The resource name of the DataItem to list - Annotations from. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}`` - filter (str): - The standard list filter. - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - order_by (str): - A comma-separated list of fields to order by, - sorted in ascending order. Use "desc" after a - field name for descending. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - order_by = proto.Field( - proto.STRING, - number=6, - ) - - -class ListAnnotationsResponse(proto.Message): - r"""Response message for - [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]. - - Attributes: - annotations (Sequence[google.cloud.aiplatform_v1.types.Annotation]): - A list of Annotations that matches the - specified filter in the request. - next_page_token (str): - The standard List next-page token. - """ - - @property - def raw_page(self): - return self - - annotations = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=annotation.Annotation, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/deployed_model_ref.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/deployed_model_ref.py deleted file mode 100644 index b42f406e8c..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/deployed_model_ref.py +++ /dev/null @@ -1,47 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'DeployedModelRef', - }, -) - - -class DeployedModelRef(proto.Message): - r"""Points to a DeployedModel. - Attributes: - endpoint (str): - Immutable. A resource name of an Endpoint. - deployed_model_id (str): - Immutable. An ID of a DeployedModel in the - above Endpoint. - """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - deployed_model_id = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/encryption_spec.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/encryption_spec.py deleted file mode 100644 index 3eda5aeb6d..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/encryption_spec.py +++ /dev/null @@ -1,47 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'EncryptionSpec', - }, -) - - -class EncryptionSpec(proto.Message): - r"""Represents a customer-managed encryption key spec that can be - applied to a top-level resource. - - Attributes: - kms_key_name (str): - Required. The Cloud KMS resource identifier of the customer - managed encryption key used to protect a resource. Has the - form: - ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. - The key needs to be in the same region as where the compute - resource is created. - """ - - kms_key_name = proto.Field( - proto.STRING, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint.py deleted file mode 100644 index b8bbf96850..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint.py +++ /dev/null @@ -1,238 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
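[Editor's sketch: EncryptionSpec is embedded by several top-level resources rather than being a resource itself. Project, key ring, key, and schema file are placeholders; the key must live in the region of the protected resource.]

from google.cloud import aiplatform_v1

dataset = aiplatform_v1.Dataset(
    display_name="cmek-protected-dataset",
    metadata_schema_uri=(
        "gs://google-cloud-aiplatform/schema/dataset/metadata/"
        "image_1.0.0.yaml"  # assumed metadata schema
    ),
    metadata={},  # schema-dependent; coerced into struct_pb2.Value
    encryption_spec=aiplatform_v1.EncryptionSpec(
        kms_key_name=(
            "projects/my-project/locations/us-central1/"
            "keyRings/my-kr/cryptoKeys/my-key"
        ),
    ),
)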
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1.types import machine_resources -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'Endpoint', - 'DeployedModel', - }, -) - - -class Endpoint(proto.Message): - r"""Models are deployed into it, and afterwards Endpoint is - called to obtain predictions and explanations. - - Attributes: - name (str): - Output only. The resource name of the - Endpoint. - display_name (str): - Required. The display name of the Endpoint. - The name can be up to 128 characters long and - can be consist of any UTF-8 characters. - description (str): - The description of the Endpoint. - deployed_models (Sequence[google.cloud.aiplatform_v1.types.DeployedModel]): - Output only. The models deployed in this Endpoint. To add or - remove DeployedModels use - [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel] - and - [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel] - respectively. - traffic_split (Sequence[google.cloud.aiplatform_v1.types.Endpoint.TrafficSplitEntry]): - A map from a DeployedModel's ID to the - percentage of this Endpoint's traffic that - should be forwarded to that DeployedModel. - If a DeployedModel's ID is not listed in this - map, then it receives no traffic. - - The traffic percentage values must add up to - 100, or map must be empty if the Endpoint is to - not accept any traffic at a moment. - etag (str): - Used to perform consistent read-modify-write - updates. If not set, a blind "overwrite" update - happens. - labels (Sequence[google.cloud.aiplatform_v1.types.Endpoint.LabelsEntry]): - The labels with user-defined metadata to - organize your Endpoints. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Endpoint was - created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Endpoint was - last updated. - encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): - Customer-managed encryption key spec for an - Endpoint. If set, this Endpoint and all sub- - resources of this Endpoint will be secured by - this key. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - description = proto.Field( - proto.STRING, - number=3, - ) - deployed_models = proto.RepeatedField( - proto.MESSAGE, - number=4, - message='DeployedModel', - ) - traffic_split = proto.MapField( - proto.STRING, - proto.INT32, - number=5, - ) - etag = proto.Field( - proto.STRING, - number=6, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=7, - ) - create_time = proto.Field( - proto.MESSAGE, - number=8, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=9, - message=timestamp_pb2.Timestamp, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=10, - message=gca_encryption_spec.EncryptionSpec, - ) - - -class DeployedModel(proto.Message): - r"""A deployment of a Model. 
Endpoints contain one or more - DeployedModels. - - Attributes: - dedicated_resources (google.cloud.aiplatform_v1.types.DedicatedResources): - A description of resources that are dedicated - to the DeployedModel, and that need a higher - degree of manual configuration. - automatic_resources (google.cloud.aiplatform_v1.types.AutomaticResources): - A description of resources that to large - degree are decided by AI Platform, and require - only a modest additional configuration. - id (str): - Output only. The ID of the DeployedModel. - model (str): - Required. The name of the Model that this is - the deployment of. Note that the Model may be in - a different location than the DeployedModel's - Endpoint. - display_name (str): - The display name of the DeployedModel. If not provided upon - creation, the Model's display_name is used. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when the DeployedModel - was created. - service_account (str): - The service account that the DeployedModel's container runs - as. Specify the email address of the service account. If - this service account is not specified, the container runs as - a service account that doesn't have access to the resource - project. - - Users deploying the Model must have the - ``iam.serviceAccounts.actAs`` permission on this service - account. - disable_container_logging (bool): - For custom-trained Models and AutoML Tabular Models, the - container of the DeployedModel instances will send - ``stderr`` and ``stdout`` streams to Stackdriver Logging by - default. Please note that the logs incur cost, which are - subject to `Cloud Logging - pricing `__. - - User can disable container logging by setting this flag to - true. - enable_access_logging (bool): - These logs are like standard server access - logs, containing information like timestamp and - latency for each prediction request. - Note that Stackdriver logs may incur a cost, - especially if your project receives prediction - requests at a high queries per second rate - (QPS). Estimate your costs before enabling this - option. - """ - - dedicated_resources = proto.Field( - proto.MESSAGE, - number=7, - oneof='prediction_resources', - message=machine_resources.DedicatedResources, - ) - automatic_resources = proto.Field( - proto.MESSAGE, - number=8, - oneof='prediction_resources', - message=machine_resources.AutomaticResources, - ) - id = proto.Field( - proto.STRING, - number=1, - ) - model = proto.Field( - proto.STRING, - number=2, - ) - display_name = proto.Field( - proto.STRING, - number=3, - ) - create_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - service_account = proto.Field( - proto.STRING, - number=11, - ) - disable_container_logging = proto.Field( - proto.BOOL, - number=15, - ) - enable_access_logging = proto.Field( - proto.BOOL, - number=13, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint_service.py deleted file mode 100644 index 8a6b87d800..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint_service.py +++ /dev/null @@ -1,394 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
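[Editor's sketch: dedicated_resources and automatic_resources form the prediction_resources oneof, so a DeployedModel carries exactly one of them. Model name and machine shape are hypothetical.]

from google.cloud import aiplatform_v1

deployed_model = aiplatform_v1.DeployedModel(
    model="projects/123/locations/us-central1/models/456",
    display_name="my-deployment",
    dedicated_resources=aiplatform_v1.DedicatedResources(
        machine_spec=aiplatform_v1.MachineSpec(machine_type="n1-standard-4"),
        min_replica_count=1,
        max_replica_count=2,
    ),
)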
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint -from google.cloud.aiplatform_v1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'CreateEndpointRequest', - 'CreateEndpointOperationMetadata', - 'GetEndpointRequest', - 'ListEndpointsRequest', - 'ListEndpointsResponse', - 'UpdateEndpointRequest', - 'DeleteEndpointRequest', - 'DeployModelRequest', - 'DeployModelResponse', - 'DeployModelOperationMetadata', - 'UndeployModelRequest', - 'UndeployModelResponse', - 'UndeployModelOperationMetadata', - }, -) - - -class CreateEndpointRequest(proto.Message): - r"""Request message for - [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1.EndpointService.CreateEndpoint]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - Endpoint in. Format: - ``projects/{project}/locations/{location}`` - endpoint (google.cloud.aiplatform_v1.types.Endpoint): - Required. The Endpoint to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - endpoint = proto.Field( - proto.MESSAGE, - number=2, - message=gca_endpoint.Endpoint, - ) - - -class CreateEndpointOperationMetadata(proto.Message): - r"""Runtime operation information for - [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1.EndpointService.CreateEndpoint]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The operation generic information. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class GetEndpointRequest(proto.Message): - r"""Request message for - [EndpointService.GetEndpoint][google.cloud.aiplatform.v1.EndpointService.GetEndpoint] - - Attributes: - name (str): - Required. The name of the Endpoint resource. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListEndpointsRequest(proto.Message): - r"""Request message for - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints]. - - Attributes: - parent (str): - Required. The resource name of the Location from which to - list the Endpoints. Format: - ``projects/{project}/locations/{location}`` - filter (str): - Optional. An expression for filtering the results of the - request. For field names both snake_case and camelCase are - supported. - - - ``endpoint`` supports = and !=. ``endpoint`` represents - the Endpoint ID, i.e. the last segment of the Endpoint's - [resource - name][google.cloud.aiplatform.v1.Endpoint.name]. - - ``display_name`` supports = and, != - - ``labels`` supports general map functions that is: - - - ``labels.key=value`` - key:value equality - - \`labels.key:\* or labels:key - key existence - - A key including a space must be quoted. - ``labels."a key"``. 
- - Some examples: - - - ``endpoint=1`` - - ``displayName="myDisplayName"`` - - ``labels.myKey="myValue"`` - page_size (int): - Optional. The standard list page size. - page_token (str): - Optional. The standard list page token. Typically obtained - via - [ListEndpointsResponse.next_page_token][google.cloud.aiplatform.v1.ListEndpointsResponse.next_page_token] - of the previous - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Optional. Mask specifying which fields to - read. - order_by (str): - A comma-separated list of fields to order by, sorted in - ascending order. Use "desc" after a field name for - descending. Supported fields: - - - ``display_name`` - - ``create_time`` - - ``update_time`` - - Example: ``display_name, create_time desc``. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - order_by = proto.Field( - proto.STRING, - number=6, - ) - - -class ListEndpointsResponse(proto.Message): - r"""Response message for - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints]. - - Attributes: - endpoints (Sequence[google.cloud.aiplatform_v1.types.Endpoint]): - List of Endpoints in the requested page. - next_page_token (str): - A token to retrieve the next page of results. Pass to - [ListEndpointsRequest.page_token][google.cloud.aiplatform.v1.ListEndpointsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - endpoints = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_endpoint.Endpoint, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateEndpointRequest(proto.Message): - r"""Request message for - [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint]. - - Attributes: - endpoint (google.cloud.aiplatform_v1.types.Endpoint): - Required. The Endpoint which replaces the - resource on the server. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the resource. See - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - """ - - endpoint = proto.Field( - proto.MESSAGE, - number=1, - message=gca_endpoint.Endpoint, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class DeleteEndpointRequest(proto.Message): - r"""Request message for - [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1.EndpointService.DeleteEndpoint]. - - Attributes: - name (str): - Required. The name of the Endpoint resource to be deleted. - Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class DeployModelRequest(proto.Message): - r"""Request message for - [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel]. - - Attributes: - endpoint (str): - Required. The name of the Endpoint resource into which to - deploy a Model. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - deployed_model (google.cloud.aiplatform_v1.types.DeployedModel): - Required. The DeployedModel to be created within the - Endpoint. 
Note that - [Endpoint.traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] - must be updated for the DeployedModel to start receiving - traffic, either as part of this call, or via - [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint]. - traffic_split (Sequence[google.cloud.aiplatform_v1.types.DeployModelRequest.TrafficSplitEntry]): - A map from a DeployedModel's ID to the percentage of this - Endpoint's traffic that should be forwarded to that - DeployedModel. - - If this field is non-empty, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] - will be overwritten with it. To refer to the ID of the just - being deployed Model, a "0" should be used, and the actual - ID of the new DeployedModel will be filled in its place by - this method. The traffic percentage values must add up to - 100. - - If this field is empty, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] - is not updated. - """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - deployed_model = proto.Field( - proto.MESSAGE, - number=2, - message=gca_endpoint.DeployedModel, - ) - traffic_split = proto.MapField( - proto.STRING, - proto.INT32, - number=3, - ) - - -class DeployModelResponse(proto.Message): - r"""Response message for - [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel]. - - Attributes: - deployed_model (google.cloud.aiplatform_v1.types.DeployedModel): - The DeployedModel that had been deployed in - the Endpoint. - """ - - deployed_model = proto.Field( - proto.MESSAGE, - number=1, - message=gca_endpoint.DeployedModel, - ) - - -class DeployModelOperationMetadata(proto.Message): - r"""Runtime operation information for - [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The operation generic information. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class UndeployModelRequest(proto.Message): - r"""Request message for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel]. - - Attributes: - endpoint (str): - Required. The name of the Endpoint resource from which to - undeploy a Model. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - deployed_model_id (str): - Required. The ID of the DeployedModel to be - undeployed from the Endpoint. - traffic_split (Sequence[google.cloud.aiplatform_v1.types.UndeployModelRequest.TrafficSplitEntry]): - If this field is provided, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] - will be overwritten with it. If last DeployedModel is being - undeployed from the Endpoint, the [Endpoint.traffic_split] - will always end up empty when this call returns. A - DeployedModel will be successfully undeployed only if it - doesn't have any traffic assigned to it when this method - executes, or if this field unassigns any traffic to it. - """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - deployed_model_id = proto.Field( - proto.STRING, - number=2, - ) - traffic_split = proto.MapField( - proto.STRING, - proto.INT32, - number=3, - ) - - -class UndeployModelResponse(proto.Message): - r"""Response message for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel]. 
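[Editor's sketch: the special "0" key in traffic_split stands for the model being deployed by this very request. Reuses the DeployedModel sketched above; the endpoint name is a placeholder.]

from google.cloud import aiplatform_v1

client = aiplatform_v1.EndpointServiceClient()
operation = client.deploy_model(
    endpoint="projects/my-project/locations/us-central1/endpoints/789",
    deployed_model=deployed_model,
    traffic_split={"0": 100},  # send all traffic to the new deployment
)
print(operation.result().deployed_model.id)  # server-assigned ID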
- """ - - -class UndeployModelOperationMetadata(proto.Message): - r"""Runtime operation information for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The operation generic information. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/env_var.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/env_var.py deleted file mode 100644 index 956d93aff5..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/env_var.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'EnvVar', - }, -) - - -class EnvVar(proto.Message): - r"""Represents an environment variable present in a Container or - Python Module. - - Attributes: - name (str): - Required. Name of the environment variable. - Must be a valid C identifier. - value (str): - Required. Variables that reference a $(VAR_NAME) are - expanded using the previous defined environment variables in - the container and any service environment variables. If a - variable cannot be resolved, the reference in the input - string will be unchanged. The $(VAR_NAME) syntax can be - escaped with a double $$, ie: $$(VAR_NAME). Escaped - references will never be expanded, regardless of whether the - variable exists or not. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - value = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py deleted file mode 100644 index c402d8b609..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py +++ /dev/null @@ -1,182 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import custom_job -from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1.types import job_state -from google.cloud.aiplatform_v1.types import study -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'HyperparameterTuningJob', - }, -) - - -class HyperparameterTuningJob(proto.Message): - r"""Represents a HyperparameterTuningJob. A - HyperparameterTuningJob has a Study specification and multiple - CustomJobs with identical CustomJob specification. - - Attributes: - name (str): - Output only. Resource name of the - HyperparameterTuningJob. - display_name (str): - Required. The display name of the - HyperparameterTuningJob. The name can be up to - 128 characters long and can be consist of any - UTF-8 characters. - study_spec (google.cloud.aiplatform_v1.types.StudySpec): - Required. Study configuration of the - HyperparameterTuningJob. - max_trial_count (int): - Required. The desired total number of Trials. - parallel_trial_count (int): - Required. The desired number of Trials to run - in parallel. - max_failed_trial_count (int): - The number of failed Trials that need to be - seen before failing the HyperparameterTuningJob. - If set to 0, Vertex AI decides how many Trials - must fail before the whole job fails. - trial_job_spec (google.cloud.aiplatform_v1.types.CustomJobSpec): - Required. The spec of a trial job. The same - spec applies to the CustomJobs created in all - the trials. - trials (Sequence[google.cloud.aiplatform_v1.types.Trial]): - Output only. Trials of the - HyperparameterTuningJob. - state (google.cloud.aiplatform_v1.types.JobState): - Output only. The detailed state of the job. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the - HyperparameterTuningJob was created. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the HyperparameterTuningJob for the - first time entered the ``JOB_STATE_RUNNING`` state. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the HyperparameterTuningJob entered - any of the following states: ``JOB_STATE_SUCCEEDED``, - ``JOB_STATE_FAILED``, ``JOB_STATE_CANCELLED``. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the - HyperparameterTuningJob was most recently - updated. - error (google.rpc.status_pb2.Status): - Output only. Only populated when job's state is - JOB_STATE_FAILED or JOB_STATE_CANCELLED. - labels (Sequence[google.cloud.aiplatform_v1.types.HyperparameterTuningJob.LabelsEntry]): - The labels with user-defined metadata to - organize HyperparameterTuningJobs. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): - Customer-managed encryption key options for a - HyperparameterTuningJob. If this is set, then - all resources created by the - HyperparameterTuningJob will be encrypted with - the provided encryption key. 
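[Editor's sketch: a hedged construction of the job described above. study_spec and trial_job_spec stand in for a prebuilt StudySpec (metrics plus parameter ranges) and CustomJobSpec; counts are illustrative.]

from google.cloud import aiplatform_v1

hp_job = aiplatform_v1.HyperparameterTuningJob(
    display_name="my-hp-search",
    study_spec=study_spec,          # assumed prebuilt StudySpec
    max_trial_count=32,
    parallel_trial_count=4,
    max_failed_trial_count=0,       # 0 delegates the threshold to Vertex AI
    trial_job_spec=trial_job_spec,  # assumed prebuilt CustomJobSpec
)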
- """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - study_spec = proto.Field( - proto.MESSAGE, - number=4, - message=study.StudySpec, - ) - max_trial_count = proto.Field( - proto.INT32, - number=5, - ) - parallel_trial_count = proto.Field( - proto.INT32, - number=6, - ) - max_failed_trial_count = proto.Field( - proto.INT32, - number=7, - ) - trial_job_spec = proto.Field( - proto.MESSAGE, - number=8, - message=custom_job.CustomJobSpec, - ) - trials = proto.RepeatedField( - proto.MESSAGE, - number=9, - message=study.Trial, - ) - state = proto.Field( - proto.ENUM, - number=10, - enum=job_state.JobState, - ) - create_time = proto.Field( - proto.MESSAGE, - number=11, - message=timestamp_pb2.Timestamp, - ) - start_time = proto.Field( - proto.MESSAGE, - number=12, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=13, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=14, - message=timestamp_pb2.Timestamp, - ) - error = proto.Field( - proto.MESSAGE, - number=15, - message=status_pb2.Status, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=16, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=17, - message=gca_encryption_spec.EncryptionSpec, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/io.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/io.py deleted file mode 100644 index 4cb224ae58..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/io.py +++ /dev/null @@ -1,130 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'GcsSource', - 'GcsDestination', - 'BigQuerySource', - 'BigQueryDestination', - 'ContainerRegistryDestination', - }, -) - - -class GcsSource(proto.Message): - r"""The Google Cloud Storage location for the input content. - Attributes: - uris (Sequence[str]): - Required. Google Cloud Storage URI(-s) to the - input file(s). May contain wildcards. For more - information on wildcards, see - https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. - """ - - uris = proto.RepeatedField( - proto.STRING, - number=1, - ) - - -class GcsDestination(proto.Message): - r"""The Google Cloud Storage location where the output is to be - written to. - - Attributes: - output_uri_prefix (str): - Required. Google Cloud Storage URI to output - directory. If the uri doesn't end with '/', a - '/' will be automatically appended. The - directory is created if it doesn't exist. - """ - - output_uri_prefix = proto.Field( - proto.STRING, - number=1, - ) - - -class BigQuerySource(proto.Message): - r"""The BigQuery location for the input content. - Attributes: - input_uri (str): - Required. BigQuery URI to a table, up to 2000 characters - long. 
Accepted forms: - - - BigQuery path. For example: - ``bq://projectId.bqDatasetId.bqTableId``. - """ - - input_uri = proto.Field( - proto.STRING, - number=1, - ) - - -class BigQueryDestination(proto.Message): - r"""The BigQuery location for the output content. - Attributes: - output_uri (str): - Required. BigQuery URI to a project or table, up to 2000 - characters long. - - When only the project is specified, the Dataset and Table is - created. When the full table reference is specified, the - Dataset must exist and table must not exist. - - Accepted forms: - - - BigQuery path. For example: ``bq://projectId`` or - ``bq://projectId.bqDatasetId`` or - ``bq://projectId.bqDatasetId.bqTableId``. - """ - - output_uri = proto.Field( - proto.STRING, - number=1, - ) - - -class ContainerRegistryDestination(proto.Message): - r"""The Container Registry location for the container image. - Attributes: - output_uri (str): - Required. Container Registry URI of a container image. Only - Google Container Registry and Artifact Registry are - supported now. Accepted forms: - - - Google Container Registry path. For example: - ``gcr.io/projectId/imageName:tag``. - - - Artifact Registry path. For example: - ``us-central1-docker.pkg.dev/projectId/repoName/imageName:tag``. - - If a tag is not specified, "latest" will be used as the - default tag. - """ - - output_uri = proto.Field( - proto.STRING, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_service.py deleted file mode 100644 index 8d39c4727b..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_service.py +++ /dev/null @@ -1,723 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
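[Editor's sketch: the io messages are thin wrappers around URI strings; the three destination flavors side by side. All URIs are placeholders.]

from google.cloud import aiplatform_v1

gcs_dest = aiplatform_v1.GcsDestination(
    output_uri_prefix="gs://my-bucket/output"  # trailing '/' added if missing
)
bq_dest = aiplatform_v1.BigQueryDestination(
    output_uri="bq://my-project"  # dataset and table created as needed
)
cr_dest = aiplatform_v1.ContainerRegistryDestination(
    output_uri="gcr.io/my-project/my-image"  # "latest" assumed when untagged
)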
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import batch_prediction_job as gca_batch_prediction_job -from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job -from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job -from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'CreateCustomJobRequest', - 'GetCustomJobRequest', - 'ListCustomJobsRequest', - 'ListCustomJobsResponse', - 'DeleteCustomJobRequest', - 'CancelCustomJobRequest', - 'CreateDataLabelingJobRequest', - 'GetDataLabelingJobRequest', - 'ListDataLabelingJobsRequest', - 'ListDataLabelingJobsResponse', - 'DeleteDataLabelingJobRequest', - 'CancelDataLabelingJobRequest', - 'CreateHyperparameterTuningJobRequest', - 'GetHyperparameterTuningJobRequest', - 'ListHyperparameterTuningJobsRequest', - 'ListHyperparameterTuningJobsResponse', - 'DeleteHyperparameterTuningJobRequest', - 'CancelHyperparameterTuningJobRequest', - 'CreateBatchPredictionJobRequest', - 'GetBatchPredictionJobRequest', - 'ListBatchPredictionJobsRequest', - 'ListBatchPredictionJobsResponse', - 'DeleteBatchPredictionJobRequest', - 'CancelBatchPredictionJobRequest', - }, -) - - -class CreateCustomJobRequest(proto.Message): - r"""Request message for - [JobService.CreateCustomJob][google.cloud.aiplatform.v1.JobService.CreateCustomJob]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - CustomJob in. Format: - ``projects/{project}/locations/{location}`` - custom_job (google.cloud.aiplatform_v1.types.CustomJob): - Required. The CustomJob to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - custom_job = proto.Field( - proto.MESSAGE, - number=2, - message=gca_custom_job.CustomJob, - ) - - -class GetCustomJobRequest(proto.Message): - r"""Request message for - [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob]. - - Attributes: - name (str): - Required. The name of the CustomJob resource. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListCustomJobsRequest(proto.Message): - r"""Request message for - [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs]. - - Attributes: - parent (str): - Required. The resource name of the Location to list the - CustomJobs from. Format: - ``projects/{project}/locations/{location}`` - filter (str): - The standard list filter. - - Supported fields: - - - ``display_name`` supports = and !=. - - - ``state`` supports = and !=. - - Some examples of using the filter are: - - - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` - - - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` - - - ``NOT display_name="my_job"`` - - - ``state="JOB_STATE_FAILED"`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained via - [ListCustomJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListCustomJobsResponse.next_page_token] - of the previous - [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. 
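[Editor's sketch: the documented filter syntax in use with the generated client. The parent is a placeholder; the returned pager follows page tokens automatically.]

from google.cloud import aiplatform_v1

client = aiplatform_v1.JobServiceClient()
pager = client.list_custom_jobs(
    request=aiplatform_v1.ListCustomJobsRequest(
        parent="projects/my-project/locations/us-central1",
        filter='state="JOB_STATE_SUCCEEDED" AND display_name="my_job"',
    )
)
for job in pager:
    print(job.name, job.state.name)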
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListCustomJobsResponse(proto.Message): - r"""Response message for - [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs] - - Attributes: - custom_jobs (Sequence[google.cloud.aiplatform_v1.types.CustomJob]): - List of CustomJobs in the requested page. - next_page_token (str): - A token to retrieve the next page of results. Pass to - [ListCustomJobsRequest.page_token][google.cloud.aiplatform.v1.ListCustomJobsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - custom_jobs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_custom_job.CustomJob, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteCustomJobRequest(proto.Message): - r"""Request message for - [JobService.DeleteCustomJob][google.cloud.aiplatform.v1.JobService.DeleteCustomJob]. - - Attributes: - name (str): - Required. The name of the CustomJob resource to be deleted. - Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CancelCustomJobRequest(proto.Message): - r"""Request message for - [JobService.CancelCustomJob][google.cloud.aiplatform.v1.JobService.CancelCustomJob]. - - Attributes: - name (str): - Required. The name of the CustomJob to cancel. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateDataLabelingJobRequest(proto.Message): - r"""Request message for - [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1.JobService.CreateDataLabelingJob]. - - Attributes: - parent (str): - Required. The parent of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}`` - data_labeling_job (google.cloud.aiplatform_v1.types.DataLabelingJob): - Required. The DataLabelingJob to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - data_labeling_job = proto.Field( - proto.MESSAGE, - number=2, - message=gca_data_labeling_job.DataLabelingJob, - ) - - -class GetDataLabelingJobRequest(proto.Message): - r"""Request message for - [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1.JobService.GetDataLabelingJob]. - - Attributes: - name (str): - Required. The name of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListDataLabelingJobsRequest(proto.Message): - r"""Request message for - [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. - - Attributes: - parent (str): - Required. The parent of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}`` - filter (str): - The standard list filter. - - Supported fields: - - - ``display_name`` supports = and !=. - - - ``state`` supports = and !=. 
- - Some examples of using the filter are: - - - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` - - - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` - - - ``NOT display_name="my_job"`` - - - ``state="JOB_STATE_FAILED"`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. FieldMask represents a - set of symbolic field paths. For example, the mask can be - ``paths: "name"``. The "name" here is a field in - DataLabelingJob. If this field is not set, all fields of the - DataLabelingJob are returned. - order_by (str): - A comma-separated list of fields to order by, sorted in - ascending order by default. Use ``desc`` after a field name - for descending. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - order_by = proto.Field( - proto.STRING, - number=6, - ) - - -class ListDataLabelingJobsResponse(proto.Message): - r"""Response message for - [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. - - Attributes: - data_labeling_jobs (Sequence[google.cloud.aiplatform_v1.types.DataLabelingJob]): - A list of DataLabelingJobs that matches the - specified filter in the request. - next_page_token (str): - The standard List next-page token. - """ - - @property - def raw_page(self): - return self - - data_labeling_jobs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_data_labeling_job.DataLabelingJob, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteDataLabelingJobRequest(proto.Message): - r"""Request message for - [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob]. - - Attributes: - name (str): - Required. The name of the DataLabelingJob to be deleted. - Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CancelDataLabelingJobRequest(proto.Message): - r"""Request message for - [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1.JobService.CancelDataLabelingJob]. - - Attributes: - name (str): - Required. The name of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateHyperparameterTuningJobRequest(proto.Message): - r"""Request message for - [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CreateHyperparameterTuningJob]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - HyperparameterTuningJob in. Format: - ``projects/{project}/locations/{location}`` - hyperparameter_tuning_job (google.cloud.aiplatform_v1.types.HyperparameterTuningJob): - Required. The HyperparameterTuningJob to - create. 
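# A sketch of page-level iteration over the list responses defined above
# (e.g. ListDataLabelingJobsResponse): flat iteration over the pager yields
# jobs, while .pages yields each raw response. The parent is a placeholder.
from google.cloud import aiplatform_v1

client = aiplatform_v1.JobServiceClient()
pager = client.list_data_labeling_jobs(
    request=aiplatform_v1.ListDataLabelingJobsRequest(
        parent="projects/my-project/locations/us-central1",
    )
)
for page in pager.pages:
    # Each page is one ListDataLabelingJobsResponse; its next_page_token
    # feeds the follow-up request automatically.
    for job in page.data_labeling_jobs:
        print(job.name)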
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - hyperparameter_tuning_job = proto.Field( - proto.MESSAGE, - number=2, - message=gca_hyperparameter_tuning_job.HyperparameterTuningJob, - ) - - -class GetHyperparameterTuningJobRequest(proto.Message): - r"""Request message for - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob]. - - Attributes: - name (str): - Required. The name of the HyperparameterTuningJob resource. - Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListHyperparameterTuningJobsRequest(proto.Message): - r"""Request message for - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs]. - - Attributes: - parent (str): - Required. The resource name of the Location to list the - HyperparameterTuningJobs from. Format: - ``projects/{project}/locations/{location}`` - filter (str): - The standard list filter. - - Supported fields: - - - ``display_name`` supports = and !=. - - - ``state`` supports = and !=. - - Some examples of using the filter are: - - - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` - - - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` - - - ``NOT display_name="my_job"`` - - - ``state="JOB_STATE_FAILED"`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained via - [ListHyperparameterTuningJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListHyperparameterTuningJobsResponse.next_page_token] - of the previous - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListHyperparameterTuningJobsResponse(proto.Message): - r"""Response message for - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs] - - Attributes: - hyperparameter_tuning_jobs (Sequence[google.cloud.aiplatform_v1.types.HyperparameterTuningJob]): - List of HyperparameterTuningJobs in the requested page. - [HyperparameterTuningJob.trials][google.cloud.aiplatform.v1.HyperparameterTuningJob.trials] - of the jobs will be not be returned. - next_page_token (str): - A token to retrieve the next page of results. Pass to - [ListHyperparameterTuningJobsRequest.page_token][google.cloud.aiplatform.v1.ListHyperparameterTuningJobsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - hyperparameter_tuning_jobs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_hyperparameter_tuning_job.HyperparameterTuningJob, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteHyperparameterTuningJobRequest(proto.Message): - r"""Request message for - [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob]. - - Attributes: - name (str): - Required. 
The name of the HyperparameterTuningJob resource - to be deleted. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CancelHyperparameterTuningJobRequest(proto.Message): - r"""Request message for - [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CancelHyperparameterTuningJob]. - - Attributes: - name (str): - Required. The name of the HyperparameterTuningJob to cancel. - Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateBatchPredictionJobRequest(proto.Message): - r"""Request message for - [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CreateBatchPredictionJob]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - BatchPredictionJob in. Format: - ``projects/{project}/locations/{location}`` - batch_prediction_job (google.cloud.aiplatform_v1.types.BatchPredictionJob): - Required. The BatchPredictionJob to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - batch_prediction_job = proto.Field( - proto.MESSAGE, - number=2, - message=gca_batch_prediction_job.BatchPredictionJob, - ) - - -class GetBatchPredictionJobRequest(proto.Message): - r"""Request message for - [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob]. - - Attributes: - name (str): - Required. The name of the BatchPredictionJob resource. - Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListBatchPredictionJobsRequest(proto.Message): - r"""Request message for - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs]. - - Attributes: - parent (str): - Required. The resource name of the Location to list the - BatchPredictionJobs from. Format: - ``projects/{project}/locations/{location}`` - filter (str): - The standard list filter. - - Supported fields: - - - ``display_name`` supports = and !=. - - - ``state`` supports = and !=. - - - ``model_display_name`` supports = and != - - Some examples of using the filter are: - - - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` - - - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` - - - ``NOT display_name="my_job"`` - - - ``state="JOB_STATE_FAILED"`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained via - [ListBatchPredictionJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListBatchPredictionJobsResponse.next_page_token] - of the previous - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. 
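# A hedged sketch of the model_display_name filter documented above for
# batch prediction jobs; the parent and display-name values are placeholders.
from google.cloud import aiplatform_v1

client = aiplatform_v1.JobServiceClient()
request = aiplatform_v1.ListBatchPredictionJobsRequest(
    parent="projects/my-project/locations/us-central1",
    filter='model_display_name="my_model" AND state="JOB_STATE_SUCCEEDED"',
)
for job in client.list_batch_prediction_jobs(request=request):
    print(job.name)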
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListBatchPredictionJobsResponse(proto.Message): - r"""Response message for - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs] - - Attributes: - batch_prediction_jobs (Sequence[google.cloud.aiplatform_v1.types.BatchPredictionJob]): - List of BatchPredictionJobs in the requested - page. - next_page_token (str): - A token to retrieve the next page of results. Pass to - [ListBatchPredictionJobsRequest.page_token][google.cloud.aiplatform.v1.ListBatchPredictionJobsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - batch_prediction_jobs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_batch_prediction_job.BatchPredictionJob, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteBatchPredictionJobRequest(proto.Message): - r"""Request message for - [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1.JobService.DeleteBatchPredictionJob]. - - Attributes: - name (str): - Required. The name of the BatchPredictionJob resource to be - deleted. Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CancelBatchPredictionJobRequest(proto.Message): - r"""Request message for - [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CancelBatchPredictionJob]. - - Attributes: - name (str): - Required. The name of the BatchPredictionJob to cancel. - Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_state.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_state.py deleted file mode 100644 index e28545fb22..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_state.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'JobState', - }, -) - - -class JobState(proto.Enum): - r"""Describes the state of a job.""" - JOB_STATE_UNSPECIFIED = 0 - JOB_STATE_QUEUED = 1 - JOB_STATE_PENDING = 2 - JOB_STATE_RUNNING = 3 - JOB_STATE_SUCCEEDED = 4 - JOB_STATE_FAILED = 5 - JOB_STATE_CANCELLING = 6 - JOB_STATE_CANCELLED = 7 - JOB_STATE_PAUSED = 8 - JOB_STATE_EXPIRED = 9 - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/machine_resources.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/machine_resources.py deleted file mode 100644 index 5bd4d6d5ad..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/machine_resources.py +++ /dev/null @@ -1,307 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import accelerator_type as gca_accelerator_type - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'MachineSpec', - 'DedicatedResources', - 'AutomaticResources', - 'BatchDedicatedResources', - 'ResourcesConsumed', - 'DiskSpec', - 'AutoscalingMetricSpec', - }, -) - - -class MachineSpec(proto.Message): - r"""Specification of a single machine. - Attributes: - machine_type (str): - Immutable. The type of the machine. - - See the `list of machine types supported for - prediction `__ - - See the `list of machine types supported for custom - training `__. - - For - [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] - this field is optional, and the default value is - ``n1-standard-2``. For - [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob] - or as part of - [WorkerPoolSpec][google.cloud.aiplatform.v1.WorkerPoolSpec] - this field is required. - accelerator_type (google.cloud.aiplatform_v1.types.AcceleratorType): - Immutable. The type of accelerator(s) that may be attached - to the machine as per - [accelerator_count][google.cloud.aiplatform.v1.MachineSpec.accelerator_count]. - accelerator_count (int): - The number of accelerators to attach to the - machine. - """ - - machine_type = proto.Field( - proto.STRING, - number=1, - ) - accelerator_type = proto.Field( - proto.ENUM, - number=2, - enum=gca_accelerator_type.AcceleratorType, - ) - accelerator_count = proto.Field( - proto.INT32, - number=3, - ) - - -class DedicatedResources(proto.Message): - r"""A description of resources that are dedicated to a - DeployedModel, and that need a higher degree of manual - configuration. - - Attributes: - machine_spec (google.cloud.aiplatform_v1.types.MachineSpec): - Required. Immutable. The specification of a - single machine used by the prediction. - min_replica_count (int): - Required. Immutable. The minimum number of - machine replicas this DeployedModel will be - always deployed on. This value must be greater - than or equal to 1. 
-            If traffic against the DeployedModel increases,
-            it may dynamically be deployed onto more
-            replicas, and as traffic decreases, some of
-            these extra replicas may be freed.
-        max_replica_count (int):
-            Immutable. The maximum number of replicas this DeployedModel
-            may be deployed on when the traffic against it increases. If
-            the requested value is too large, the deployment will error,
-            but if deployment succeeds then the ability to scale the
-            model to that many replicas is guaranteed (barring service
-            outages). If traffic against the DeployedModel increases
-            beyond what its replicas at maximum may handle, a portion of
-            the traffic will be dropped. If this value is not provided,
-            [min_replica_count][google.cloud.aiplatform.v1.DedicatedResources.min_replica_count]
-            will be used as the default value.
-        autoscaling_metric_specs (Sequence[google.cloud.aiplatform_v1.types.AutoscalingMetricSpec]):
-            Immutable. The metric specifications that override a
-            resource utilization metric (CPU utilization, accelerator's
-            duty cycle, and so on) target value (defaulting to 60 if not
-            set). At most one entry is allowed per metric.
-
-            If
-            [machine_spec.accelerator_count][google.cloud.aiplatform.v1.MachineSpec.accelerator_count]
-            is above 0, the autoscaling will be based on both the CPU
-            utilization and the accelerator's duty cycle metrics, scaling
-            up when either metric exceeds its target value and scaling
-            down when both metrics are under their target values. The
-            default target value is 60 for both metrics.
-
-            If
-            [machine_spec.accelerator_count][google.cloud.aiplatform.v1.MachineSpec.accelerator_count]
-            is 0, the autoscaling will be based on the CPU utilization
-            metric only, with a default target value of 60 if not
-            explicitly set.
-
-            For example, in the case of Online Prediction, if you want
-            to override the target CPU utilization to 80, you should set
-            [autoscaling_metric_specs.metric_name][google.cloud.aiplatform.v1.AutoscalingMetricSpec.metric_name]
-            to
-            ``aiplatform.googleapis.com/prediction/online/cpu/utilization``
-            and
-            [autoscaling_metric_specs.target][google.cloud.aiplatform.v1.AutoscalingMetricSpec.target]
-            to ``80``.
-    """
-
-    machine_spec = proto.Field(
-        proto.MESSAGE,
-        number=1,
-        message='MachineSpec',
-    )
-    min_replica_count = proto.Field(
-        proto.INT32,
-        number=2,
-    )
-    max_replica_count = proto.Field(
-        proto.INT32,
-        number=3,
-    )
-    autoscaling_metric_specs = proto.RepeatedField(
-        proto.MESSAGE,
-        number=4,
-        message='AutoscalingMetricSpec',
-    )
-
-
-class AutomaticResources(proto.Message):
-    r"""A description of resources that, to a large degree, are decided
-    by Vertex AI and require only a modest additional
-    configuration. Each Model supporting these resources documents
-    its specific guidelines.
-
-    Attributes:
-        min_replica_count (int):
-            Immutable. The minimum number of replicas this DeployedModel
-            will always be deployed on. If traffic against it increases,
-            it may dynamically be deployed onto more replicas up to
-            [max_replica_count][google.cloud.aiplatform.v1.AutomaticResources.max_replica_count],
-            and as traffic decreases, some of these extra replicas may
-            be freed. If the requested value is too large, the
-            deployment will error.
-        max_replica_count (int):
-            Immutable. The maximum number of replicas
-            this DeployedModel may be deployed on when the
-            traffic against it increases.
If the requested
-            value is too large, the deployment will error,
-            but if deployment succeeds then the ability to
-            scale the model to that many replicas is
-            guaranteed (barring service outages). If traffic
-            against the DeployedModel increases beyond what
-            its replicas at maximum may handle, a portion of
-            the traffic will be dropped. If this value is
-            not provided, no upper bound for scaling under
-            heavy traffic will be assumed, though Vertex AI
-            may be unable to scale beyond a certain replica
-            number.
-    """
-
-    min_replica_count = proto.Field(
-        proto.INT32,
-        number=1,
-    )
-    max_replica_count = proto.Field(
-        proto.INT32,
-        number=2,
-    )
-
-
-class BatchDedicatedResources(proto.Message):
-    r"""A description of resources that are used for performing batch
-    operations, are dedicated to a Model, and need manual
-    configuration.
-
-    Attributes:
-        machine_spec (google.cloud.aiplatform_v1.types.MachineSpec):
-            Required. Immutable. The specification of a
-            single machine.
-        starting_replica_count (int):
-            Immutable. The number of machine replicas used at the start
-            of the batch operation. If not set, Vertex AI decides the
-            starting number, not greater than
-            [max_replica_count][google.cloud.aiplatform.v1.BatchDedicatedResources.max_replica_count].
-        max_replica_count (int):
-            Immutable. The maximum number of machine
-            replicas the batch operation may be scaled to.
-            The default value is 10.
-    """
-
-    machine_spec = proto.Field(
-        proto.MESSAGE,
-        number=1,
-        message='MachineSpec',
-    )
-    starting_replica_count = proto.Field(
-        proto.INT32,
-        number=2,
-    )
-    max_replica_count = proto.Field(
-        proto.INT32,
-        number=3,
-    )
-
-
-class ResourcesConsumed(proto.Message):
-    r"""Statistics information about resource consumption.
-    Attributes:
-        replica_hours (float):
-            Output only. The number of replica hours
-            used. Note that many replicas may run in
-            parallel, and additionally any given work may be
-            queued for some time. Therefore this value is
-            not strictly related to wall time.
-    """
-
-    replica_hours = proto.Field(
-        proto.DOUBLE,
-        number=1,
-    )
-
-
-class DiskSpec(proto.Message):
-    r"""Represents the spec of disk options.
-    Attributes:
-        boot_disk_type (str):
-            Type of the boot disk (default is "pd-ssd").
-            Valid values: "pd-ssd" (Persistent Disk Solid
-            State Drive) or "pd-standard" (Persistent Disk
-            Hard Disk Drive).
-        boot_disk_size_gb (int):
-            Size in GB of the boot disk (default is
-            100GB).
-    """
-
-    boot_disk_type = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-    boot_disk_size_gb = proto.Field(
-        proto.INT32,
-        number=2,
-    )
-
-
-class AutoscalingMetricSpec(proto.Message):
-    r"""The metric specification that defines the target resource
-    utilization (CPU utilization, accelerator's duty cycle, and so
-    on) for calculating the desired replica count.
-
-    Attributes:
-        metric_name (str):
-            Required. The resource metric name. Supported metrics:
-
-            -  For Online Prediction:
-            -  ``aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle``
-            -  ``aiplatform.googleapis.com/prediction/online/cpu/utilization``
-        target (int):
-            The target resource utilization in percentage
-            (1% - 100%) for the given metric; once the real
-            usage deviates from the target by a certain
-            percentage, the machine replicas change. The
-            default value is 60 (representing 60%) if not
-            provided.
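# A minimal sketch of the autoscaling override described above: pin the CPU
# utilization target to 80 for a dedicated-resources deployment. The machine
# type and replica counts are illustrative placeholders.
from google.cloud import aiplatform_v1

dedicated = aiplatform_v1.DedicatedResources(
    machine_spec=aiplatform_v1.MachineSpec(machine_type="n1-standard-4"),
    min_replica_count=1,
    max_replica_count=4,
    autoscaling_metric_specs=[
        aiplatform_v1.AutoscalingMetricSpec(
            metric_name="aiplatform.googleapis.com/prediction/online/cpu/utilization",
            target=80,
        )
    ],
)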
- """ - - metric_name = proto.Field( - proto.STRING, - number=1, - ) - target = proto.Field( - proto.INT32, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/manual_batch_tuning_parameters.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/manual_batch_tuning_parameters.py deleted file mode 100644 index 9257b29e74..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/manual_batch_tuning_parameters.py +++ /dev/null @@ -1,49 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'ManualBatchTuningParameters', - }, -) - - -class ManualBatchTuningParameters(proto.Message): - r"""Manual batch tuning parameters. - Attributes: - batch_size (int): - Immutable. The number of the records (e.g. - instances) of the operation given in each batch - to a machine replica. Machine type, and size of - a single record should be considered when - setting this parameter, higher value speeds up - the batch operation's execution, but too high - value will result in a whole batch not fitting - in a machine's memory, and the whole operation - will fail. - The default value is 4. - """ - - batch_size = proto.Field( - proto.INT32, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/migratable_resource.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/migratable_resource.py deleted file mode 100644 index 2ac7b98679..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/migratable_resource.py +++ /dev/null @@ -1,209 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'MigratableResource', - }, -) - - -class MigratableResource(proto.Message): - r"""Represents one resource that exists in automl.googleapis.com, - datalabeling.googleapis.com or ml.googleapis.com. - - Attributes: - ml_engine_model_version (google.cloud.aiplatform_v1.types.MigratableResource.MlEngineModelVersion): - Output only. Represents one Version in - ml.googleapis.com. 
- automl_model (google.cloud.aiplatform_v1.types.MigratableResource.AutomlModel): - Output only. Represents one Model in - automl.googleapis.com. - automl_dataset (google.cloud.aiplatform_v1.types.MigratableResource.AutomlDataset): - Output only. Represents one Dataset in - automl.googleapis.com. - data_labeling_dataset (google.cloud.aiplatform_v1.types.MigratableResource.DataLabelingDataset): - Output only. Represents one Dataset in - datalabeling.googleapis.com. - last_migrate_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when the last - migration attempt on this MigratableResource - started. Will not be set if there's no migration - attempt on this MigratableResource. - last_update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - MigratableResource was last updated. - """ - - class MlEngineModelVersion(proto.Message): - r"""Represents one model Version in ml.googleapis.com. - Attributes: - endpoint (str): - The ml.googleapis.com endpoint that this model Version - currently lives in. Example values: - - - ml.googleapis.com - - us-centrall-ml.googleapis.com - - europe-west4-ml.googleapis.com - - asia-east1-ml.googleapis.com - version (str): - Full resource name of ml engine model Version. Format: - ``projects/{project}/models/{model}/versions/{version}``. - """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - version = proto.Field( - proto.STRING, - number=2, - ) - - class AutomlModel(proto.Message): - r"""Represents one Model in automl.googleapis.com. - Attributes: - model (str): - Full resource name of automl Model. Format: - ``projects/{project}/locations/{location}/models/{model}``. - model_display_name (str): - The Model's display name in - automl.googleapis.com. - """ - - model = proto.Field( - proto.STRING, - number=1, - ) - model_display_name = proto.Field( - proto.STRING, - number=3, - ) - - class AutomlDataset(proto.Message): - r"""Represents one Dataset in automl.googleapis.com. - Attributes: - dataset (str): - Full resource name of automl Dataset. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}``. - dataset_display_name (str): - The Dataset's display name in - automl.googleapis.com. - """ - - dataset = proto.Field( - proto.STRING, - number=1, - ) - dataset_display_name = proto.Field( - proto.STRING, - number=4, - ) - - class DataLabelingDataset(proto.Message): - r"""Represents one Dataset in datalabeling.googleapis.com. - Attributes: - dataset (str): - Full resource name of data labeling Dataset. Format: - ``projects/{project}/datasets/{dataset}``. - dataset_display_name (str): - The Dataset's display name in - datalabeling.googleapis.com. - data_labeling_annotated_datasets (Sequence[google.cloud.aiplatform_v1.types.MigratableResource.DataLabelingDataset.DataLabelingAnnotatedDataset]): - The migratable AnnotatedDataset in - datalabeling.googleapis.com belongs to the data - labeling Dataset. - """ - - class DataLabelingAnnotatedDataset(proto.Message): - r"""Represents one AnnotatedDataset in - datalabeling.googleapis.com. - - Attributes: - annotated_dataset (str): - Full resource name of data labeling AnnotatedDataset. - Format: - ``projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}``. - annotated_dataset_display_name (str): - The AnnotatedDataset's display name in - datalabeling.googleapis.com. 
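# A small sketch of reading the "resource" oneof described above; unset
# submessage fields read as empty defaults in proto-plus, so simple
# truthiness checks suffice here.
from google.cloud import aiplatform_v1

def describe_migratable(resource: aiplatform_v1.MigratableResource) -> str:
    if resource.ml_engine_model_version.version:
        return f"ml.googleapis.com version: {resource.ml_engine_model_version.version}"
    if resource.automl_model.model:
        return f"AutoML model: {resource.automl_model.model_display_name}"
    if resource.automl_dataset.dataset:
        return f"AutoML dataset: {resource.automl_dataset.dataset_display_name}"
    if resource.data_labeling_dataset.dataset:
        return f"Data labeling dataset: {resource.data_labeling_dataset.dataset_display_name}"
    return "unknown resource"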
- """ - - annotated_dataset = proto.Field( - proto.STRING, - number=1, - ) - annotated_dataset_display_name = proto.Field( - proto.STRING, - number=3, - ) - - dataset = proto.Field( - proto.STRING, - number=1, - ) - dataset_display_name = proto.Field( - proto.STRING, - number=4, - ) - data_labeling_annotated_datasets = proto.RepeatedField( - proto.MESSAGE, - number=3, - message='MigratableResource.DataLabelingDataset.DataLabelingAnnotatedDataset', - ) - - ml_engine_model_version = proto.Field( - proto.MESSAGE, - number=1, - oneof='resource', - message=MlEngineModelVersion, - ) - automl_model = proto.Field( - proto.MESSAGE, - number=2, - oneof='resource', - message=AutomlModel, - ) - automl_dataset = proto.Field( - proto.MESSAGE, - number=3, - oneof='resource', - message=AutomlDataset, - ) - data_labeling_dataset = proto.Field( - proto.MESSAGE, - number=4, - oneof='resource', - message=DataLabelingDataset, - ) - last_migrate_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - last_update_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/migration_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/migration_service.py deleted file mode 100644 index 80192fc6e7..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/migration_service.py +++ /dev/null @@ -1,439 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import migratable_resource as gca_migratable_resource -from google.cloud.aiplatform_v1.types import operation -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'SearchMigratableResourcesRequest', - 'SearchMigratableResourcesResponse', - 'BatchMigrateResourcesRequest', - 'MigrateResourceRequest', - 'BatchMigrateResourcesResponse', - 'MigrateResourceResponse', - 'BatchMigrateResourcesOperationMetadata', - }, -) - - -class SearchMigratableResourcesRequest(proto.Message): - r"""Request message for - [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources]. - - Attributes: - parent (str): - Required. The location that the migratable resources should - be searched from. It's the Vertex AI location that the - resources can be migrated to, not the resources' original - location. Format: - ``projects/{project}/locations/{location}`` - page_size (int): - The standard page size. - The default and maximum value is 100. - page_token (str): - The standard page token. - filter (str): - A filter for your search. You can use the following types of - filters: - - - Resource type filters. 
The following strings filter for a
-            specific type of
-            [MigratableResource][google.cloud.aiplatform.v1.MigratableResource]:
-
-            -  ``ml_engine_model_version:*``
-            -  ``automl_model:*``
-            -  ``automl_dataset:*``
-            -  ``data_labeling_dataset:*``
-
-        -  "Migrated or not" filters. The following strings filter
-           for resources that either have or have not already been
-           migrated:
-
-           -  ``last_migrate_time:*`` filters for migrated
-              resources.
-           -  ``NOT last_migrate_time:*`` filters for not yet
-              migrated resources.
-    """
-
-    parent = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-    page_size = proto.Field(
-        proto.INT32,
-        number=2,
-    )
-    page_token = proto.Field(
-        proto.STRING,
-        number=3,
-    )
-    filter = proto.Field(
-        proto.STRING,
-        number=4,
-    )
-
-
-class SearchMigratableResourcesResponse(proto.Message):
-    r"""Response message for
-    [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources].
-
-    Attributes:
-        migratable_resources (Sequence[google.cloud.aiplatform_v1.types.MigratableResource]):
-            All migratable resources that can be migrated
-            to the location specified in the request.
-        next_page_token (str):
-            The standard next-page token. The migratable_resources may
-            not fill page_size in SearchMigratableResourcesRequest even
-            when there are subsequent pages.
-    """
-
-    @property
-    def raw_page(self):
-        return self
-
-    migratable_resources = proto.RepeatedField(
-        proto.MESSAGE,
-        number=1,
-        message=gca_migratable_resource.MigratableResource,
-    )
-    next_page_token = proto.Field(
-        proto.STRING,
-        number=2,
-    )
-
-
-class BatchMigrateResourcesRequest(proto.Message):
-    r"""Request message for
-    [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources].
-
-    Attributes:
-        parent (str):
-            Required. The location that the migrated resources will live
-            in. Format: ``projects/{project}/locations/{location}``
-        migrate_resource_requests (Sequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest]):
-            Required. The request messages specifying the
-            resources to migrate. They must be in the same
-            location as the destination. Up to 50 resources
-            can be migrated in one batch.
-    """
-
-    parent = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-    migrate_resource_requests = proto.RepeatedField(
-        proto.MESSAGE,
-        number=2,
-        message='MigrateResourceRequest',
-    )
-
-
-class MigrateResourceRequest(proto.Message):
-    r"""Config for migrating one resource from automl.googleapis.com,
-    datalabeling.googleapis.com or ml.googleapis.com to Vertex AI.
-
-    Attributes:
-        migrate_ml_engine_model_version_config (google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateMlEngineModelVersionConfig):
-            Config for migrating Version in
-            ml.googleapis.com to Vertex AI's Model.
-        migrate_automl_model_config (google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateAutomlModelConfig):
-            Config for migrating Model in
-            automl.googleapis.com to Vertex AI's Model.
-        migrate_automl_dataset_config (google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateAutomlDatasetConfig):
-            Config for migrating Dataset in
-            automl.googleapis.com to Vertex AI's Dataset.
-        migrate_data_labeling_dataset_config (google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateDataLabelingDatasetConfig):
-            Config for migrating Dataset in
-            datalabeling.googleapis.com to Vertex AI's
-            Dataset.
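# A minimal sketch of the search filter grammar documented above, listing
# AutoML models visible for migration; the parent value is a placeholder.
from google.cloud import aiplatform_v1

client = aiplatform_v1.MigrationServiceClient()
request = aiplatform_v1.SearchMigratableResourcesRequest(
    parent="projects/my-project/locations/us-central1",
    filter="automl_model:*",
)
for resource in client.search_migratable_resources(request=request):
    print(resource.automl_model.model)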
- """ - - class MigrateMlEngineModelVersionConfig(proto.Message): - r"""Config for migrating version in ml.googleapis.com to Vertex - AI's Model. - - Attributes: - endpoint (str): - Required. The ml.googleapis.com endpoint that this model - version should be migrated from. Example values: - - - ml.googleapis.com - - - us-centrall-ml.googleapis.com - - - europe-west4-ml.googleapis.com - - - asia-east1-ml.googleapis.com - model_version (str): - Required. Full resource name of ml engine model version. - Format: - ``projects/{project}/models/{model}/versions/{version}``. - model_display_name (str): - Required. Display name of the model in Vertex - AI. System will pick a display name if - unspecified. - """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - model_version = proto.Field( - proto.STRING, - number=2, - ) - model_display_name = proto.Field( - proto.STRING, - number=3, - ) - - class MigrateAutomlModelConfig(proto.Message): - r"""Config for migrating Model in automl.googleapis.com to Vertex - AI's Model. - - Attributes: - model (str): - Required. Full resource name of automl Model. Format: - ``projects/{project}/locations/{location}/models/{model}``. - model_display_name (str): - Optional. Display name of the model in Vertex - AI. System will pick a display name if - unspecified. - """ - - model = proto.Field( - proto.STRING, - number=1, - ) - model_display_name = proto.Field( - proto.STRING, - number=2, - ) - - class MigrateAutomlDatasetConfig(proto.Message): - r"""Config for migrating Dataset in automl.googleapis.com to - Vertex AI's Dataset. - - Attributes: - dataset (str): - Required. Full resource name of automl Dataset. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}``. - dataset_display_name (str): - Required. Display name of the Dataset in - Vertex AI. System will pick a display name if - unspecified. - """ - - dataset = proto.Field( - proto.STRING, - number=1, - ) - dataset_display_name = proto.Field( - proto.STRING, - number=2, - ) - - class MigrateDataLabelingDatasetConfig(proto.Message): - r"""Config for migrating Dataset in datalabeling.googleapis.com - to AI Platform's Dataset. - - Attributes: - dataset (str): - Required. Full resource name of data labeling Dataset. - Format: ``projects/{project}/datasets/{dataset}``. - dataset_display_name (str): - Optional. Display name of the Dataset in - Vertex AI. System will pick a display name if - unspecified. - migrate_data_labeling_annotated_dataset_configs (Sequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateDataLabelingDatasetConfig.MigrateDataLabelingAnnotatedDatasetConfig]): - Optional. Configs for migrating - AnnotatedDataset in datalabeling.googleapis.com - to Vertex AI's SavedQuery. The specified - AnnotatedDatasets have to belong to the - datalabeling Dataset. - """ - - class MigrateDataLabelingAnnotatedDatasetConfig(proto.Message): - r"""Config for migrating AnnotatedDataset in - datalabeling.googleapis.com to Vertex AI's SavedQuery. - - Attributes: - annotated_dataset (str): - Required. Full resource name of data labeling - AnnotatedDataset. Format: - ``projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}``. 
- """ - - annotated_dataset = proto.Field( - proto.STRING, - number=1, - ) - - dataset = proto.Field( - proto.STRING, - number=1, - ) - dataset_display_name = proto.Field( - proto.STRING, - number=2, - ) - migrate_data_labeling_annotated_dataset_configs = proto.RepeatedField( - proto.MESSAGE, - number=3, - message='MigrateResourceRequest.MigrateDataLabelingDatasetConfig.MigrateDataLabelingAnnotatedDatasetConfig', - ) - - migrate_ml_engine_model_version_config = proto.Field( - proto.MESSAGE, - number=1, - oneof='request', - message=MigrateMlEngineModelVersionConfig, - ) - migrate_automl_model_config = proto.Field( - proto.MESSAGE, - number=2, - oneof='request', - message=MigrateAutomlModelConfig, - ) - migrate_automl_dataset_config = proto.Field( - proto.MESSAGE, - number=3, - oneof='request', - message=MigrateAutomlDatasetConfig, - ) - migrate_data_labeling_dataset_config = proto.Field( - proto.MESSAGE, - number=4, - oneof='request', - message=MigrateDataLabelingDatasetConfig, - ) - - -class BatchMigrateResourcesResponse(proto.Message): - r"""Response message for - [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources]. - - Attributes: - migrate_resource_responses (Sequence[google.cloud.aiplatform_v1.types.MigrateResourceResponse]): - Successfully migrated resources. - """ - - migrate_resource_responses = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='MigrateResourceResponse', - ) - - -class MigrateResourceResponse(proto.Message): - r"""Describes a successfully migrated resource. - Attributes: - dataset (str): - Migrated Dataset's resource name. - model (str): - Migrated Model's resource name. - migratable_resource (google.cloud.aiplatform_v1.types.MigratableResource): - Before migration, the identifier in - ml.googleapis.com, automl.googleapis.com or - datalabeling.googleapis.com. - """ - - dataset = proto.Field( - proto.STRING, - number=1, - oneof='migrated_resource', - ) - model = proto.Field( - proto.STRING, - number=2, - oneof='migrated_resource', - ) - migratable_resource = proto.Field( - proto.MESSAGE, - number=3, - message=gca_migratable_resource.MigratableResource, - ) - - -class BatchMigrateResourcesOperationMetadata(proto.Message): - r"""Runtime operation information for - [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The common part of the operation metadata. - partial_results (Sequence[google.cloud.aiplatform_v1.types.BatchMigrateResourcesOperationMetadata.PartialResult]): - Partial results that reflect the latest - migration operation progress. - """ - - class PartialResult(proto.Message): - r"""Represents a partial result in batch migration operation for one - [MigrateResourceRequest][google.cloud.aiplatform.v1.MigrateResourceRequest]. - - Attributes: - error (google.rpc.status_pb2.Status): - The error result of the migration request in - case of failure. - model (str): - Migrated model resource name. - dataset (str): - Migrated dataset resource name. - request (google.cloud.aiplatform_v1.types.MigrateResourceRequest): - It's the same as the value in - [MigrateResourceRequest.migrate_resource_requests][]. 
- """ - - error = proto.Field( - proto.MESSAGE, - number=2, - oneof='result', - message=status_pb2.Status, - ) - model = proto.Field( - proto.STRING, - number=3, - oneof='result', - ) - dataset = proto.Field( - proto.STRING, - number=4, - oneof='result', - ) - request = proto.Field( - proto.MESSAGE, - number=1, - message='MigrateResourceRequest', - ) - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - partial_results = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=PartialResult, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model.py deleted file mode 100644 index 5a2c50c153..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model.py +++ /dev/null @@ -1,707 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import deployed_model_ref -from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1.types import env_var -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'Model', - 'PredictSchemata', - 'ModelContainerSpec', - 'Port', - }, -) - - -class Model(proto.Message): - r"""A trained machine learning Model. - Attributes: - name (str): - The resource name of the Model. - display_name (str): - Required. The display name of the Model. - The name can be up to 128 characters long and - can be consist of any UTF-8 characters. - description (str): - The description of the Model. - predict_schemata (google.cloud.aiplatform_v1.types.PredictSchemata): - The schemata that describe formats of the Model's - predictions and explanations as given and returned via - [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict] - and [PredictionService.Explain][]. - metadata_schema_uri (str): - Immutable. Points to a YAML file stored on Google Cloud - Storage describing additional information about the Model, - that is specific to it. Unset if the Model does not have any - additional information. The schema is defined as an OpenAPI - 3.0.2 `Schema - Object `__. - AutoML Models always have this field populated by Vertex AI, - if no additional metadata is needed, this field is set to an - empty string. Note: The URI given on output will be - immutable and probably different, including the URI scheme, - than the one given on input. The output URI will point to a - location where the user only has a read access. - metadata (google.protobuf.struct_pb2.Value): - Immutable. An additional information about the Model; the - schema of the metadata can be found in - [metadata_schema][google.cloud.aiplatform.v1.Model.metadata_schema_uri]. 
- Unset if the Model does not have any additional information. - supported_export_formats (Sequence[google.cloud.aiplatform_v1.types.Model.ExportFormat]): - Output only. The formats in which this Model - may be exported. If empty, this Model is not - available for export. - training_pipeline (str): - Output only. The resource name of the - TrainingPipeline that uploaded this Model, if - any. - container_spec (google.cloud.aiplatform_v1.types.ModelContainerSpec): - Input only. The specification of the container that is to be - used when deploying this Model. The specification is - ingested upon - [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel], - and all binaries it contains are copied and stored - internally by Vertex AI. Not present for AutoML Models. - artifact_uri (str): - Immutable. The path to the directory - containing the Model artifact and any of its - supporting files. Not present for AutoML Models. - supported_deployment_resources_types (Sequence[google.cloud.aiplatform_v1.types.Model.DeploymentResourcesType]): - Output only. When this Model is deployed, its prediction - resources are described by the ``prediction_resources`` - field of the - [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] - object. Because not all Models support all resource - configuration types, the configuration types this Model - supports are listed here. If no configuration types are - listed, the Model cannot be deployed to an - [Endpoint][google.cloud.aiplatform.v1.Endpoint] and does not - support online predictions - ([PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict] - or [PredictionService.Explain][]). Such a Model can serve - predictions by using a - [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob], - if it has at least one entry each in - [supported_input_storage_formats][google.cloud.aiplatform.v1.Model.supported_input_storage_formats] - and - [supported_output_storage_formats][google.cloud.aiplatform.v1.Model.supported_output_storage_formats]. - supported_input_storage_formats (Sequence[str]): - Output only. The formats this Model supports in - [BatchPredictionJob.input_config][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. - If - [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] - exists, the instances should be given as per that schema. - - The possible formats are: - - - ``jsonl`` The JSON Lines format, where each instance is a - single line. Uses - [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source]. - - - ``csv`` The CSV format, where each instance is a single - comma-separated line. The first line in the file is the - header, containing comma-separated field names. Uses - [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source]. - - - ``tf-record`` The TFRecord format, where each instance is - a single record in tfrecord syntax. Uses - [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source]. - - - ``tf-record-gzip`` Similar to ``tf-record``, but the file - is gzipped. Uses - [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source]. - - - ``bigquery`` Each instance is a single row in BigQuery. - Uses - [BigQuerySource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.bigquery_source]. 
- - - ``file-list`` Each line of the file is the location of an - instance to process, uses ``gcs_source`` field of the - [InputConfig][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig] - object. - - If this Model doesn't support any of these formats it means - it cannot be used with a - [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. - However, if it has - [supported_deployment_resources_types][google.cloud.aiplatform.v1.Model.supported_deployment_resources_types], - it could serve online predictions by using - [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict] - or [PredictionService.Explain][]. - supported_output_storage_formats (Sequence[str]): - Output only. The formats this Model supports in - [BatchPredictionJob.output_config][google.cloud.aiplatform.v1.BatchPredictionJob.output_config]. - If both - [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] - and - [PredictSchemata.prediction_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.prediction_schema_uri] - exist, the predictions are returned together with their - instances. In other words, the prediction has the original - instance data first, followed by the actual prediction - content (as per the schema). - - The possible formats are: - - - ``jsonl`` The JSON Lines format, where each prediction is - a single line. Uses - [GcsDestination][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.gcs_destination]. - - - ``csv`` The CSV format, where each prediction is a single - comma-separated line. The first line in the file is the - header, containing comma-separated field names. Uses - [GcsDestination][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.gcs_destination]. - - - ``bigquery`` Each prediction is a single row in a - BigQuery table, uses - [BigQueryDestination][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.bigquery_destination] - . - - If this Model doesn't support any of these formats it means - it cannot be used with a - [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. - However, if it has - [supported_deployment_resources_types][google.cloud.aiplatform.v1.Model.supported_deployment_resources_types], - it could serve online predictions by using - [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict] - or [PredictionService.Explain][]. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Model was - uploaded into Vertex AI. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Model was - most recently updated. - deployed_models (Sequence[google.cloud.aiplatform_v1.types.DeployedModelRef]): - Output only. The pointers to DeployedModels - created from this Model. Note that Model could - have been deployed to Endpoints in different - Locations. - etag (str): - Used to perform consistent read-modify-write - updates. If not set, a blind "overwrite" update - happens. - labels (Sequence[google.cloud.aiplatform_v1.types.Model.LabelsEntry]): - The labels with user-defined metadata to - organize your Models. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. 
- encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): - Customer-managed encryption key spec for a - Model. If set, this Model and all sub-resources - of this Model will be secured by this key. - """ - class DeploymentResourcesType(proto.Enum): - r"""Identifies a type of Model's prediction resources.""" - DEPLOYMENT_RESOURCES_TYPE_UNSPECIFIED = 0 - DEDICATED_RESOURCES = 1 - AUTOMATIC_RESOURCES = 2 - - class ExportFormat(proto.Message): - r"""Represents export format supported by the Model. - All formats export to Google Cloud Storage. - - Attributes: - id (str): - Output only. The ID of the export format. The possible - format IDs are: - - - ``tflite`` Used for Android mobile devices. - - - ``edgetpu-tflite`` Used for `Edge - TPU `__ devices. - - - ``tf-saved-model`` A tensorflow model in SavedModel - format. - - - ``tf-js`` A - `TensorFlow.js `__ model - that can be used in the browser and in Node.js using - JavaScript. - - - ``core-ml`` Used for iOS mobile devices. - - - ``custom-trained`` A Model that was uploaded or trained - by custom code. - exportable_contents (Sequence[google.cloud.aiplatform_v1.types.Model.ExportFormat.ExportableContent]): - Output only. The content of this Model that - may be exported. - """ - class ExportableContent(proto.Enum): - r"""The Model content that can be exported.""" - EXPORTABLE_CONTENT_UNSPECIFIED = 0 - ARTIFACT = 1 - IMAGE = 2 - - id = proto.Field( - proto.STRING, - number=1, - ) - exportable_contents = proto.RepeatedField( - proto.ENUM, - number=2, - enum='Model.ExportFormat.ExportableContent', - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - description = proto.Field( - proto.STRING, - number=3, - ) - predict_schemata = proto.Field( - proto.MESSAGE, - number=4, - message='PredictSchemata', - ) - metadata_schema_uri = proto.Field( - proto.STRING, - number=5, - ) - metadata = proto.Field( - proto.MESSAGE, - number=6, - message=struct_pb2.Value, - ) - supported_export_formats = proto.RepeatedField( - proto.MESSAGE, - number=20, - message=ExportFormat, - ) - training_pipeline = proto.Field( - proto.STRING, - number=7, - ) - container_spec = proto.Field( - proto.MESSAGE, - number=9, - message='ModelContainerSpec', - ) - artifact_uri = proto.Field( - proto.STRING, - number=26, - ) - supported_deployment_resources_types = proto.RepeatedField( - proto.ENUM, - number=10, - enum=DeploymentResourcesType, - ) - supported_input_storage_formats = proto.RepeatedField( - proto.STRING, - number=11, - ) - supported_output_storage_formats = proto.RepeatedField( - proto.STRING, - number=12, - ) - create_time = proto.Field( - proto.MESSAGE, - number=13, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=14, - message=timestamp_pb2.Timestamp, - ) - deployed_models = proto.RepeatedField( - proto.MESSAGE, - number=15, - message=deployed_model_ref.DeployedModelRef, - ) - etag = proto.Field( - proto.STRING, - number=16, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=17, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=24, - message=gca_encryption_spec.EncryptionSpec, - ) - - -class PredictSchemata(proto.Message): - r"""Contains the schemata used in Model's predictions and explanations - via - [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict], - [PredictionService.Explain][] and - [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. 
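Before the schemata themselves, a short sketch of how the Model message above is typically assembled for [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel]; the display name, bucket, image URI, and parent are placeholders, and output-only fields are left unset because the service populates them:

.. code:: python

    from google.cloud import aiplatform_v1

    model = aiplatform_v1.Model(
        display_name="my-model",                 # placeholder
        artifact_uri="gs://my-bucket/model/",    # placeholder
        container_spec=aiplatform_v1.ModelContainerSpec(
            image_uri="us-docker.pkg.dev/my-project/repo/serve:latest",
            ports=[aiplatform_v1.Port(container_port=8080)],
            predict_route="/foo",
            health_route="/bar",
        ),
    )

    client = aiplatform_v1.ModelServiceClient()
    operation = client.upload_model(
        parent="projects/my-project/locations/us-central1", model=model)
    # UploadModel is long-running; result() blocks until ingestion finishes.
    print(operation.result().model)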
- - Attributes: - instance_schema_uri (str): - Immutable. Points to a YAML file stored on Google Cloud - Storage describing the format of a single instance, which - is used in - [PredictRequest.instances][google.cloud.aiplatform.v1.PredictRequest.instances], - [ExplainRequest.instances][] and - [BatchPredictionJob.input_config][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. - The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. - AutoML Models always have this field populated by Vertex AI. - Note: The URI given on output will be immutable and probably - different, including the URI scheme, than the one given on - input. The output URI will point to a location where the - user only has read access. - parameters_schema_uri (str): - Immutable. Points to a YAML file stored on Google Cloud - Storage describing the parameters of prediction and - explanation via - [PredictRequest.parameters][google.cloud.aiplatform.v1.PredictRequest.parameters], - [ExplainRequest.parameters][] and - [BatchPredictionJob.model_parameters][google.cloud.aiplatform.v1.BatchPredictionJob.model_parameters]. - The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. - AutoML Models always have this field populated by Vertex AI; - if no parameters are supported, it is set to an empty - string. Note: The URI given on output will be immutable and - probably different, including the URI scheme, than the one - given on input. The output URI will point to a location - where the user only has read access. - prediction_schema_uri (str): - Immutable. Points to a YAML file stored on Google Cloud - Storage describing the format of a single prediction - produced by this Model, which is returned via - [PredictResponse.predictions][google.cloud.aiplatform.v1.PredictResponse.predictions], - [ExplainResponse.explanations][], and - [BatchPredictionJob.output_config][google.cloud.aiplatform.v1.BatchPredictionJob.output_config]. - The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. - AutoML Models always have this field populated by Vertex AI. - Note: The URI given on output will be immutable and probably - different, including the URI scheme, than the one given on - input. The output URI will point to a location where the - user only has read access. - """ - - instance_schema_uri = proto.Field( - proto.STRING, - number=1, - ) - parameters_schema_uri = proto.Field( - proto.STRING, - number=2, - ) - prediction_schema_uri = proto.Field( - proto.STRING, - number=3, - ) - - -class ModelContainerSpec(proto.Message): - r"""Specification of a container for serving predictions. Some fields in - this message correspond to fields in the `Kubernetes Container v1 - core - specification `__. - - Attributes: - image_uri (str): - Required. Immutable. URI of the Docker image to be used as - the custom container for serving predictions. This URI must - identify an image in Artifact Registry or Container - Registry. Learn more about the `container publishing - requirements `__, - including permissions requirements for the AI Platform - Service Agent. - - The container image is ingested upon - [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel], - stored internally, and the original path is not used - afterwards. - - To learn about the requirements for the Docker image itself, - see `Custom container - requirements `__. - - You can use the URI of one of Vertex AI's `pre-built - container images for - prediction `__ - in this field. - command (Sequence[str]): - Immutable.
Specifies the command that runs when the - container starts. This overrides the container's - `ENTRYPOINT `__. - Specify this field as an array of executable and arguments, - similar to a Docker ``ENTRYPOINT``'s "exec" form, not its - "shell" form. - - If you do not specify this field, then the container's - ``ENTRYPOINT`` runs, in conjunction with the - [args][google.cloud.aiplatform.v1.ModelContainerSpec.args] - field or the container's - ```CMD`` `__, - if either exists. If this field is not specified and the - container does not have an ``ENTRYPOINT``, then refer to the - Docker documentation about `how ``CMD`` and ``ENTRYPOINT`` - interact `__. - - If you specify this field, then you can also specify the - ``args`` field to provide additional arguments for this - command. However, if you specify this field, then the - container's ``CMD`` is ignored. See the `Kubernetes - documentation about how the ``command`` and ``args`` fields - interact with a container's ``ENTRYPOINT`` and - ``CMD`` `__. - - In this field, you can reference `environment variables set - by Vertex - AI `__ - and environment variables set in the - [env][google.cloud.aiplatform.v1.ModelContainerSpec.env] - field. You cannot reference environment variables set in the - Docker image. In order for environment variables to be - expanded, reference them by using the following syntax: - $(VARIABLE_NAME) Note that this differs from Bash variable - expansion, which does not use parentheses. If a variable - cannot be resolved, the reference in the input string is - used unchanged. To avoid variable expansion, you can escape - this syntax with ``$$``; for example: $$(VARIABLE_NAME) This - field corresponds to the ``command`` field of the Kubernetes - Containers `v1 core - API `__. - args (Sequence[str]): - Immutable. Specifies arguments for the command that runs - when the container starts. This overrides the container's - ```CMD`` `__. - Specify this field as an array of executable and arguments, - similar to a Docker ``CMD``'s "default parameters" form. - - If you don't specify this field but do specify the - [command][google.cloud.aiplatform.v1.ModelContainerSpec.command] - field, then the command from the ``command`` field runs - without any additional arguments. See the `Kubernetes - documentation about how the ``command`` and ``args`` fields - interact with a container's ``ENTRYPOINT`` and - ``CMD`` `__. - - If you don't specify this field and don't specify the - ``command`` field, then the container's - ```ENTRYPOINT`` `__ - and ``CMD`` determine what runs based on their default - behavior. See the Docker documentation about `how ``CMD`` - and ``ENTRYPOINT`` - interact `__. - - In this field, you can reference `environment variables set - by Vertex - AI `__ - and environment variables set in the - [env][google.cloud.aiplatform.v1.ModelContainerSpec.env] - field. You cannot reference environment variables set in the - Docker image. In order for environment variables to be - expanded, reference them by using the following syntax: - $(VARIABLE_NAME) Note that this differs from Bash variable - expansion, which does not use parentheses. If a variable - cannot be resolved, the reference in the input string is - used unchanged. To avoid variable expansion, you can escape - this syntax with ``$$``; for example: $$(VARIABLE_NAME) This - field corresponds to the ``args`` field of the Kubernetes - Containers `v1 core - API `__. - env (Sequence[google.cloud.aiplatform_v1.types.EnvVar]): - Immutable. 
List of environment variables to set in the - container. After the container starts running, code running - in the container can read these environment variables. - - Additionally, the - [command][google.cloud.aiplatform.v1.ModelContainerSpec.command] - and - [args][google.cloud.aiplatform.v1.ModelContainerSpec.args] - fields can reference these variables. Later entries in this - list can also reference earlier entries. For example, the - following sets the variable ``VAR_2`` to have the - value ``foo bar``: - - .. code:: json - - [ - { - "name": "VAR_1", - "value": "foo" - }, - { - "name": "VAR_2", - "value": "$(VAR_1) bar" - } - ] - - If you switch the order of the variables in the example, - then the expansion does not occur. - - This field corresponds to the ``env`` field of the - Kubernetes Containers `v1 core - API `__. - ports (Sequence[google.cloud.aiplatform_v1.types.Port]): - Immutable. List of ports to expose from the container. - Vertex AI sends any prediction requests that it receives to - the first port on this list. Vertex AI also sends - `liveness and health - checks `__ - to this port. - - If you do not specify this field, it defaults to the following - value: - - .. code:: json - - [ - { - "containerPort": 8080 - } - ] - - Vertex AI does not use ports other than the first one - listed. This field corresponds to the ``ports`` field of the - Kubernetes Containers `v1 core - API `__. - predict_route (str): - Immutable. HTTP path on the container to send prediction - requests to. Vertex AI forwards requests sent using - [projects.locations.endpoints.predict][google.cloud.aiplatform.v1.PredictionService.Predict] - to this path on the container's IP address and port. Vertex - AI then returns the container's response in the API - response. - - For example, if you set this field to ``/foo``, then when - Vertex AI receives a prediction request, it forwards the - request body in a POST request to the ``/foo`` path on the - port of your container specified by the first value of this - ``ModelContainerSpec``'s - [ports][google.cloud.aiplatform.v1.ModelContainerSpec.ports] - field. - - If you don't specify this field, it defaults to the - following value when you [deploy this Model to an - Endpoint][google.cloud.aiplatform.v1.EndpointService.DeployModel]: - /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict - The placeholders in this value are replaced as follows: - - - ENDPOINT: The last segment (following ``endpoints/``) of - the [Endpoint.name][] field of the Endpoint where this - Model has been deployed. (Vertex AI makes this value - available to your container code as the - ```AIP_ENDPOINT_ID`` environment - variable `__.) - - - DEPLOYED_MODEL: - [DeployedModel.id][google.cloud.aiplatform.v1.DeployedModel.id] - of the ``DeployedModel``. (Vertex AI makes this value - available to your container code as the - ```AIP_DEPLOYED_MODEL_ID`` environment - variable `__.) - health_route (str): - Immutable. HTTP path on the container to send health checks - to. Vertex AI intermittently sends GET requests to this path - on the container's IP address and port to check that the - container is healthy. Read more about `health - checks `__. - - For example, if you set this field to ``/bar``, then Vertex - AI intermittently sends a GET request to the ``/bar`` path - on the port of your container specified by the first value - of this ``ModelContainerSpec``'s - [ports][google.cloud.aiplatform.v1.ModelContainerSpec.ports] - field.
- - If you don't specify this field, it defaults to the - following value when you [deploy this Model to an - Endpoint][google.cloud.aiplatform.v1.EndpointService.DeployModel]: - /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict - The placeholders in this value are replaced as follows: - - - ENDPOINT: The last segment (following ``endpoints/``) of - the [Endpoint.name][] field of the Endpoint where this - Model has been deployed. (Vertex AI makes this value - available to your container code as the - ```AIP_ENDPOINT_ID`` environment - variable `__.) - - - DEPLOYED_MODEL: - [DeployedModel.id][google.cloud.aiplatform.v1.DeployedModel.id] - of the ``DeployedModel``. (Vertex AI makes this value - available to your container code as the - ```AIP_DEPLOYED_MODEL_ID`` environment - variable `__.) - """ - - image_uri = proto.Field( - proto.STRING, - number=1, - ) - command = proto.RepeatedField( - proto.STRING, - number=2, - ) - args = proto.RepeatedField( - proto.STRING, - number=3, - ) - env = proto.RepeatedField( - proto.MESSAGE, - number=4, - message=env_var.EnvVar, - ) - ports = proto.RepeatedField( - proto.MESSAGE, - number=5, - message='Port', - ) - predict_route = proto.Field( - proto.STRING, - number=6, - ) - health_route = proto.Field( - proto.STRING, - number=7, - ) - - -class Port(proto.Message): - r"""Represents a network port in a container. - Attributes: - container_port (int): - The number of the port to expose on the pod's - IP address. Must be a valid port number, between - 1 and 65535 inclusive. - """ - - container_port = proto.Field( - proto.INT32, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation.py deleted file mode 100644 index 190ec683ca..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation.py +++ /dev/null @@ -1,86 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'ModelEvaluation', - }, -) - - -class ModelEvaluation(proto.Message): - r"""A collection of metrics calculated by comparing Model's - predictions on all of the test data against annotations from the - test data. - - Attributes: - name (str): - Output only. The resource name of the - ModelEvaluation. - metrics_schema_uri (str): - Output only. Points to a YAML file stored on Google Cloud - Storage describing the - [metrics][google.cloud.aiplatform.v1.ModelEvaluation.metrics] - of this ModelEvaluation. The schema is defined as an OpenAPI - 3.0.2 `Schema - Object `__. - metrics (google.protobuf.struct_pb2.Value): - Output only. Evaluation metrics of the Model.
The schema of - the metrics is stored in - [metrics_schema_uri][google.cloud.aiplatform.v1.ModelEvaluation.metrics_schema_uri]. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - ModelEvaluation was created. - slice_dimensions (Sequence[str]): - Output only. All possible - [dimensions][ModelEvaluationSlice.slice.dimension] of - ModelEvaluationSlices. The dimensions can be used as the - filter of the - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices] - request, in the form of ``slice.dimension = <dimension>``. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - metrics_schema_uri = proto.Field( - proto.STRING, - number=2, - ) - metrics = proto.Field( - proto.MESSAGE, - number=3, - message=struct_pb2.Value, - ) - create_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - slice_dimensions = proto.RepeatedField( - proto.STRING, - number=5, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation_slice.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation_slice.py deleted file mode 100644 index 9a553e320a..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation_slice.py +++ /dev/null @@ -1,109 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'ModelEvaluationSlice', - }, -) - - -class ModelEvaluationSlice(proto.Message): - r"""A collection of metrics calculated by comparing Model's - predictions on a slice of the test data against ground truth - annotations. - - Attributes: - name (str): - Output only. The resource name of the - ModelEvaluationSlice. - slice_ (google.cloud.aiplatform_v1.types.ModelEvaluationSlice.Slice): - Output only. The slice of the test data that - is used to evaluate the Model. - metrics_schema_uri (str): - Output only. Points to a YAML file stored on Google Cloud - Storage describing the - [metrics][google.cloud.aiplatform.v1.ModelEvaluationSlice.metrics] - of this ModelEvaluationSlice. The schema is defined as an - OpenAPI 3.0.2 `Schema - Object `__. - metrics (google.protobuf.struct_pb2.Value): - Output only. Sliced evaluation metrics of the Model. The - schema of the metrics is stored in - [metrics_schema_uri][google.cloud.aiplatform.v1.ModelEvaluationSlice.metrics_schema_uri] - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - ModelEvaluationSlice was created. - """ - - class Slice(proto.Message): - r"""Definition of a slice. - Attributes: - dimension (str): - Output only. The dimension of the slice.
Well-known - dimensions are: - - - ``annotationSpec``: This slice is on the test data that - has either ground truth or prediction with - [AnnotationSpec.display_name][google.cloud.aiplatform.v1.AnnotationSpec.display_name] - equal to - [value][google.cloud.aiplatform.v1.ModelEvaluationSlice.Slice.value]. - value (str): - Output only. The value of the dimension in - this slice. - """ - - dimension = proto.Field( - proto.STRING, - number=1, - ) - value = proto.Field( - proto.STRING, - number=2, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - slice_ = proto.Field( - proto.MESSAGE, - number=2, - message=Slice, - ) - metrics_schema_uri = proto.Field( - proto.STRING, - number=3, - ) - metrics = proto.Field( - proto.MESSAGE, - number=4, - message=struct_pb2.Value, - ) - create_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_service.py deleted file mode 100644 index 088fd612c3..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_service.py +++ /dev/null @@ -1,583 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import io -from google.cloud.aiplatform_v1.types import model as gca_model -from google.cloud.aiplatform_v1.types import model_evaluation -from google.cloud.aiplatform_v1.types import model_evaluation_slice -from google.cloud.aiplatform_v1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'UploadModelRequest', - 'UploadModelOperationMetadata', - 'UploadModelResponse', - 'GetModelRequest', - 'ListModelsRequest', - 'ListModelsResponse', - 'UpdateModelRequest', - 'DeleteModelRequest', - 'ExportModelRequest', - 'ExportModelOperationMetadata', - 'ExportModelResponse', - 'GetModelEvaluationRequest', - 'ListModelEvaluationsRequest', - 'ListModelEvaluationsResponse', - 'GetModelEvaluationSliceRequest', - 'ListModelEvaluationSlicesRequest', - 'ListModelEvaluationSlicesResponse', - }, -) - - -class UploadModelRequest(proto.Message): - r"""Request message for - [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel]. - - Attributes: - parent (str): - Required. The resource name of the Location into which to - upload the Model. Format: - ``projects/{project}/locations/{location}`` - model (google.cloud.aiplatform_v1.types.Model): - Required. The Model to create.
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - model = proto.Field( - proto.MESSAGE, - number=2, - message=gca_model.Model, - ) - - -class UploadModelOperationMetadata(proto.Message): - r"""Details of - [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel] - operation. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The common part of the operation metadata. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class UploadModelResponse(proto.Message): - r"""Response message of - [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel] - operation. - - Attributes: - model (str): - The name of the uploaded Model resource. Format: - ``projects/{project}/locations/{location}/models/{model}`` - """ - - model = proto.Field( - proto.STRING, - number=1, - ) - - -class GetModelRequest(proto.Message): - r"""Request message for - [ModelService.GetModel][google.cloud.aiplatform.v1.ModelService.GetModel]. - - Attributes: - name (str): - Required. The name of the Model resource. Format: - ``projects/{project}/locations/{location}/models/{model}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListModelsRequest(proto.Message): - r"""Request message for - [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels]. - - Attributes: - parent (str): - Required. The resource name of the Location to list the - Models from. Format: - ``projects/{project}/locations/{location}`` - filter (str): - An expression for filtering the results of the request. For - field names both snake_case and camelCase are supported. - - - ``model`` supports = and !=. ``model`` represents the - Model ID, i.e. the last segment of the Model's [resource - name][google.cloud.aiplatform.v1.Model.name]. - - ``display_name`` supports = and != - - ``labels`` supports general map functions that is: - - - ``labels.key=value`` - key:value equality - - \`labels.key:\* or labels:key - key existence - - A key including a space must be quoted. - ``labels."a key"``. - - Some examples: - - - ``model=1234`` - - ``displayName="myDisplayName"`` - - ``labels.myKey="myValue"`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained via - [ListModelsResponse.next_page_token][google.cloud.aiplatform.v1.ListModelsResponse.next_page_token] - of the previous - [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - order_by (str): - A comma-separated list of fields to order by, sorted in - ascending order. Use "desc" after a field name for - descending. Supported fields: - - - ``display_name`` - - ``create_time`` - - ``update_time`` - - Example: ``display_name, create_time desc``. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - order_by = proto.Field( - proto.STRING, - number=6, - ) - - -class ListModelsResponse(proto.Message): - r"""Response message for - [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels] - - Attributes: - models (Sequence[google.cloud.aiplatform_v1.types.Model]): - List of Models in the requested page. - next_page_token (str): - A token to retrieve next page of results. Pass to - [ListModelsRequest.page_token][google.cloud.aiplatform.v1.ListModelsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - models = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_model.Model, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateModelRequest(proto.Message): - r"""Request message for - [ModelService.UpdateModel][google.cloud.aiplatform.v1.ModelService.UpdateModel]. - - Attributes: - model (google.cloud.aiplatform_v1.types.Model): - Required. The Model which replaces the - resource on the server. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the resource. For the - ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - """ - - model = proto.Field( - proto.MESSAGE, - number=1, - message=gca_model.Model, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class DeleteModelRequest(proto.Message): - r"""Request message for - [ModelService.DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel]. - - Attributes: - name (str): - Required. The name of the Model resource to be deleted. - Format: - ``projects/{project}/locations/{location}/models/{model}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ExportModelRequest(proto.Message): - r"""Request message for - [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel]. - - Attributes: - name (str): - Required. The resource name of the Model to export. Format: - ``projects/{project}/locations/{location}/models/{model}`` - output_config (google.cloud.aiplatform_v1.types.ExportModelRequest.OutputConfig): - Required. The desired output location and - configuration. - """ - - class OutputConfig(proto.Message): - r"""Output configuration for the Model export. - Attributes: - export_format_id (str): - The ID of the format in which the Model must be exported. - Each Model lists the [export formats it - supports][google.cloud.aiplatform.v1.Model.supported_export_formats]. - If no value is provided here, then the first from the list - of the Model's supported formats is used by default. - artifact_destination (google.cloud.aiplatform_v1.types.GcsDestination): - The Cloud Storage location where the Model artifact is to be - written to. Under the directory given as the destination a - new one with name - "``model-export--``", - where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 - format, will be created. Inside, the Model and any of its - supporting files will be written. This field should only be - set when the ``exportableContent`` field of the - [Model.supported_export_formats] object contains - ``ARTIFACT``. 
- image_destination (google.cloud.aiplatform_v1.types.ContainerRegistryDestination): - The Google Container Registry or Artifact Registry URI to - which the Model container image will be copied. This field - should only be set when the ``exportableContent`` field of - the [Model.supported_export_formats] object contains - ``IMAGE``. - """ - - export_format_id = proto.Field( - proto.STRING, - number=1, - ) - artifact_destination = proto.Field( - proto.MESSAGE, - number=3, - message=io.GcsDestination, - ) - image_destination = proto.Field( - proto.MESSAGE, - number=4, - message=io.ContainerRegistryDestination, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - output_config = proto.Field( - proto.MESSAGE, - number=2, - message=OutputConfig, - ) - - -class ExportModelOperationMetadata(proto.Message): - r"""Details of - [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel] - operation. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The common part of the operation metadata. - output_info (google.cloud.aiplatform_v1.types.ExportModelOperationMetadata.OutputInfo): - Output only. Information further describing - the output of this Model export. - """ - - class OutputInfo(proto.Message): - r"""Further describes the output of the ExportModel. Supplements - [ExportModelRequest.OutputConfig][google.cloud.aiplatform.v1.ExportModelRequest.OutputConfig]. - - Attributes: - artifact_output_uri (str): - Output only. If the Model artifact is being - exported to Google Cloud Storage, this is the - full path of the directory created, into which - the Model files are being written. - image_output_uri (str): - Output only. If the Model image is being - exported to Google Container Registry or - Artifact Registry, this is the full path of the - image created. - """ - - artifact_output_uri = proto.Field( - proto.STRING, - number=2, - ) - image_output_uri = proto.Field( - proto.STRING, - number=3, - ) - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - output_info = proto.Field( - proto.MESSAGE, - number=2, - message=OutputInfo, - ) - - -class ExportModelResponse(proto.Message): - r"""Response message of - [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel] - operation. - """ - - -class GetModelEvaluationRequest(proto.Message): - r"""Request message for - [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1.ModelService.GetModelEvaluation]. - - Attributes: - name (str): - Required. The name of the ModelEvaluation resource. Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListModelEvaluationsRequest(proto.Message): - r"""Request message for - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]. - - Attributes: - parent (str): - Required. The resource name of the Model to list the - ModelEvaluations from. Format: - ``projects/{project}/locations/{location}/models/{model}`` - filter (str): - The standard list filter. - page_size (int): - The standard list page size. - page_token (str): - The standard list page token.
Typically obtained via - [ListModelEvaluationsResponse.next_page_token][google.cloud.aiplatform.v1.ListModelEvaluationsResponse.next_page_token] - of the previous - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListModelEvaluationsResponse(proto.Message): - r"""Response message for - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]. - - Attributes: - model_evaluations (Sequence[google.cloud.aiplatform_v1.types.ModelEvaluation]): - List of ModelEvaluations in the requested - page. - next_page_token (str): - A token to retrieve next page of results. Pass to - [ListModelEvaluationsRequest.page_token][google.cloud.aiplatform.v1.ListModelEvaluationsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - model_evaluations = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=model_evaluation.ModelEvaluation, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class GetModelEvaluationSliceRequest(proto.Message): - r"""Request message for - [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1.ModelService.GetModelEvaluationSlice]. - - Attributes: - name (str): - Required. The name of the ModelEvaluationSlice resource. - Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListModelEvaluationSlicesRequest(proto.Message): - r"""Request message for - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]. - - Attributes: - parent (str): - Required. The resource name of the ModelEvaluation to list - the ModelEvaluationSlices from. Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` - filter (str): - The standard list filter. - - - ``slice.dimension`` - for =. - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained via - [ListModelEvaluationSlicesResponse.next_page_token][google.cloud.aiplatform.v1.ListModelEvaluationSlicesResponse.next_page_token] - of the previous - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListModelEvaluationSlicesResponse(proto.Message): - r"""Response message for - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]. 
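The evaluation listing requests above compose naturally: each ModelEvaluation's ``slice_dimensions`` hints at valid ``slice.dimension`` filters for the slice listing. A sketch under those assumptions, with a placeholder model resource name:

.. code:: python

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.ModelServiceClient()
    model_name = "projects/my-project/locations/us-central1/models/123"  # placeholder

    for evaluation in client.list_model_evaluations(parent=model_name):
        # Filter slices on one well-known dimension; see slice_dimensions
        # on the evaluation for the values valid for this Model.
        request = aiplatform_v1.ListModelEvaluationSlicesRequest(
            parent=evaluation.name,
            filter='slice.dimension = "annotationSpec"',
        )
        for s in client.list_model_evaluation_slices(request=request):
            print(s.slice_.dimension, s.slice_.value)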
- - Attributes: - model_evaluation_slices (Sequence[google.cloud.aiplatform_v1.types.ModelEvaluationSlice]): - List of ModelEvaluationSlices in the requested - page. - next_page_token (str): - A token to retrieve next page of results. Pass to - [ListModelEvaluationSlicesRequest.page_token][google.cloud.aiplatform.v1.ListModelEvaluationSlicesRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - model_evaluation_slices = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=model_evaluation_slice.ModelEvaluationSlice, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/operation.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/operation.py deleted file mode 100644 index b5d0e5b613..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/operation.py +++ /dev/null @@ -1,81 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'GenericOperationMetadata', - 'DeleteOperationMetadata', - }, -) - - -class GenericOperationMetadata(proto.Message): - r"""Generic Metadata shared by all operations. - Attributes: - partial_failures (Sequence[google.rpc.status_pb2.Status]): - Output only. Partial failures encountered. - E.g. single files that couldn't be read. - This field should never exceed 20 entries. - Status details field will contain standard GCP - error details. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the operation was - created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the operation was - updated for the last time. If the operation has - finished (successfully or not), this is the - finish time. - """ - - partial_failures = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=status_pb2.Status, - ) - create_time = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - - -class DeleteOperationMetadata(proto.Message): - r"""Details of operations that perform deletes of any entities. - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The common part of the operation metadata.
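These metadata messages surface through the long-running operation objects returned by the generated clients. A brief sketch, assuming a placeholder model name; ``operation.metadata`` deserializes to the metadata type declared for the RPC (here, DeleteOperationMetadata):

.. code:: python

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.ModelServiceClient()
    operation = client.delete_model(
        name="projects/my-project/locations/us-central1/models/123")  # placeholder
    # Inspect the common metadata while the operation runs.
    print(operation.metadata.generic_metadata.create_time)
    operation.result()  # block until the delete completes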
- """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message='GenericOperationMetadata', - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_service.py deleted file mode 100644 index 28007208b5..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_service.py +++ /dev/null @@ -1,381 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job -from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'CreateTrainingPipelineRequest', - 'GetTrainingPipelineRequest', - 'ListTrainingPipelinesRequest', - 'ListTrainingPipelinesResponse', - 'DeleteTrainingPipelineRequest', - 'CancelTrainingPipelineRequest', - 'CreatePipelineJobRequest', - 'GetPipelineJobRequest', - 'ListPipelineJobsRequest', - 'ListPipelineJobsResponse', - 'DeletePipelineJobRequest', - 'CancelPipelineJobRequest', - }, -) - - -class CreateTrainingPipelineRequest(proto.Message): - r"""Request message for - [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CreateTrainingPipeline]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - TrainingPipeline in. Format: - ``projects/{project}/locations/{location}`` - training_pipeline (google.cloud.aiplatform_v1.types.TrainingPipeline): - Required. The TrainingPipeline to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - training_pipeline = proto.Field( - proto.MESSAGE, - number=2, - message=gca_training_pipeline.TrainingPipeline, - ) - - -class GetTrainingPipelineRequest(proto.Message): - r"""Request message for - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline]. - - Attributes: - name (str): - Required. The name of the TrainingPipeline resource. Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListTrainingPipelinesRequest(proto.Message): - r"""Request message for - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines]. - - Attributes: - parent (str): - Required. The resource name of the Location to list the - TrainingPipelines from. Format: - ``projects/{project}/locations/{location}`` - filter (str): - Lists the PipelineJobs that match the filter expression. The - following fields are supported: - - - ``pipeline_name``: Supports ``=`` and ``!=`` comparisons. - - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. 
Values must be in RFC - 3339 format. - - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``end_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``labels``: Supports key-value equality and key presence. - - Filter expressions can be combined together using logical - operators (``AND`` & ``OR``). For example: - ``pipeline_name="test" AND create_time>"2020-05-18T13:30:00Z"``. - - The syntax to define filter expression is based on - https://google.aip.dev/160. - - Examples: - - - ``create_time>"2021-05-18T00:00:00Z" OR update_time>"2020-05-18T00:00:00Z"`` - PipelineJobs created or updated after 2020-05-18 00:00:00 - UTC. - - ``labels.env = "prod"`` PipelineJobs with label "env" set - to "prod". - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained via - [ListTrainingPipelinesResponse.next_page_token][google.cloud.aiplatform.v1.ListTrainingPipelinesResponse.next_page_token] - of the previous - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListTrainingPipelinesResponse(proto.Message): - r"""Response message for - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines] - - Attributes: - training_pipelines (Sequence[google.cloud.aiplatform_v1.types.TrainingPipeline]): - List of TrainingPipelines in the requested - page. - next_page_token (str): - A token to retrieve the next page of results. Pass to - [ListTrainingPipelinesRequest.page_token][google.cloud.aiplatform.v1.ListTrainingPipelinesRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - training_pipelines = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_training_pipeline.TrainingPipeline, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteTrainingPipelineRequest(proto.Message): - r"""Request message for - [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.DeleteTrainingPipeline]. - - Attributes: - name (str): - Required. The name of the TrainingPipeline resource to be - deleted. Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CancelTrainingPipelineRequest(proto.Message): - r"""Request message for - [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CancelTrainingPipeline]. - - Attributes: - name (str): - Required. The name of the TrainingPipeline to cancel. - Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CreatePipelineJobRequest(proto.Message): - r"""Request message for - [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1.PipelineService.CreatePipelineJob]. 
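As a usage note for the filter grammar above, a hedged sketch that lists successful training pipelines; the parent is a placeholder and the enum's ``.name`` attribute gives the readable state:

.. code:: python

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.PipelineServiceClient()
    request = aiplatform_v1.ListTrainingPipelinesRequest(
        parent="projects/my-project/locations/us-central1",  # placeholder
        filter='state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"',
    )
    for pipeline in client.list_training_pipelines(request=request):
        print(pipeline.name, pipeline.state.name)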
- - Attributes: - parent (str): - Required. The resource name of the Location to create the - PipelineJob in. Format: - ``projects/{project}/locations/{location}`` - pipeline_job (google.cloud.aiplatform_v1.types.PipelineJob): - Required. The PipelineJob to create. - pipeline_job_id (str): - The ID to use for the PipelineJob, which will become the - final component of the PipelineJob name. If not provided, an - ID will be automatically generated. - - This value should be less than 128 characters, and valid - characters are /[a-z][0-9]-/. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - pipeline_job = proto.Field( - proto.MESSAGE, - number=2, - message=gca_pipeline_job.PipelineJob, - ) - pipeline_job_id = proto.Field( - proto.STRING, - number=3, - ) - - -class GetPipelineJobRequest(proto.Message): - r"""Request message for - [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob]. - - Attributes: - name (str): - Required. The name of the PipelineJob resource. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListPipelineJobsRequest(proto.Message): - r"""Request message for - [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs]. - - Attributes: - parent (str): - Required. The resource name of the Location to list the - PipelineJobs from. Format: - ``projects/{project}/locations/{location}`` - filter (str): - Lists the PipelineJobs that match the filter expression. The - following fields are supported: - - - ``pipeline_name``: Supports ``=`` and ``!=`` comparisons. - - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``end_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``labels``: Supports key-value equality and key presence. - - Filter expressions can be combined together using logical - operators (``AND`` & ``OR``). For example: - ``pipeline_name="test" AND create_time>"2020-05-18T13:30:00Z"``. - - The syntax to define filter expression is based on - https://google.aip.dev/160. - - Examples: - - - ``create_time>"2021-05-18T00:00:00Z" OR update_time>"2020-05-18T00:00:00Z"`` - PipelineJobs created or updated after 2020-05-18 00:00:00 - UTC. - - ``labels.env = "prod"`` PipelineJobs with label "env" set - to "prod". - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained via - [ListPipelineJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListPipelineJobsResponse.next_page_token] - of the previous - [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs] - call. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - - -class ListPipelineJobsResponse(proto.Message): - r"""Response message for - [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs] - - Attributes: - pipeline_jobs (Sequence[google.cloud.aiplatform_v1.types.PipelineJob]): - List of PipelineJobs in the requested page. - next_page_token (str): - A token to retrieve the next page of results. Pass to - [ListPipelineJobsRequest.page_token][google.cloud.aiplatform.v1.ListPipelineJobsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - pipeline_jobs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_pipeline_job.PipelineJob, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeletePipelineJobRequest(proto.Message): - r"""Request message for - [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1.PipelineService.DeletePipelineJob]. - - Attributes: - name (str): - Required.
The name of the PipelineJob resource to be - deleted. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CancelPipelineJobRequest(proto.Message): - r"""Request message for - [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1.PipelineService.CancelPipelineJob]. - - Attributes: - name (str): - Required. The name of the PipelineJob to cancel. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_state.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_state.py deleted file mode 100644 index 0b41968239..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_state.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'PipelineState', - }, -) - - -class PipelineState(proto.Enum): - r"""Describes the state of a pipeline.""" - PIPELINE_STATE_UNSPECIFIED = 0 - PIPELINE_STATE_QUEUED = 1 - PIPELINE_STATE_PENDING = 2 - PIPELINE_STATE_RUNNING = 3 - PIPELINE_STATE_SUCCEEDED = 4 - PIPELINE_STATE_FAILED = 5 - PIPELINE_STATE_CANCELLING = 6 - PIPELINE_STATE_CANCELLED = 7 - PIPELINE_STATE_PAUSED = 8 - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/prediction_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/prediction_service.py deleted file mode 100644 index 50f2f7baa9..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/prediction_service.py +++ /dev/null @@ -1,103 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import struct_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'PredictRequest', - 'PredictResponse', - }, -) - - -class PredictRequest(proto.Message): - r"""Request message for - [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. - - Attributes: - endpoint (str): - Required. The name of the Endpoint requested to serve the - prediction. 
Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - instances (Sequence[google.protobuf.struct_pb2.Value]): - Required. The instances that are the input to the prediction - call. A DeployedModel may have an upper limit on the number - of instances it supports per request; when that limit is - exceeded, the prediction call errors for AutoML Models, - while for customer-created Models the behavior is as - documented by that Model. The schema of any - single instance may be specified via Endpoint's - DeployedModels' - [Model's][google.cloud.aiplatform.v1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri]. - parameters (google.protobuf.struct_pb2.Value): - The parameters that govern the prediction. The schema of the - parameters may be specified via Endpoint's DeployedModels' - [Model's ][google.cloud.aiplatform.v1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. - """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - instances = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=struct_pb2.Value, - ) - parameters = proto.Field( - proto.MESSAGE, - number=3, - message=struct_pb2.Value, - ) - - -class PredictResponse(proto.Message): - r"""Response message for - [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. - - Attributes: - predictions (Sequence[google.protobuf.struct_pb2.Value]): - The predictions that are the output of the predictions call. - The schema of any single prediction may be specified via - Endpoint's DeployedModels' [Model's - ][google.cloud.aiplatform.v1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [prediction_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.prediction_schema_uri]. - deployed_model_id (str): - ID of the Endpoint's DeployedModel that - served this prediction. - """ - - predictions = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=struct_pb2.Value, - ) - deployed_model_id = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool.py deleted file mode 100644 index 15ef6b0616..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool.py +++ /dev/null @@ -1,80 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
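Stepping back to the prediction messages above: instances are schemaless ``google.protobuf.Value`` objects, so a caller builds them explicitly; their real layout is dictated by the Model's ``instance_schema_uri``. A sketch with a placeholder endpoint and feature names:

.. code:: python

    from google.cloud import aiplatform_v1
    from google.protobuf import struct_pb2

    client = aiplatform_v1.PredictionServiceClient()

    # Placeholder feature payload, for illustration only.
    instance = struct_pb2.Value()
    instance.struct_value.update({"feature_a": 1.0, "feature_b": "x"})

    response = client.predict(
        endpoint="projects/my-project/locations/us-central1/endpoints/456",  # placeholder
        instances=[instance],
    )
    print(response.deployed_model_id, list(response.predictions))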
-#
-import proto  # type: ignore
-
-
-__protobuf__ = proto.module(
-    package='google.cloud.aiplatform.v1',
-    manifest={
-        'SpecialistPool',
-    },
-)
-
-
-class SpecialistPool(proto.Message):
-    r"""SpecialistPool represents customers' own workforce to work on
-    their data labeling jobs. It includes a group of specialist
-    managers who are responsible for managing the labelers in this
-    pool as well as customers' data labeling jobs associated with
-    this pool.
-    Customers create a specialist pool as well as start data labeling
-    jobs on Cloud; managers and labelers work with the jobs using the
-    CrowdCompute console.
-
-    Attributes:
-        name (str):
-            Required. The resource name of the
-            SpecialistPool.
-        display_name (str):
-            Required. The user-defined name of the
-            SpecialistPool. The name can be up to 128
-            characters long and can consist of any UTF-8
-            characters.
-            This field should be unique at the project level.
-        specialist_managers_count (int):
-            Output only. The number of Specialists in
-            this SpecialistPool.
-        specialist_manager_emails (Sequence[str]):
-            The email addresses of the specialists in the
-            SpecialistPool.
-        pending_data_labeling_jobs (Sequence[str]):
-            Output only. The resource name of the pending
-            data labeling jobs.
-    """
-
-    name = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-    display_name = proto.Field(
-        proto.STRING,
-        number=2,
-    )
-    specialist_managers_count = proto.Field(
-        proto.INT32,
-        number=3,
-    )
-    specialist_manager_emails = proto.RepeatedField(
-        proto.STRING,
-        number=4,
-    )
-    pending_data_labeling_jobs = proto.RepeatedField(
-        proto.STRING,
-        number=5,
-    )
-
-
-__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool_service.py
deleted file mode 100644
index b853dc8ece..0000000000
--- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool_service.py
+++ /dev/null
@@ -1,237 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import proto  # type: ignore
-
-from google.cloud.aiplatform_v1.types import operation
-from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool
-from google.protobuf import field_mask_pb2  # type: ignore
-
-
-__protobuf__ = proto.module(
-    package='google.cloud.aiplatform.v1',
-    manifest={
-        'CreateSpecialistPoolRequest',
-        'CreateSpecialistPoolOperationMetadata',
-        'GetSpecialistPoolRequest',
-        'ListSpecialistPoolsRequest',
-        'ListSpecialistPoolsResponse',
-        'DeleteSpecialistPoolRequest',
-        'UpdateSpecialistPoolRequest',
-        'UpdateSpecialistPoolOperationMetadata',
-    },
-)
-
-
-class CreateSpecialistPoolRequest(proto.Message):
-    r"""Request message for
-    [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool].
-
-    Attributes:
-        parent (str):
-            Required. The parent Project name for the new
-            SpecialistPool. The form is
-            ``projects/{project}/locations/{location}``.
-        specialist_pool (google.cloud.aiplatform_v1.types.SpecialistPool):
-            Required. The SpecialistPool to create.
-    """
-
-    parent = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-    specialist_pool = proto.Field(
-        proto.MESSAGE,
-        number=2,
-        message=gca_specialist_pool.SpecialistPool,
-    )
-
-
-class CreateSpecialistPoolOperationMetadata(proto.Message):
-    r"""Runtime operation information for
-    [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool].
-
-    Attributes:
-        generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
-            The operation generic information.
-    """
-
-    generic_metadata = proto.Field(
-        proto.MESSAGE,
-        number=1,
-        message=operation.GenericOperationMetadata,
-    )
-
-
-class GetSpecialistPoolRequest(proto.Message):
-    r"""Request message for
-    [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.GetSpecialistPool].
-
-    Attributes:
-        name (str):
-            Required. The name of the SpecialistPool resource. The form
-            is
-            ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``.
-    """
-
-    name = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-
-
-class ListSpecialistPoolsRequest(proto.Message):
-    r"""Request message for
-    [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools].
-
-    Attributes:
-        parent (str):
-            Required. The name of the SpecialistPool's parent resource.
-            Format: ``projects/{project}/locations/{location}``
-        page_size (int):
-            The standard list page size.
-        page_token (str):
-            The standard list page token. Typically obtained by
-            [ListSpecialistPoolsResponse.next_page_token][google.cloud.aiplatform.v1.ListSpecialistPoolsResponse.next_page_token]
-            of the previous
-            [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools]
-            call. Return first page if empty.
-        read_mask (google.protobuf.field_mask_pb2.FieldMask):
-            Mask specifying which fields to read.
-            FieldMask represents a set of symbolic field
-            paths.
-    """
-
-    parent = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-    page_size = proto.Field(
-        proto.INT32,
-        number=2,
-    )
-    page_token = proto.Field(
-        proto.STRING,
-        number=3,
-    )
-    read_mask = proto.Field(
-        proto.MESSAGE,
-        number=4,
-        message=field_mask_pb2.FieldMask,
-    )
-
-
-class ListSpecialistPoolsResponse(proto.Message):
-    r"""Response message for
-    [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools].
-
-    Attributes:
-        specialist_pools (Sequence[google.cloud.aiplatform_v1.types.SpecialistPool]):
-            A list of SpecialistPools that matches the
-            specified filter in the request.
-        next_page_token (str):
-            The standard List next-page token.
-    """
-
-    @property
-    def raw_page(self):
-        return self
-
-    specialist_pools = proto.RepeatedField(
-        proto.MESSAGE,
-        number=1,
-        message=gca_specialist_pool.SpecialistPool,
-    )
-    next_page_token = proto.Field(
-        proto.STRING,
-        number=2,
-    )
-
-
-class DeleteSpecialistPoolRequest(proto.Message):
-    r"""Request message for
-    [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.DeleteSpecialistPool].
-
-    Attributes:
-        name (str):
-            Required. The resource name of the SpecialistPool to delete.
- Format: - ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}`` - force (bool): - If set to true, any specialist managers in - this SpecialistPool will also be deleted. - (Otherwise, the request will only work if the - SpecialistPool has no specialist managers.) - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - force = proto.Field( - proto.BOOL, - number=2, - ) - - -class UpdateSpecialistPoolRequest(proto.Message): - r"""Request message for - [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool]. - - Attributes: - specialist_pool (google.cloud.aiplatform_v1.types.SpecialistPool): - Required. The SpecialistPool which replaces - the resource on the server. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the - resource. - """ - - specialist_pool = proto.Field( - proto.MESSAGE, - number=1, - message=gca_specialist_pool.SpecialistPool, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class UpdateSpecialistPoolOperationMetadata(proto.Message): - r"""Runtime operation metadata for - [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool]. - - Attributes: - specialist_pool (str): - Output only. The name of the SpecialistPool to which the - specialists are being added. Format: - ``projects/{project_id}/locations/{location_id}/specialistPools/{specialist_pool}`` - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The operation generic information. - """ - - specialist_pool = proto.Field( - proto.STRING, - number=1, - ) - generic_metadata = proto.Field( - proto.MESSAGE, - number=2, - message=operation.GenericOperationMetadata, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/study.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/study.py deleted file mode 100644 index 6c22549254..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/study.py +++ /dev/null @@ -1,609 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'Trial', - 'StudySpec', - 'Measurement', - }, -) - - -class Trial(proto.Message): - r"""A message representing a Trial. A Trial contains a unique set - of Parameters that has been or will be evaluated, along with the - objective metrics got by running the Trial. - - Attributes: - name (str): - Output only. Resource name of the Trial - assigned by the service. - id (str): - Output only. The identifier of the Trial - assigned by the service. 
- state (google.cloud.aiplatform_v1.types.Trial.State): - Output only. The detailed state of the Trial. - parameters (Sequence[google.cloud.aiplatform_v1.types.Trial.Parameter]): - Output only. The parameters of the Trial. - final_measurement (google.cloud.aiplatform_v1.types.Measurement): - Output only. The final measurement containing - the objective value. - measurements (Sequence[google.cloud.aiplatform_v1.types.Measurement]): - Output only. A list of measurements that are strictly - lexicographically ordered by their induced tuples (steps, - elapsed_duration). These are used for early stopping - computations. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the Trial was started. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the Trial's status changed to - ``SUCCEEDED`` or ``INFEASIBLE``. - client_id (str): - Output only. The identifier of the client that originally - requested this Trial. Each client is identified by a unique - client_id. When a client asks for a suggestion, Vizier will - assign it a Trial. The client should evaluate the Trial, - complete it, and report back to Vizier. If suggestion is - asked again by same client_id before the Trial is completed, - the same Trial will be returned. Multiple clients with - different client_ids can ask for suggestions simultaneously, - each of them will get their own Trial. - infeasible_reason (str): - Output only. A human readable string describing why the - Trial is infeasible. This is set only if Trial state is - ``INFEASIBLE``. - custom_job (str): - Output only. The CustomJob name linked to the - Trial. It's set for a HyperparameterTuningJob's - Trial. - """ - class State(proto.Enum): - r"""Describes a Trial state.""" - STATE_UNSPECIFIED = 0 - REQUESTED = 1 - ACTIVE = 2 - STOPPING = 3 - SUCCEEDED = 4 - INFEASIBLE = 5 - - class Parameter(proto.Message): - r"""A message representing a parameter to be tuned. - Attributes: - parameter_id (str): - Output only. The ID of the parameter. The parameter should - be defined in [StudySpec's - Parameters][google.cloud.aiplatform.v1.StudySpec.parameters]. - value (google.protobuf.struct_pb2.Value): - Output only. The value of the parameter. ``number_value`` - will be set if a parameter defined in StudySpec is in type - 'INTEGER', 'DOUBLE' or 'DISCRETE'. ``string_value`` will be - set if a parameter defined in StudySpec is in type - 'CATEGORICAL'. - """ - - parameter_id = proto.Field( - proto.STRING, - number=1, - ) - value = proto.Field( - proto.MESSAGE, - number=2, - message=struct_pb2.Value, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - id = proto.Field( - proto.STRING, - number=2, - ) - state = proto.Field( - proto.ENUM, - number=3, - enum=State, - ) - parameters = proto.RepeatedField( - proto.MESSAGE, - number=4, - message=Parameter, - ) - final_measurement = proto.Field( - proto.MESSAGE, - number=5, - message='Measurement', - ) - measurements = proto.RepeatedField( - proto.MESSAGE, - number=6, - message='Measurement', - ) - start_time = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=8, - message=timestamp_pb2.Timestamp, - ) - client_id = proto.Field( - proto.STRING, - number=9, - ) - infeasible_reason = proto.Field( - proto.STRING, - number=10, - ) - custom_job = proto.Field( - proto.STRING, - number=11, - ) - - -class StudySpec(proto.Message): - r"""Represents specification of a Study. 
- Attributes: - metrics (Sequence[google.cloud.aiplatform_v1.types.StudySpec.MetricSpec]): - Required. Metric specs for the Study. - parameters (Sequence[google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec]): - Required. The set of parameters to tune. - algorithm (google.cloud.aiplatform_v1.types.StudySpec.Algorithm): - The search algorithm specified for the Study. - observation_noise (google.cloud.aiplatform_v1.types.StudySpec.ObservationNoise): - The observation noise level of the study. - Currently only supported by the Vizier service. - Not supported by HyperparamterTuningJob or - TrainingPipeline. - measurement_selection_type (google.cloud.aiplatform_v1.types.StudySpec.MeasurementSelectionType): - Describe which measurement selection type - will be used - """ - class Algorithm(proto.Enum): - r"""The available search algorithms for the Study.""" - ALGORITHM_UNSPECIFIED = 0 - GRID_SEARCH = 2 - RANDOM_SEARCH = 3 - - class ObservationNoise(proto.Enum): - r"""Describes the noise level of the repeated observations. - "Noisy" means that the repeated observations with the same Trial - parameters may lead to different metric evaluations. - """ - OBSERVATION_NOISE_UNSPECIFIED = 0 - LOW = 1 - HIGH = 2 - - class MeasurementSelectionType(proto.Enum): - r"""This indicates which measurement to use if/when the service - automatically selects the final measurement from previously reported - intermediate measurements. Choose this based on two considerations: - A) Do you expect your measurements to monotonically improve? If so, - choose LAST_MEASUREMENT. On the other hand, if you're in a situation - where your system can "over-train" and you expect the performance to - get better for a while but then start declining, choose - BEST_MEASUREMENT. B) Are your measurements significantly noisy - and/or irreproducible? If so, BEST_MEASUREMENT will tend to be - over-optimistic, and it may be better to choose LAST_MEASUREMENT. If - both or neither of (A) and (B) apply, it doesn't matter which - selection type is chosen. - """ - MEASUREMENT_SELECTION_TYPE_UNSPECIFIED = 0 - LAST_MEASUREMENT = 1 - BEST_MEASUREMENT = 2 - - class MetricSpec(proto.Message): - r"""Represents a metric to optimize. - Attributes: - metric_id (str): - Required. The ID of the metric. Must not - contain whitespaces and must be unique amongst - all MetricSpecs. - goal (google.cloud.aiplatform_v1.types.StudySpec.MetricSpec.GoalType): - Required. The optimization goal of the - metric. - """ - class GoalType(proto.Enum): - r"""The available types of optimization goals.""" - GOAL_TYPE_UNSPECIFIED = 0 - MAXIMIZE = 1 - MINIMIZE = 2 - - metric_id = proto.Field( - proto.STRING, - number=1, - ) - goal = proto.Field( - proto.ENUM, - number=2, - enum='StudySpec.MetricSpec.GoalType', - ) - - class ParameterSpec(proto.Message): - r"""Represents a single parameter to optimize. - Attributes: - double_value_spec (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.DoubleValueSpec): - The value spec for a 'DOUBLE' parameter. - integer_value_spec (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.IntegerValueSpec): - The value spec for an 'INTEGER' parameter. - categorical_value_spec (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.CategoricalValueSpec): - The value spec for a 'CATEGORICAL' parameter. - discrete_value_spec (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.DiscreteValueSpec): - The value spec for a 'DISCRETE' parameter. - parameter_id (str): - Required. The ID of the parameter. 
Must not - contain whitespaces and must be unique amongst - all ParameterSpecs. - scale_type (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.ScaleType): - How the parameter should be scaled. Leave unset for - ``CATEGORICAL`` parameters. - conditional_parameter_specs (Sequence[google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.ConditionalParameterSpec]): - A conditional parameter node is active if the parameter's - value matches the conditional node's parent_value_condition. - - If two items in conditional_parameter_specs have the same - name, they must have disjoint parent_value_condition. - """ - class ScaleType(proto.Enum): - r"""The type of scaling that should be applied to this parameter.""" - SCALE_TYPE_UNSPECIFIED = 0 - UNIT_LINEAR_SCALE = 1 - UNIT_LOG_SCALE = 2 - UNIT_REVERSE_LOG_SCALE = 3 - - class DoubleValueSpec(proto.Message): - r"""Value specification for a parameter in ``DOUBLE`` type. - Attributes: - min_value (float): - Required. Inclusive minimum value of the - parameter. - max_value (float): - Required. Inclusive maximum value of the - parameter. - default_value (float): - A default value for a ``DOUBLE`` parameter that is assumed - to be a relatively good starting point. Unset value signals - that there is no offered starting point. - - Currently only supported by the Vizier service. Not - supported by HyperparamterTuningJob or TrainingPipeline. - """ - - min_value = proto.Field( - proto.DOUBLE, - number=1, - ) - max_value = proto.Field( - proto.DOUBLE, - number=2, - ) - default_value = proto.Field( - proto.DOUBLE, - number=4, - optional=True, - ) - - class IntegerValueSpec(proto.Message): - r"""Value specification for a parameter in ``INTEGER`` type. - Attributes: - min_value (int): - Required. Inclusive minimum value of the - parameter. - max_value (int): - Required. Inclusive maximum value of the - parameter. - default_value (int): - A default value for an ``INTEGER`` parameter that is assumed - to be a relatively good starting point. Unset value signals - that there is no offered starting point. - - Currently only supported by the Vizier service. Not - supported by HyperparamterTuningJob or TrainingPipeline. - """ - - min_value = proto.Field( - proto.INT64, - number=1, - ) - max_value = proto.Field( - proto.INT64, - number=2, - ) - default_value = proto.Field( - proto.INT64, - number=4, - optional=True, - ) - - class CategoricalValueSpec(proto.Message): - r"""Value specification for a parameter in ``CATEGORICAL`` type. - Attributes: - values (Sequence[str]): - Required. The list of possible categories. - default_value (str): - A default value for a ``CATEGORICAL`` parameter that is - assumed to be a relatively good starting point. Unset value - signals that there is no offered starting point. - - Currently only supported by the Vizier service. Not - supported by HyperparamterTuningJob or TrainingPipeline. - """ - - values = proto.RepeatedField( - proto.STRING, - number=1, - ) - default_value = proto.Field( - proto.STRING, - number=3, - optional=True, - ) - - class DiscreteValueSpec(proto.Message): - r"""Value specification for a parameter in ``DISCRETE`` type. - Attributes: - values (Sequence[float]): - Required. A list of possible values. - The list should be in increasing order and at - least 1e-10 apart. For instance, this parameter - might have possible settings of 1.5, 2.5, and - 4.0. This list should not contain more than - 1,000 values. 
- default_value (float): - A default value for a ``DISCRETE`` parameter that is assumed - to be a relatively good starting point. Unset value signals - that there is no offered starting point. It automatically - rounds to the nearest feasible discrete point. - - Currently only supported by the Vizier service. Not - supported by HyperparamterTuningJob or TrainingPipeline. - """ - - values = proto.RepeatedField( - proto.DOUBLE, - number=1, - ) - default_value = proto.Field( - proto.DOUBLE, - number=3, - optional=True, - ) - - class ConditionalParameterSpec(proto.Message): - r"""Represents a parameter spec with condition from its parent - parameter. - - Attributes: - parent_discrete_values (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.ConditionalParameterSpec.DiscreteValueCondition): - The spec for matching values from a parent parameter of - ``DISCRETE`` type. - parent_int_values (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.ConditionalParameterSpec.IntValueCondition): - The spec for matching values from a parent parameter of - ``INTEGER`` type. - parent_categorical_values (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.ConditionalParameterSpec.CategoricalValueCondition): - The spec for matching values from a parent parameter of - ``CATEGORICAL`` type. - parameter_spec (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec): - Required. The spec for a conditional - parameter. - """ - - class DiscreteValueCondition(proto.Message): - r"""Represents the spec to match discrete values from parent - parameter. - - Attributes: - values (Sequence[float]): - Required. Matches values of the parent parameter of - 'DISCRETE' type. All values must exist in - ``discrete_value_spec`` of parent parameter. - - The Epsilon of the value matching is 1e-10. - """ - - values = proto.RepeatedField( - proto.DOUBLE, - number=1, - ) - - class IntValueCondition(proto.Message): - r"""Represents the spec to match integer values from parent - parameter. - - Attributes: - values (Sequence[int]): - Required. Matches values of the parent parameter of - 'INTEGER' type. All values must lie in - ``integer_value_spec`` of parent parameter. - """ - - values = proto.RepeatedField( - proto.INT64, - number=1, - ) - - class CategoricalValueCondition(proto.Message): - r"""Represents the spec to match categorical values from parent - parameter. - - Attributes: - values (Sequence[str]): - Required. Matches values of the parent parameter of - 'CATEGORICAL' type. All values must exist in - ``categorical_value_spec`` of parent parameter. 
- """ - - values = proto.RepeatedField( - proto.STRING, - number=1, - ) - - parent_discrete_values = proto.Field( - proto.MESSAGE, - number=2, - oneof='parent_value_condition', - message='StudySpec.ParameterSpec.ConditionalParameterSpec.DiscreteValueCondition', - ) - parent_int_values = proto.Field( - proto.MESSAGE, - number=3, - oneof='parent_value_condition', - message='StudySpec.ParameterSpec.ConditionalParameterSpec.IntValueCondition', - ) - parent_categorical_values = proto.Field( - proto.MESSAGE, - number=4, - oneof='parent_value_condition', - message='StudySpec.ParameterSpec.ConditionalParameterSpec.CategoricalValueCondition', - ) - parameter_spec = proto.Field( - proto.MESSAGE, - number=1, - message='StudySpec.ParameterSpec', - ) - - double_value_spec = proto.Field( - proto.MESSAGE, - number=2, - oneof='parameter_value_spec', - message='StudySpec.ParameterSpec.DoubleValueSpec', - ) - integer_value_spec = proto.Field( - proto.MESSAGE, - number=3, - oneof='parameter_value_spec', - message='StudySpec.ParameterSpec.IntegerValueSpec', - ) - categorical_value_spec = proto.Field( - proto.MESSAGE, - number=4, - oneof='parameter_value_spec', - message='StudySpec.ParameterSpec.CategoricalValueSpec', - ) - discrete_value_spec = proto.Field( - proto.MESSAGE, - number=5, - oneof='parameter_value_spec', - message='StudySpec.ParameterSpec.DiscreteValueSpec', - ) - parameter_id = proto.Field( - proto.STRING, - number=1, - ) - scale_type = proto.Field( - proto.ENUM, - number=6, - enum='StudySpec.ParameterSpec.ScaleType', - ) - conditional_parameter_specs = proto.RepeatedField( - proto.MESSAGE, - number=10, - message='StudySpec.ParameterSpec.ConditionalParameterSpec', - ) - - metrics = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=MetricSpec, - ) - parameters = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=ParameterSpec, - ) - algorithm = proto.Field( - proto.ENUM, - number=3, - enum=Algorithm, - ) - observation_noise = proto.Field( - proto.ENUM, - number=6, - enum=ObservationNoise, - ) - measurement_selection_type = proto.Field( - proto.ENUM, - number=7, - enum=MeasurementSelectionType, - ) - - -class Measurement(proto.Message): - r"""A message representing a Measurement of a Trial. A - Measurement contains the Metrics got by executing a Trial using - suggested hyperparameter values. - - Attributes: - elapsed_duration (google.protobuf.duration_pb2.Duration): - Output only. Time that the Trial has been - running at the point of this Measurement. - step_count (int): - Output only. The number of steps the machine - learning model has been trained for. Must be - non-negative. - metrics (Sequence[google.cloud.aiplatform_v1.types.Measurement.Metric]): - Output only. A list of metrics got by - evaluating the objective functions using - suggested Parameter values. - """ - - class Metric(proto.Message): - r"""A message representing a metric in the measurement. - Attributes: - metric_id (str): - Output only. The ID of the Metric. The Metric should be - defined in [StudySpec's - Metrics][google.cloud.aiplatform.v1.StudySpec.metrics]. - value (float): - Output only. The value for this metric. 
- """ - - metric_id = proto.Field( - proto.STRING, - number=1, - ) - value = proto.Field( - proto.DOUBLE, - number=2, - ) - - elapsed_duration = proto.Field( - proto.MESSAGE, - number=1, - message=duration_pb2.Duration, - ) - step_count = proto.Field( - proto.INT64, - number=2, - ) - metrics = proto.RepeatedField( - proto.MESSAGE, - number=3, - message=Metric, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/training_pipeline.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/training_pipeline.py deleted file mode 100644 index a48ebdbea6..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/training_pipeline.py +++ /dev/null @@ -1,547 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1.types import io -from google.cloud.aiplatform_v1.types import model -from google.cloud.aiplatform_v1.types import pipeline_state -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'TrainingPipeline', - 'InputDataConfig', - 'FractionSplit', - 'FilterSplit', - 'PredefinedSplit', - 'TimestampSplit', - }, -) - - -class TrainingPipeline(proto.Message): - r"""The TrainingPipeline orchestrates tasks associated with training a - Model. It always executes the training task, and optionally may also - export data from Vertex AI's Dataset which becomes the training - input, [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] - the Model to Vertex AI, and evaluate the Model. - - Attributes: - name (str): - Output only. Resource name of the - TrainingPipeline. - display_name (str): - Required. The user-defined name of this - TrainingPipeline. - input_data_config (google.cloud.aiplatform_v1.types.InputDataConfig): - Specifies Vertex AI owned input data that may be used for - training the Model. The TrainingPipeline's - [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition] - should make clear whether this config is used and if there - are any special requirements on how it should be filled. If - nothing about this config is mentioned in the - [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition], - then it should be assumed that the TrainingPipeline does not - depend on this configuration. - training_task_definition (str): - Required. A Google Cloud Storage path to the - YAML file that defines the training task which - is responsible for producing the model artifact, - and may also include additional auxiliary work. - The definition files that can be used here are - found in gs://google-cloud- - aiplatform/schema/trainingjob/definition/. 
Note: - The URI given on output will be immutable and - probably different, including the URI scheme, - than the one given on input. The output URI will - point to a location where the user only has a - read access. - training_task_inputs (google.protobuf.struct_pb2.Value): - Required. The training task's parameter(s), as specified in - the - [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition]'s - ``inputs``. - training_task_metadata (google.protobuf.struct_pb2.Value): - Output only. The metadata information as specified in the - [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition]'s - ``metadata``. This metadata is an auxiliary runtime and - final information about the training task. While the - pipeline is running this information is populated only at a - best effort basis. Only present if the pipeline's - [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition] - contains ``metadata`` object. - model_to_upload (google.cloud.aiplatform_v1.types.Model): - Describes the Model that may be uploaded (via - [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel]) - by this TrainingPipeline. The TrainingPipeline's - [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition] - should make clear whether this Model description should be - populated, and if there are any special requirements - regarding how it should be filled. If nothing is mentioned - in the - [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition], - then it should be assumed that this field should not be - filled and the training task either uploads the Model - without a need of this information, or that training task - does not support uploading a Model as part of the pipeline. - When the Pipeline's state becomes - ``PIPELINE_STATE_SUCCEEDED`` and the trained Model had been - uploaded into Vertex AI, then the model_to_upload's resource - [name][google.cloud.aiplatform.v1.Model.name] is populated. - The Model is always uploaded into the Project and Location - in which this pipeline is. - state (google.cloud.aiplatform_v1.types.PipelineState): - Output only. The detailed state of the - pipeline. - error (google.rpc.status_pb2.Status): - Output only. Only populated when the pipeline's state is - ``PIPELINE_STATE_FAILED`` or ``PIPELINE_STATE_CANCELLED``. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the TrainingPipeline - was created. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the TrainingPipeline for the first - time entered the ``PIPELINE_STATE_RUNNING`` state. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the TrainingPipeline entered any of - the following states: ``PIPELINE_STATE_SUCCEEDED``, - ``PIPELINE_STATE_FAILED``, ``PIPELINE_STATE_CANCELLED``. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the TrainingPipeline - was most recently updated. - labels (Sequence[google.cloud.aiplatform_v1.types.TrainingPipeline.LabelsEntry]): - The labels with user-defined metadata to - organize TrainingPipelines. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. 
-            See https://goo.gl/xmQnxf for more information
-            and examples of labels.
-        encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec):
-            Customer-managed encryption key spec for a TrainingPipeline.
-            If set, this TrainingPipeline will be secured by this key.
-
-            Note: Model trained by this TrainingPipeline is also secured
-            by this key if
-            [model_to_upload][google.cloud.aiplatform.v1.TrainingPipeline.encryption_spec]
-            is not set separately.
-    """
-
-    name = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-    display_name = proto.Field(
-        proto.STRING,
-        number=2,
-    )
-    input_data_config = proto.Field(
-        proto.MESSAGE,
-        number=3,
-        message='InputDataConfig',
-    )
-    training_task_definition = proto.Field(
-        proto.STRING,
-        number=4,
-    )
-    training_task_inputs = proto.Field(
-        proto.MESSAGE,
-        number=5,
-        message=struct_pb2.Value,
-    )
-    training_task_metadata = proto.Field(
-        proto.MESSAGE,
-        number=6,
-        message=struct_pb2.Value,
-    )
-    model_to_upload = proto.Field(
-        proto.MESSAGE,
-        number=7,
-        message=model.Model,
-    )
-    state = proto.Field(
-        proto.ENUM,
-        number=9,
-        enum=pipeline_state.PipelineState,
-    )
-    error = proto.Field(
-        proto.MESSAGE,
-        number=10,
-        message=status_pb2.Status,
-    )
-    create_time = proto.Field(
-        proto.MESSAGE,
-        number=11,
-        message=timestamp_pb2.Timestamp,
-    )
-    start_time = proto.Field(
-        proto.MESSAGE,
-        number=12,
-        message=timestamp_pb2.Timestamp,
-    )
-    end_time = proto.Field(
-        proto.MESSAGE,
-        number=13,
-        message=timestamp_pb2.Timestamp,
-    )
-    update_time = proto.Field(
-        proto.MESSAGE,
-        number=14,
-        message=timestamp_pb2.Timestamp,
-    )
-    labels = proto.MapField(
-        proto.STRING,
-        proto.STRING,
-        number=15,
-    )
-    encryption_spec = proto.Field(
-        proto.MESSAGE,
-        number=18,
-        message=gca_encryption_spec.EncryptionSpec,
-    )
-
-
-class InputDataConfig(proto.Message):
-    r"""Specifies Vertex AI owned input data to be used for training,
-    and possibly evaluating, the Model.
-
-    Attributes:
-        fraction_split (google.cloud.aiplatform_v1.types.FractionSplit):
-            Split based on fractions defining the size of
-            each set.
-        filter_split (google.cloud.aiplatform_v1.types.FilterSplit):
-            Split based on the provided filters for each
-            set.
-        predefined_split (google.cloud.aiplatform_v1.types.PredefinedSplit):
-            Supported only for tabular Datasets.
-            Split based on a predefined key.
-        timestamp_split (google.cloud.aiplatform_v1.types.TimestampSplit):
-            Supported only for tabular Datasets.
-            Split based on the timestamp of the input data
-            pieces.
-        gcs_destination (google.cloud.aiplatform_v1.types.GcsDestination):
-            The Cloud Storage location where the training data is to be
-            written to. In the given directory a new directory is
-            created with name:
-            ``dataset-<dataset-id>-<annotation-type>-<timestamp-of-training-call>``
-            where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601
-            format. All training input data is written into that
-            directory.
-
-            The Vertex AI environment variables representing Cloud
-            Storage data URIs are represented in the Cloud Storage
-            wildcard format to support sharded data. e.g.:
-            "gs://.../training-*.jsonl"
-
-            -  AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for
-               tabular data
-
-            -  AIP_TRAINING_DATA_URI =
-               "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/training-*.${AIP_DATA_FORMAT}"
-
-            -  AIP_VALIDATION_DATA_URI =
-               "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/validation-*.${AIP_DATA_FORMAT}"
-
-            -  AIP_TEST_DATA_URI =
-               "gcs_destination/dataset-<dataset-id>-<annotation-type>-<time>/test-*.${AIP_DATA_FORMAT}".
-        bigquery_destination (google.cloud.aiplatform_v1.types.BigQueryDestination):
-            Only applicable to custom training with tabular Dataset with
-            BigQuery source.
-
-            The BigQuery project location where the training data is to
-            be written to. In the given project a new dataset is created
-            with name
-            ``dataset_<dataset-id>_<annotation-type>_<timestamp-of-training-call>``
-            where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All
-            training input data is written into that dataset. In the
-            dataset three tables are created, ``training``,
-            ``validation`` and ``test``.
-
-            -  AIP_DATA_FORMAT = "bigquery".
-
-            -  AIP_TRAINING_DATA_URI =
-               "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.training"
-
-            -  AIP_VALIDATION_DATA_URI =
-               "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.validation"
-
-            -  AIP_TEST_DATA_URI =
-               "bigquery_destination.dataset_<dataset-id>_<annotation-type>_<time>.test".
-        dataset_id (str):
-            Required. The ID of the Dataset in the same Project and
-            Location which data will be used to train the Model. The
-            Dataset must use schema compatible with Model being trained,
-            and what is compatible should be described in the used
-            TrainingPipeline's [training_task_definition]
-            [google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition].
-            For tabular Datasets, all their data is exported to
-            training, to pick and choose from.
-        annotations_filter (str):
-            Applicable only to Datasets that have DataItems and
-            Annotations.
-
-            A filter on Annotations of the Dataset. Only Annotations
-            that both match this filter and belong to DataItems not
-            ignored by the split method are used in respectively
-            training, validation or test role, depending on the role of
-            the DataItem they are on (for the auto-assigned that role is
-            decided by Vertex AI). A filter with same syntax as the one
-            used in
-            [ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]
-            may be used, but note here it filters across all Annotations
-            of the Dataset, and not just within a single DataItem.
-        annotation_schema_uri (str):
-            Applicable only to custom training with Datasets that have
-            DataItems and Annotations.
-
-            Cloud Storage URI that points to a YAML file describing the
-            annotation schema. The schema is defined as an OpenAPI 3.0.2
-            `Schema
-            Object <https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#schemaObject>`__.
-            The schema files that can be used here are found in
-            gs://google-cloud-aiplatform/schema/dataset/annotation/ ,
-            note that the chosen schema must be consistent with
-            [metadata][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri]
-            of the Dataset specified by
-            [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id].
-
-            Only Annotations that both match this schema and belong to
-            DataItems not ignored by the split method are used in
-            respectively training, validation or test role, depending on
-            the role of the DataItem they are on.
-
-            When used in conjunction with
-            [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter],
-            the Annotations used for training are filtered by both
-            [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter]
-            and
-            [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri].
- """ - - fraction_split = proto.Field( - proto.MESSAGE, - number=2, - oneof='split', - message='FractionSplit', - ) - filter_split = proto.Field( - proto.MESSAGE, - number=3, - oneof='split', - message='FilterSplit', - ) - predefined_split = proto.Field( - proto.MESSAGE, - number=4, - oneof='split', - message='PredefinedSplit', - ) - timestamp_split = proto.Field( - proto.MESSAGE, - number=5, - oneof='split', - message='TimestampSplit', - ) - gcs_destination = proto.Field( - proto.MESSAGE, - number=8, - oneof='destination', - message=io.GcsDestination, - ) - bigquery_destination = proto.Field( - proto.MESSAGE, - number=10, - oneof='destination', - message=io.BigQueryDestination, - ) - dataset_id = proto.Field( - proto.STRING, - number=1, - ) - annotations_filter = proto.Field( - proto.STRING, - number=6, - ) - annotation_schema_uri = proto.Field( - proto.STRING, - number=9, - ) - - -class FractionSplit(proto.Message): - r"""Assigns the input data to training, validation, and test sets as per - the given fractions. Any of ``training_fraction``, - ``validation_fraction`` and ``test_fraction`` may optionally be - provided, they must sum to up to 1. If the provided ones sum to less - than 1, the remainder is assigned to sets as decided by Vertex AI. - If none of the fractions are set, by default roughly 80% of data is - used for training, 10% for validation, and 10% for test. - - Attributes: - training_fraction (float): - The fraction of the input data that is to be - used to train the Model. - validation_fraction (float): - The fraction of the input data that is to be - used to validate the Model. - test_fraction (float): - The fraction of the input data that is to be - used to evaluate the Model. - """ - - training_fraction = proto.Field( - proto.DOUBLE, - number=1, - ) - validation_fraction = proto.Field( - proto.DOUBLE, - number=2, - ) - test_fraction = proto.Field( - proto.DOUBLE, - number=3, - ) - - -class FilterSplit(proto.Message): - r"""Assigns input data to training, validation, and test sets - based on the given filters, data pieces not matched by any - filter are ignored. Currently only supported for Datasets - containing DataItems. - If any of the filters in this message are to match nothing, then - they can be set as '-' (the minus sign). - - Supported only for unstructured Datasets. - - Attributes: - training_filter (str): - Required. A filter on DataItems of the Dataset. DataItems - that match this filter are used to train the Model. A filter - with same syntax as the one used in - [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems] - may be used. If a single DataItem is matched by more than - one of the FilterSplit filters, then it is assigned to the - first set that applies to it in the training, validation, - test order. - validation_filter (str): - Required. A filter on DataItems of the Dataset. DataItems - that match this filter are used to validate the Model. A - filter with same syntax as the one used in - [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems] - may be used. If a single DataItem is matched by more than - one of the FilterSplit filters, then it is assigned to the - first set that applies to it in the training, validation, - test order. - test_filter (str): - Required. A filter on DataItems of the Dataset. DataItems - that match this filter are used to test the Model. 
A filter - with same syntax as the one used in - [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems] - may be used. If a single DataItem is matched by more than - one of the FilterSplit filters, then it is assigned to the - first set that applies to it in the training, validation, - test order. - """ - - training_filter = proto.Field( - proto.STRING, - number=1, - ) - validation_filter = proto.Field( - proto.STRING, - number=2, - ) - test_filter = proto.Field( - proto.STRING, - number=3, - ) - - -class PredefinedSplit(proto.Message): - r"""Assigns input data to training, validation, and test sets - based on the value of a provided key. - - Supported only for tabular Datasets. - - Attributes: - key (str): - Required. The key is a name of one of the Dataset's data - columns. The value of the key (either the label's value or - value in the column) must be one of {``training``, - ``validation``, ``test``}, and it defines to which set the - given piece of data is assigned. If for a piece of data the - key is not present or has an invalid value, that piece is - ignored by the pipeline. - """ - - key = proto.Field( - proto.STRING, - number=1, - ) - - -class TimestampSplit(proto.Message): - r"""Assigns input data to training, validation, and test sets - based on a provided timestamps. The youngest data pieces are - assigned to training set, next to validation set, and the oldest - to the test set. - Supported only for tabular Datasets. - - Attributes: - training_fraction (float): - The fraction of the input data that is to be - used to train the Model. - validation_fraction (float): - The fraction of the input data that is to be - used to validate the Model. - test_fraction (float): - The fraction of the input data that is to be - used to evaluate the Model. - key (str): - Required. The key is a name of one of the Dataset's data - columns. The values of the key (the values in the column) - must be in RFC 3339 ``date-time`` format, where - ``time-offset`` = ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z). If - for a piece of data the key is not present or has an invalid - value, that piece is ignored by the pipeline. - """ - - training_fraction = proto.Field( - proto.DOUBLE, - number=1, - ) - validation_fraction = proto.Field( - proto.DOUBLE, - number=2, - ) - test_fraction = proto.Field( - proto.DOUBLE, - number=3, - ) - key = proto.Field( - proto.STRING, - number=4, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/user_action_reference.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/user_action_reference.py deleted file mode 100644 index ad8683bb9b..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/user_action_reference.py +++ /dev/null @@ -1,64 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
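``InputDataConfig`` above combines a required ``dataset_id`` with two oneofs: ``split`` (fraction, filter, predefined, or timestamp based) and ``destination`` (Cloud Storage or BigQuery). As a minimal sketch of how these pieces fit together, here is a config using an explicit 80/10/10 fraction split and a Cloud Storage destination; the dataset ID and bucket are hypothetical placeholders:

```python
from google.cloud.aiplatform_v1.types import io, training_pipeline

input_data_config = training_pipeline.InputDataConfig(
    dataset_id="1234567890",  # hypothetical Dataset ID
    # `split` is a oneof: set at most one of fraction_split, filter_split,
    # predefined_split, or timestamp_split.
    fraction_split=training_pipeline.FractionSplit(
        training_fraction=0.8,
        validation_fraction=0.1,
        test_fraction=0.1,
    ),
    # `destination` is also a oneof: gcs_destination or bigquery_destination.
    gcs_destination=io.GcsDestination(
        output_uri_prefix="gs://my-bucket/training-output/",  # hypothetical
    ),
)
```

As documented for ``FractionSplit`` above, any fractions supplied must sum to at most 1; if they sum to less, Vertex AI assigns the remainder, and if none are set the service falls back to the rough 80/10/10 default.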
-#
-import proto  # type: ignore
-
-
-__protobuf__ = proto.module(
-    package='google.cloud.aiplatform.v1',
-    manifest={
-        'UserActionReference',
-    },
-)
-
-
-class UserActionReference(proto.Message):
-    r"""References an API call. It contains more information about the
-    long-running operations and Jobs that are triggered by the API
-    call.
-
-    Attributes:
-        operation (str):
-            For API calls that return a long running
-            operation. Resource name of the long running
-            operation. Format:
-            'projects/{project}/locations/{location}/operations/{operation}'
-        data_labeling_job (str):
-            For API calls that start a LabelingJob. Resource name of the
-            LabelingJob. Format:
-            'projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}'
-        method (str):
-            The method name of the API RPC call. For
-            example,
-            "/google.cloud.aiplatform.{apiVersion}.DatasetService.CreateDataset".
-    """
-
-    operation = proto.Field(
-        proto.STRING,
-        number=1,
-        oneof='reference',
-    )
-    data_labeling_job = proto.Field(
-        proto.STRING,
-        number=2,
-        oneof='reference',
-    )
-    method = proto.Field(
-        proto.STRING,
-        number=3,
-    )
-
-
-__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1/mypy.ini b/owl-bot-staging/v1/mypy.ini
deleted file mode 100644
index 4505b48543..0000000000
--- a/owl-bot-staging/v1/mypy.ini
+++ /dev/null
@@ -1,3 +0,0 @@
-[mypy]
-python_version = 3.6
-namespace_packages = True
diff --git a/owl-bot-staging/v1/noxfile.py b/owl-bot-staging/v1/noxfile.py
deleted file mode 100644
index d325e6cfda..0000000000
--- a/owl-bot-staging/v1/noxfile.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os
-import pathlib
-import shutil
-import subprocess
-import sys
-
-
-import nox  # type: ignore
-
-CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
-
-LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt"
-PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8")
-
-
-nox.sessions = [
-    "unit",
-    "cover",
-    "mypy",
-    "check_lower_bounds",
-    # exclude update_lower_bounds from default
-    "docs",
-]
-
-@nox.session(python=['3.6', '3.7', '3.8', '3.9'])
-def unit(session):
-    """Run the unit test suite."""
-
-    session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio')
-    session.install('-e', '.')
-
-    session.run(
-        'py.test',
-        '--quiet',
-        '--cov=google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/',
-        '--cov-config=.coveragerc',
-        '--cov-report=term',
-        '--cov-report=html',
-        os.path.join('tests', 'unit', ''.join(session.posargs))
-    )
-
-
-@nox.session(python='3.7')
-def cover(session):
-    """Run the final coverage report.
-    This outputs the coverage report aggregating coverage from the unit
-    test runs (not system test runs), and then erases coverage data.
- """ - session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=100") - - session.run("coverage", "erase") - - -@nox.session(python=['3.6', '3.7']) -def mypy(session): - """Run the type checker.""" - session.install('mypy', 'types-pkg_resources') - session.install('.') - session.run( - 'mypy', - '--explicit-package-bases', - 'google', - ) - - -@nox.session -def update_lower_bounds(session): - """Update lower bounds in constraints.txt to match setup.py""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'update', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - - -@nox.session -def check_lower_bounds(session): - """Check lower bounds in setup.py are reflected in constraints file""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'check', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - -@nox.session(python='3.6') -def docs(session): - """Build the docs for this library.""" - - session.install("-e", ".") - session.install("sphinx<3.0.0", "alabaster", "recommonmark") - - shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) - session.run( - "sphinx-build", - "-W", # warnings as errors - "-T", # show full traceback on exception - "-N", # no colors - "-b", - "html", - "-d", - os.path.join("docs", "_build", "doctrees", ""), - os.path.join("docs", ""), - os.path.join("docs", "_build", "html", ""), - ) diff --git a/owl-bot-staging/v1/schema/predict/instance/.coveragerc b/owl-bot-staging/v1/schema/predict/instance/.coveragerc deleted file mode 100644 index 8bd7ec75f6..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/.coveragerc +++ /dev/null @@ -1,17 +0,0 @@ -[run] -branch = True - -[report] -show_missing = True -omit = - google/cloud/aiplatform/v1/schema/predict/instance/__init__.py -exclude_lines = - # Re-enable the standard pragma - pragma: NO COVER - # Ignore debug-only repr - def __repr__ - # Ignore pkg_resources exceptions. - # This is added at the module level as a safeguard for if someone - # generates the code and tries to run it without pip installing. This - # makes it virtually impossible to test properly. - except pkg_resources.DistributionNotFound diff --git a/owl-bot-staging/v1/schema/predict/instance/MANIFEST.in b/owl-bot-staging/v1/schema/predict/instance/MANIFEST.in deleted file mode 100644 index 05ea9a66cb..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -recursive-include google/cloud/aiplatform/v1/schema/predict/instance *.py -recursive-include google/cloud/aiplatform/v1/schema/predict/instance_v1 *.py diff --git a/owl-bot-staging/v1/schema/predict/instance/README.rst b/owl-bot-staging/v1/schema/predict/instance/README.rst deleted file mode 100644 index 5e0175e9e4..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/README.rst +++ /dev/null @@ -1,49 +0,0 @@ -Python Client for Google Cloud Aiplatform V1 Schema Predict Instance API -================================================= - -Quick Start ------------ - -In order to use this library, you first need to go through the following steps: - -1. `Select or create a Cloud Platform project.`_ -2. `Enable billing for your project.`_ -3. Enable the Google Cloud Aiplatform V1 Schema Predict Instance API. -4. `Setup Authentication.`_ - -.. 
_Select or create a Cloud Platform project.: https://console.cloud.google.com/project -.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project -.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html - -Installation -~~~~~~~~~~~~ - -Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to -create isolated Python environments. The basic problem it addresses is one of -dependencies and versions, and indirectly permissions. - -With `virtualenv`_, it's possible to install this library without needing system -install permissions, and without clashing with the installed system -dependencies. - -.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ - - -Mac/Linux -^^^^^^^^^ - -.. code-block:: console - - python3 -m venv - source /bin/activate - /bin/pip install /path/to/library - - -Windows -^^^^^^^ - -.. code-block:: console - - python3 -m venv - \Scripts\activate - \Scripts\pip.exe install \path\to\library diff --git a/owl-bot-staging/v1/schema/predict/instance/docs/conf.py b/owl-bot-staging/v1/schema/predict/instance/docs/conf.py deleted file mode 100644 index a12fb006b9..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/docs/conf.py +++ /dev/null @@ -1,376 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# -# google-cloud-aiplatform-v1-schema-predict-instance documentation build configuration file -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys -import os -import shlex - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath("..")) - -__version__ = "0.1.0" - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "1.6.3" - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.autosummary", - "sphinx.ext.intersphinx", - "sphinx.ext.coverage", - "sphinx.ext.napoleon", - "sphinx.ext.todo", - "sphinx.ext.viewcode", -] - -# autodoc/autosummary flags -autoclass_content = "both" -autodoc_default_flags = ["members"] -autosummary_generate = True - - -# Add any paths that contain templates here, relative to this directory. 
-templates_path = ["_templates"]
-
-# Allow markdown includes (so releases.md can include CHANGELOG.md)
-# http://www.sphinx-doc.org/en/master/markdown.html
-source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
-
-# The suffix(es) of source filenames.
-# You can specify multiple suffix as a list of string:
-source_suffix = [".rst", ".md"]
-
-# The encoding of source files.
-# source_encoding = 'utf-8-sig'
-
-# The master toctree document.
-master_doc = "index"
-
-# General information about the project.
-project = u"google-cloud-aiplatform-v1-schema-predict-instance"
-copyright = u"2020, Google, LLC"
-author = u"Google APIs"  # TODO: autogenerate this bit
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The full version, including alpha/beta/rc tags.
-release = __version__
-# The short X.Y version.
-version = ".".join(release.split(".")[0:2])
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#
-# This is also used if you do content translation via gettext catalogs.
-# Usually you set "language" from the command line for these cases.
-language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-# today = ''
-# Else, today_fmt is used as the format for a strftime call.
-# today_fmt = '%B %d, %Y'
-
-# List of patterns, relative to source directory, that match files and
-# directories to ignore when looking for source files.
-exclude_patterns = ["_build"]
-
-# The reST default role (used for this markup: `text`) to use for all
-# documents.
-# default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-# add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-# add_module_names = True
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-# show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = "sphinx"
-
-# A list of ignored prefixes for module index sorting.
-# modindex_common_prefix = []
-
-# If true, keep warnings as "system message" paragraphs in the built documents.
-# keep_warnings = False
-
-# If true, `todo` and `todoList` produce output, else they produce nothing.
-todo_include_todos = True
-
-
-# -- Options for HTML output ----------------------------------------------
-
-# The theme to use for HTML and HTML Help pages. See the documentation for
-# a list of builtin themes.
-html_theme = "alabaster"
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further. For a list of options available for each theme, see the
-# documentation.
-html_theme_options = {
-    "description": "Google Cloud Aiplatform V1 Schema Predict Client Libraries for Python",
-    "github_user": "googleapis",
-    "github_repo": "google-cloud-python",
-    "github_banner": True,
-    "font_family": "'Roboto', Georgia, sans",
-    "head_font_family": "'Roboto', Georgia, serif",
-    "code_font_family": "'Roboto Mono', 'Consolas', monospace",
-}
-
-# Add any paths that contain custom themes here, relative to this directory.
-# html_theme_path = []
-
-# The name for this set of Sphinx documents. If None, it defaults to
-# "<project> v<release> documentation".
-# html_title = None
-
-# A shorter title for the navigation bar. Default is the same as html_title.
-# html_short_title = None
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-# html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-# html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ["_static"]
-
-# Add any extra paths that contain custom files (such as robots.txt or
-# .htaccess) here, relative to this directory. These files are copied
-# directly to the root of the documentation.
-# html_extra_path = []
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-# html_last_updated_fmt = '%b %d, %Y'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-# html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-# html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-# html_additional_pages = {}
-
-# If false, no module index is generated.
-# html_domain_indices = True
-
-# If false, no index is generated.
-# html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-# html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-# html_show_sourcelink = True
-
-# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-# html_show_sphinx = True
-
-# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-# html_show_copyright = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it. The value of this option must be the
-# base URL from which the finished HTML is served.
-# html_use_opensearch = ''
-
-# This is the file name suffix for HTML files (e.g. ".xhtml").
-# html_file_suffix = None
-
-# Language to be used for generating the HTML full-text search index.
-# Sphinx supports the following languages:
-#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
-#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
-# html_search_language = 'en'
-
-# A dictionary with options for the search language support, empty by default.
-# Now only 'ja' uses this config value
-# html_search_options = {'type': 'default'}
-
-# The name of a javascript file (relative to the configuration directory) that
-# implements a search results scorer. If empty, the default will be used.
-# html_search_scorer = 'scorer.js'
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = "google-cloud-aiplatform-v1-schema-predict-instance-doc"
-
-# -- Options for warnings ------------------------------------------------------
-
-
-suppress_warnings = [
-    # Temporarily suppress this to avoid "more than one target found for
-    # cross-reference" warning, which are intractable for us to avoid while in
-    # a mono-repo.
- # See https://github.com/sphinx-doc/sphinx/blob - # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 - "ref.python" -] - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - # 'preamble': '', - # Latex figure (float) alignment - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ( - master_doc, - "google-cloud-aiplatform-v1-schema-predict-instance.tex", - u"google-cloud-aiplatform-v1-schema-predict-instance Documentation", - author, - "manual", - ) -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ( - master_doc, - "google-cloud-aiplatform-v1-schema-predict-instance", - u"Google Cloud Aiplatform V1 Schema Predict Instance Documentation", - [author], - 1, - ) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - master_doc, - "google-cloud-aiplatform-v1-schema-predict-instance", - u"google-cloud-aiplatform-v1-schema-predict-instance Documentation", - author, - "google-cloud-aiplatform-v1-schema-predict-instance", - "GAPIC library for Google Cloud Aiplatform V1 Schema Predict Instance API", - "APIs", - ) -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - - -# Example configuration for intersphinx: refer to the Python standard library. 
-intersphinx_mapping = { - "python": ("http://python.readthedocs.org/en/latest/", None), - "gax": ("https://gax-python.readthedocs.org/en/latest/", None), - "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), - "grpc": ("https://grpc.io/grpc/python/", None), - "requests": ("http://requests.kennethreitz.org/en/stable/", None), - "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), - "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), -} - - -# Napoleon settings -napoleon_google_docstring = True -napoleon_numpy_docstring = True -napoleon_include_private_with_doc = False -napoleon_include_special_with_doc = True -napoleon_use_admonition_for_examples = False -napoleon_use_admonition_for_notes = False -napoleon_use_admonition_for_references = False -napoleon_use_ivar = False -napoleon_use_param = True -napoleon_use_rtype = True diff --git a/owl-bot-staging/v1/schema/predict/instance/docs/index.rst b/owl-bot-staging/v1/schema/predict/instance/docs/index.rst deleted file mode 100644 index e6c2d156ca..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/docs/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -API Reference -------------- -.. toctree:: - :maxdepth: 2 - - instance_v1/services - instance_v1/types diff --git a/owl-bot-staging/v1/schema/predict/instance/docs/instance_v1/services.rst b/owl-bot-staging/v1/schema/predict/instance/docs/instance_v1/services.rst deleted file mode 100644 index 50c011c69a..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/docs/instance_v1/services.rst +++ /dev/null @@ -1,4 +0,0 @@ -Services for Google Cloud Aiplatform V1 Schema Predict Instance v1 API -====================================================================== -.. toctree:: - :maxdepth: 2 diff --git a/owl-bot-staging/v1/schema/predict/instance/docs/instance_v1/types.rst b/owl-bot-staging/v1/schema/predict/instance/docs/instance_v1/types.rst deleted file mode 100644 index 564ab013ee..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/docs/instance_v1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Cloud Aiplatform V1 Schema Predict Instance v1 API -=================================================================== - -.. automodule:: google.cloud.aiplatform.v1.schema.predict.instance_v1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py deleted file mode 100644 index 41d6704c1f..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
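The conf.py above enables ``sphinx.ext.napoleon``, which is what lets autodoc render the Google-style ``Attributes:`` sections used throughout the generated types in this patch. A minimal sketch of the docstring shape napoleon parses (illustrative only; the class and fields are invented, not part of this patch):

.. code-block:: python

    class ExamplePredictionInstance:
        """Prediction input sketch, in the docstring style napoleon expects.

        Attributes:
            content (str):
                The payload to run the prediction on.
            mime_type (str):
                The MIME type of ``content``.
        """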
-# - - -from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.image_classification import ImageClassificationPredictionInstance -from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.image_object_detection import ImageObjectDetectionPredictionInstance -from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.image_segmentation import ImageSegmentationPredictionInstance -from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.text_classification import TextClassificationPredictionInstance -from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.text_extraction import TextExtractionPredictionInstance -from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.text_sentiment import TextSentimentPredictionInstance -from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.video_action_recognition import VideoActionRecognitionPredictionInstance -from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.video_classification import VideoClassificationPredictionInstance -from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.video_object_tracking import VideoObjectTrackingPredictionInstance - -__all__ = ('ImageClassificationPredictionInstance', - 'ImageObjectDetectionPredictionInstance', - 'ImageSegmentationPredictionInstance', - 'TextClassificationPredictionInstance', - 'TextExtractionPredictionInstance', - 'TextSentimentPredictionInstance', - 'VideoActionRecognitionPredictionInstance', - 'VideoClassificationPredictionInstance', - 'VideoObjectTrackingPredictionInstance', -) diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance/py.typed b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance/py.typed deleted file mode 100644 index f70e7f605a..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1-schema-predict-instance package uses inline types. diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py deleted file mode 100644 index 41ab5407a7..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
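The versionless ``instance`` package above is a thin alias that re-exports the ``instance_v1`` types, so either import path can be used. A hedged sketch of constructing one of the re-exported messages (the URI below is a made-up example value):

.. code-block:: python

    from google.cloud.aiplatform.v1.schema.predict.instance import (
        ImageClassificationPredictionInstance,
    )

    # proto-plus messages accept their fields as keyword arguments.
    instance = ImageClassificationPredictionInstance(
        content="gs://my-bucket/cat.jpg",  # hypothetical Cloud Storage URI
        mime_type="image/jpeg",
    )
    print(instance.mime_type)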
-# - - -from .types.image_classification import ImageClassificationPredictionInstance -from .types.image_object_detection import ImageObjectDetectionPredictionInstance -from .types.image_segmentation import ImageSegmentationPredictionInstance -from .types.text_classification import TextClassificationPredictionInstance -from .types.text_extraction import TextExtractionPredictionInstance -from .types.text_sentiment import TextSentimentPredictionInstance -from .types.video_action_recognition import VideoActionRecognitionPredictionInstance -from .types.video_classification import VideoClassificationPredictionInstance -from .types.video_object_tracking import VideoObjectTrackingPredictionInstance - -__all__ = ( -'ImageClassificationPredictionInstance', -'ImageObjectDetectionPredictionInstance', -'ImageSegmentationPredictionInstance', -'TextClassificationPredictionInstance', -'TextExtractionPredictionInstance', -'TextSentimentPredictionInstance', -'VideoActionRecognitionPredictionInstance', -'VideoClassificationPredictionInstance', -'VideoObjectTrackingPredictionInstance', -) diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_metadata.json b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_metadata.json deleted file mode 100644 index 0ae909d6ea..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_metadata.json +++ /dev/null @@ -1,7 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.aiplatform.v1.schema.predict.instance_v1", - "protoPackage": "google.cloud.aiplatform.v1.schema.predict.instance", - "schema": "1.0" -} diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/py.typed b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/py.typed deleted file mode 100644 index f70e7f605a..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1-schema-predict-instance package uses inline types. diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/services/__init__.py b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/services/__init__.py deleted file mode 100644 index 4de65971c2..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
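The ``gapic_metadata.json`` above is machine-readable metadata mapping the proto package to the generated library package (its services map is empty for this schema-only package). A sketch of how tooling might consume it, assuming the file is on disk at the given relative path:

.. code-block:: python

    import json

    with open("gapic_metadata.json") as f:
        metadata = json.load(f)

    # The package mapping is what generators and postprocessors care about here.
    print(metadata["protoPackage"])    # google.cloud.aiplatform.v1.schema.predict.instance
    print(metadata["libraryPackage"])  # google.cloud.aiplatform.v1.schema.predict.instance_v1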
-# diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py deleted file mode 100644 index 80a5332604..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .image_classification import ( - ImageClassificationPredictionInstance, -) -from .image_object_detection import ( - ImageObjectDetectionPredictionInstance, -) -from .image_segmentation import ( - ImageSegmentationPredictionInstance, -) -from .text_classification import ( - TextClassificationPredictionInstance, -) -from .text_extraction import ( - TextExtractionPredictionInstance, -) -from .text_sentiment import ( - TextSentimentPredictionInstance, -) -from .video_action_recognition import ( - VideoActionRecognitionPredictionInstance, -) -from .video_classification import ( - VideoClassificationPredictionInstance, -) -from .video_object_tracking import ( - VideoObjectTrackingPredictionInstance, -) - -__all__ = ( - 'ImageClassificationPredictionInstance', - 'ImageObjectDetectionPredictionInstance', - 'ImageSegmentationPredictionInstance', - 'TextClassificationPredictionInstance', - 'TextExtractionPredictionInstance', - 'TextSentimentPredictionInstance', - 'VideoActionRecognitionPredictionInstance', - 'VideoClassificationPredictionInstance', - 'VideoObjectTrackingPredictionInstance', -) diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py deleted file mode 100644 index 02e95ba9c7..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py +++ /dev/null @@ -1,55 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
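Because the ``types/__init__.py`` above re-exports every message from its submodules, the aggregate module and the per-module import resolve to the same class; a quick sketch:

.. code-block:: python

    from google.cloud.aiplatform.v1.schema.predict.instance_v1 import types
    from google.cloud.aiplatform.v1.schema.predict.instance_v1.types import (
        image_classification,
    )

    # Both paths name the identical proto-plus message class.
    assert (
        types.ImageClassificationPredictionInstance
        is image_classification.ImageClassificationPredictionInstance
    )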
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.instance', - manifest={ - 'ImageClassificationPredictionInstance', - }, -) - - -class ImageClassificationPredictionInstance(proto.Message): - r"""Prediction input format for Image Classification. - Attributes: - content (str): - The image bytes or Cloud Storage URI to make - the prediction on. - mime_type (str): - The MIME type of the content of the image. - Only the images in below listed MIME types are - supported. - image/jpeg - - image/gif - - image/png - - image/webp - - image/bmp - - image/tiff - - image/vnd.microsoft.icon - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py deleted file mode 100644 index 0b9aadc101..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py +++ /dev/null @@ -1,55 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.instance', - manifest={ - 'ImageObjectDetectionPredictionInstance', - }, -) - - -class ImageObjectDetectionPredictionInstance(proto.Message): - r"""Prediction input format for Image Object Detection. - Attributes: - content (str): - The image bytes or Cloud Storage URI to make - the prediction on. - mime_type (str): - The MIME type of the content of the image. - Only the images in below listed MIME types are - supported. - image/jpeg - - image/gif - - image/png - - image/webp - - image/bmp - - image/tiff - - image/vnd.microsoft.icon - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py deleted file mode 100644 index f967807e6c..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py +++ /dev/null @@ -1,49 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.instance', - manifest={ - 'ImageSegmentationPredictionInstance', - }, -) - - -class ImageSegmentationPredictionInstance(proto.Message): - r"""Prediction input format for Image Segmentation. - Attributes: - content (str): - The image bytes to make the predictions on. - mime_type (str): - The MIME type of the content of the image. - Only the images in below listed MIME types are - supported. - image/jpeg - - image/png - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py deleted file mode 100644 index 4eec13516c..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py +++ /dev/null @@ -1,48 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.instance', - manifest={ - 'TextClassificationPredictionInstance', - }, -) - - -class TextClassificationPredictionInstance(proto.Message): - r"""Prediction input format for Text Classification. - Attributes: - content (str): - The text snippet to make the predictions on. - mime_type (str): - The MIME type of the text snippet. The - supported MIME types are listed below. 
- - text/plain - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py deleted file mode 100644 index ffecb3de51..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.instance', - manifest={ - 'TextExtractionPredictionInstance', - }, -) - - -class TextExtractionPredictionInstance(proto.Message): - r"""Prediction input format for Text Extraction. - Attributes: - content (str): - The text snippet to make the predictions on. - mime_type (str): - The MIME type of the text snippet. The - supported MIME types are listed below. - - text/plain - key (str): - This field is only used for batch prediction. - If a key is provided, the batch prediction - result will by mapped to this key. If omitted, - then the batch prediction result will contain - the entire input instance. Vertex AI will not - check if keys in the request are duplicates, so - it is up to the caller to ensure the keys are - unique. - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - key = proto.Field( - proto.STRING, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py deleted file mode 100644 index 5bdfe5d5ba..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py +++ /dev/null @@ -1,48 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
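The ``key`` field documented above is only consulted during batch prediction, where each output row is mapped back to its key. A hedged sketch of assembling keyed JSONL input for a batch job (the file name and snippets are invented):

.. code-block:: python

    import json

    from google.cloud.aiplatform.v1.schema.predict.instance_v1.types import (
        text_extraction,
    )

    snippets = ["First snippet.", "Second snippet."]

    with open("batch_input.jsonl", "w") as out:
        for i, text in enumerate(snippets):
            instance = text_extraction.TextExtractionPredictionInstance(
                content=text,
                mime_type="text/plain",
                key=f"doc-{i}",  # batch results are mapped back to this key
            )
            # proto-plus messages convert to plain dicts for JSON serialization.
            row = text_extraction.TextExtractionPredictionInstance.to_dict(instance)
            out.write(json.dumps(row) + "\n")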
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.instance', - manifest={ - 'TextSentimentPredictionInstance', - }, -) - - -class TextSentimentPredictionInstance(proto.Message): - r"""Prediction input format for Text Sentiment. - Attributes: - content (str): - The text snippet to make the predictions on. - mime_type (str): - The MIME type of the text snippet. The - supported MIME types are listed below. - - text/plain - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py deleted file mode 100644 index d53782868f..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py +++ /dev/null @@ -1,72 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.instance', - manifest={ - 'VideoActionRecognitionPredictionInstance', - }, -) - - -class VideoActionRecognitionPredictionInstance(proto.Message): - r"""Prediction input format for Video Action Recognition. - Attributes: - content (str): - The Google Cloud Storage location of the - video on which to perform the prediction. - mime_type (str): - The MIME type of the content of the video. - Only the following are supported: video/mp4 - video/avi video/quicktime - time_segment_start (str): - The beginning, inclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision. - time_segment_end (str): - The end, exclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision, and "inf" or "Infinity" - is allowed, which means the end of the video. 
- """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.STRING, - number=3, - ) - time_segment_end = proto.Field( - proto.STRING, - number=4, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py deleted file mode 100644 index b51ab464a4..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py +++ /dev/null @@ -1,72 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.instance', - manifest={ - 'VideoClassificationPredictionInstance', - }, -) - - -class VideoClassificationPredictionInstance(proto.Message): - r"""Prediction input format for Video Classification. - Attributes: - content (str): - The Google Cloud Storage location of the - video on which to perform the prediction. - mime_type (str): - The MIME type of the content of the video. - Only the following are supported: video/mp4 - video/avi video/quicktime - time_segment_start (str): - The beginning, inclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision. - time_segment_end (str): - The end, exclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision, and "inf" or "Infinity" - is allowed, which means the end of the video. 
- """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.STRING, - number=3, - ) - time_segment_end = proto.Field( - proto.STRING, - number=4, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py b/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py deleted file mode 100644 index 8b96f75fd2..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py +++ /dev/null @@ -1,72 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.instance', - manifest={ - 'VideoObjectTrackingPredictionInstance', - }, -) - - -class VideoObjectTrackingPredictionInstance(proto.Message): - r"""Prediction input format for Video Object Tracking. - Attributes: - content (str): - The Google Cloud Storage location of the - video on which to perform the prediction. - mime_type (str): - The MIME type of the content of the video. - Only the following are supported: video/mp4 - video/avi video/quicktime - time_segment_start (str): - The beginning, inclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision. - time_segment_end (str): - The end, exclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision, and "inf" or "Infinity" - is allowed, which means the end of the video. 
- """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.STRING, - number=3, - ) - time_segment_end = proto.Field( - proto.STRING, - number=4, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/instance/mypy.ini b/owl-bot-staging/v1/schema/predict/instance/mypy.ini deleted file mode 100644 index 4505b48543..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/mypy.ini +++ /dev/null @@ -1,3 +0,0 @@ -[mypy] -python_version = 3.6 -namespace_packages = True diff --git a/owl-bot-staging/v1/schema/predict/instance/noxfile.py b/owl-bot-staging/v1/schema/predict/instance/noxfile.py deleted file mode 100644 index c8d234927c..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/noxfile.py +++ /dev/null @@ -1,132 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -import pathlib -import shutil -import subprocess -import sys - - -import nox # type: ignore - -CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() - -LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" -PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") - - -nox.sessions = [ - "unit", - "cover", - "mypy", - "check_lower_bounds" - # exclude update_lower_bounds from default - "docs", -] - -@nox.session(python=['3.6', '3.7', '3.8', '3.9']) -def unit(session): - """Run the unit test suite.""" - - session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio') - session.install('-e', '.') - - session.run( - 'py.test', - '--quiet', - '--cov=google/cloud/aiplatform/v1/schema/predict/instance_v1/', - '--cov-config=.coveragerc', - '--cov-report=term', - '--cov-report=html', - os.path.join('tests', 'unit', ''.join(session.posargs)) - ) - - -@nox.session(python='3.7') -def cover(session): - """Run the final coverage report. - This outputs the coverage report aggregating coverage from the unit - test runs (not system test runs), and then erases coverage data. 
- """ - session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=100") - - session.run("coverage", "erase") - - -@nox.session(python=['3.6', '3.7']) -def mypy(session): - """Run the type checker.""" - session.install('mypy', 'types-pkg_resources') - session.install('.') - session.run( - 'mypy', - '--explicit-package-bases', - 'google', - ) - - -@nox.session -def update_lower_bounds(session): - """Update lower bounds in constraints.txt to match setup.py""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'update', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - - -@nox.session -def check_lower_bounds(session): - """Check lower bounds in setup.py are reflected in constraints file""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'check', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - -@nox.session(python='3.6') -def docs(session): - """Build the docs for this library.""" - - session.install("-e", ".") - session.install("sphinx<3.0.0", "alabaster", "recommonmark") - - shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) - session.run( - "sphinx-build", - "-W", # warnings as errors - "-T", # show full traceback on exception - "-N", # no colors - "-b", - "html", - "-d", - os.path.join("docs", "_build", "doctrees", ""), - os.path.join("docs", ""), - os.path.join("docs", "_build", "html", ""), - ) diff --git a/owl-bot-staging/v1/schema/predict/instance/scripts/fixup_instance_v1_keywords.py b/owl-bot-staging/v1/schema/predict/instance/scripts/fixup_instance_v1_keywords.py deleted file mode 100644 index d9e8bd0b17..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/scripts/fixup_instance_v1_keywords.py +++ /dev/null @@ -1,175 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class instanceCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. 
-            return updated
-
-        # If the existing code is valid, keyword args come after positional args.
-        # Therefore, all positional args must map to the first parameters.
-        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
-        if any(k.keyword.value == "request" for k in kwargs):
-            # We've already fixed this file, don't fix it again.
-            return updated
-
-        kwargs, ctrl_kwargs = partition(
-            lambda a: a.keyword.value not in self.CTRL_PARAMS,
-            kwargs
-        )
-
-        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
-        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
-                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
-
-        request_arg = cst.Arg(
-            value=cst.Dict([
-                cst.DictElement(
-                    cst.SimpleString("'{}'".format(name)),
-                    cst.Element(value=arg.value)
-                )
-                # Note: the args + kwargs looks silly, but keep in mind that
-                # the control parameters had to be stripped out, and that
-                # those could have been passed positionally or by keyword.
-                for name, arg in zip(kword_params, args + kwargs)]),
-            keyword=cst.Name("request")
-        )
-
-        return updated.with_changes(
-            args=[request_arg] + ctrl_kwargs
-        )
-
-
-def fix_files(
-    in_dir: pathlib.Path,
-    out_dir: pathlib.Path,
-    *,
-    transformer=instanceCallTransformer(),
-):
-    """Duplicate the input dir to the output dir, fixing file method calls.
-
-    Preconditions:
-    * in_dir is a real directory
-    * out_dir is a real, empty directory
-    """
-    pyfile_gen = (
-        pathlib.Path(os.path.join(root, f))
-        for root, _, files in os.walk(in_dir)
-        for f in files if os.path.splitext(f)[1] == ".py"
-    )
-
-    for fpath in pyfile_gen:
-        with open(fpath, 'r') as f:
-            src = f.read()
-
-        # Parse the code and insert method call fixes.
-        tree = cst.parse_module(src)
-        updated = tree.visit(transformer)
-
-        # Create the path and directory structure for the new file.
-        updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
-        updated_path.parent.mkdir(parents=True, exist_ok=True)
-
-        # Generate the updated source file at the corresponding path.
-        with open(updated_path, 'w') as f:
-            f.write(updated.code)
-
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser(
-        description="""Fix up source that uses the instance client library.
-
-The existing sources are NOT overwritten but are copied to output_dir with changes made.
-
-Note: This tool operates at a best-effort level at converting positional
-      parameters in client method calls to keyword based parameters.
-      Cases where it WILL FAIL include
-      A) * or ** expansion in a method call.
-      B) Calls via function or method alias (includes free function calls)
-      C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
-
-      These all constitute false negatives. The tool will also detect false
-      positives when an API method shares a name with another method.
-""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/schema/predict/instance/setup.py b/owl-bot-staging/v1/schema/predict/instance/setup.py deleted file mode 100644 index 9449a0a922..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/setup.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import io -import os -import setuptools # type: ignore - -version = '0.1.0' - -package_root = os.path.abspath(os.path.dirname(__file__)) - -readme_filename = os.path.join(package_root, 'README.rst') -with io.open(readme_filename, encoding='utf-8') as readme_file: - readme = readme_file.read() - -setuptools.setup( - name='google-cloud-aiplatform-v1-schema-predict-instance', - version=version, - long_description=readme, - packages=setuptools.PEP420PackageFinder.find(), - namespace_packages=('google', 'google.cloud', 'google.cloud.aiplatform', 'google.cloud.aiplatform.v1', 'google.cloud.aiplatform.v1.schema', 'google.cloud.aiplatform.v1.schema.predict'), - platforms='Posix; MacOS X; Windows', - include_package_data=True, - install_requires=( - 'google-api-core[grpc] >= 1.27.0, < 2.0.0dev', - 'libcst >= 0.2.5', - 'proto-plus >= 1.15.0', - 'packaging >= 14.3', ), - python_requires='>=3.6', - classifiers=[ - 'Development Status :: 3 - Alpha', - 'Intended Audience :: Developers', - 'Operating System :: OS Independent', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Topic :: Internet', - 'Topic :: Software Development :: Libraries :: Python Modules', - ], - zip_safe=False, -) diff --git a/owl-bot-staging/v1/schema/predict/instance/tests/__init__.py b/owl-bot-staging/v1/schema/predict/instance/tests/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/tests/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/schema/predict/instance/tests/unit/__init__.py b/owl-bot-staging/v1/schema/predict/instance/tests/unit/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/tests/unit/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/schema/predict/instance/tests/unit/gapic/__init__.py b/owl-bot-staging/v1/schema/predict/instance/tests/unit/gapic/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/tests/unit/gapic/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/schema/predict/instance/tests/unit/gapic/instance_v1/__init__.py b/owl-bot-staging/v1/schema/predict/instance/tests/unit/gapic/instance_v1/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1/schema/predict/instance/tests/unit/gapic/instance_v1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
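The ``fixup_instance_v1_keywords.py`` script above is a command-line tool; a typical invocation might look like this (the directory names are hypothetical):

.. code-block:: console

    python scripts/fixup_instance_v1_keywords.py \
        --input-directory .samples/ \
        --output-directory samples/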
-#
diff --git a/owl-bot-staging/v1/schema/predict/params/.coveragerc b/owl-bot-staging/v1/schema/predict/params/.coveragerc
deleted file mode 100644
index 06aa51376d..0000000000
--- a/owl-bot-staging/v1/schema/predict/params/.coveragerc
+++ /dev/null
@@ -1,17 +0,0 @@
-[run]
-branch = True
-
-[report]
-show_missing = True
-omit =
-  google/cloud/aiplatform/v1/schema/predict/params/__init__.py
-exclude_lines =
-  # Re-enable the standard pragma
-  pragma: NO COVER
-  # Ignore debug-only repr
-  def __repr__
-  # Ignore pkg_resources exceptions.
-  # This is added at the module level as a safeguard for if someone
-  # generates the code and tries to run it without pip installing. This
-  # makes it virtually impossible to test properly.
-  except pkg_resources.DistributionNotFound
diff --git a/owl-bot-staging/v1/schema/predict/params/MANIFEST.in b/owl-bot-staging/v1/schema/predict/params/MANIFEST.in
deleted file mode 100644
index b990fba651..0000000000
--- a/owl-bot-staging/v1/schema/predict/params/MANIFEST.in
+++ /dev/null
@@ -1,2 +0,0 @@
-recursive-include google/cloud/aiplatform/v1/schema/predict/params *.py
-recursive-include google/cloud/aiplatform/v1/schema/predict/params_v1 *.py
diff --git a/owl-bot-staging/v1/schema/predict/params/README.rst b/owl-bot-staging/v1/schema/predict/params/README.rst
deleted file mode 100644
index 60be3b2705..0000000000
--- a/owl-bot-staging/v1/schema/predict/params/README.rst
+++ /dev/null
@@ -1,49 +0,0 @@
-Python Client for Google Cloud Aiplatform V1 Schema Predict Params API
-=======================================================================
-
-Quick Start
------------
-
-In order to use this library, you first need to go through the following steps:
-
-1. `Select or create a Cloud Platform project.`_
-2. `Enable billing for your project.`_
-3. Enable the Google Cloud Aiplatform V1 Schema Predict Params API.
-4. `Setup Authentication.`_
-
-.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project
-.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project
-.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html
-
-Installation
-~~~~~~~~~~~~
-
-Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to
-create isolated Python environments. The basic problem it addresses is one of
-dependencies and versions, and indirectly permissions.
-
-With `virtualenv`_, it's possible to install this library without needing system
-install permissions, and without clashing with the installed system
-dependencies.
-
-.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/
-
-
-Mac/Linux
-^^^^^^^^^
-
-.. code-block:: console
-
-    python3 -m venv <your-env>
-    source <your-env>/bin/activate
-    <your-env>/bin/pip install /path/to/library
-
-
-Windows
-^^^^^^^
-
-.. code-block:: console
-
-    python3 -m venv <your-env>
-    <your-env>\Scripts\activate
-    <your-env>\Scripts\pip.exe install \path\to\library
diff --git a/owl-bot-staging/v1/schema/predict/params/docs/conf.py b/owl-bot-staging/v1/schema/predict/params/docs/conf.py
deleted file mode 100644
index 6917071403..0000000000
--- a/owl-bot-staging/v1/schema/predict/params/docs/conf.py
+++ /dev/null
@@ -1,376 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# -# google-cloud-aiplatform-v1-schema-predict-params documentation build configuration file -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys -import os -import shlex - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath("..")) - -__version__ = "0.1.0" - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "1.6.3" - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.autosummary", - "sphinx.ext.intersphinx", - "sphinx.ext.coverage", - "sphinx.ext.napoleon", - "sphinx.ext.todo", - "sphinx.ext.viewcode", -] - -# autodoc/autosummary flags -autoclass_content = "both" -autodoc_default_flags = ["members"] -autosummary_generate = True - - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# Allow markdown includes (so releases.md can include CHANGELOG.md) -# http://www.sphinx-doc.org/en/master/markdown.html -source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -source_suffix = [".rst", ".md"] - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = "index" - -# General information about the project. -project = u"google-cloud-aiplatform-v1-schema-predict-params" -copyright = u"2020, Google, LLC" -author = u"Google APIs" # TODO: autogenerate this bit - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The full version, including alpha/beta/rc tags. -release = __version__ -# The short X.Y version. -version = ".".join(release.split(".")[0:2]) - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files.
-exclude_patterns = ["_build"] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = True - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = "alabaster" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = { - "description": "Google Cloud Aiplatform V1 Schema Predict Client Libraries for Python", - "github_user": "googleapis", - "github_repo": "google-cloud-python", - "github_banner": True, - "font_family": "'Roboto', Georgia, sans", - "head_font_family": "'Roboto', Georgia, serif", - "code_font_family": "'Roboto Mono', 'Consolas', monospace", -} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. 
-# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. -# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -# html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# Now only 'ja' uses this config value -# html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -# html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = "google-cloud-aiplatform-v1-schema-predict-params-doc" - -# -- Options for warnings ------------------------------------------------------ - - -suppress_warnings = [ - # Temporarily suppress this to avoid "more than one target found for - # cross-reference" warnings, which are intractable for us to avoid while in - # a mono-repo. - # See https://github.com/sphinx-doc/sphinx/blob - # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 - "ref.python" -] - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - # 'preamble': '', - # Latex figure (float) alignment - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ( - master_doc, - "google-cloud-aiplatform-v1-schema-predict-params.tex", - u"google-cloud-aiplatform-v1-schema-predict-params Documentation", - author, - "manual", - ) -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section).
-man_pages = [ - ( - master_doc, - "google-cloud-aiplatform-v1-schema-predict-params", - u"Google Cloud Aiplatform V1 Schema Predict Params Documentation", - [author], - 1, - ) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - master_doc, - "google-cloud-aiplatform-v1-schema-predict-params", - u"google-cloud-aiplatform-v1-schema-predict-params Documentation", - author, - "google-cloud-aiplatform-v1-schema-predict-params", - "GAPIC library for Google Cloud Aiplatform V1 Schema Predict Params API", - "APIs", - ) -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = { - "python": ("http://python.readthedocs.org/en/latest/", None), - "gax": ("https://gax-python.readthedocs.org/en/latest/", None), - "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), - "grpc": ("https://grpc.io/grpc/python/", None), - "requests": ("http://requests.kennethreitz.org/en/stable/", None), - "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), - "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), -} - - -# Napoleon settings -napoleon_google_docstring = True -napoleon_numpy_docstring = True -napoleon_include_private_with_doc = False -napoleon_include_special_with_doc = True -napoleon_use_admonition_for_examples = False -napoleon_use_admonition_for_notes = False -napoleon_use_admonition_for_references = False -napoleon_use_ivar = False -napoleon_use_param = True -napoleon_use_rtype = True diff --git a/owl-bot-staging/v1/schema/predict/params/docs/index.rst b/owl-bot-staging/v1/schema/predict/params/docs/index.rst deleted file mode 100644 index e90a1a5814..0000000000 --- a/owl-bot-staging/v1/schema/predict/params/docs/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -API Reference -------------- -.. toctree:: - :maxdepth: 2 - - params_v1/services - params_v1/types diff --git a/owl-bot-staging/v1/schema/predict/params/docs/params_v1/services.rst b/owl-bot-staging/v1/schema/predict/params/docs/params_v1/services.rst deleted file mode 100644 index bf08ea6e98..0000000000 --- a/owl-bot-staging/v1/schema/predict/params/docs/params_v1/services.rst +++ /dev/null @@ -1,4 +0,0 @@ -Services for Google Cloud Aiplatform V1 Schema Predict Params v1 API -==================================================================== -.. 
toctree:: - :maxdepth: 2 diff --git a/owl-bot-staging/v1/schema/predict/params/docs/params_v1/types.rst b/owl-bot-staging/v1/schema/predict/params/docs/params_v1/types.rst deleted file mode 100644 index 956ef5224d..0000000000 --- a/owl-bot-staging/v1/schema/predict/params/docs/params_v1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Cloud Aiplatform V1 Schema Predict Params v1 API -================================================================= - -.. automodule:: google.cloud.aiplatform.v1.schema.predict.params_v1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params/__init__.py b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params/__init__.py deleted file mode 100644 index 91ae7f0d5c..0000000000 --- a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -from google.cloud.aiplatform.v1.schema.predict.params_v1.types.image_classification import ImageClassificationPredictionParams -from google.cloud.aiplatform.v1.schema.predict.params_v1.types.image_object_detection import ImageObjectDetectionPredictionParams -from google.cloud.aiplatform.v1.schema.predict.params_v1.types.image_segmentation import ImageSegmentationPredictionParams -from google.cloud.aiplatform.v1.schema.predict.params_v1.types.video_action_recognition import VideoActionRecognitionPredictionParams -from google.cloud.aiplatform.v1.schema.predict.params_v1.types.video_classification import VideoClassificationPredictionParams -from google.cloud.aiplatform.v1.schema.predict.params_v1.types.video_object_tracking import VideoObjectTrackingPredictionParams - -__all__ = ('ImageClassificationPredictionParams', - 'ImageObjectDetectionPredictionParams', - 'ImageSegmentationPredictionParams', - 'VideoActionRecognitionPredictionParams', - 'VideoClassificationPredictionParams', - 'VideoObjectTrackingPredictionParams', -) diff --git a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params/py.typed b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params/py.typed deleted file mode 100644 index df96e61590..0000000000 --- a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1-schema-predict-params package uses inline types. 
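For orientation, the params types deleted in the hunks above are plain proto-plus messages that callers pass as the prediction parameters of a Vertex AI predict call. A minimal usage sketch, assuming the staged google-cloud-aiplatform-v1-schema-predict-params package is installed (the import path mirrors the deleted __init__.py just shown):

# Hypothetical usage of the deleted params types; assumes the staged
# package above is installed.
from google.cloud.aiplatform.v1.schema.predict import params

# Construct prediction parameters for an image classification model.
image_params = params.ImageClassificationPredictionParams(
    confidence_threshold=0.5,  # service-side default is 0.0 per the docstring
    max_predictions=5,  # service-side default is 10 per the docstring
)

# proto-plus messages expose their fields as plain attributes.
print(image_params.confidence_threshold, image_params.max_predictions)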
diff --git a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py deleted file mode 100644 index 91b718b437..0000000000 --- a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -from .types.image_classification import ImageClassificationPredictionParams -from .types.image_object_detection import ImageObjectDetectionPredictionParams -from .types.image_segmentation import ImageSegmentationPredictionParams -from .types.video_action_recognition import VideoActionRecognitionPredictionParams -from .types.video_classification import VideoClassificationPredictionParams -from .types.video_object_tracking import VideoObjectTrackingPredictionParams - -__all__ = ( -'ImageClassificationPredictionParams', -'ImageObjectDetectionPredictionParams', -'ImageSegmentationPredictionParams', -'VideoActionRecognitionPredictionParams', -'VideoClassificationPredictionParams', -'VideoObjectTrackingPredictionParams', -) diff --git a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_metadata.json b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_metadata.json deleted file mode 100644 index edfffb441b..0000000000 --- a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_metadata.json +++ /dev/null @@ -1,7 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.aiplatform.v1.schema.predict.params_v1", - "protoPackage": "google.cloud.aiplatform.v1.schema.predict.params", - "schema": "1.0" -} diff --git a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/py.typed b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/py.typed deleted file mode 100644 index df96e61590..0000000000 --- a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1-schema-predict-params package uses inline types. 
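Note that the versionless params package deleted earlier and the params_v1 package deleted here re-export the same six classes, so either import path resolved to identical proto-plus types. A small sketch under the same installed-package assumption:

# Both deleted __init__.py files ultimately import from params_v1.types,
# so the versionless alias and the versioned module share class objects.
from google.cloud.aiplatform.v1.schema.predict import params, params_v1

assert (
    params.VideoClassificationPredictionParams
    is params_v1.VideoClassificationPredictionParams
)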
diff --git a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/services/__init__.py b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/services/__init__.py deleted file mode 100644 index 4de65971c2..0000000000 --- a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py deleted file mode 100644 index 70a92bb59c..0000000000 --- a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py +++ /dev/null @@ -1,42 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .image_classification import ( - ImageClassificationPredictionParams, -) -from .image_object_detection import ( - ImageObjectDetectionPredictionParams, -) -from .image_segmentation import ( - ImageSegmentationPredictionParams, -) -from .video_action_recognition import ( - VideoActionRecognitionPredictionParams, -) -from .video_classification import ( - VideoClassificationPredictionParams, -) -from .video_object_tracking import ( - VideoObjectTrackingPredictionParams, -) - -__all__ = ( - 'ImageClassificationPredictionParams', - 'ImageObjectDetectionPredictionParams', - 'ImageSegmentationPredictionParams', - 'VideoActionRecognitionPredictionParams', - 'VideoClassificationPredictionParams', - 'VideoObjectTrackingPredictionParams', -) diff --git a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py deleted file mode 100644 index 1668600544..0000000000 --- a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py +++ /dev/null @@ -1,51 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.params', - manifest={ - 'ImageClassificationPredictionParams', - }, -) - - -class ImageClassificationPredictionParams(proto.Message): - r"""Prediction model parameters for Image Classification. - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. Default value is - 0.0 - max_predictions (int): - The Model only returns up to that many top, - by confidence score, predictions per instance. - If this number is very high, the Model may - return fewer predictions. Default value is 10. - """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py deleted file mode 100644 index 43c7814607..0000000000 --- a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.params', - manifest={ - 'ImageObjectDetectionPredictionParams', - }, -) - - -class ImageObjectDetectionPredictionParams(proto.Message): - r"""Prediction model parameters for Image Object Detection. - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. Default value is - 0.0 - max_predictions (int): - The Model only returns up to that many top, - by confidence score, predictions per instance. - Note that number of returned predictions is also - limited by metadata's predictionsLimit. Default - value is 10. 
- """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py deleted file mode 100644 index 695a3a7745..0000000000 --- a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py +++ /dev/null @@ -1,44 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.params', - manifest={ - 'ImageSegmentationPredictionParams', - }, -) - - -class ImageSegmentationPredictionParams(proto.Message): - r"""Prediction model parameters for Image Segmentation. - Attributes: - confidence_threshold (float): - When the model predicts category of pixels of - the image, it will only provide predictions for - pixels that it is at least this much confident - about. All other pixels will be classified as - background. Default value is 0.5. - """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py deleted file mode 100644 index 88e714e9cf..0000000000 --- a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.params', - manifest={ - 'VideoActionRecognitionPredictionParams', - }, -) - - -class VideoActionRecognitionPredictionParams(proto.Message): - r"""Prediction model parameters for Video Action Recognition. - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. 
Default value is - 0.0 - max_predictions (int): - The model only returns up to that many top, - by confidence score, predictions per frame of - the video. If this number is very high, the - Model may return fewer predictions per frame. - Default value is 50. - """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py deleted file mode 100644 index 06b2347eb6..0000000000 --- a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py +++ /dev/null @@ -1,94 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto  # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.params', - manifest={ - 'VideoClassificationPredictionParams', - }, -) - - -class VideoClassificationPredictionParams(proto.Message): - r"""Prediction model parameters for Video Classification. - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. Default value is - 0.0 - max_predictions (int): - The Model only returns up to that many top, - by confidence score, predictions per instance. - If this number is very high, the Model may - return fewer predictions. Default value is - 10,000. - segment_classification (bool): - Set to true to request segment-level - classification. Vertex AI returns labels and - their confidence scores for the entire time - segment of the video that user specified in the - input instance. Default value is true - shot_classification (bool): - Set to true to request shot-level - classification. Vertex AI determines the - boundaries for each camera shot in the entire - time segment of the video that user specified in - the input instance. Vertex AI then returns - labels and their confidence scores for each - detected shot, along with the start and end time - of the shot. - WARNING: Model evaluation is not done for this - classification type, the quality of it depends - on the training data, but there are no metrics - provided to describe that quality. - Default value is false - one_sec_interval_classification (bool): - Set to true to request classification for a - video at one-second intervals. Vertex AI returns - labels and their confidence scores for each - second of the entire time segment of the video - that user specified in the input instance. - WARNING: Model - evaluation is not done for this classification - type, the quality of it depends on the training - data, but there are no metrics provided to - describe that quality.
Default value is false - """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - segment_classification = proto.Field( - proto.BOOL, - number=3, - ) - shot_classification = proto.Field( - proto.BOOL, - number=4, - ) - one_sec_interval_classification = proto.Field( - proto.BOOL, - number=5, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py b/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py deleted file mode 100644 index 820a73e3c6..0000000000 --- a/owl-bot-staging/v1/schema/predict/params/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.params', - manifest={ - 'VideoObjectTrackingPredictionParams', - }, -) - - -class VideoObjectTrackingPredictionParams(proto.Message): - r"""Prediction model parameters for Video Object Tracking. - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. Default value is - 0.0 - max_predictions (int): - The model only returns up to that many top, - by confidence score, predictions per frame of - the video. If this number is very high, the - Model may return fewer predictions per frame. - Default value is 50. - min_bounding_box_size (float): - Only bounding boxes with shortest edge at - least that long as a relative value of video - frame size are returned. Default value is 0.0. - """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - min_bounding_box_size = proto.Field( - proto.FLOAT, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/params/mypy.ini b/owl-bot-staging/v1/schema/predict/params/mypy.ini deleted file mode 100644 index 4505b48543..0000000000 --- a/owl-bot-staging/v1/schema/predict/params/mypy.ini +++ /dev/null @@ -1,3 +0,0 @@ -[mypy] -python_version = 3.6 -namespace_packages = True diff --git a/owl-bot-staging/v1/schema/predict/params/noxfile.py b/owl-bot-staging/v1/schema/predict/params/noxfile.py deleted file mode 100644 index e886ab4759..0000000000 --- a/owl-bot-staging/v1/schema/predict/params/noxfile.py +++ /dev/null @@ -1,132 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -import pathlib -import shutil -import subprocess -import sys - - -import nox  # type: ignore - -CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() - -LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" -PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") - - -nox.options.sessions = [ - "unit", - "cover", - "mypy", - "check_lower_bounds", - # exclude update_lower_bounds from default - "docs", -] - -@nox.session(python=['3.6', '3.7', '3.8', '3.9']) -def unit(session): - """Run the unit test suite.""" - - session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio') - session.install('-e', '.') - - session.run( - 'py.test', - '--quiet', - '--cov=google/cloud/aiplatform/v1/schema/predict/params_v1/', - '--cov-config=.coveragerc', - '--cov-report=term', - '--cov-report=html', - os.path.join('tests', 'unit', ''.join(session.posargs)) - ) - - -@nox.session(python='3.7') -def cover(session): - """Run the final coverage report. - This outputs the coverage report aggregating coverage from the unit - test runs (not system test runs), and then erases coverage data. - """ - session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=100") - - session.run("coverage", "erase") - - -@nox.session(python=['3.6', '3.7']) -def mypy(session): - """Run the type checker.""" - session.install('mypy', 'types-pkg_resources') - session.install('.') - session.run( - 'mypy', - '--explicit-package-bases', - 'google', - ) - - -@nox.session -def update_lower_bounds(session): - """Update lower bounds in constraints.txt to match setup.py""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'update', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - - -@nox.session -def check_lower_bounds(session): - """Check lower bounds in setup.py are reflected in constraints file""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'check', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - -@nox.session(python='3.6') -def docs(session): - """Build the docs for this library.""" - - session.install("-e", ".") - session.install("sphinx<3.0.0", "alabaster", "recommonmark") - - shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) - session.run( - "sphinx-build", - "-W",  # warnings as errors - "-T",  # show full traceback on exception - "-N",  # no colors - "-b", - "html", - "-d", - os.path.join("docs", "_build", "doctrees", ""), - os.path.join("docs", ""), - os.path.join("docs", "_build", "html", ""), - ) diff --git a/owl-bot-staging/v1/schema/predict/params/scripts/fixup_params_v1_keywords.py b/owl-bot-staging/v1/schema/predict/params/scripts/fixup_params_v1_keywords.py deleted file mode 100644 index 17915abed8..0000000000 --- a/owl-bot-staging/v1/schema/predict/params/scripts/fixup_params_v1_keywords.py +++ /dev/null @@ -1,175 +0,0 @@ -#!
/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class paramsCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. - return updated - - kwargs, ctrl_kwargs = partition( - lambda a: not a.keyword.value in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=paramsCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. 
- with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the params client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. - -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. -""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/schema/predict/params/setup.py b/owl-bot-staging/v1/schema/predict/params/setup.py deleted file mode 100644 index 94507f278c..0000000000 --- a/owl-bot-staging/v1/schema/predict/params/setup.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import io -import os -import setuptools # type: ignore - -version = '0.1.0' - -package_root = os.path.abspath(os.path.dirname(__file__)) - -readme_filename = os.path.join(package_root, 'README.rst') -with io.open(readme_filename, encoding='utf-8') as readme_file: - readme = readme_file.read() - -setuptools.setup( - name='google-cloud-aiplatform-v1-schema-predict-params', - version=version, - long_description=readme, - packages=setuptools.PEP420PackageFinder.find(), - namespace_packages=('google', 'google.cloud', 'google.cloud.aiplatform', 'google.cloud.aiplatform.v1', 'google.cloud.aiplatform.v1.schema', 'google.cloud.aiplatform.v1.schema.predict'), - platforms='Posix; MacOS X; Windows', - include_package_data=True, - install_requires=( - 'google-api-core[grpc] >= 1.27.0, < 2.0.0dev', - 'libcst >= 0.2.5', - 'proto-plus >= 1.15.0', - 'packaging >= 14.3', ), - python_requires='>=3.6', - classifiers=[ - 'Development Status :: 3 - Alpha', - 'Intended Audience :: Developers', - 'Operating System :: OS Independent', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Topic :: Internet', - 'Topic :: Software Development :: Libraries :: Python Modules', - ], - zip_safe=False, -) diff --git a/owl-bot-staging/v1/schema/predict/params/tests/__init__.py b/owl-bot-staging/v1/schema/predict/params/tests/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1/schema/predict/params/tests/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/schema/predict/params/tests/unit/__init__.py b/owl-bot-staging/v1/schema/predict/params/tests/unit/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1/schema/predict/params/tests/unit/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# diff --git a/owl-bot-staging/v1/schema/predict/params/tests/unit/gapic/__init__.py b/owl-bot-staging/v1/schema/predict/params/tests/unit/gapic/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1/schema/predict/params/tests/unit/gapic/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/schema/predict/params/tests/unit/gapic/params_v1/__init__.py b/owl-bot-staging/v1/schema/predict/params/tests/unit/gapic/params_v1/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1/schema/predict/params/tests/unit/gapic/params_v1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/schema/predict/prediction/.coveragerc b/owl-bot-staging/v1/schema/predict/prediction/.coveragerc deleted file mode 100644 index a49b3b6dd8..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/.coveragerc +++ /dev/null @@ -1,17 +0,0 @@ -[run] -branch = True - -[report] -show_missing = True -omit = - google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py -exclude_lines = - # Re-enable the standard pragma - pragma: NO COVER - # Ignore debug-only repr - def __repr__ - # Ignore pkg_resources exceptions. - # This is added at the module level as a safeguard for if someone - # generates the code and tries to run it without pip installing. This - # makes it virtually impossible to test properly. 
- except pkg_resources.DistributionNotFound diff --git a/owl-bot-staging/v1/schema/predict/prediction/MANIFEST.in b/owl-bot-staging/v1/schema/predict/prediction/MANIFEST.in deleted file mode 100644 index 4cca31bd6b..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -recursive-include google/cloud/aiplatform/v1/schema/predict/prediction *.py -recursive-include google/cloud/aiplatform/v1/schema/predict/prediction_v1 *.py diff --git a/owl-bot-staging/v1/schema/predict/prediction/README.rst b/owl-bot-staging/v1/schema/predict/prediction/README.rst deleted file mode 100644 index acb7f67d13..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/README.rst +++ /dev/null @@ -1,49 +0,0 @@ -Python Client for Google Cloud Aiplatform V1 Schema Predict Prediction API -========================================================================== - -Quick Start ------------ - -In order to use this library, you first need to go through the following steps: - -1. `Select or create a Cloud Platform project.`_ -2. `Enable billing for your project.`_ -3. Enable the Google Cloud Aiplatform V1 Schema Predict Prediction API. -4. `Setup Authentication.`_ - -.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project -.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project -.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html - -Installation -~~~~~~~~~~~~ - -Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to -create isolated Python environments. The basic problem it addresses is one of -dependencies and versions, and indirectly permissions. - -With `virtualenv`_, it's possible to install this library without needing system -install permissions, and without clashing with the installed system -dependencies. - -.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ - - -Mac/Linux -^^^^^^^^^ - -.. code-block:: console - - python3 -m venv <your-env> - source <your-env>/bin/activate - <your-env>/bin/pip install /path/to/library - - -Windows -^^^^^^^ - -.. code-block:: console - - python3 -m venv <your-env> - <your-env>\Scripts\activate - <your-env>\Scripts\pip.exe install \path\to\library diff --git a/owl-bot-staging/v1/schema/predict/prediction/docs/conf.py b/owl-bot-staging/v1/schema/predict/prediction/docs/conf.py deleted file mode 100644 index c0f73900a9..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/docs/conf.py +++ /dev/null @@ -1,376 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# -# google-cloud-aiplatform-v1-schema-predict-prediction documentation build configuration file -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default.
- -import sys -import os -import shlex - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath("..")) - -__version__ = "0.1.0" - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "1.6.3" - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.autosummary", - "sphinx.ext.intersphinx", - "sphinx.ext.coverage", - "sphinx.ext.napoleon", - "sphinx.ext.todo", - "sphinx.ext.viewcode", -] - -# autodoc/autosummary flags -autoclass_content = "both" -autodoc_default_flags = ["members"] -autosummary_generate = True - - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# Allow markdown includes (so releases.md can include CHANGELOG.md) -# http://www.sphinx-doc.org/en/master/markdown.html -source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -source_suffix = [".rst", ".md"] - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = "index" - -# General information about the project. -project = u"google-cloud-aiplatform-v1-schema-predict-prediction" -copyright = u"2020, Google, LLC" -author = u"Google APIs"  # TODO: autogenerate this bit - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The full version, including alpha/beta/rc tags. -release = __version__ -# The short X.Y version. -version = ".".join(release.split(".")[0:2]) - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ["_build"] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents.
-# keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = True - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = "alabaster" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = { - "description": "Google Cloud Aiplatform V1 Schema Predict Client Libraries for Python", - "github_user": "googleapis", - "github_repo": "google-cloud-python", - "github_banner": True, - "font_family": "'Roboto', Georgia, sans", - "head_font_family": "'Roboto', Georgia, serif", - "code_font_family": "'Roboto Mono', 'Consolas', monospace", -} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# "<project> v<release> documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a <link> tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Language to be used for generating the HTML full-text search index.
-# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -# html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# Now only 'ja' uses this config value -# html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -# html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = "google-cloud-aiplatform-v1-schema-predict-prediction-doc" - -# -- Options for warnings ------------------------------------------------------ - - -suppress_warnings = [ - # Temporarily suppress this to avoid "more than one target found for - # cross-reference" warning, which are intractable for us to avoid while in - # a mono-repo. - # See https://github.com/sphinx-doc/sphinx/blob - # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 - "ref.python" -] - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - # 'preamble': '', - # Latex figure (float) alignment - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ( - master_doc, - "google-cloud-aiplatform-v1-schema-predict-prediction.tex", - u"google-cloud-aiplatform-v1-schema-predict-prediction Documentation", - author, - "manual", - ) -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ( - master_doc, - "google-cloud-aiplatform-v1-schema-predict-prediction", - u"Google Cloud Aiplatform V1 Schema Predict Prediction Documentation", - [author], - 1, - ) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - master_doc, - "google-cloud-aiplatform-v1-schema-predict-prediction", - u"google-cloud-aiplatform-v1-schema-predict-prediction Documentation", - author, - "google-cloud-aiplatform-v1-schema-predict-prediction", - "GAPIC library for Google Cloud Aiplatform V1 Schema Predict Prediction API", - "APIs", - ) -] - -# Documents to append as an appendix to all manuals. 
-# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = { - "python": ("http://python.readthedocs.org/en/latest/", None), - "gax": ("https://gax-python.readthedocs.org/en/latest/", None), - "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), - "grpc": ("https://grpc.io/grpc/python/", None), - "requests": ("http://requests.kennethreitz.org/en/stable/", None), - "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), - "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), -} - - -# Napoleon settings -napoleon_google_docstring = True -napoleon_numpy_docstring = True -napoleon_include_private_with_doc = False -napoleon_include_special_with_doc = True -napoleon_use_admonition_for_examples = False -napoleon_use_admonition_for_notes = False -napoleon_use_admonition_for_references = False -napoleon_use_ivar = False -napoleon_use_param = True -napoleon_use_rtype = True diff --git a/owl-bot-staging/v1/schema/predict/prediction/docs/index.rst b/owl-bot-staging/v1/schema/predict/prediction/docs/index.rst deleted file mode 100644 index f28df12991..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/docs/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -API Reference -------------- -.. toctree:: - :maxdepth: 2 - - prediction_v1/services - prediction_v1/types diff --git a/owl-bot-staging/v1/schema/predict/prediction/docs/prediction_v1/services.rst b/owl-bot-staging/v1/schema/predict/prediction/docs/prediction_v1/services.rst deleted file mode 100644 index ad6f034387..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/docs/prediction_v1/services.rst +++ /dev/null @@ -1,4 +0,0 @@ -Services for Google Cloud Aiplatform V1 Schema Predict Prediction v1 API -======================================================================== -.. toctree:: - :maxdepth: 2 diff --git a/owl-bot-staging/v1/schema/predict/prediction/docs/prediction_v1/types.rst b/owl-bot-staging/v1/schema/predict/prediction/docs/prediction_v1/types.rst deleted file mode 100644 index a97faf34de..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/docs/prediction_v1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Cloud Aiplatform V1 Schema Predict Prediction v1 API -===================================================================== - -.. 
automodule:: google.cloud.aiplatform.v1.schema.predict.prediction_v1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py deleted file mode 100644 index 27d9f97862..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py +++ /dev/null @@ -1,39 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.classification import ClassificationPredictionResult -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.image_object_detection import ImageObjectDetectionPredictionResult -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.image_segmentation import ImageSegmentationPredictionResult -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.tabular_classification import TabularClassificationPredictionResult -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.tabular_regression import TabularRegressionPredictionResult -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.text_extraction import TextExtractionPredictionResult -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.text_sentiment import TextSentimentPredictionResult -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.video_action_recognition import VideoActionRecognitionPredictionResult -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.video_classification import VideoClassificationPredictionResult -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.video_object_tracking import VideoObjectTrackingPredictionResult - -__all__ = ('ClassificationPredictionResult', - 'ImageObjectDetectionPredictionResult', - 'ImageSegmentationPredictionResult', - 'TabularClassificationPredictionResult', - 'TabularRegressionPredictionResult', - 'TextExtractionPredictionResult', - 'TextSentimentPredictionResult', - 'VideoActionRecognitionPredictionResult', - 'VideoClassificationPredictionResult', - 'VideoObjectTrackingPredictionResult', -) diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction/py.typed b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction/py.typed deleted file mode 100644 index 472fa4d8cc..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1-schema-predict-prediction package uses inline types. 
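For reference while reviewing this removal: the ``__init__.py`` above re-exports the prediction result types under the versionless namespace, and they are plain proto-plus messages. A minimal sketch of constructing one, assuming the staged distribution deleted here is installed (the field values are illustrative only):

.. code-block:: python

    # Minimal sketch (assumes the staged package deleted above is installed).
    # ClassificationPredictionResult is a proto-plus message, so repeated
    # fields accept plain Python sequences via keyword arguments.
    from google.cloud.aiplatform.v1.schema.predict.prediction import (
        ClassificationPredictionResult,
    )

    result = ClassificationPredictionResult(
        ids=[123],
        display_names=["daisy"],
        confidences=[0.97],
    )
    print(result)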
diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py deleted file mode 100644 index 3cf9304526..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -from .types.classification import ClassificationPredictionResult -from .types.image_object_detection import ImageObjectDetectionPredictionResult -from .types.image_segmentation import ImageSegmentationPredictionResult -from .types.tabular_classification import TabularClassificationPredictionResult -from .types.tabular_regression import TabularRegressionPredictionResult -from .types.text_extraction import TextExtractionPredictionResult -from .types.text_sentiment import TextSentimentPredictionResult -from .types.video_action_recognition import VideoActionRecognitionPredictionResult -from .types.video_classification import VideoClassificationPredictionResult -from .types.video_object_tracking import VideoObjectTrackingPredictionResult - -__all__ = ( -'ClassificationPredictionResult', -'ImageObjectDetectionPredictionResult', -'ImageSegmentationPredictionResult', -'TabularClassificationPredictionResult', -'TabularRegressionPredictionResult', -'TextExtractionPredictionResult', -'TextSentimentPredictionResult', -'VideoActionRecognitionPredictionResult', -'VideoClassificationPredictionResult', -'VideoObjectTrackingPredictionResult', -) diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_metadata.json b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_metadata.json deleted file mode 100644 index ba1d67a00c..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_metadata.json +++ /dev/null @@ -1,7 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.aiplatform.v1.schema.predict.prediction_v1", - "protoPackage": "google.cloud.aiplatform.v1.schema.predict.prediction", - "schema": "1.0" -} diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/py.typed b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/py.typed deleted file mode 100644 index 472fa4d8cc..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1-schema-predict-prediction package uses inline types. 
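The versioned ``prediction_v1`` package deleted above exposes the same types; a common use is rehydrating a raw JSON prediction payload into a typed message through the proto-plus helpers. A hedged sketch, assuming the staged package is installed and using an illustrative payload:

.. code-block:: python

    # Hedged sketch (assumes the staged prediction_v1 package is installed).
    # proto-plus messages provide from_json/to_json classmethods, which is a
    # typical way to parse raw prediction payloads into these schema types.
    from google.cloud.aiplatform.v1.schema.predict import prediction_v1

    payload = '{"classes": ["cat", "dog"], "scores": [0.9, 0.1]}'
    result = prediction_v1.TabularClassificationPredictionResult.from_json(payload)
    assert list(result.classes) == ["cat", "dog"]
    print(prediction_v1.TabularClassificationPredictionResult.to_json(result))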
diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/services/__init__.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/services/__init__.py deleted file mode 100644 index 4de65971c2..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py deleted file mode 100644 index b7b7c056aa..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .classification import ( - ClassificationPredictionResult, -) -from .image_object_detection import ( - ImageObjectDetectionPredictionResult, -) -from .image_segmentation import ( - ImageSegmentationPredictionResult, -) -from .tabular_classification import ( - TabularClassificationPredictionResult, -) -from .tabular_regression import ( - TabularRegressionPredictionResult, -) -from .text_extraction import ( - TextExtractionPredictionResult, -) -from .text_sentiment import ( - TextSentimentPredictionResult, -) -from .video_action_recognition import ( - VideoActionRecognitionPredictionResult, -) -from .video_classification import ( - VideoClassificationPredictionResult, -) -from .video_object_tracking import ( - VideoObjectTrackingPredictionResult, -) - -__all__ = ( - 'ClassificationPredictionResult', - 'ImageObjectDetectionPredictionResult', - 'ImageSegmentationPredictionResult', - 'TabularClassificationPredictionResult', - 'TabularRegressionPredictionResult', - 'TextExtractionPredictionResult', - 'TextSentimentPredictionResult', - 'VideoActionRecognitionPredictionResult', - 'VideoClassificationPredictionResult', - 'VideoObjectTrackingPredictionResult', -) diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py deleted file mode 100644 index f14b084951..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.prediction', - manifest={ - 'ClassificationPredictionResult', - }, -) - - -class ClassificationPredictionResult(proto.Message): - r"""Prediction output format for Image and Text Classification. - Attributes: - ids (Sequence[int]): - The resource IDs of the AnnotationSpecs that - had been identified. - display_names (Sequence[str]): - The display names of the AnnotationSpecs that - had been identified, order matches the IDs. - confidences (Sequence[float]): - The Model's confidences in correctness of the - predicted IDs, higher value means higher - confidence. Order matches the IDs.
- """ - - ids = proto.RepeatedField( - proto.INT64, - number=1, - ) - display_names = proto.RepeatedField( - proto.STRING, - number=2, - ) - confidences = proto.RepeatedField( - proto.FLOAT, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py deleted file mode 100644 index 74178c5502..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py +++ /dev/null @@ -1,72 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import struct_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.prediction', - manifest={ - 'ImageObjectDetectionPredictionResult', - }, -) - - -class ImageObjectDetectionPredictionResult(proto.Message): - r"""Prediction output format for Image Object Detection. - Attributes: - ids (Sequence[int]): - The resource IDs of the AnnotationSpecs that - had been identified, ordered by the confidence - score in descending order. - display_names (Sequence[str]): - The display names of the AnnotationSpecs that - had been identified, order matches the IDs. - confidences (Sequence[float]): - The Model's confidences in correctness of the - predicted IDs, higher value means higher - confidence. Order matches the IDs. - bboxes (Sequence[google.protobuf.struct_pb2.ListValue]): - Bounding boxes, i.e. the rectangles over the image, that - pinpoint the found AnnotationSpecs. Given in order that - matches the IDs. Each bounding box is an array of 4 numbers - ``xMin``, ``xMax``, ``yMin``, and ``yMax``, which represent - the extremal coordinates of the box. They are relative to - the image size, and the point 0,0 is in the top left of the - image.
- """ - - ids = proto.RepeatedField( - proto.INT64, - number=1, - ) - display_names = proto.RepeatedField( - proto.STRING, - number=2, - ) - confidences = proto.RepeatedField( - proto.FLOAT, - number=3, - ) - bboxes = proto.RepeatedField( - proto.MESSAGE, - number=4, - message=struct_pb2.ListValue, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py deleted file mode 100644 index e93991222a..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.prediction', - manifest={ - 'ImageSegmentationPredictionResult', - }, -) - - -class ImageSegmentationPredictionResult(proto.Message): - r"""Prediction output format for Image Segmentation. - Attributes: - category_mask (str): - A PNG image where each pixel in the mask - represents the category to which the pixel in - the original image was predicted to belong. - The size of this image will be the same as the - original image. The mapping between the - AnnotationSpec and the color can be found in - model's metadata. The model will choose the most - likely category and if none of the categories - reach the confidence threshold, the pixel will - be marked as background. - confidence_mask (str): - A one channel image which is encoded as an - 8bit lossless PNG. The size of the image will be - the same as the original image. For a specific - pixel, darker color means less confidence in - correctness of the category in the categoryMask - for the corresponding pixel. Black means no - confidence and white means complete confidence. - """ - - category_mask = proto.Field( - proto.STRING, - number=1, - ) - confidence_mask = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py deleted file mode 100644 index a36bf8f991..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py +++ /dev/null @@ -1,51 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.prediction', - manifest={ - 'TabularClassificationPredictionResult', - }, -) - - -class TabularClassificationPredictionResult(proto.Message): - r"""Prediction output format for Tabular Classification. - Attributes: - classes (Sequence[str]): - The names of the classes being classified; - contains all possible values of the target - column. - scores (Sequence[float]): - The model's confidence in each class being - correct, higher value means higher confidence. - The N-th score corresponds to the N-th class in - classes. - """ - - classes = proto.RepeatedField( - proto.STRING, - number=1, - ) - scores = proto.RepeatedField( - proto.FLOAT, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py deleted file mode 100644 index 56af2af196..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.prediction', - manifest={ - 'TabularRegressionPredictionResult', - }, -) - - -class TabularRegressionPredictionResult(proto.Message): - r"""Prediction output format for Tabular Regression. - Attributes: - value (float): - The regression value. - lower_bound (float): - The lower bound of the prediction interval. - upper_bound (float): - The upper bound of the prediction interval.
- """ - - value = proto.Field( - proto.FLOAT, - number=1, - ) - lower_bound = proto.Field( - proto.FLOAT, - number=2, - ) - upper_bound = proto.Field( - proto.FLOAT, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py deleted file mode 100644 index 3e7398f165..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py +++ /dev/null @@ -1,77 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.prediction', - manifest={ - 'TextExtractionPredictionResult', - }, -) - - -class TextExtractionPredictionResult(proto.Message): - r"""Prediction output format for Text Extraction. - Attributes: - ids (Sequence[int]): - The resource IDs of the AnnotationSpecs that - had been identified, ordered by the confidence - score descendingly. - display_names (Sequence[str]): - The display names of the AnnotationSpecs that - had been identified, order matches the IDs. - text_segment_start_offsets (Sequence[int]): - The start offsets, inclusive, of the text - segment in which the AnnotationSpec has been - identified. Expressed as a zero-based number of - characters as measured from the start of the - text snippet. - text_segment_end_offsets (Sequence[int]): - The end offsets, inclusive, of the text - segment in which the AnnotationSpec has been - identified. Expressed as a zero-based number of - characters as measured from the start of the - text snippet. - confidences (Sequence[float]): - The Model's confidences in correctness of the - predicted IDs, higher value means higher - confidence. Order matches the Ids. 
- """ - - ids = proto.RepeatedField( - proto.INT64, - number=1, - ) - display_names = proto.RepeatedField( - proto.STRING, - number=2, - ) - text_segment_start_offsets = proto.RepeatedField( - proto.INT64, - number=3, - ) - text_segment_end_offsets = proto.RepeatedField( - proto.INT64, - number=4, - ) - confidences = proto.RepeatedField( - proto.FLOAT, - number=5, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py deleted file mode 100644 index 135db45729..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py +++ /dev/null @@ -1,47 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.prediction', - manifest={ - 'TextSentimentPredictionResult', - }, -) - - -class TextSentimentPredictionResult(proto.Message): - r"""Prediction output format for Text Sentiment - Attributes: - sentiment (int): - The integer sentiment labels between 0 - (inclusive) and sentimentMax label (inclusive), - while 0 maps to the least positive sentiment and - sentimentMax maps to the most positive one. The - higher the score is, the more positive the - sentiment in the text snippet is. Note: - sentimentMax is an integer value between 1 - (inclusive) and 10 (inclusive). - """ - - sentiment = proto.Field( - proto.INT32, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py deleted file mode 100644 index 5a853655ae..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py +++ /dev/null @@ -1,84 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.prediction', - manifest={ - 'VideoActionRecognitionPredictionResult', - }, -) - - -class VideoActionRecognitionPredictionResult(proto.Message): - r"""Prediction output format for Video Action Recognition. - Attributes: - id (str): - The resource ID of the AnnotationSpec that - had been identified. - display_name (str): - The display name of the AnnotationSpec that - had been identified. - time_segment_start (google.protobuf.duration_pb2.Duration): - The beginning, inclusive, of the video's time - segment in which the AnnotationSpec has been - identified. Expressed as a number of seconds as - measured from the start of the video, with - fractions up to a microsecond precision, and - with "s" appended at the end. - time_segment_end (google.protobuf.duration_pb2.Duration): - The end, exclusive, of the video's time - segment in which the AnnotationSpec has been - identified. Expressed as a number of seconds as - measured from the start of the video, with - fractions up to a microsecond precision, and - with "s" appended at the end. - confidence (google.protobuf.wrappers_pb2.FloatValue): - The Model's confidence in correctness of this - prediction, higher value means higher - confidence. - """ - - id = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.MESSAGE, - number=4, - message=duration_pb2.Duration, - ) - time_segment_end = proto.Field( - proto.MESSAGE, - number=5, - message=duration_pb2.Duration, - ) - confidence = proto.Field( - proto.MESSAGE, - number=6, - message=wrappers_pb2.FloatValue, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py deleted file mode 100644 index da14b3253e..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py +++ /dev/null @@ -1,102 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.prediction', - manifest={ - 'VideoClassificationPredictionResult', - }, -) - - -class VideoClassificationPredictionResult(proto.Message): - r"""Prediction output format for Video Classification. - Attributes: - id (str): - The resource ID of the AnnotationSpec that - had been identified.
- display_name (str): - The display name of the AnnotationSpec that - had been identified. - type_ (str): - The type of the prediction. The requested - types can be configured via parameters. This - will be one of - segment-classification - - shot-classification - - one-sec-interval-classification - time_segment_start (google.protobuf.duration_pb2.Duration): - The beginning, inclusive, of the video's time - segment in which the AnnotationSpec has been - identified. Expressed as a number of seconds as - measured from the start of the video, with - fractions up to a microsecond precision, and - with "s" appended at the end. Note that for - 'segment-classification' prediction type, this - equals the original 'timeSegmentStart' from the - input instance, for other types it is the start - of a shot or a 1 second interval respectively. - time_segment_end (google.protobuf.duration_pb2.Duration): - The end, exclusive, of the video's time - segment in which the AnnotationSpec has been - identified. Expressed as a number of seconds as - measured from the start of the video, with - fractions up to a microsecond precision, and - with "s" appended at the end. Note that for - 'segment-classification' prediction type, this - equals the original 'timeSegmentEnd' from the - input instance, for other types it is the end of - a shot or a 1 second interval respectively. - confidence (google.protobuf.wrappers_pb2.FloatValue): - The Model's confidence in correctness of this - prediction, higher value means higher - confidence. - """ - - id = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - type_ = proto.Field( - proto.STRING, - number=3, - ) - time_segment_start = proto.Field( - proto.MESSAGE, - number=4, - message=duration_pb2.Duration, - ) - time_segment_end = proto.Field( - proto.MESSAGE, - number=5, - message=duration_pb2.Duration, - ) - confidence = proto.Field( - proto.MESSAGE, - number=6, - message=wrappers_pb2.FloatValue, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py b/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py deleted file mode 100644 index 9b70e913cd..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py +++ /dev/null @@ -1,144 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
-# -import proto # type: ignore - -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.prediction', - manifest={ - 'VideoObjectTrackingPredictionResult', - }, -) - - -class VideoObjectTrackingPredictionResult(proto.Message): - r"""Prediction output format for Video Object Tracking. - Attributes: - id (str): - The resource ID of the AnnotationSpec that - had been identified. - display_name (str): - The display name of the AnnotationSpec that - had been identified. - time_segment_start (google.protobuf.duration_pb2.Duration): - The beginning, inclusive, of the video's time - segment in which the object instance has been - detected. Expressed as a number of seconds as - measured from the start of the video, with - fractions up to a microsecond precision, and - with "s" appended at the end. - time_segment_end (google.protobuf.duration_pb2.Duration): - The end, inclusive, of the video's time - segment in which the object instance has been - detected. Expressed as a number of seconds as - measured from the start of the video, with - fractions up to a microsecond precision, and - with "s" appended at the end. - confidence (google.protobuf.wrappers_pb2.FloatValue): - The Model's confidence in correctness of this - prediction, higher value means higher - confidence. - frames (Sequence[google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.VideoObjectTrackingPredictionResult.Frame]): - All of the frames of the video in which a - single object instance has been detected. The - bounding boxes in the frames identify the same - object. - """ - - class Frame(proto.Message): - r"""The fields ``xMin``, ``xMax``, ``yMin``, and ``yMax`` refer to a - bounding box, i.e. the rectangle over the video frame pinpointing - the found AnnotationSpec. The coordinates are relative to the frame - size, and the point 0,0 is in the top left of the frame. - - Attributes: - time_offset (google.protobuf.duration_pb2.Duration): - A time (frame) of a video in which the object - has been detected. Expressed as a number of - seconds as measured from the start of the video, - with fractions up to a microsecond precision, - and with "s" appended at the end. - x_min (google.protobuf.wrappers_pb2.FloatValue): - The leftmost coordinate of the bounding box. - x_max (google.protobuf.wrappers_pb2.FloatValue): - The rightmost coordinate of the bounding box. - y_min (google.protobuf.wrappers_pb2.FloatValue): - The topmost coordinate of the bounding box. - y_max (google.protobuf.wrappers_pb2.FloatValue): - The bottommost coordinate of the bounding - box.
- """ - - time_offset = proto.Field( - proto.MESSAGE, - number=1, - message=duration_pb2.Duration, - ) - x_min = proto.Field( - proto.MESSAGE, - number=2, - message=wrappers_pb2.FloatValue, - ) - x_max = proto.Field( - proto.MESSAGE, - number=3, - message=wrappers_pb2.FloatValue, - ) - y_min = proto.Field( - proto.MESSAGE, - number=4, - message=wrappers_pb2.FloatValue, - ) - y_max = proto.Field( - proto.MESSAGE, - number=5, - message=wrappers_pb2.FloatValue, - ) - - id = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.MESSAGE, - number=3, - message=duration_pb2.Duration, - ) - time_segment_end = proto.Field( - proto.MESSAGE, - number=4, - message=duration_pb2.Duration, - ) - confidence = proto.Field( - proto.MESSAGE, - number=5, - message=wrappers_pb2.FloatValue, - ) - frames = proto.RepeatedField( - proto.MESSAGE, - number=6, - message=Frame, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/schema/predict/prediction/mypy.ini b/owl-bot-staging/v1/schema/predict/prediction/mypy.ini deleted file mode 100644 index 4505b48543..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/mypy.ini +++ /dev/null @@ -1,3 +0,0 @@ -[mypy] -python_version = 3.6 -namespace_packages = True diff --git a/owl-bot-staging/v1/schema/predict/prediction/noxfile.py b/owl-bot-staging/v1/schema/predict/prediction/noxfile.py deleted file mode 100644 index 8ea8ef006a..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/noxfile.py +++ /dev/null @@ -1,132 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -import pathlib -import shutil -import subprocess -import sys - - -import nox # type: ignore - -CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() - -LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" -PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") - - -nox.sessions = [ - "unit", - "cover", - "mypy", - "check_lower_bounds" - # exclude update_lower_bounds from default - "docs", -] - -@nox.session(python=['3.6', '3.7', '3.8', '3.9']) -def unit(session): - """Run the unit test suite.""" - - session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio') - session.install('-e', '.') - - session.run( - 'py.test', - '--quiet', - '--cov=google/cloud/aiplatform/v1/schema/predict/prediction_v1/', - '--cov-config=.coveragerc', - '--cov-report=term', - '--cov-report=html', - os.path.join('tests', 'unit', ''.join(session.posargs)) - ) - - -@nox.session(python='3.7') -def cover(session): - """Run the final coverage report. - This outputs the coverage report aggregating coverage from the unit - test runs (not system test runs), and then erases coverage data. 
- """ - session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=100") - - session.run("coverage", "erase") - - -@nox.session(python=['3.6', '3.7']) -def mypy(session): - """Run the type checker.""" - session.install('mypy', 'types-pkg_resources') - session.install('.') - session.run( - 'mypy', - '--explicit-package-bases', - 'google', - ) - - -@nox.session -def update_lower_bounds(session): - """Update lower bounds in constraints.txt to match setup.py""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'update', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - - -@nox.session -def check_lower_bounds(session): - """Check lower bounds in setup.py are reflected in constraints file""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'check', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - -@nox.session(python='3.6') -def docs(session): - """Build the docs for this library.""" - - session.install("-e", ".") - session.install("sphinx<3.0.0", "alabaster", "recommonmark") - - shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) - session.run( - "sphinx-build", - "-W", # warnings as errors - "-T", # show full traceback on exception - "-N", # no colors - "-b", - "html", - "-d", - os.path.join("docs", "_build", "doctrees", ""), - os.path.join("docs", ""), - os.path.join("docs", "_build", "html", ""), - ) diff --git a/owl-bot-staging/v1/schema/predict/prediction/scripts/fixup_prediction_v1_keywords.py b/owl-bot-staging/v1/schema/predict/prediction/scripts/fixup_prediction_v1_keywords.py deleted file mode 100644 index 7860bb63cf..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/scripts/fixup_prediction_v1_keywords.py +++ /dev/null @@ -1,175 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class predictionCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. 
- return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. - return updated - - kwargs, ctrl_kwargs = partition( - lambda a: a.keyword.value not in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), - cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=predictionCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the prediction client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. - -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword-based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method.
-""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/schema/predict/prediction/setup.py b/owl-bot-staging/v1/schema/predict/prediction/setup.py deleted file mode 100644 index 3456e20d11..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/setup.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import io -import os -import setuptools # type: ignore - -version = '0.1.0' - -package_root = os.path.abspath(os.path.dirname(__file__)) - -readme_filename = os.path.join(package_root, 'README.rst') -with io.open(readme_filename, encoding='utf-8') as readme_file: - readme = readme_file.read() - -setuptools.setup( - name='google-cloud-aiplatform-v1-schema-predict-prediction', - version=version, - long_description=readme, - packages=setuptools.PEP420PackageFinder.find(), - namespace_packages=('google', 'google.cloud', 'google.cloud.aiplatform', 'google.cloud.aiplatform.v1', 'google.cloud.aiplatform.v1.schema', 'google.cloud.aiplatform.v1.schema.predict'), - platforms='Posix; MacOS X; Windows', - include_package_data=True, - install_requires=( - 'google-api-core[grpc] >= 1.27.0, < 2.0.0dev', - 'libcst >= 0.2.5', - 'proto-plus >= 1.15.0', - 'packaging >= 14.3', ), - python_requires='>=3.6', - classifiers=[ - 'Development Status :: 3 - Alpha', - 'Intended Audience :: Developers', - 'Operating System :: OS Independent', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Topic :: Internet', - 'Topic :: Software Development :: Libraries :: Python Modules', - ], - zip_safe=False, -) diff --git a/owl-bot-staging/v1/schema/predict/prediction/tests/__init__.py b/owl-bot-staging/v1/schema/predict/prediction/tests/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/tests/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/schema/predict/prediction/tests/unit/__init__.py b/owl-bot-staging/v1/schema/predict/prediction/tests/unit/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/tests/unit/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
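(Editorial aside on the setup.py removed above: it publishes one sub-tree of the google.cloud namespace, which is why it uses PEP420PackageFinder and lists every ancestor package in namespace_packages. A minimal sketch of the same pattern, with a hypothetical package name:

    import setuptools

    setuptools.setup(
        name='example-namespace-subpackage',  # hypothetical name
        version='0.1.0',
        # PEP420PackageFinder also discovers packages that lack an
        # __init__.py, which is what lets several separately shipped
        # distributions share the 'google.cloud' namespace.
        packages=setuptools.PEP420PackageFinder.find(),
        namespace_packages=('google', 'google.cloud'),
    )
)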
-# diff --git a/owl-bot-staging/v1/schema/predict/prediction/tests/unit/gapic/__init__.py b/owl-bot-staging/v1/schema/predict/prediction/tests/unit/gapic/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/tests/unit/gapic/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/schema/predict/prediction/tests/unit/gapic/prediction_v1/__init__.py b/owl-bot-staging/v1/schema/predict/prediction/tests/unit/gapic/prediction_v1/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1/schema/predict/prediction/tests/unit/gapic/prediction_v1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/scripts/fixup_aiplatform_v1_keywords.py b/owl-bot-staging/v1/scripts/fixup_aiplatform_v1_keywords.py deleted file mode 100644 index 01e80ac919..0000000000 --- a/owl-bot-staging/v1/scripts/fixup_aiplatform_v1_keywords.py +++ /dev/null @@ -1,240 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
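(Editorial note on the scripts that follow: fixup_aiplatform_v1_keywords.py below and the four fixup_{definition,instance,params,prediction}_v1_keywords.py scripts deleted after it are instances of a single generated template. Only two things vary per script, schematically:

    class aiplatformCallTransformer(cst.CSTTransformer):  # class name varies per script
        CTRL_PARAMS = ('retry', 'timeout', 'metadata')    # identical in all five
        METHOD_TO_PARAMS = {...}  # per-API table; empty for the schema packages

so the per-script behavior is determined entirely by the METHOD_TO_PARAMS table.)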
-# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class aiplatformCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - 'batch_migrate_resources': ('parent', 'migrate_resource_requests', ), - 'cancel_batch_prediction_job': ('name', ), - 'cancel_custom_job': ('name', ), - 'cancel_data_labeling_job': ('name', ), - 'cancel_hyperparameter_tuning_job': ('name', ), - 'cancel_pipeline_job': ('name', ), - 'cancel_training_pipeline': ('name', ), - 'create_batch_prediction_job': ('parent', 'batch_prediction_job', ), - 'create_custom_job': ('parent', 'custom_job', ), - 'create_data_labeling_job': ('parent', 'data_labeling_job', ), - 'create_dataset': ('parent', 'dataset', ), - 'create_endpoint': ('parent', 'endpoint', ), - 'create_hyperparameter_tuning_job': ('parent', 'hyperparameter_tuning_job', ), - 'create_pipeline_job': ('parent', 'pipeline_job', 'pipeline_job_id', ), - 'create_specialist_pool': ('parent', 'specialist_pool', ), - 'create_training_pipeline': ('parent', 'training_pipeline', ), - 'delete_batch_prediction_job': ('name', ), - 'delete_custom_job': ('name', ), - 'delete_data_labeling_job': ('name', ), - 'delete_dataset': ('name', ), - 'delete_endpoint': ('name', ), - 'delete_hyperparameter_tuning_job': ('name', ), - 'delete_model': ('name', ), - 'delete_pipeline_job': ('name', ), - 'delete_specialist_pool': ('name', 'force', ), - 'delete_training_pipeline': ('name', ), - 'deploy_model': ('endpoint', 'deployed_model', 'traffic_split', ), - 'export_data': ('name', 'export_config', ), - 'export_model': ('name', 'output_config', ), - 'get_annotation_spec': ('name', 'read_mask', ), - 'get_batch_prediction_job': ('name', ), - 'get_custom_job': ('name', ), - 'get_data_labeling_job': ('name', ), - 'get_dataset': ('name', 'read_mask', ), - 'get_endpoint': ('name', ), - 'get_hyperparameter_tuning_job': ('name', ), - 'get_model': ('name', ), - 'get_model_evaluation': ('name', ), - 'get_model_evaluation_slice': ('name', ), - 'get_pipeline_job': ('name', ), - 'get_specialist_pool': ('name', ), - 'get_training_pipeline': ('name', ), - 'import_data': ('name', 'import_configs', ), - 'list_annotations': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), - 'list_batch_prediction_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_custom_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_data_items': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), - 'list_data_labeling_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), - 'list_datasets': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), - 'list_endpoints': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), - 'list_hyperparameter_tuning_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_model_evaluations': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_model_evaluation_slices': ('parent', 'filter', 'page_size', 'page_token', 
'read_mask', ), - 'list_models': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), - 'list_pipeline_jobs': ('parent', 'filter', 'page_size', 'page_token', ), - 'list_specialist_pools': ('parent', 'page_size', 'page_token', 'read_mask', ), - 'list_training_pipelines': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'predict': ('endpoint', 'instances', 'parameters', ), - 'search_migratable_resources': ('parent', 'page_size', 'page_token', 'filter', ), - 'undeploy_model': ('endpoint', 'deployed_model_id', 'traffic_split', ), - 'update_dataset': ('dataset', 'update_mask', ), - 'update_endpoint': ('endpoint', 'update_mask', ), - 'update_model': ('model', 'update_mask', ), - 'update_specialist_pool': ('specialist_pool', 'update_mask', ), - 'upload_model': ('parent', 'model', ), - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. - return updated - - kwargs, ctrl_kwargs = partition( - lambda a: not a.keyword.value in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=aiplatformCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the aiplatform client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. 
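(Editorial illustration, not part of the generated script: assuming the aiplatformCallTransformer defined above is in scope, the rewrite it performs looks like this.

    import libcst as cst

    src = "client.create_dataset('projects/p/locations/l', my_dataset)\n"
    fixed = cst.parse_module(src).visit(aiplatformCallTransformer())
    print(fixed.code)
    # Prints, modulo whitespace:
    # client.create_dataset(request={'parent': 'projects/p/locations/l', 'dataset': my_dataset})

The script itself is driven from the command line, e.g. python fixup_aiplatform_v1_keywords.py -d src/ -o fixed/, using the flags defined below.)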
- -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. -""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/scripts/fixup_definition_v1_keywords.py b/owl-bot-staging/v1/scripts/fixup_definition_v1_keywords.py deleted file mode 100644 index 4759d2731c..0000000000 --- a/owl-bot-staging/v1/scripts/fixup_definition_v1_keywords.py +++ /dev/null @@ -1,175 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class definitionCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. 
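        # (Editorial sketch, not in the generated file: partition() above is a
        # stable two-way split, e.g. partition(lambda n: n % 2, [1, 2, 3, 4])
        # returns ([1, 3], [2, 4]), matches first, so the next line separates
        # positional from keyword arguments in a single pass.)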
- args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. - return updated - - kwargs, ctrl_kwargs = partition( - lambda a: not a.keyword.value in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=definitionCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the definition client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. - -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. 
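(Editorial example of case (A) above, not from the original: a call such as

    params = ('projects/p/locations/l', my_dataset)
    client.some_method(*params)

must be skipped, because the positional arguments are hidden behind the * expansion and cannot be mapped to parameter names statically.)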
-""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/scripts/fixup_instance_v1_keywords.py b/owl-bot-staging/v1/scripts/fixup_instance_v1_keywords.py deleted file mode 100644 index d9e8bd0b17..0000000000 --- a/owl-bot-staging/v1/scripts/fixup_instance_v1_keywords.py +++ /dev/null @@ -1,175 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class instanceCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. 
- return updated - - kwargs, ctrl_kwargs = partition( - lambda a: not a.keyword.value in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=instanceCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the instance client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. - -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. 
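(Editorial example of the false-positive case mentioned above, not from the original: leave_Call matches on the attribute name alone, so an unrelated call such as my_model.predict(x) would also be rewritten wherever 'predict' appears in a transformer's METHOD_TO_PARAMS table, as it does for the aiplatform script earlier in this change.)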
-""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/scripts/fixup_params_v1_keywords.py b/owl-bot-staging/v1/scripts/fixup_params_v1_keywords.py deleted file mode 100644 index 17915abed8..0000000000 --- a/owl-bot-staging/v1/scripts/fixup_params_v1_keywords.py +++ /dev/null @@ -1,175 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class paramsCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. 
- return updated - - kwargs, ctrl_kwargs = partition( - lambda a: not a.keyword.value in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=paramsCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the params client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. - -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. 
-""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/scripts/fixup_prediction_v1_keywords.py b/owl-bot-staging/v1/scripts/fixup_prediction_v1_keywords.py deleted file mode 100644 index 7860bb63cf..0000000000 --- a/owl-bot-staging/v1/scripts/fixup_prediction_v1_keywords.py +++ /dev/null @@ -1,175 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class predictionCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. 
- return updated - - kwargs, ctrl_kwargs = partition( - lambda a: not a.keyword.value in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=predictionCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the prediction client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. - -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. 
-""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/setup.py b/owl-bot-staging/v1/setup.py deleted file mode 100644 index cf6098bbed..0000000000 --- a/owl-bot-staging/v1/setup.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import io -import os -import setuptools # type: ignore - -version = '0.1.0' - -package_root = os.path.abspath(os.path.dirname(__file__)) - -readme_filename = os.path.join(package_root, 'README.rst') -with io.open(readme_filename, encoding='utf-8') as readme_file: - readme = readme_file.read() - -setuptools.setup( - name='google-cloud-aiplatform-v1-schema-trainingjob-definition', - version=version, - long_description=readme, - packages=setuptools.PEP420PackageFinder.find(), - namespace_packages=('google', 'google.cloud', 'google.cloud.aiplatform', 'google.cloud.aiplatform.v1', 'google.cloud.aiplatform.v1.schema', 'google.cloud.aiplatform.v1.schema.trainingjob'), - platforms='Posix; MacOS X; Windows', - include_package_data=True, - install_requires=( - 'google-api-core[grpc] >= 1.27.0, < 2.0.0dev', - 'libcst >= 0.2.5', - 'proto-plus >= 1.15.0', - 'packaging >= 14.3', ), - python_requires='>=3.6', - classifiers=[ - 'Development Status :: 3 - Alpha', - 'Intended Audience :: Developers', - 'Operating System :: OS Independent', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Topic :: Internet', - 'Topic :: Software Development :: Libraries :: Python Modules', - ], - zip_safe=False, -) diff --git a/owl-bot-staging/v1/tests/__init__.py b/owl-bot-staging/v1/tests/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1/tests/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/tests/unit/__init__.py b/owl-bot-staging/v1/tests/unit/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1/tests/unit/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/tests/unit/gapic/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_dataset_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_dataset_service.py deleted file mode 100644 index 83ede00e4c..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_dataset_service.py +++ /dev/null @@ -1,3983 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1.services.dataset_service import DatasetServiceAsyncClient -from google.cloud.aiplatform_v1.services.dataset_service import DatasetServiceClient -from google.cloud.aiplatform_v1.services.dataset_service import pagers -from google.cloud.aiplatform_v1.services.dataset_service import transports -from google.cloud.aiplatform_v1.services.dataset_service.transports.base import _GOOGLE_AUTH_VERSION -from google.cloud.aiplatform_v1.types import annotation -from google.cloud.aiplatform_v1.types import annotation_spec -from google.cloud.aiplatform_v1.types import data_item -from google.cloud.aiplatform_v1.types import dataset -from google.cloud.aiplatform_v1.types import dataset as gca_dataset -from google.cloud.aiplatform_v1.types import dataset_service -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import io -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). -requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
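# (Editorial note, derived from the assertions below: the mTLS variant of a
# Google API endpoint is formed by inserting an "mtls" label after the first
# one, e.g. "aiplatform.googleapis.com" -> "aiplatform.mtls.googleapis.com",
# while endpoints outside *.googleapis.com are returned unchanged.)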
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert DatasetServiceClient._get_default_mtls_endpoint(None) is None - assert DatasetServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert DatasetServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert DatasetServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert DatasetServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert DatasetServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - DatasetServiceClient, - DatasetServiceAsyncClient, -]) -def test_dataset_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("client_class", [ - DatasetServiceClient, - DatasetServiceAsyncClient, -]) -def test_dataset_service_client_service_account_always_use_jwt(client_class): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.DatasetServiceGrpcTransport, "grpc"), - (transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_dataset_service_client_service_account_always_use_jwt_true(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - -@pytest.mark.parametrize("client_class", [ - DatasetServiceClient, - DatasetServiceAsyncClient, -]) -def test_dataset_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_dataset_service_client_get_transport_class(): - transport = DatasetServiceClient.get_transport_class() - available_transports = [ - transports.DatasetServiceGrpcTransport, - ] - assert transport in 
available_transports - - transport = DatasetServiceClient.get_transport_class("grpc") - assert transport == transports.DatasetServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc"), - (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(DatasetServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceClient)) -@mock.patch.object(DatasetServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceAsyncClient)) -def test_dataset_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(DatasetServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(DatasetServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
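    # (Editorial summary of the endpoint selection exercised in this and the
    # following test, derived from the assertions:
    #   GOOGLE_API_USE_MTLS_ENDPOINT="never"  -> DEFAULT_ENDPOINT
    #   GOOGLE_API_USE_MTLS_ENDPOINT="always" -> DEFAULT_MTLS_ENDPOINT
    #   "auto" or unset -> mTLS only when a client certificate is configured
    #   any other value -> MutualTLSChannelError at client construction.)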
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc", "true"), - (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc", "false"), - (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(DatasetServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceClient)) -@mock.patch.object(DatasetServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_dataset_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc"), - (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_dataset_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc"), - (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_dataset_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
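    # (Editorial note: these client_options cases all follow one pattern,
    # visible in the assertions, where each ClientOptions field is forwarded
    # to the transport constructor: api_endpoint -> host, scopes -> scopes,
    # credentials_file -> credentials_file, quota_project_id -> quota_project_id.)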
-    options = client_options.ClientOptions(
-        credentials_file="credentials.json"
-    )
-    with mock.patch.object(transport_class, '__init__') as patched:
-        patched.return_value = None
-        client = client_class(client_options=options)
-        patched.assert_called_once_with(
-            credentials=None,
-            credentials_file="credentials.json",
-            host=client.DEFAULT_ENDPOINT,
-            scopes=None,
-            client_cert_source_for_mtls=None,
-            quota_project_id=None,
-            client_info=transports.base.DEFAULT_CLIENT_INFO,
-        )
-
-
-def test_dataset_service_client_client_options_from_dict():
-    with mock.patch('google.cloud.aiplatform_v1.services.dataset_service.transports.DatasetServiceGrpcTransport.__init__') as grpc_transport:
-        grpc_transport.return_value = None
-        client = DatasetServiceClient(
-            client_options={'api_endpoint': 'squid.clam.whelk'}
-        )
-        grpc_transport.assert_called_once_with(
-            credentials=None,
-            credentials_file=None,
-            host="squid.clam.whelk",
-            scopes=None,
-            client_cert_source_for_mtls=None,
-            quota_project_id=None,
-            client_info=transports.base.DEFAULT_CLIENT_INFO,
-        )
-
-
-def test_create_dataset(transport: str = 'grpc', request_type=dataset_service.CreateDatasetRequest):
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_dataset),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/spam')
-        response = client.create_dataset(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == dataset_service.CreateDatasetRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, future.Future)
-
-
-def test_create_dataset_from_dict():
-    test_create_dataset(request_type=dict)
-
-
-def test_create_dataset_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_dataset),
-            '__call__') as call:
-        client.create_dataset()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == dataset_service.CreateDatasetRequest()
-
-
-@pytest.mark.asyncio
-async def test_create_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.CreateDatasetRequest):
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_dataset),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        response = await client.create_dataset(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == dataset_service.CreateDatasetRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, future.Future)
-
-
-@pytest.mark.asyncio
-async def test_create_dataset_async_from_dict():
-    await test_create_dataset_async(request_type=dict)
-
-
-def test_create_dataset_field_headers():
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = dataset_service.CreateDatasetRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_dataset),
-            '__call__') as call:
-        call.return_value = operations_pb2.Operation(name='operations/op')
-        client.create_dataset(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_create_dataset_field_headers_async():
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = dataset_service.CreateDatasetRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_dataset),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
-        await client.create_dataset(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_create_dataset_flattened():
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_dataset),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/op')
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.create_dataset(
-            parent='parent_value',
-            dataset=gca_dataset.Dataset(name='name_value'),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-        assert args[0].dataset == gca_dataset.Dataset(name='name_value')
-
-
-def test_create_dataset_flattened_error():
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.create_dataset(
-            dataset_service.CreateDatasetRequest(),
-            parent='parent_value',
-            dataset=gca_dataset.Dataset(name='name_value'),
-        )
-
-
-@pytest.mark.asyncio
-async def test_create_dataset_flattened_async():
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_dataset),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.create_dataset(
-            parent='parent_value',
-            dataset=gca_dataset.Dataset(name='name_value'),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-        assert args[0].dataset == gca_dataset.Dataset(name='name_value')
-
-
-@pytest.mark.asyncio
-async def test_create_dataset_flattened_error_async():
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.create_dataset(
-            dataset_service.CreateDatasetRequest(),
-            parent='parent_value',
-            dataset=gca_dataset.Dataset(name='name_value'),
-        )
-
-
-def test_get_dataset(transport: str = 'grpc', request_type=dataset_service.GetDatasetRequest):
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_dataset),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = dataset.Dataset(
-            name='name_value',
-            display_name='display_name_value',
-            metadata_schema_uri='metadata_schema_uri_value',
-            etag='etag_value',
-        )
-        response = client.get_dataset(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == dataset_service.GetDatasetRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, dataset.Dataset)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.metadata_schema_uri == 'metadata_schema_uri_value'
-    assert response.etag == 'etag_value'
-
-
-def test_get_dataset_from_dict():
-    test_get_dataset(request_type=dict)
-
-
-def test_get_dataset_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_dataset),
-            '__call__') as call:
-        client.get_dataset()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == dataset_service.GetDatasetRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.GetDatasetRequest):
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_dataset),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset(
-            name='name_value',
-            display_name='display_name_value',
-            metadata_schema_uri='metadata_schema_uri_value',
-            etag='etag_value',
-        ))
-        response = await client.get_dataset(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == dataset_service.GetDatasetRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, dataset.Dataset)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.metadata_schema_uri == 'metadata_schema_uri_value'
-    assert response.etag == 'etag_value'
-
-
-@pytest.mark.asyncio
-async def test_get_dataset_async_from_dict():
-    await test_get_dataset_async(request_type=dict)
-
-
-def test_get_dataset_field_headers():
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = dataset_service.GetDatasetRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_dataset),
-            '__call__') as call:
-        call.return_value = dataset.Dataset()
-        client.get_dataset(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_get_dataset_field_headers_async():
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = dataset_service.GetDatasetRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_dataset),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset())
-        await client.get_dataset(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_get_dataset_flattened():
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_dataset),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = dataset.Dataset()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.get_dataset(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_get_dataset_flattened_error():
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.get_dataset(
-            dataset_service.GetDatasetRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_get_dataset_flattened_async():
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_dataset),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.get_dataset(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_get_dataset_flattened_error_async():
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.get_dataset(
-            dataset_service.GetDatasetRequest(),
-            name='name_value',
-        )
-
-
-def test_update_dataset(transport: str = 'grpc', request_type=dataset_service.UpdateDatasetRequest):
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_dataset),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = gca_dataset.Dataset(
-            name='name_value',
-            display_name='display_name_value',
-            metadata_schema_uri='metadata_schema_uri_value',
-            etag='etag_value',
-        )
-        response = client.update_dataset(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == dataset_service.UpdateDatasetRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, gca_dataset.Dataset)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.metadata_schema_uri == 'metadata_schema_uri_value'
-    assert response.etag == 'etag_value'
-
-
-def test_update_dataset_from_dict():
-    test_update_dataset(request_type=dict)
-
-
-def test_update_dataset_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_dataset),
-            '__call__') as call:
-        client.update_dataset()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == dataset_service.UpdateDatasetRequest()
-
-
-@pytest.mark.asyncio
-async def test_update_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.UpdateDatasetRequest):
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_dataset),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset(
-            name='name_value',
-            display_name='display_name_value',
-            metadata_schema_uri='metadata_schema_uri_value',
-            etag='etag_value',
-        ))
-        response = await client.update_dataset(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == dataset_service.UpdateDatasetRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, gca_dataset.Dataset)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.metadata_schema_uri == 'metadata_schema_uri_value'
-    assert response.etag == 'etag_value'
-
-
-@pytest.mark.asyncio
-async def test_update_dataset_async_from_dict():
-    await test_update_dataset_async(request_type=dict)
-
-
-def test_update_dataset_field_headers():
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = dataset_service.UpdateDatasetRequest()
-
-    request.dataset.name = 'dataset.name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_dataset),
-            '__call__') as call:
-        call.return_value = gca_dataset.Dataset()
-        client.update_dataset(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'dataset.name=dataset.name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_update_dataset_field_headers_async():
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = dataset_service.UpdateDatasetRequest()
-
-    request.dataset.name = 'dataset.name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_dataset),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset())
-        await client.update_dataset(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'dataset.name=dataset.name/value',
-    ) in kw['metadata']
-
-
-def test_update_dataset_flattened():
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_dataset),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = gca_dataset.Dataset()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.update_dataset(
-            dataset=gca_dataset.Dataset(name='name_value'),
-            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].dataset == gca_dataset.Dataset(name='name_value')
-        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
-
-
-def test_update_dataset_flattened_error():
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.update_dataset(
-            dataset_service.UpdateDatasetRequest(),
-            dataset=gca_dataset.Dataset(name='name_value'),
-            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
-        )
-
-
-@pytest.mark.asyncio
-async def test_update_dataset_flattened_async():
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_dataset),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.update_dataset(
-            dataset=gca_dataset.Dataset(name='name_value'),
-            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].dataset == gca_dataset.Dataset(name='name_value')
-        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
-
-
-@pytest.mark.asyncio
-async def test_update_dataset_flattened_error_async():
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.update_dataset(
-            dataset_service.UpdateDatasetRequest(),
-            dataset=gca_dataset.Dataset(name='name_value'),
-            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
-        )
-
-
-def test_list_datasets(transport: str = 'grpc', request_type=dataset_service.ListDatasetsRequest):
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_datasets),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = dataset_service.ListDatasetsResponse(
-            next_page_token='next_page_token_value',
-        )
-        response = client.list_datasets(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == dataset_service.ListDatasetsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListDatasetsPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_datasets_from_dict():
-    test_list_datasets(request_type=dict)
-
-
-def test_list_datasets_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_datasets),
-            '__call__') as call:
-        client.list_datasets()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == dataset_service.ListDatasetsRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_datasets_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ListDatasetsRequest):
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_datasets),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDatasetsResponse(
-            next_page_token='next_page_token_value',
-        ))
-        response = await client.list_datasets(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == dataset_service.ListDatasetsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListDatasetsAsyncPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_datasets_async_from_dict():
-    await test_list_datasets_async(request_type=dict)
-
-
-def test_list_datasets_field_headers():
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = dataset_service.ListDatasetsRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_datasets),
-            '__call__') as call:
-        call.return_value = dataset_service.ListDatasetsResponse()
-        client.list_datasets(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_datasets_field_headers_async():
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = dataset_service.ListDatasetsRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_datasets),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDatasetsResponse())
-        await client.list_datasets(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_list_datasets_flattened():
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_datasets),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = dataset_service.ListDatasetsResponse()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.list_datasets(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-def test_list_datasets_flattened_error():
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.list_datasets(
-            dataset_service.ListDatasetsRequest(),
-            parent='parent_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_list_datasets_flattened_async():
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_datasets),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDatasetsResponse())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.list_datasets(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_datasets_flattened_error_async():
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.list_datasets(
-            dataset_service.ListDatasetsRequest(),
-            parent='parent_value',
-        )
-
-
-def test_list_datasets_pager():
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_datasets),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            dataset_service.ListDatasetsResponse(
-                datasets=[
-                    dataset.Dataset(),
-                    dataset.Dataset(),
-                    dataset.Dataset(),
-                ],
-                next_page_token='abc',
-            ),
-            dataset_service.ListDatasetsResponse(
-                datasets=[],
-                next_page_token='def',
-            ),
-            dataset_service.ListDatasetsResponse(
-                datasets=[
-                    dataset.Dataset(),
-                ],
-                next_page_token='ghi',
-            ),
-            dataset_service.ListDatasetsResponse(
-                datasets=[
-                    dataset.Dataset(),
-                    dataset.Dataset(),
-                ],
-            ),
-            RuntimeError,
-        )
-
-        metadata = ()
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ('parent', ''),
-            )),
-        )
-        pager = client.list_datasets(request={})
-
-        assert pager._metadata == metadata
-
-        results = [i for i in pager]
-        assert len(results) == 6
-        assert all(isinstance(i, dataset.Dataset)
-                   for i in results)
-
-def test_list_datasets_pages():
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_datasets),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            dataset_service.ListDatasetsResponse(
-                datasets=[
-                    dataset.Dataset(),
-                    dataset.Dataset(),
-                    dataset.Dataset(),
-                ],
-                next_page_token='abc',
-            ),
-            dataset_service.ListDatasetsResponse(
-                datasets=[],
-                next_page_token='def',
-            ),
-            dataset_service.ListDatasetsResponse(
-                datasets=[
-                    dataset.Dataset(),
-                ],
-                next_page_token='ghi',
-            ),
-            dataset_service.ListDatasetsResponse(
-                datasets=[
-                    dataset.Dataset(),
-                    dataset.Dataset(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = list(client.list_datasets(request={}).pages)
-        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_datasets_async_pager():
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_datasets),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            dataset_service.ListDatasetsResponse(
-                datasets=[
-                    dataset.Dataset(),
-                    dataset.Dataset(),
-                    dataset.Dataset(),
-                ],
-                next_page_token='abc',
-            ),
-            dataset_service.ListDatasetsResponse(
-                datasets=[],
-                next_page_token='def',
-            ),
-            dataset_service.ListDatasetsResponse(
-                datasets=[
-                    dataset.Dataset(),
-                ],
-                next_page_token='ghi',
-            ),
-            dataset_service.ListDatasetsResponse(
-                datasets=[
-                    dataset.Dataset(),
-                    dataset.Dataset(),
-                ],
-            ),
-            RuntimeError,
-        )
-        async_pager = await client.list_datasets(request={},)
-        assert async_pager.next_page_token == 'abc'
-        responses = []
-        async for response in async_pager:
-            responses.append(response)
-
-        assert len(responses) == 6
-        assert all(isinstance(i, dataset.Dataset)
-                   for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_datasets_async_pages():
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_datasets),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            dataset_service.ListDatasetsResponse(
-                datasets=[
-                    dataset.Dataset(),
-                    dataset.Dataset(),
-                    dataset.Dataset(),
-                ],
-                next_page_token='abc',
-            ),
-            dataset_service.ListDatasetsResponse(
-                datasets=[],
-                next_page_token='def',
-            ),
-            dataset_service.ListDatasetsResponse(
-                datasets=[
-                    dataset.Dataset(),
-                ],
-                next_page_token='ghi',
-            ),
-            dataset_service.ListDatasetsResponse(
-                datasets=[
-                    dataset.Dataset(),
-                    dataset.Dataset(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = []
-        async for page_ in (await client.list_datasets(request={})).pages:
-            pages.append(page_)
-        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-def test_delete_dataset(transport: str = 'grpc', request_type=dataset_service.DeleteDatasetRequest):
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_dataset),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/spam')
-        response = client.delete_dataset(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == dataset_service.DeleteDatasetRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, future.Future)
-
-
-def test_delete_dataset_from_dict():
-    test_delete_dataset(request_type=dict)
-
-
-def test_delete_dataset_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_dataset),
-            '__call__') as call:
-        client.delete_dataset()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == dataset_service.DeleteDatasetRequest()
-
-
-@pytest.mark.asyncio
-async def test_delete_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.DeleteDatasetRequest):
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_dataset),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        response = await client.delete_dataset(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == dataset_service.DeleteDatasetRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, future.Future)
-
-
-@pytest.mark.asyncio
-async def test_delete_dataset_async_from_dict():
-    await test_delete_dataset_async(request_type=dict)
-
-
-def test_delete_dataset_field_headers():
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = dataset_service.DeleteDatasetRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_dataset),
-            '__call__') as call:
-        call.return_value = operations_pb2.Operation(name='operations/op')
-        client.delete_dataset(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_delete_dataset_field_headers_async():
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = dataset_service.DeleteDatasetRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_dataset),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
-        await client.delete_dataset(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_delete_dataset_flattened():
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_dataset),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/op')
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.delete_dataset(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_delete_dataset_flattened_error():
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.delete_dataset(
-            dataset_service.DeleteDatasetRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_delete_dataset_flattened_async():
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_dataset),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.delete_dataset(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_dataset_flattened_error_async():
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.delete_dataset(
-            dataset_service.DeleteDatasetRequest(),
-            name='name_value',
-        )
-
-
-def test_import_data(transport: str = 'grpc', request_type=dataset_service.ImportDataRequest):
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.import_data),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/spam')
-        response = client.import_data(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == dataset_service.ImportDataRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, future.Future)
-
-
-def test_import_data_from_dict():
-    test_import_data(request_type=dict)
-
-
-def test_import_data_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.import_data),
-            '__call__') as call:
-        client.import_data()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == dataset_service.ImportDataRequest()
-
-
-@pytest.mark.asyncio
-async def test_import_data_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ImportDataRequest):
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.import_data),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        response = await client.import_data(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == dataset_service.ImportDataRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, future.Future)
-
-
-@pytest.mark.asyncio
-async def test_import_data_async_from_dict():
-    await test_import_data_async(request_type=dict)
-
-
-def test_import_data_field_headers():
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = dataset_service.ImportDataRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.import_data),
-            '__call__') as call:
-        call.return_value = operations_pb2.Operation(name='operations/op')
-        client.import_data(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_import_data_field_headers_async():
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = dataset_service.ImportDataRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.import_data),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
-        await client.import_data(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_import_data_flattened():
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.import_data),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/op')
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.import_data(
-            name='name_value',
-            import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))],
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-        assert args[0].import_configs == [dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))]
-
-
-def test_import_data_flattened_error():
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.import_data(
-            dataset_service.ImportDataRequest(),
-            name='name_value',
-            import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))],
-        )
-
-
-@pytest.mark.asyncio
-async def test_import_data_flattened_async():
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.import_data),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.import_data(
-            name='name_value',
-            import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))],
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-        assert args[0].import_configs == [dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))]
-
-
-@pytest.mark.asyncio
-async def test_import_data_flattened_error_async():
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.import_data(
-            dataset_service.ImportDataRequest(),
-            name='name_value',
-            import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))],
-        )
-
-
-def test_export_data(transport: str = 'grpc', request_type=dataset_service.ExportDataRequest):
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.export_data),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/spam')
-        response = client.export_data(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == dataset_service.ExportDataRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, future.Future)
-
-
-def test_export_data_from_dict():
-    test_export_data(request_type=dict)
-
-
-def test_export_data_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.export_data),
-            '__call__') as call:
-        client.export_data()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == dataset_service.ExportDataRequest()
-
-
-@pytest.mark.asyncio
-async def test_export_data_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ExportDataRequest):
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.export_data),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        response = await client.export_data(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == dataset_service.ExportDataRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, future.Future)
-
-
-@pytest.mark.asyncio
-async def test_export_data_async_from_dict():
-    await test_export_data_async(request_type=dict)
-
-
-def test_export_data_field_headers():
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = dataset_service.ExportDataRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.export_data),
-            '__call__') as call:
-        call.return_value = operations_pb2.Operation(name='operations/op')
-        client.export_data(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_export_data_field_headers_async():
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = dataset_service.ExportDataRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.export_data),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
-        await client.export_data(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_export_data_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.export_data( - name='name_value', - export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - assert args[0].export_config == dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) - - -def test_export_data_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.export_data( - dataset_service.ExportDataRequest(), - name='name_value', - export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - -@pytest.mark.asyncio -async def test_export_data_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.export_data( - name='name_value', - export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - assert args[0].export_config == dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) - - -@pytest.mark.asyncio -async def test_export_data_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
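From a caller's perspective, the flattened-versus-request-object rule these tests enforce looks like the sketch below. The project, location, dataset, and bucket names are hypothetical, and the calls assume Application Default Credentials are configured:

```python
# Sketch: the two supported calling conventions for export_data.
from google.cloud import aiplatform_v1

client = aiplatform_v1.DatasetServiceClient()
name = "projects/my-project/locations/us-central1/datasets/123"
config = aiplatform_v1.ExportDataConfig(
    gcs_destination=aiplatform_v1.GcsDestination(
        output_uri_prefix="gs://my-bucket/exports/",
    ),
)

# Either a request object...
operation = client.export_data(
    request=aiplatform_v1.ExportDataRequest(name=name, export_config=config),
)
# ...or flattened keyword arguments; supplying both at once raises ValueError.
operation = client.export_data(name=name, export_config=config)
response = operation.result()  # export_data is a long-running operation
```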
- with pytest.raises(ValueError): - await client.export_data( - dataset_service.ExportDataRequest(), - name='name_value', - export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - -def test_list_data_items(transport: str = 'grpc', request_type=dataset_service.ListDataItemsRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListDataItemsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_data_items(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListDataItemsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListDataItemsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_data_items_from_dict(): - test_list_data_items(request_type=dict) - - -def test_list_data_items_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - client.list_data_items() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListDataItemsRequest() - - -@pytest.mark.asyncio -async def test_list_data_items_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ListDataItemsRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDataItemsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_data_items(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListDataItemsRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListDataItemsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_data_items_async_from_dict(): - await test_list_data_items_async(request_type=dict) - - -def test_list_data_items_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ListDataItemsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - call.return_value = dataset_service.ListDataItemsResponse() - client.list_data_items(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_data_items_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ListDataItemsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDataItemsResponse()) - await client.list_data_items(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_data_items_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListDataItemsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_data_items( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -def test_list_data_items_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_data_items( - dataset_service.ListDataItemsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_data_items_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListDataItemsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDataItemsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_data_items( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -@pytest.mark.asyncio -async def test_list_data_items_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_data_items( - dataset_service.ListDataItemsRequest(), - parent='parent_value', - ) - - -def test_list_data_items_pager(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - data_item.DataItem(), - ], - next_page_token='abc', - ), - dataset_service.ListDataItemsResponse( - data_items=[], - next_page_token='def', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - ], - next_page_token='ghi', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_data_items(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, data_item.DataItem) - for i in results) - -def test_list_data_items_pages(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - # Set the response to a series of pages. 
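The pager tests above mirror how application code consumes `list_data_items`. A minimal sketch with hypothetical resource names, assuming Application Default Credentials:

```python
# Sketch: consuming the pager that the tests above validate.
from google.cloud import aiplatform_v1

client = aiplatform_v1.DatasetServiceClient()
parent = "projects/my-project/locations/us-central1/datasets/123"

# Element iteration fetches successive pages behind the scenes:
for data_item in client.list_data_items(parent=parent):
    print(data_item.name)

# Page iteration exposes each raw response and its continuation token:
for page in client.list_data_items(parent=parent).pages:
    print(page.raw_page.next_page_token)
```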
- call.side_effect = ( - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - data_item.DataItem(), - ], - next_page_token='abc', - ), - dataset_service.ListDataItemsResponse( - data_items=[], - next_page_token='def', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - ], - next_page_token='ghi', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - ], - ), - RuntimeError, - ) - pages = list(client.list_data_items(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_data_items_async_pager(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - data_item.DataItem(), - ], - next_page_token='abc', - ), - dataset_service.ListDataItemsResponse( - data_items=[], - next_page_token='def', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - ], - next_page_token='ghi', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_data_items(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, data_item.DataItem) - for i in responses) - -@pytest.mark.asyncio -async def test_list_data_items_async_pages(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - data_item.DataItem(), - ], - next_page_token='abc', - ), - dataset_service.ListDataItemsResponse( - data_items=[], - next_page_token='def', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - ], - next_page_token='ghi', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_data_items(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_get_annotation_spec(transport: str = 'grpc', request_type=dataset_service.GetAnnotationSpecRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
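The async pager exercised just above has the same shape under asyncio: the coroutine is awaited once to obtain the pager, which is then iterated with `async for`. Names below are hypothetical and ADC is assumed:

```python
# Sketch: the async pager surface tested above.
import asyncio

from google.cloud import aiplatform_v1


async def main():
    client = aiplatform_v1.DatasetServiceAsyncClient()
    pager = await client.list_data_items(
        parent="projects/my-project/locations/us-central1/datasets/123",
    )
    async for data_item in pager:  # pages are fetched lazily
        print(data_item.name)


asyncio.run(main())
```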
- with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = annotation_spec.AnnotationSpec( - name='name_value', - display_name='display_name_value', - etag='etag_value', - ) - response = client.get_annotation_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.GetAnnotationSpecRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, annotation_spec.AnnotationSpec) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.etag == 'etag_value' - - -def test_get_annotation_spec_from_dict(): - test_get_annotation_spec(request_type=dict) - - -def test_get_annotation_spec_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - client.get_annotation_spec() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.GetAnnotationSpecRequest() - - -@pytest.mark.asyncio -async def test_get_annotation_spec_async(transport: str = 'grpc_asyncio', request_type=dataset_service.GetAnnotationSpecRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec( - name='name_value', - display_name='display_name_value', - etag='etag_value', - )) - response = await client.get_annotation_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.GetAnnotationSpecRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, annotation_spec.AnnotationSpec) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_annotation_spec_async_from_dict(): - await test_get_annotation_spec_async(request_type=dict) - - -def test_get_annotation_spec_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.GetAnnotationSpecRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
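The `*_from_dict` variants above exercise a convenience of the generated surface: a request may be passed as a plain dict and is coerced into the request type. A sketch with a hypothetical resource name:

```python
# Sketch: dict requests are accepted anywhere a request object is.
from google.cloud import aiplatform_v1

client = aiplatform_v1.DatasetServiceClient()
spec = client.get_annotation_spec(request={
    "name": "projects/my-project/locations/us-central1"
            "/datasets/123/annotationSpecs/456",
})
print(spec.display_name)
```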
- with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - call.return_value = annotation_spec.AnnotationSpec() - client.get_annotation_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_annotation_spec_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.GetAnnotationSpecRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec()) - await client.get_annotation_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_annotation_spec_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = annotation_spec.AnnotationSpec() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_annotation_spec( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_get_annotation_spec_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_annotation_spec( - dataset_service.GetAnnotationSpecRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_annotation_spec_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = annotation_spec.AnnotationSpec() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_annotation_spec( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_get_annotation_spec_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_annotation_spec( - dataset_service.GetAnnotationSpecRequest(), - name='name_value', - ) - - -def test_list_annotations(transport: str = 'grpc', request_type=dataset_service.ListAnnotationsRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListAnnotationsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_annotations(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListAnnotationsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListAnnotationsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_annotations_from_dict(): - test_list_annotations(request_type=dict) - - -def test_list_annotations_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - client.list_annotations() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListAnnotationsRequest() - - -@pytest.mark.asyncio -async def test_list_annotations_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ListAnnotationsRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListAnnotationsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_annotations(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListAnnotationsRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListAnnotationsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_annotations_async_from_dict(): - await test_list_annotations_async(request_type=dict) - - -def test_list_annotations_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ListAnnotationsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - call.return_value = dataset_service.ListAnnotationsResponse() - client.list_annotations(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_annotations_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ListAnnotationsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListAnnotationsResponse()) - await client.list_annotations(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_annotations_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListAnnotationsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_annotations( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -def test_list_annotations_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_annotations( - dataset_service.ListAnnotationsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_annotations_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListAnnotationsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListAnnotationsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_annotations( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -@pytest.mark.asyncio -async def test_list_annotations_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_annotations( - dataset_service.ListAnnotationsRequest(), - parent='parent_value', - ) - - -def test_list_annotations_pager(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - annotation.Annotation(), - ], - next_page_token='abc', - ), - dataset_service.ListAnnotationsResponse( - annotations=[], - next_page_token='def', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - ], - next_page_token='ghi', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_annotations(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, annotation.Annotation) - for i in results) - -def test_list_annotations_pages(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - annotation.Annotation(), - ], - next_page_token='abc', - ), - dataset_service.ListAnnotationsResponse( - annotations=[], - next_page_token='def', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - ], - next_page_token='ghi', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - ], - ), - RuntimeError, - ) - pages = list(client.list_annotations(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_annotations_async_pager(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - annotation.Annotation(), - ], - next_page_token='abc', - ), - dataset_service.ListAnnotationsResponse( - annotations=[], - next_page_token='def', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - ], - next_page_token='ghi', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_annotations(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, annotation.Annotation) - for i in responses) - -@pytest.mark.asyncio -async def test_list_annotations_async_pages(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - annotation.Annotation(), - ], - next_page_token='abc', - ), - dataset_service.ListAnnotationsResponse( - annotations=[], - next_page_token='def', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - ], - next_page_token='ghi', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_annotations(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. 
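The transport-error checks that follow reflect a simple rule: a transport instance already carries its own credentials, so the client constructor rejects redundant credential arguments. A sketch of the valid and invalid combinations:

```python
# Sketch: a pre-built transport precludes credential arguments on the client.
from google.auth import credentials as ga_credentials
from google.cloud.aiplatform_v1.services.dataset_service import (
    DatasetServiceClient,
    transports,
)

transport = transports.DatasetServiceGrpcTransport(
    credentials=ga_credentials.AnonymousCredentials(),
)
client = DatasetServiceClient(transport=transport)  # OK
# DatasetServiceClient(
#     credentials=ga_credentials.AnonymousCredentials(),
#     transport=transport,
# )  # -> ValueError: mutually exclusive arguments
```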
- transport = transports.DatasetServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.DatasetServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = DatasetServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.DatasetServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = DatasetServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.DatasetServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = DatasetServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.DatasetServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.DatasetServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.DatasetServiceGrpcTransport, - transports.DatasetServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.DatasetServiceGrpcTransport, - ) - -def test_dataset_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.DatasetServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_dataset_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1.services.dataset_service.transports.DatasetServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.DatasetServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
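The `DuplicateCredentialArgs` check above is observable without any mocking, since the base transport validates its arguments before touching the network. A self-contained sketch:

```python
# Sketch: credentials and credentials_file are mutually exclusive.
from google.api_core import exceptions as core_exceptions
from google.auth import credentials as ga_credentials
from google.cloud.aiplatform_v1.services.dataset_service import transports

try:
    transports.DatasetServiceTransport(
        credentials=ga_credentials.AnonymousCredentials(),
        credentials_file="credentials.json",
    )
except core_exceptions.DuplicateCredentialArgs:
    print("cannot pass both credentials and credentials_file")
```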
- methods = ( - 'create_dataset', - 'get_dataset', - 'update_dataset', - 'list_datasets', - 'delete_dataset', - 'import_data', - 'export_data', - 'list_data_items', - 'get_annotation_spec', - 'list_annotations', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -@requires_google_auth_gte_1_25_0 -def test_dataset_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.dataset_service.transports.DatasetServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.DatasetServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@requires_google_auth_lt_1_25_0 -def test_dataset_service_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.dataset_service.transports.DatasetServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.DatasetServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - ), - quota_project_id="octopus", - ) - - -def test_dataset_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.dataset_service.transports.DatasetServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.DatasetServiceTransport() - adc.assert_called_once() - - -@requires_google_auth_gte_1_25_0 -def test_dataset_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - DatasetServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@requires_google_auth_lt_1_25_0 -def test_dataset_service_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. 
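Outside of these mocked tests, omitting credentials means the client resolves Application Default Credentials with the cloud-platform scope, roughly equivalent to doing it explicitly:

```python
# Sketch: what the ADC fallback asserted above amounts to for a caller.
import google.auth

from google.cloud import aiplatform_v1

creds, project_id = google.auth.default(
    scopes=["https://www.googleapis.com/auth/cloud-platform"],
)
client = aiplatform_v1.DatasetServiceClient(credentials=creds)
```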
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - DatasetServiceClient() - adc.assert_called_once_with( - scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.DatasetServiceGrpcTransport, - transports.DatasetServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_gte_1_25_0 -def test_dataset_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.DatasetServiceGrpcTransport, - transports.DatasetServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_dataset_service_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with(scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.DatasetServiceGrpcTransport, grpc_helpers), - (transports.DatasetServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_dataset_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.DatasetServiceGrpcTransport, transports.DatasetServiceGrpcAsyncIOTransport]) -def test_dataset_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. 
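The mTLS tests that follow cover the `ssl_channel_credentials` path. In real use that object is built from client certificate material; the file names below are hypothetical:

```python
# Sketch: supplying explicit channel TLS credentials for mTLS.
import grpc

from google.auth import credentials as ga_credentials
from google.cloud.aiplatform_v1.services.dataset_service import transports

with open("client-cert.pem", "rb") as f:
    cert = f.read()
with open("client-key.pem", "rb") as f:
    key = f.read()

ssl_creds = grpc.ssl_channel_credentials(
    certificate_chain=cert,
    private_key=key,
)
transport = transports.DatasetServiceGrpcTransport(
    credentials=ga_credentials.AnonymousCredentials(),
    ssl_channel_credentials=ssl_creds,
)
```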
- with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_dataset_service_host_no_port(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_dataset_service_host_with_port(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_dataset_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.DatasetServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_dataset_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.DatasetServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. 
-@pytest.mark.parametrize("transport_class", [transports.DatasetServiceGrpcTransport, transports.DatasetServiceGrpcAsyncIOTransport]) -def test_dataset_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.DatasetServiceGrpcTransport, transports.DatasetServiceGrpcAsyncIOTransport]) -def test_dataset_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_dataset_service_grpc_lro_client(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. 
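The cached `operations_client` verified here is what backs the futures returned by the service's long-running methods. A sketch with a hypothetical dataset name, assuming ADC:

```python
# Sketch: LRO futures poll through transport.operations_client.
from google.cloud import aiplatform_v1

client = aiplatform_v1.DatasetServiceClient()
operation = client.delete_dataset(
    name="projects/my-project/locations/us-central1/datasets/123",
)
operation.result(timeout=300)  # blocks until the server-side operation completes
```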
- assert transport.operations_client is transport.operations_client - - -def test_dataset_service_grpc_lro_async_client(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_annotation_path(): - project = "squid" - location = "clam" - dataset = "whelk" - data_item = "octopus" - annotation = "oyster" - expected = "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}/annotations/{annotation}".format(project=project, location=location, dataset=dataset, data_item=data_item, annotation=annotation, ) - actual = DatasetServiceClient.annotation_path(project, location, dataset, data_item, annotation) - assert expected == actual - - -def test_parse_annotation_path(): - expected = { - "project": "nudibranch", - "location": "cuttlefish", - "dataset": "mussel", - "data_item": "winkle", - "annotation": "nautilus", - } - path = DatasetServiceClient.annotation_path(**expected) - - # Check that the path construction is reversible. - actual = DatasetServiceClient.parse_annotation_path(path) - assert expected == actual - -def test_annotation_spec_path(): - project = "scallop" - location = "abalone" - dataset = "squid" - annotation_spec = "clam" - expected = "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(project=project, location=location, dataset=dataset, annotation_spec=annotation_spec, ) - actual = DatasetServiceClient.annotation_spec_path(project, location, dataset, annotation_spec) - assert expected == actual - - -def test_parse_annotation_spec_path(): - expected = { - "project": "whelk", - "location": "octopus", - "dataset": "oyster", - "annotation_spec": "nudibranch", - } - path = DatasetServiceClient.annotation_spec_path(**expected) - - # Check that the path construction is reversible. - actual = DatasetServiceClient.parse_annotation_spec_path(path) - assert expected == actual - -def test_data_item_path(): - project = "cuttlefish" - location = "mussel" - dataset = "winkle" - data_item = "nautilus" - expected = "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}".format(project=project, location=location, dataset=dataset, data_item=data_item, ) - actual = DatasetServiceClient.data_item_path(project, location, dataset, data_item) - assert expected == actual - - -def test_parse_data_item_path(): - expected = { - "project": "scallop", - "location": "abalone", - "dataset": "squid", - "data_item": "clam", - } - path = DatasetServiceClient.data_item_path(**expected) - - # Check that the path construction is reversible. 
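The path-helper tests here check that construction and parsing round-trip. For a caller, the helpers turn resource IDs into canonical names and back; the IDs below are hypothetical:

```python
# Sketch: resource-name helpers round-trip components and full paths.
from google.cloud import aiplatform_v1

path = aiplatform_v1.DatasetServiceClient.annotation_spec_path(
    "my-project", "us-central1", "123", "456",
)
# "projects/my-project/locations/us-central1/datasets/123/annotationSpecs/456"

parts = aiplatform_v1.DatasetServiceClient.parse_annotation_spec_path(path)
# {"project": "my-project", "location": "us-central1",
#  "dataset": "123", "annotation_spec": "456"}
```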
- actual = DatasetServiceClient.parse_data_item_path(path) - assert expected == actual - -def test_dataset_path(): - project = "whelk" - location = "octopus" - dataset = "oyster" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - actual = DatasetServiceClient.dataset_path(project, location, dataset) - assert expected == actual - - -def test_parse_dataset_path(): - expected = { - "project": "nudibranch", - "location": "cuttlefish", - "dataset": "mussel", - } - path = DatasetServiceClient.dataset_path(**expected) - - # Check that the path construction is reversible. - actual = DatasetServiceClient.parse_dataset_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "winkle" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = DatasetServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "nautilus", - } - path = DatasetServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = DatasetServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "scallop" - expected = "folders/{folder}".format(folder=folder, ) - actual = DatasetServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "abalone", - } - path = DatasetServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = DatasetServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "squid" - expected = "organizations/{organization}".format(organization=organization, ) - actual = DatasetServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "clam", - } - path = DatasetServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = DatasetServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "whelk" - expected = "projects/{project}".format(project=project, ) - actual = DatasetServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "octopus", - } - path = DatasetServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = DatasetServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "oyster" - location = "nudibranch" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = DatasetServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "cuttlefish", - "location": "mussel", - } - path = DatasetServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
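The `common_*` helpers tested here cover the resource containers shared by every generated client; a typical use is building a `parent` for list calls. Project and location values below are hypothetical, and ADC is assumed:

```python
# Sketch: using a common path helper to form a list-call parent.
from google.cloud import aiplatform_v1

client = aiplatform_v1.DatasetServiceClient()
parent = client.common_location_path("my-project", "us-central1")
# "projects/my-project/locations/us-central1"

for dataset in client.list_datasets(parent=parent):
    print(dataset.display_name)
```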
-    actual = DatasetServiceClient.parse_common_location_path(path)
-    assert expected == actual
-
-
-def test_client_withDEFAULT_CLIENT_INFO():
-    client_info = gapic_v1.client_info.ClientInfo()
-
-    with mock.patch.object(transports.DatasetServiceTransport, '_prep_wrapped_messages') as prep:
-        client = DatasetServiceClient(
-            credentials=ga_credentials.AnonymousCredentials(),
-            client_info=client_info,
-        )
-        prep.assert_called_once_with(client_info)
-
-    with mock.patch.object(transports.DatasetServiceTransport, '_prep_wrapped_messages') as prep:
-        transport_class = DatasetServiceClient.get_transport_class()
-        transport = transport_class(
-            credentials=ga_credentials.AnonymousCredentials(),
-            client_info=client_info,
-        )
-        prep.assert_called_once_with(client_info)
diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py
deleted file mode 100644
index f8a6e964b4..0000000000
--- a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py
+++ /dev/null
@@ -1,2865 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os
-import mock
-import packaging.version
-
-import grpc
-from grpc.experimental import aio
-import math
-import pytest
-from proto.marshal.rules.dates import DurationRule, TimestampRule
-
-
-from google.api_core import client_options
-from google.api_core import exceptions as core_exceptions
-from google.api_core import future
-from google.api_core import gapic_v1
-from google.api_core import grpc_helpers
-from google.api_core import grpc_helpers_async
-from google.api_core import operation_async  # type: ignore
-from google.api_core import operations_v1
-from google.auth import credentials as ga_credentials
-from google.auth.exceptions import MutualTLSChannelError
-from google.cloud.aiplatform_v1.services.endpoint_service import EndpointServiceAsyncClient
-from google.cloud.aiplatform_v1.services.endpoint_service import EndpointServiceClient
-from google.cloud.aiplatform_v1.services.endpoint_service import pagers
-from google.cloud.aiplatform_v1.services.endpoint_service import transports
-from google.cloud.aiplatform_v1.services.endpoint_service.transports.base import _GOOGLE_AUTH_VERSION
-from google.cloud.aiplatform_v1.types import accelerator_type
-from google.cloud.aiplatform_v1.types import encryption_spec
-from google.cloud.aiplatform_v1.types import endpoint
-from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint
-from google.cloud.aiplatform_v1.types import endpoint_service
-from google.cloud.aiplatform_v1.types import machine_resources
-from google.cloud.aiplatform_v1.types import operation as gca_operation
-from google.longrunning import operations_pb2
-from google.oauth2 import service_account
-from google.protobuf import field_mask_pb2  # type: ignore
-from google.protobuf import timestamp_pb2  # type: ignore
-import google.auth
-
-
-# TODO(busunkim): Once google-auth >= 
1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). -requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert EndpointServiceClient._get_default_mtls_endpoint(None) is None - assert EndpointServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert EndpointServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert EndpointServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert EndpointServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert EndpointServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - EndpointServiceClient, - EndpointServiceAsyncClient, -]) -def test_endpoint_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("client_class", [ - EndpointServiceClient, - EndpointServiceAsyncClient, -]) -def test_endpoint_service_client_service_account_always_use_jwt(client_class): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.EndpointServiceGrpcTransport, "grpc"), - (transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_endpoint_service_client_service_account_always_use_jwt_true(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - -@pytest.mark.parametrize("client_class", [ - EndpointServiceClient, - 
EndpointServiceAsyncClient, -]) -def test_endpoint_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_endpoint_service_client_get_transport_class(): - transport = EndpointServiceClient.get_transport_class() - available_transports = [ - transports.EndpointServiceGrpcTransport, - ] - assert transport in available_transports - - transport = EndpointServiceClient.get_transport_class("grpc") - assert transport == transports.EndpointServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"), - (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(EndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceClient)) -@mock.patch.object(EndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceAsyncClient)) -def test_endpoint_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(EndpointServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(EndpointServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". 
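# For reference: GOOGLE_API_USE_MTLS_ENDPOINT accepts "never", "auto", and
# "always". "never" pins the client to DEFAULT_ENDPOINT, "always" pins it
# to DEFAULT_MTLS_ENDPOINT whether or not a client certificate is
# available, and "auto" switches to the mTLS endpoint only when a
# certificate is configured; any other value raises MutualTLSChannelError,
# as the cases below verify.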
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc", "true"), - (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc", "false"), - (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(EndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceClient)) -@mock.patch.object(EndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_endpoint_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. 
Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"), - (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_endpoint_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"), - (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_endpoint_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_endpoint_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = EndpointServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_create_endpoint(transport: str = 'grpc', request_type=endpoint_service.CreateEndpointRequest): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.CreateEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_endpoint_from_dict(): - test_create_endpoint(request_type=dict) - - -def test_create_endpoint_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_endpoint), - '__call__') as call: - client.create_endpoint() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.CreateEndpointRequest() - - -@pytest.mark.asyncio -async def test_create_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.CreateEndpointRequest): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. 
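# For reference: the async client awaits the transport call, so a bare
# Operation message would not be awaitable here. FakeUnaryUnaryCall (from
# google.api_core.grpc_helpers_async, imported above) wraps the designated
# response in an object that behaves like an in-flight gRPC call and
# resolves to the wrapped message when awaited.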
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.CreateEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_endpoint_async_from_dict(): - await test_create_endpoint_async(request_type=dict) - - -def test_create_endpoint_field_headers(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.CreateEndpointRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_endpoint), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_endpoint_field_headers_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.CreateEndpointRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_endpoint), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_endpoint_flattened(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_endpoint( - parent='parent_value', - endpoint=gca_endpoint.Endpoint(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].endpoint == gca_endpoint.Endpoint(name='name_value') - - -def test_create_endpoint_flattened_error(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_endpoint( - endpoint_service.CreateEndpointRequest(), - parent='parent_value', - endpoint=gca_endpoint.Endpoint(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_endpoint_flattened_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_endpoint( - parent='parent_value', - endpoint=gca_endpoint.Endpoint(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].endpoint == gca_endpoint.Endpoint(name='name_value') - - -@pytest.mark.asyncio -async def test_create_endpoint_flattened_error_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_endpoint( - endpoint_service.CreateEndpointRequest(), - parent='parent_value', - endpoint=gca_endpoint.Endpoint(name='name_value'), - ) - - -def test_get_endpoint(transport: str = 'grpc', request_type=endpoint_service.GetEndpointRequest): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = endpoint.Endpoint( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - ) - response = client.get_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.GetEndpointRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, endpoint.Endpoint) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - - -def test_get_endpoint_from_dict(): - test_get_endpoint(request_type=dict) - - -def test_get_endpoint_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_endpoint), - '__call__') as call: - client.get_endpoint() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.GetEndpointRequest() - - -@pytest.mark.asyncio -async def test_get_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.GetEndpointRequest): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - )) - response = await client.get_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.GetEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, endpoint.Endpoint) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_endpoint_async_from_dict(): - await test_get_endpoint_async(request_type=dict) - - -def test_get_endpoint_field_headers(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.GetEndpointRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_endpoint), - '__call__') as call: - call.return_value = endpoint.Endpoint() - client.get_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
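# For reference: GAPIC clients mirror URI path fields into the
# x-goog-request-params request metadata so that routing layers can
# dispatch the call without inspecting the request body; the header value
# checked below is derived from request.name.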
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_endpoint_field_headers_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.GetEndpointRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_endpoint), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint()) - await client.get_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_endpoint_flattened(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = endpoint.Endpoint() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_endpoint( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_get_endpoint_flattened_error(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_endpoint( - endpoint_service.GetEndpointRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_endpoint_flattened_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = endpoint.Endpoint() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_endpoint( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_get_endpoint_flattened_error_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_endpoint( - endpoint_service.GetEndpointRequest(), - name='name_value', - ) - - -def test_list_endpoints(transport: str = 'grpc', request_type=endpoint_service.ListEndpointsRequest): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_endpoints), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = endpoint_service.ListEndpointsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_endpoints(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.ListEndpointsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListEndpointsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_endpoints_from_dict(): - test_list_endpoints(request_type=dict) - - -def test_list_endpoints_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_endpoints), - '__call__') as call: - client.list_endpoints() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.ListEndpointsRequest() - - -@pytest.mark.asyncio -async def test_list_endpoints_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.ListEndpointsRequest): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_endpoints), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(endpoint_service.ListEndpointsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_endpoints(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.ListEndpointsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListEndpointsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_endpoints_async_from_dict(): - await test_list_endpoints_async(request_type=dict) - - -def test_list_endpoints_field_headers(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. 
Set these to a non-empty value. - request = endpoint_service.ListEndpointsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_endpoints), - '__call__') as call: - call.return_value = endpoint_service.ListEndpointsResponse() - client.list_endpoints(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_endpoints_field_headers_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.ListEndpointsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_endpoints), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint_service.ListEndpointsResponse()) - await client.list_endpoints(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_endpoints_flattened(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_endpoints), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = endpoint_service.ListEndpointsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_endpoints( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -def test_list_endpoints_flattened_error(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_endpoints( - endpoint_service.ListEndpointsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_endpoints_flattened_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_endpoints), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = endpoint_service.ListEndpointsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint_service.ListEndpointsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
-    response = await client.list_endpoints(
-        parent='parent_value',
-    )
-
-    # Establish that the underlying call was made with the expected
-    # request object values.
-    assert len(call.mock_calls)
-    _, args, _ = call.mock_calls[0]
-    assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_endpoints_flattened_error_async():
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.list_endpoints(
-            endpoint_service.ListEndpointsRequest(),
-            parent='parent_value',
-        )
-
-
-def test_list_endpoints_pager():
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_endpoints),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[
-                    endpoint.Endpoint(),
-                    endpoint.Endpoint(),
-                    endpoint.Endpoint(),
-                ],
-                next_page_token='abc',
-            ),
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[],
-                next_page_token='def',
-            ),
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[
-                    endpoint.Endpoint(),
-                ],
-                next_page_token='ghi',
-            ),
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[
-                    endpoint.Endpoint(),
-                    endpoint.Endpoint(),
-                ],
-            ),
-            RuntimeError,
-        )
-
-        metadata = ()
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ('parent', ''),
-            )),
-        )
-        pager = client.list_endpoints(request={})
-
-        assert pager._metadata == metadata
-
-        results = [i for i in pager]
-        assert len(results) == 6
-        assert all(isinstance(i, endpoint.Endpoint)
-                   for i in results)
-
-def test_list_endpoints_pages():
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_endpoints),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[
-                    endpoint.Endpoint(),
-                    endpoint.Endpoint(),
-                    endpoint.Endpoint(),
-                ],
-                next_page_token='abc',
-            ),
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[],
-                next_page_token='def',
-            ),
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[
-                    endpoint.Endpoint(),
-                ],
-                next_page_token='ghi',
-            ),
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[
-                    endpoint.Endpoint(),
-                    endpoint.Endpoint(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = list(client.list_endpoints(request={}).pages)
-        for page_, token in zip(pages, ['abc','def','ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_endpoints_async_pager():
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_endpoints),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
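# For reference: assigning an iterable to call.side_effect makes the mocked
# stub return one ListEndpointsResponse per invocation, so each page the
# pager requests consumes one canned response; an empty next_page_token
# marks the last page, and the trailing RuntimeError fails the test if the
# pager were to request a page beyond it.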
-        call.side_effect = (
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[
-                    endpoint.Endpoint(),
-                    endpoint.Endpoint(),
-                    endpoint.Endpoint(),
-                ],
-                next_page_token='abc',
-            ),
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[],
-                next_page_token='def',
-            ),
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[
-                    endpoint.Endpoint(),
-                ],
-                next_page_token='ghi',
-            ),
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[
-                    endpoint.Endpoint(),
-                    endpoint.Endpoint(),
-                ],
-            ),
-            RuntimeError,
-        )
-        async_pager = await client.list_endpoints(request={},)
-        assert async_pager.next_page_token == 'abc'
-        responses = []
-        async for response in async_pager:
-            responses.append(response)
-
-        assert len(responses) == 6
-        assert all(isinstance(i, endpoint.Endpoint)
-                   for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_endpoints_async_pages():
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_endpoints),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[
-                    endpoint.Endpoint(),
-                    endpoint.Endpoint(),
-                    endpoint.Endpoint(),
-                ],
-                next_page_token='abc',
-            ),
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[],
-                next_page_token='def',
-            ),
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[
-                    endpoint.Endpoint(),
-                ],
-                next_page_token='ghi',
-            ),
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[
-                    endpoint.Endpoint(),
-                    endpoint.Endpoint(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = []
-        async for page_ in (await client.list_endpoints(request={})).pages:
-            pages.append(page_)
-        for page_, token in zip(pages, ['abc','def','ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-def test_update_endpoint(transport: str = 'grpc', request_type=endpoint_service.UpdateEndpointRequest):
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_endpoint),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = gca_endpoint.Endpoint(
-            name='name_value',
-            display_name='display_name_value',
-            description='description_value',
-            etag='etag_value',
-        )
-        response = client.update_endpoint(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == endpoint_service.UpdateEndpointRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, gca_endpoint.Endpoint)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.description == 'description_value'
-    assert response.etag == 'etag_value'
-
-
-def test_update_endpoint_from_dict():
-    test_update_endpoint(request_type=dict)
-
-
-def test_update_endpoint_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
- client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_endpoint), - '__call__') as call: - client.update_endpoint() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.UpdateEndpointRequest() - - -@pytest.mark.asyncio -async def test_update_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.UpdateEndpointRequest): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_endpoint.Endpoint( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - )) - response = await client.update_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.UpdateEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_endpoint.Endpoint) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_update_endpoint_async_from_dict(): - await test_update_endpoint_async(request_type=dict) - - -def test_update_endpoint_field_headers(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.UpdateEndpointRequest() - - request.endpoint.name = 'endpoint.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_endpoint), - '__call__') as call: - call.return_value = gca_endpoint.Endpoint() - client.update_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint.name=endpoint.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_endpoint_field_headers_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.UpdateEndpointRequest() - - request.endpoint.name = 'endpoint.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_endpoint), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_endpoint.Endpoint()) - await client.update_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint.name=endpoint.name/value', - ) in kw['metadata'] - - -def test_update_endpoint_flattened(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_endpoint.Endpoint() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_endpoint( - endpoint=gca_endpoint.Endpoint(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].endpoint == gca_endpoint.Endpoint(name='name_value') - assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) - - -def test_update_endpoint_flattened_error(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_endpoint( - endpoint_service.UpdateEndpointRequest(), - endpoint=gca_endpoint.Endpoint(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_endpoint_flattened_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_endpoint.Endpoint() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_endpoint.Endpoint()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_endpoint( - endpoint=gca_endpoint.Endpoint(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].endpoint == gca_endpoint.Endpoint(name='name_value') - assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) - - -@pytest.mark.asyncio -async def test_update_endpoint_flattened_error_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.update_endpoint( - endpoint_service.UpdateEndpointRequest(), - endpoint=gca_endpoint.Endpoint(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_endpoint(transport: str = 'grpc', request_type=endpoint_service.DeleteEndpointRequest): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.DeleteEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_endpoint_from_dict(): - test_delete_endpoint(request_type=dict) - - -def test_delete_endpoint_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_endpoint), - '__call__') as call: - client.delete_endpoint() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.DeleteEndpointRequest() - - -@pytest.mark.asyncio -async def test_delete_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.DeleteEndpointRequest): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.DeleteEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_endpoint_async_from_dict(): - await test_delete_endpoint_async(request_type=dict) - - -def test_delete_endpoint_field_headers(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
-    request = endpoint_service.DeleteEndpointRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_endpoint),
-            '__call__') as call:
-        call.return_value = operations_pb2.Operation(name='operations/op')
-        client.delete_endpoint(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_delete_endpoint_field_headers_async():
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = endpoint_service.DeleteEndpointRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_endpoint),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
-        await client.delete_endpoint(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_delete_endpoint_flattened():
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_endpoint),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/op')
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.delete_endpoint(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_delete_endpoint_flattened_error():
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.delete_endpoint(
-            endpoint_service.DeleteEndpointRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_delete_endpoint_flattened_async():
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_endpoint),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
- response = await client.delete_endpoint( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_delete_endpoint_flattened_error_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_endpoint( - endpoint_service.DeleteEndpointRequest(), - name='name_value', - ) - - -def test_deploy_model(transport: str = 'grpc', request_type=endpoint_service.DeployModelRequest): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.deploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.DeployModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_deploy_model_from_dict(): - test_deploy_model(request_type=dict) - - -def test_deploy_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - client.deploy_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.DeployModelRequest() - - -@pytest.mark.asyncio -async def test_deploy_model_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.DeployModelRequest): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.deploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.DeployModelRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_deploy_model_async_from_dict(): - await test_deploy_model_async(request_type=dict) - - -def test_deploy_model_field_headers(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.DeployModelRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.deploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_deploy_model_field_headers_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.DeployModelRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.deploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -def test_deploy_model_flattened(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.deploy_model( - endpoint='endpoint_value', - deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))), - traffic_split={'key_value': 541}, - ) - - # Establish that the underlying call was made with the expected - # request object values. 
-    assert len(call.mock_calls) == 1
-    _, args, _ = call.mock_calls[0]
-    assert args[0].endpoint == 'endpoint_value'
-    assert args[0].deployed_model == gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value')))
-    assert args[0].traffic_split == {'key_value': 541}
-
-
-def test_deploy_model_flattened_error():
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.deploy_model(
-            endpoint_service.DeployModelRequest(),
-            endpoint='endpoint_value',
-            deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))),
-            traffic_split={'key_value': 541},
-        )
-
-
-@pytest.mark.asyncio
-async def test_deploy_model_flattened_async():
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.deploy_model),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.deploy_model(
-            endpoint='endpoint_value',
-            deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))),
-            traffic_split={'key_value': 541},
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].endpoint == 'endpoint_value'
-        assert args[0].deployed_model == gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value')))
-        assert args[0].traffic_split == {'key_value': 541}
-
-
-@pytest.mark.asyncio
-async def test_deploy_model_flattened_error_async():
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.deploy_model(
-            endpoint_service.DeployModelRequest(),
-            endpoint='endpoint_value',
-            deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))),
-            traffic_split={'key_value': 541},
-        )
-
-
-def test_undeploy_model(transport: str = 'grpc', request_type=endpoint_service.UndeployModelRequest):
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.undeploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.UndeployModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_undeploy_model_from_dict(): - test_undeploy_model(request_type=dict) - - -def test_undeploy_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - client.undeploy_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.UndeployModelRequest() - - -@pytest.mark.asyncio -async def test_undeploy_model_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.UndeployModelRequest): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.undeploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.UndeployModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_undeploy_model_async_from_dict(): - await test_undeploy_model_async(request_type=dict) - - -def test_undeploy_model_field_headers(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.UndeployModelRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.undeploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'endpoint=endpoint/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_undeploy_model_field_headers_async():
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = endpoint_service.UndeployModelRequest()
-
-    request.endpoint = 'endpoint/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.undeploy_model),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
-        await client.undeploy_model(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'endpoint=endpoint/value',
-    ) in kw['metadata']
-
-
-def test_undeploy_model_flattened():
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.undeploy_model),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/op')
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.undeploy_model(
-            endpoint='endpoint_value',
-            deployed_model_id='deployed_model_id_value',
-            traffic_split={'key_value': 541},
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].endpoint == 'endpoint_value'
-        assert args[0].deployed_model_id == 'deployed_model_id_value'
-        assert args[0].traffic_split == {'key_value': 541}
-
-
-def test_undeploy_model_flattened_error():
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.undeploy_model(
-            endpoint_service.UndeployModelRequest(),
-            endpoint='endpoint_value',
-            deployed_model_id='deployed_model_id_value',
-            traffic_split={'key_value': 541},
-        )
-
-
-@pytest.mark.asyncio
-async def test_undeploy_model_flattened_async():
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.undeploy_model),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.undeploy_model(
-            endpoint='endpoint_value',
-            deployed_model_id='deployed_model_id_value',
-            traffic_split={'key_value': 541},
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].endpoint == 'endpoint_value' - assert args[0].deployed_model_id == 'deployed_model_id_value' - assert args[0].traffic_split == {'key_value': 541} - - -@pytest.mark.asyncio -async def test_undeploy_model_flattened_error_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.undeploy_model( - endpoint_service.UndeployModelRequest(), - endpoint='endpoint_value', - deployed_model_id='deployed_model_id_value', - traffic_split={'key_value': 541}, - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.EndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.EndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = EndpointServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.EndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = EndpointServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.EndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = EndpointServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.EndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.EndpointServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.EndpointServiceGrpcTransport, - transports.EndpointServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. 
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-    assert isinstance(
-        client.transport,
-        transports.EndpointServiceGrpcTransport,
-    )
-
-def test_endpoint_service_base_transport_error():
-    # Passing both a credentials object and credentials_file should raise an error
-    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
-        transport = transports.EndpointServiceTransport(
-            credentials=ga_credentials.AnonymousCredentials(),
-            credentials_file="credentials.json"
-        )
-
-
-def test_endpoint_service_base_transport():
-    # Instantiate the base transport.
-    with mock.patch('google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceTransport.__init__') as Transport:
-        Transport.return_value = None
-        transport = transports.EndpointServiceTransport(
-            credentials=ga_credentials.AnonymousCredentials(),
-        )
-
-    # Every method on the transport should just blindly
-    # raise NotImplementedError.
-    methods = (
-        'create_endpoint',
-        'get_endpoint',
-        'list_endpoints',
-        'update_endpoint',
-        'delete_endpoint',
-        'deploy_model',
-        'undeploy_model',
-    )
-    for method in methods:
-        with pytest.raises(NotImplementedError):
-            getattr(transport, method)(request=object())
-
-    # Additionally, the LRO client (a property) should
-    # also raise NotImplementedError
-    with pytest.raises(NotImplementedError):
-        transport.operations_client
-
-
-@requires_google_auth_gte_1_25_0
-def test_endpoint_service_base_transport_with_credentials_file():
-    # Instantiate the base transport with a credentials file
-    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages') as Transport:
-        Transport.return_value = None
-        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport = transports.EndpointServiceTransport(
-            credentials_file="credentials.json",
-            quota_project_id="octopus",
-        )
-        load_creds.assert_called_once_with("credentials.json",
-            scopes=None,
-            default_scopes=(
-                'https://www.googleapis.com/auth/cloud-platform',
-            ),
-            quota_project_id="octopus",
-        )
-
-
-@requires_google_auth_lt_1_25_0
-def test_endpoint_service_base_transport_with_credentials_file_old_google_auth():
-    # Instantiate the base transport with a credentials file
-    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages') as Transport:
-        Transport.return_value = None
-        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport = transports.EndpointServiceTransport(
-            credentials_file="credentials.json",
-            quota_project_id="octopus",
-        )
-        load_creds.assert_called_once_with("credentials.json",
-            scopes=(
-                'https://www.googleapis.com/auth/cloud-platform',
-            ),
-            quota_project_id="octopus",
-        )
-
-
-def test_endpoint_service_base_transport_with_adc():
-    # Test the default credentials are used if credentials and credentials_file are None.
-    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages') as Transport:
-        Transport.return_value = None
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport = transports.EndpointServiceTransport()
-        adc.assert_called_once()
-
-
-@requires_google_auth_gte_1_25_0
-def test_endpoint_service_auth_adc():
-    # If no credentials are provided, we should use ADC credentials.
-    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        EndpointServiceClient()
-        adc.assert_called_once_with(
-            scopes=None,
-            default_scopes=(
-                'https://www.googleapis.com/auth/cloud-platform',
-            ),
-            quota_project_id=None,
-        )
-
-
-@requires_google_auth_lt_1_25_0
-def test_endpoint_service_auth_adc_old_google_auth():
-    # If no credentials are provided, we should use ADC credentials.
-    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        EndpointServiceClient()
-        adc.assert_called_once_with(
-            scopes=('https://www.googleapis.com/auth/cloud-platform',),
-            quota_project_id=None,
-        )
-
-
-@pytest.mark.parametrize(
-    "transport_class",
-    [
-        transports.EndpointServiceGrpcTransport,
-        transports.EndpointServiceGrpcAsyncIOTransport,
-    ],
-)
-@requires_google_auth_gte_1_25_0
-def test_endpoint_service_transport_auth_adc(transport_class):
-    # If credentials and host are not provided, the transport class should use
-    # ADC credentials.
-    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport_class(quota_project_id="octopus", scopes=["1", "2"])
-        adc.assert_called_once_with(
-            scopes=["1", "2"],
-            default_scopes=('https://www.googleapis.com/auth/cloud-platform',),
-            quota_project_id="octopus",
-        )
-
-
-@pytest.mark.parametrize(
-    "transport_class",
-    [
-        transports.EndpointServiceGrpcTransport,
-        transports.EndpointServiceGrpcAsyncIOTransport,
-    ],
-)
-@requires_google_auth_lt_1_25_0
-def test_endpoint_service_transport_auth_adc_old_google_auth(transport_class):
-    # If credentials and host are not provided, the transport class should use
-    # ADC credentials.
-    with mock.patch.object(google.auth, "default", autospec=True) as adc:
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport_class(quota_project_id="octopus")
-        adc.assert_called_once_with(
-            scopes=('https://www.googleapis.com/auth/cloud-platform',),
-            quota_project_id="octopus",
-        )
-
-
-@pytest.mark.parametrize(
-    "transport_class,grpc_helpers",
-    [
-        (transports.EndpointServiceGrpcTransport, grpc_helpers),
-        (transports.EndpointServiceGrpcAsyncIOTransport, grpc_helpers_async)
-    ],
-)
-def test_endpoint_service_transport_create_channel(transport_class, grpc_helpers):
-    # If credentials and host are not provided, the transport class should use
-    # ADC credentials.
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport]) -def test_endpoint_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_endpoint_service_host_no_port(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_endpoint_service_host_with_port(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_endpoint_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.EndpointServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_endpoint_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
-    transport = transports.EndpointServiceGrpcAsyncIOTransport(
-        host="squid.clam.whelk",
-        channel=channel,
-    )
-    assert transport.grpc_channel == channel
-    assert transport._host == "squid.clam.whelk:443"
-    assert transport._ssl_channel_credentials is None
-
-
-# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
-# removed from grpc/grpc_asyncio transport constructor.
-@pytest.mark.parametrize("transport_class", [transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport])
-def test_endpoint_service_transport_channel_mtls_with_client_cert_source(
-    transport_class
-):
-    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
-        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
-            mock_ssl_cred = mock.Mock()
-            grpc_ssl_channel_cred.return_value = mock_ssl_cred
-
-            mock_grpc_channel = mock.Mock()
-            grpc_create_channel.return_value = mock_grpc_channel
-
-            cred = ga_credentials.AnonymousCredentials()
-            with pytest.warns(DeprecationWarning):
-                with mock.patch.object(google.auth, 'default') as adc:
-                    adc.return_value = (cred, None)
-                    transport = transport_class(
-                        host="squid.clam.whelk",
-                        api_mtls_endpoint="mtls.squid.clam.whelk",
-                        client_cert_source=client_cert_source_callback,
-                    )
-                    adc.assert_called_once()
-
-            grpc_ssl_channel_cred.assert_called_once_with(
-                certificate_chain=b"cert bytes", private_key=b"key bytes"
-            )
-            grpc_create_channel.assert_called_once_with(
-                "mtls.squid.clam.whelk:443",
-                credentials=cred,
-                credentials_file=None,
-                scopes=None,
-                ssl_credentials=mock_ssl_cred,
-                quota_project_id=None,
-                options=[
-                    ("grpc.max_send_message_length", -1),
-                    ("grpc.max_receive_message_length", -1),
-                ],
-            )
-            assert transport.grpc_channel == mock_grpc_channel
-            assert transport._ssl_channel_credentials == mock_ssl_cred
-
-
-# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
-# removed from grpc/grpc_asyncio transport constructor.
-@pytest.mark.parametrize("transport_class", [transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport])
-def test_endpoint_service_transport_channel_mtls_with_adc(
-    transport_class
-):
-    mock_ssl_cred = mock.Mock()
-    with mock.patch.multiple(
-        "google.auth.transport.grpc.SslCredentials",
-        __init__=mock.Mock(return_value=None),
-        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
-    ):
-        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
-            mock_grpc_channel = mock.Mock()
-            grpc_create_channel.return_value = mock_grpc_channel
-            mock_cred = mock.Mock()
-
-            with pytest.warns(DeprecationWarning):
-                transport = transport_class(
-                    host="squid.clam.whelk",
-                    credentials=mock_cred,
-                    api_mtls_endpoint="mtls.squid.clam.whelk",
-                    client_cert_source=None,
-                )
-
-            grpc_create_channel.assert_called_once_with(
-                "mtls.squid.clam.whelk:443",
-                credentials=mock_cred,
-                credentials_file=None,
-                scopes=None,
-                ssl_credentials=mock_ssl_cred,
-                quota_project_id=None,
-                options=[
-                    ("grpc.max_send_message_length", -1),
-                    ("grpc.max_receive_message_length", -1),
-                ],
-            )
-            assert transport.grpc_channel == mock_grpc_channel
-
-
-def test_endpoint_service_grpc_lro_client():
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-    transport = client.transport
-
-    # Ensure that we have an api-core operations client.
-    assert isinstance(
-        transport.operations_client,
-        operations_v1.OperationsClient,
-    )
-
-    # Ensure that subsequent calls to the property send the exact same object.
-    assert transport.operations_client is transport.operations_client
-
-
-def test_endpoint_service_grpc_lro_async_client():
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc_asyncio',
-    )
-    transport = client.transport
-
-    # Ensure that we have an api-core operations client.
-    assert isinstance(
-        transport.operations_client,
-        operations_v1.OperationsAsyncClient,
-    )
-
-    # Ensure that subsequent calls to the property send the exact same object.
-    assert transport.operations_client is transport.operations_client
-
-
-def test_endpoint_path():
-    project = "squid"
-    location = "clam"
-    endpoint = "whelk"
-    expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, )
-    actual = EndpointServiceClient.endpoint_path(project, location, endpoint)
-    assert expected == actual
-
-
-def test_parse_endpoint_path():
-    expected = {
-        "project": "octopus",
-        "location": "oyster",
-        "endpoint": "nudibranch",
-    }
-    path = EndpointServiceClient.endpoint_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = EndpointServiceClient.parse_endpoint_path(path)
-    assert expected == actual
-
-def test_model_path():
-    project = "cuttlefish"
-    location = "mussel"
-    model = "winkle"
-    expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, )
-    actual = EndpointServiceClient.model_path(project, location, model)
-    assert expected == actual
-
-
-def test_parse_model_path():
-    expected = {
-        "project": "nautilus",
-        "location": "scallop",
-        "model": "abalone",
-    }
-    path = EndpointServiceClient.model_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = EndpointServiceClient.parse_model_path(path)
-    assert expected == actual
-
-def test_common_billing_account_path():
-    billing_account = "squid"
-    expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
-    actual = EndpointServiceClient.common_billing_account_path(billing_account)
-    assert expected == actual
-
-
-def test_parse_common_billing_account_path():
-    expected = {
-        "billing_account": "clam",
-    }
-    path = EndpointServiceClient.common_billing_account_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = EndpointServiceClient.parse_common_billing_account_path(path)
-    assert expected == actual
-
-def test_common_folder_path():
-    folder = "whelk"
-    expected = "folders/{folder}".format(folder=folder, )
-    actual = EndpointServiceClient.common_folder_path(folder)
-    assert expected == actual
-
-
-def test_parse_common_folder_path():
-    expected = {
-        "folder": "octopus",
-    }
-    path = EndpointServiceClient.common_folder_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = EndpointServiceClient.parse_common_folder_path(path)
-    assert expected == actual
-
-def test_common_organization_path():
-    organization = "oyster"
-    expected = "organizations/{organization}".format(organization=organization, )
-    actual = EndpointServiceClient.common_organization_path(organization)
-    assert expected == actual
-
-
-def test_parse_common_organization_path():
-    expected = {
-        "organization": "nudibranch",
-    }
-    path = EndpointServiceClient.common_organization_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = EndpointServiceClient.parse_common_organization_path(path)
-    assert expected == actual
-
-def test_common_project_path():
-    project = "cuttlefish"
-    expected = "projects/{project}".format(project=project, )
-    actual = EndpointServiceClient.common_project_path(project)
-    assert expected == actual
-
-
-def test_parse_common_project_path():
-    expected = {
-        "project": "mussel",
-    }
-    path = EndpointServiceClient.common_project_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = EndpointServiceClient.parse_common_project_path(path)
-    assert expected == actual
-
-def test_common_location_path():
-    project = "winkle"
-    location = "nautilus"
-    expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
-    actual = EndpointServiceClient.common_location_path(project, location)
-    assert expected == actual
-
-
-def test_parse_common_location_path():
-    expected = {
-        "project": "scallop",
-        "location": "abalone",
-    }
-    path = EndpointServiceClient.common_location_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = EndpointServiceClient.parse_common_location_path(path)
-    assert expected == actual
-
-
-def test_client_with_default_client_info():
-    client_info = gapic_v1.client_info.ClientInfo()
-
-    with mock.patch.object(transports.EndpointServiceTransport, '_prep_wrapped_messages') as prep:
-        client = EndpointServiceClient(
-            credentials=ga_credentials.AnonymousCredentials(),
-            client_info=client_info,
-        )
-        prep.assert_called_once_with(client_info)
-
-    with mock.patch.object(transports.EndpointServiceTransport, '_prep_wrapped_messages') as prep:
-        transport_class = EndpointServiceClient.get_transport_class()
-        transport = transport_class(
-            credentials=ga_credentials.AnonymousCredentials(),
-            client_info=client_info,
-        )
-        prep.assert_called_once_with(client_info)
diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_job_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_job_service.py
deleted file mode 100644
index 5f08ed6740..0000000000
--- a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_job_service.py
+++ /dev/null
@@ -1,6666 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1.services.job_service import JobServiceAsyncClient -from google.cloud.aiplatform_v1.services.job_service import JobServiceClient -from google.cloud.aiplatform_v1.services.job_service import pagers -from google.cloud.aiplatform_v1.services.job_service import transports -from google.cloud.aiplatform_v1.services.job_service.transports.base import _GOOGLE_AUTH_VERSION -from google.cloud.aiplatform_v1.types import accelerator_type -from google.cloud.aiplatform_v1.types import batch_prediction_job -from google.cloud.aiplatform_v1.types import batch_prediction_job as gca_batch_prediction_job -from google.cloud.aiplatform_v1.types import completion_stats -from google.cloud.aiplatform_v1.types import custom_job -from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job -from google.cloud.aiplatform_v1.types import data_labeling_job -from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import env_var -from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job -from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job -from google.cloud.aiplatform_v1.types import io -from google.cloud.aiplatform_v1.types import job_service -from google.cloud.aiplatform_v1.types import job_state -from google.cloud.aiplatform_v1.types import machine_resources -from google.cloud.aiplatform_v1.types import manual_batch_tuning_parameters -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.cloud.aiplatform_v1.types import study -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import any_pb2 # type: ignore -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from google.type import money_pb2 # type: ignore -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). 
-requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert JobServiceClient._get_default_mtls_endpoint(None) is None - assert JobServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert JobServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert JobServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert JobServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert JobServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - JobServiceClient, - JobServiceAsyncClient, -]) -def test_job_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("client_class", [ - JobServiceClient, - JobServiceAsyncClient, -]) -def test_job_service_client_service_account_always_use_jwt(client_class): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.JobServiceGrpcTransport, "grpc"), - (transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_job_service_client_service_account_always_use_jwt_true(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - -@pytest.mark.parametrize("client_class", [ - JobServiceClient, - JobServiceAsyncClient, -]) -def test_job_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = 
client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_job_service_client_get_transport_class(): - transport = JobServiceClient.get_transport_class() - available_transports = [ - transports.JobServiceGrpcTransport, - ] - assert transport in available_transports - - transport = JobServiceClient.get_transport_class("grpc") - assert transport == transports.JobServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (JobServiceClient, transports.JobServiceGrpcTransport, "grpc"), - (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient)) -@mock.patch.object(JobServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceAsyncClient)) -def test_job_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(JobServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(JobServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", "true"), - (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", "false"), - (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient)) -@mock.patch.object(JobServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_job_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (JobServiceClient, transports.JobServiceGrpcTransport, "grpc"), - (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_job_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (JobServiceClient, transports.JobServiceGrpcTransport, "grpc"), - (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_job_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
-
-@pytest.mark.parametrize("client_class,transport_class,transport_name", [
-    (JobServiceClient, transports.JobServiceGrpcTransport, "grpc"),
-    (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"),
-])
-def test_job_service_client_client_options_credentials_file(client_class, transport_class, transport_name):
-    # Check the case credentials file is provided.
-    options = client_options.ClientOptions(
-        credentials_file="credentials.json"
-    )
-    with mock.patch.object(transport_class, '__init__') as patched:
-        patched.return_value = None
-        client = client_class(client_options=options)
-        patched.assert_called_once_with(
-            credentials=None,
-            credentials_file="credentials.json",
-            host=client.DEFAULT_ENDPOINT,
-            scopes=None,
-            client_cert_source_for_mtls=None,
-            quota_project_id=None,
-            client_info=transports.base.DEFAULT_CLIENT_INFO,
-        )
-
-
-def test_job_service_client_client_options_from_dict():
-    with mock.patch('google.cloud.aiplatform_v1.services.job_service.transports.JobServiceGrpcTransport.__init__') as grpc_transport:
-        grpc_transport.return_value = None
-        client = JobServiceClient(
-            client_options={'api_endpoint': 'squid.clam.whelk'}
-        )
-        grpc_transport.assert_called_once_with(
-            credentials=None,
-            credentials_file=None,
-            host="squid.clam.whelk",
-            scopes=None,
-            client_cert_source_for_mtls=None,
-            quota_project_id=None,
-            client_info=transports.base.DEFAULT_CLIENT_INFO,
-        )
-
-
-def test_create_custom_job(transport: str = 'grpc', request_type=job_service.CreateCustomJobRequest):
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_custom_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = gca_custom_job.CustomJob(
-            name='name_value',
-            display_name='display_name_value',
-            state=job_state.JobState.JOB_STATE_QUEUED,
-        )
-        response = client.create_custom_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.CreateCustomJobRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, gca_custom_job.CustomJob)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.state == job_state.JobState.JOB_STATE_QUEUED
-
-
-def test_create_custom_job_from_dict():
-    test_create_custom_job(request_type=dict)
-
-
-def test_create_custom_job_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_custom_job),
-            '__call__') as call:
-        client.create_custom_job()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.CreateCustomJobRequest()
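The pattern these tests pin down, constructing a request object and passing it to the client method, reads like this in application code. A hedged sketch: the project/location values are placeholders, and a real CustomJob needs more fields (notably a worker pool spec) than shown here.

from google.cloud import aiplatform_v1

client = aiplatform_v1.JobServiceClient()  # assumes ADC for credentials

# Build the request explicitly; every field is optional in proto3, but the
# service itself requires parent and custom_job.
request = aiplatform_v1.CreateCustomJobRequest(
    parent="projects/my-project/locations/us-central1",       # placeholder
    custom_job=aiplatform_v1.CustomJob(display_name="my-job"),  # incomplete, for illustration
)
response = client.create_custom_job(request=request)
print(response.name)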
-@pytest.mark.asyncio
-async def test_create_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateCustomJobRequest):
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_custom_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_custom_job.CustomJob(
-            name='name_value',
-            display_name='display_name_value',
-            state=job_state.JobState.JOB_STATE_QUEUED,
-        ))
-        response = await client.create_custom_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.CreateCustomJobRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, gca_custom_job.CustomJob)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.state == job_state.JobState.JOB_STATE_QUEUED
-
-
-@pytest.mark.asyncio
-async def test_create_custom_job_async_from_dict():
-    await test_create_custom_job_async(request_type=dict)
-
-
-def test_create_custom_job_field_headers():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = job_service.CreateCustomJobRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_custom_job),
-            '__call__') as call:
-        call.return_value = gca_custom_job.CustomJob()
-        client.create_custom_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_create_custom_job_field_headers_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = job_service.CreateCustomJobRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_custom_job),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_custom_job.CustomJob())
-        await client.create_custom_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
- client.create_custom_job( - parent='parent_value', - custom_job=gca_custom_job.CustomJob(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].custom_job == gca_custom_job.CustomJob(name='name_value') - - -def test_create_custom_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_custom_job( - job_service.CreateCustomJobRequest(), - parent='parent_value', - custom_job=gca_custom_job.CustomJob(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_custom_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_custom_job.CustomJob() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_custom_job.CustomJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_custom_job( - parent='parent_value', - custom_job=gca_custom_job.CustomJob(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].custom_job == gca_custom_job.CustomJob(name='name_value') - - -@pytest.mark.asyncio -async def test_create_custom_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_custom_job( - job_service.CreateCustomJobRequest(), - parent='parent_value', - custom_job=gca_custom_job.CustomJob(name='name_value'), - ) - - -def test_get_custom_job(transport: str = 'grpc', request_type=job_service.GetCustomJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = custom_job.CustomJob( - name='name_value', - display_name='display_name_value', - state=job_state.JobState.JOB_STATE_QUEUED, - ) - response = client.get_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetCustomJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, custom_job.CustomJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - - -def test_get_custom_job_from_dict(): - test_get_custom_job(request_type=dict) - - -def test_get_custom_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_custom_job), - '__call__') as call: - client.get_custom_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetCustomJobRequest() - - -@pytest.mark.asyncio -async def test_get_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetCustomJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(custom_job.CustomJob( - name='name_value', - display_name='display_name_value', - state=job_state.JobState.JOB_STATE_QUEUED, - )) - response = await client.get_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetCustomJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, custom_job.CustomJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - - -@pytest.mark.asyncio -async def test_get_custom_job_async_from_dict(): - await test_get_custom_job_async(request_type=dict) - - -def test_get_custom_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetCustomJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_custom_job), - '__call__') as call: - call.return_value = custom_job.CustomJob() - client.get_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_custom_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = job_service.GetCustomJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_custom_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(custom_job.CustomJob()) - await client.get_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_custom_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = custom_job.CustomJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_custom_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_get_custom_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_custom_job( - job_service.GetCustomJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_custom_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = custom_job.CustomJob() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(custom_job.CustomJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_custom_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_get_custom_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_custom_job( - job_service.GetCustomJobRequest(), - name='name_value', - ) - - -def test_list_custom_jobs(transport: str = 'grpc', request_type=job_service.ListCustomJobsRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListCustomJobsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_custom_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListCustomJobsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListCustomJobsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_custom_jobs_from_dict(): - test_list_custom_jobs(request_type=dict) - - -def test_list_custom_jobs_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__') as call: - client.list_custom_jobs() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListCustomJobsRequest() - - -@pytest.mark.asyncio -async def test_list_custom_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListCustomJobsRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListCustomJobsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_custom_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListCustomJobsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListCustomJobsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_custom_jobs_async_from_dict(): - await test_list_custom_jobs_async(request_type=dict) - - -def test_list_custom_jobs_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.ListCustomJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__') as call: - call.return_value = job_service.ListCustomJobsResponse() - client.list_custom_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_custom_jobs_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.ListCustomJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListCustomJobsResponse()) - await client.list_custom_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_custom_jobs_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListCustomJobsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_custom_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -def test_list_custom_jobs_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_custom_jobs( - job_service.ListCustomJobsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_custom_jobs_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListCustomJobsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListCustomJobsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_custom_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -@pytest.mark.asyncio -async def test_list_custom_jobs_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.list_custom_jobs( - job_service.ListCustomJobsRequest(), - parent='parent_value', - ) - - -def test_list_custom_jobs_pager(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - custom_job.CustomJob(), - custom_job.CustomJob(), - ], - next_page_token='abc', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[], - next_page_token='def', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - ], - next_page_token='ghi', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - custom_job.CustomJob(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_custom_jobs(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, custom_job.CustomJob) - for i in results) - -def test_list_custom_jobs_pages(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - custom_job.CustomJob(), - custom_job.CustomJob(), - ], - next_page_token='abc', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[], - next_page_token='def', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - ], - next_page_token='ghi', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - custom_job.CustomJob(), - ], - ), - RuntimeError, - ) - pages = list(client.list_custom_jobs(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_custom_jobs_async_pager(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - custom_job.CustomJob(), - custom_job.CustomJob(), - ], - next_page_token='abc', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[], - next_page_token='def', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - ], - next_page_token='ghi', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - custom_job.CustomJob(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_custom_jobs(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, custom_job.CustomJob) - for i in responses) - -@pytest.mark.asyncio -async def test_list_custom_jobs_async_pages(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - custom_job.CustomJob(), - custom_job.CustomJob(), - ], - next_page_token='abc', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[], - next_page_token='def', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - ], - next_page_token='ghi', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - custom_job.CustomJob(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_custom_jobs(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_custom_job(transport: str = 'grpc', request_type=job_service.DeleteCustomJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteCustomJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_custom_job_from_dict(): - test_delete_custom_job(request_type=dict) - - -def test_delete_custom_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_custom_job), - '__call__') as call: - client.delete_custom_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteCustomJobRequest() - - -@pytest.mark.asyncio -async def test_delete_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteCustomJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteCustomJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_custom_job_async_from_dict(): - await test_delete_custom_job_async(request_type=dict) - - -def test_delete_custom_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteCustomJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_custom_job), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_custom_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteCustomJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_custom_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_custom_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_custom_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_delete_custom_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_custom_job( - job_service.DeleteCustomJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_custom_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_custom_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_delete_custom_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_custom_job( - job_service.DeleteCustomJobRequest(), - name='name_value', - ) - - -def test_cancel_custom_job(transport: str = 'grpc', request_type=job_service.CancelCustomJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.cancel_custom_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelCustomJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_cancel_custom_job_from_dict(): - test_cancel_custom_job(request_type=dict) - - -def test_cancel_custom_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_custom_job), - '__call__') as call: - client.cancel_custom_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelCustomJobRequest() - - -@pytest.mark.asyncio -async def test_cancel_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelCustomJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.cancel_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelCustomJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_cancel_custom_job_async_from_dict(): - await test_cancel_custom_job_async(request_type=dict) - - -def test_cancel_custom_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CancelCustomJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_custom_job), - '__call__') as call: - call.return_value = None - client.cancel_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_cancel_custom_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CancelCustomJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.cancel_custom_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.cancel_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_cancel_custom_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.cancel_custom_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_cancel_custom_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.cancel_custom_job( - job_service.CancelCustomJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_cancel_custom_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.cancel_custom_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_cancel_custom_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.cancel_custom_job( - job_service.CancelCustomJobRequest(), - name='name_value', - ) - - -def test_create_data_labeling_job(transport: str = 'grpc', request_type=job_service.CreateDataLabelingJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = gca_data_labeling_job.DataLabelingJob( - name='name_value', - display_name='display_name_value', - datasets=['datasets_value'], - labeler_count=1375, - instruction_uri='instruction_uri_value', - inputs_schema_uri='inputs_schema_uri_value', - state=job_state.JobState.JOB_STATE_QUEUED, - labeling_progress=1810, - specialist_pools=['specialist_pools_value'], - ) - response = client.create_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateDataLabelingJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_data_labeling_job.DataLabelingJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.datasets == ['datasets_value'] - assert response.labeler_count == 1375 - assert response.instruction_uri == 'instruction_uri_value' - assert response.inputs_schema_uri == 'inputs_schema_uri_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - assert response.labeling_progress == 1810 - assert response.specialist_pools == ['specialist_pools_value'] - - -def test_create_data_labeling_job_from_dict(): - test_create_data_labeling_job(request_type=dict) - - -def test_create_data_labeling_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_data_labeling_job), - '__call__') as call: - client.create_data_labeling_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateDataLabelingJobRequest() - - -@pytest.mark.asyncio -async def test_create_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateDataLabelingJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_data_labeling_job.DataLabelingJob( - name='name_value', - display_name='display_name_value', - datasets=['datasets_value'], - labeler_count=1375, - instruction_uri='instruction_uri_value', - inputs_schema_uri='inputs_schema_uri_value', - state=job_state.JobState.JOB_STATE_QUEUED, - labeling_progress=1810, - specialist_pools=['specialist_pools_value'], - )) - response = await client.create_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateDataLabelingJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_data_labeling_job.DataLabelingJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.datasets == ['datasets_value'] - assert response.labeler_count == 1375 - assert response.instruction_uri == 'instruction_uri_value' - assert response.inputs_schema_uri == 'inputs_schema_uri_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - assert response.labeling_progress == 1810 - assert response.specialist_pools == ['specialist_pools_value'] - - -@pytest.mark.asyncio -async def test_create_data_labeling_job_async_from_dict(): - await test_create_data_labeling_job_async(request_type=dict) - - -def test_create_data_labeling_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CreateDataLabelingJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_data_labeling_job), - '__call__') as call: - call.return_value = gca_data_labeling_job.DataLabelingJob() - client.create_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_data_labeling_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CreateDataLabelingJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_data_labeling_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_data_labeling_job.DataLabelingJob()) - await client.create_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_data_labeling_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_data_labeling_job.DataLabelingJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_data_labeling_job( - parent='parent_value', - data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].data_labeling_job == gca_data_labeling_job.DataLabelingJob(name='name_value') - - -def test_create_data_labeling_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_data_labeling_job( - job_service.CreateDataLabelingJobRequest(), - parent='parent_value', - data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_data_labeling_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_data_labeling_job.DataLabelingJob() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_data_labeling_job.DataLabelingJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_data_labeling_job( - parent='parent_value', - data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].data_labeling_job == gca_data_labeling_job.DataLabelingJob(name='name_value') - - -@pytest.mark.asyncio -async def test_create_data_labeling_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_data_labeling_job( - job_service.CreateDataLabelingJobRequest(), - parent='parent_value', - data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'), - ) - - -def test_get_data_labeling_job(transport: str = 'grpc', request_type=job_service.GetDataLabelingJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = data_labeling_job.DataLabelingJob( - name='name_value', - display_name='display_name_value', - datasets=['datasets_value'], - labeler_count=1375, - instruction_uri='instruction_uri_value', - inputs_schema_uri='inputs_schema_uri_value', - state=job_state.JobState.JOB_STATE_QUEUED, - labeling_progress=1810, - specialist_pools=['specialist_pools_value'], - ) - response = client.get_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetDataLabelingJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, data_labeling_job.DataLabelingJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.datasets == ['datasets_value'] - assert response.labeler_count == 1375 - assert response.instruction_uri == 'instruction_uri_value' - assert response.inputs_schema_uri == 'inputs_schema_uri_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - assert response.labeling_progress == 1810 - assert response.specialist_pools == ['specialist_pools_value'] - - -def test_get_data_labeling_job_from_dict(): - test_get_data_labeling_job(request_type=dict) - - -def test_get_data_labeling_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_data_labeling_job), - '__call__') as call: - client.get_data_labeling_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetDataLabelingJobRequest() - - -@pytest.mark.asyncio -async def test_get_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetDataLabelingJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(data_labeling_job.DataLabelingJob( - name='name_value', - display_name='display_name_value', - datasets=['datasets_value'], - labeler_count=1375, - instruction_uri='instruction_uri_value', - inputs_schema_uri='inputs_schema_uri_value', - state=job_state.JobState.JOB_STATE_QUEUED, - labeling_progress=1810, - specialist_pools=['specialist_pools_value'], - )) - response = await client.get_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetDataLabelingJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, data_labeling_job.DataLabelingJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.datasets == ['datasets_value'] - assert response.labeler_count == 1375 - assert response.instruction_uri == 'instruction_uri_value' - assert response.inputs_schema_uri == 'inputs_schema_uri_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - assert response.labeling_progress == 1810 - assert response.specialist_pools == ['specialist_pools_value'] - - -@pytest.mark.asyncio -async def test_get_data_labeling_job_async_from_dict(): - await test_get_data_labeling_job_async(request_type=dict) - - -def test_get_data_labeling_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetDataLabelingJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_data_labeling_job), - '__call__') as call: - call.return_value = data_labeling_job.DataLabelingJob() - client.get_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_data_labeling_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetDataLabelingJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_data_labeling_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(data_labeling_job.DataLabelingJob()) - await client.get_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_data_labeling_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = data_labeling_job.DataLabelingJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_data_labeling_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
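- # Flattened keyword arguments are folded into a request message before
- # the transport is invoked, which is why the assertions below inspect
- # request fields rather than separate kwargs; roughly:
- #
- #   client.get_data_labeling_job(name='name_value')
- #   # -> transport called with GetDataLabelingJobRequest(name='name_value')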
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-def test_get_data_labeling_job_flattened_error():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.get_data_labeling_job(
- job_service.GetDataLabelingJobRequest(),
- name='name_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_get_data_labeling_job_flattened_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_data_labeling_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(data_labeling_job.DataLabelingJob())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.get_data_labeling_job(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_get_data_labeling_job_flattened_error_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.get_data_labeling_job(
- job_service.GetDataLabelingJobRequest(),
- name='name_value',
- )
-
-
-def test_list_data_labeling_jobs(transport: str = 'grpc', request_type=job_service.ListDataLabelingJobsRequest):
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_data_labeling_jobs),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = job_service.ListDataLabelingJobsResponse(
- next_page_token='next_page_token_value',
- )
- response = client.list_data_labeling_jobs(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.ListDataLabelingJobsRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ListDataLabelingJobsPager)
- assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_data_labeling_jobs_from_dict():
- test_list_data_labeling_jobs(request_type=dict)
-
-
-def test_list_data_labeling_jobs_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_data_labeling_jobs),
- '__call__') as call:
- client.list_data_labeling_jobs()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.ListDataLabelingJobsRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_data_labeling_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListDataLabelingJobsRequest):
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_data_labeling_jobs),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListDataLabelingJobsResponse(
- next_page_token='next_page_token_value',
- ))
- response = await client.list_data_labeling_jobs(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.ListDataLabelingJobsRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ListDataLabelingJobsAsyncPager)
- assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_data_labeling_jobs_async_from_dict():
- await test_list_data_labeling_jobs_async(request_type=dict)
-
-
-def test_list_data_labeling_jobs_field_headers():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = job_service.ListDataLabelingJobsRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_data_labeling_jobs),
- '__call__') as call:
- call.return_value = job_service.ListDataLabelingJobsResponse()
- client.list_data_labeling_jobs(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_data_labeling_jobs_field_headers_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = job_service.ListDataLabelingJobsRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_data_labeling_jobs),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListDataLabelingJobsResponse())
- await client.list_data_labeling_jobs(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
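- # The ('x-goog-request-params', ...) entry asserted below is the routing
- # header GAPIC clients attach so the backend can route by the resource
- # named in the URI; it is built roughly like this (a sketch, exact
- # encoding handled by api_core):
- #
- #   gapic_v1.routing_header.to_grpc_metadata((('parent', 'parent/value'),))
- #   # -> ('x-goog-request-params', 'parent=parent/value')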
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-def test_list_data_labeling_jobs_flattened():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_data_labeling_jobs),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = job_service.ListDataLabelingJobsResponse()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.list_data_labeling_jobs(
- parent='parent_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
-
-
-def test_list_data_labeling_jobs_flattened_error():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.list_data_labeling_jobs(
- job_service.ListDataLabelingJobsRequest(),
- parent='parent_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_list_data_labeling_jobs_flattened_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_data_labeling_jobs),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListDataLabelingJobsResponse())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.list_data_labeling_jobs(
- parent='parent_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_data_labeling_jobs_flattened_error_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.list_data_labeling_jobs(
- job_service.ListDataLabelingJobsRequest(),
- parent='parent_value',
- )
-
-
-def test_list_data_labeling_jobs_pager():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_data_labeling_jobs),
- '__call__') as call:
- # Set the response to a series of pages.
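- # mock's side_effect returns one item per transport call, so the four
- # responses below act as four pages (3 + 0 + 1 + 2 jobs); the trailing
- # RuntimeError fails fast if the pager ever fetches past the final page,
- # whose next_page_token is left unset.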
- call.side_effect = (
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[
- data_labeling_job.DataLabelingJob(),
- data_labeling_job.DataLabelingJob(),
- data_labeling_job.DataLabelingJob(),
- ],
- next_page_token='abc',
- ),
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[],
- next_page_token='def',
- ),
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[
- data_labeling_job.DataLabelingJob(),
- ],
- next_page_token='ghi',
- ),
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[
- data_labeling_job.DataLabelingJob(),
- data_labeling_job.DataLabelingJob(),
- ],
- ),
- RuntimeError,
- )
-
- metadata = ()
- metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata((
- ('parent', ''),
- )),
- )
- pager = client.list_data_labeling_jobs(request={})
-
- assert pager._metadata == metadata
-
- results = [i for i in pager]
- assert len(results) == 6
- assert all(isinstance(i, data_labeling_job.DataLabelingJob)
- for i in results)
-
-def test_list_data_labeling_jobs_pages():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_data_labeling_jobs),
- '__call__') as call:
- # Set the response to a series of pages.
- call.side_effect = (
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[
- data_labeling_job.DataLabelingJob(),
- data_labeling_job.DataLabelingJob(),
- data_labeling_job.DataLabelingJob(),
- ],
- next_page_token='abc',
- ),
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[],
- next_page_token='def',
- ),
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[
- data_labeling_job.DataLabelingJob(),
- ],
- next_page_token='ghi',
- ),
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[
- data_labeling_job.DataLabelingJob(),
- data_labeling_job.DataLabelingJob(),
- ],
- ),
- RuntimeError,
- )
- pages = list(client.list_data_labeling_jobs(request={}).pages)
- for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
- assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_data_labeling_jobs_async_pager():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_data_labeling_jobs),
- '__call__', new_callable=mock.AsyncMock) as call:
- # Set the response to a series of pages.
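- # Same four-page fixture as the sync pager above, but the transport call
- # is an AsyncMock: awaiting client.list_data_labeling_jobs(...) returns
- # an AsyncPager, and `async for` drives one awaited call per page.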
- call.side_effect = (
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[
- data_labeling_job.DataLabelingJob(),
- data_labeling_job.DataLabelingJob(),
- data_labeling_job.DataLabelingJob(),
- ],
- next_page_token='abc',
- ),
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[],
- next_page_token='def',
- ),
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[
- data_labeling_job.DataLabelingJob(),
- ],
- next_page_token='ghi',
- ),
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[
- data_labeling_job.DataLabelingJob(),
- data_labeling_job.DataLabelingJob(),
- ],
- ),
- RuntimeError,
- )
- async_pager = await client.list_data_labeling_jobs(request={})
- assert async_pager.next_page_token == 'abc'
- responses = []
- async for response in async_pager:
- responses.append(response)
-
- assert len(responses) == 6
- assert all(isinstance(i, data_labeling_job.DataLabelingJob)
- for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_data_labeling_jobs_async_pages():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_data_labeling_jobs),
- '__call__', new_callable=mock.AsyncMock) as call:
- # Set the response to a series of pages.
- call.side_effect = (
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[
- data_labeling_job.DataLabelingJob(),
- data_labeling_job.DataLabelingJob(),
- data_labeling_job.DataLabelingJob(),
- ],
- next_page_token='abc',
- ),
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[],
- next_page_token='def',
- ),
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[
- data_labeling_job.DataLabelingJob(),
- ],
- next_page_token='ghi',
- ),
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[
- data_labeling_job.DataLabelingJob(),
- data_labeling_job.DataLabelingJob(),
- ],
- ),
- RuntimeError,
- )
- pages = []
- async for page_ in (await client.list_data_labeling_jobs(request={})).pages:
- pages.append(page_)
- for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
- assert page_.raw_page.next_page_token == token
-
-def test_delete_data_labeling_job(transport: str = 'grpc', request_type=job_service.DeleteDataLabelingJobRequest):
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_data_labeling_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = operations_pb2.Operation(name='operations/spam')
- response = client.delete_data_labeling_job(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.DeleteDataLabelingJobRequest()
-
- # Establish that the response is the type that we expect.
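- # Delete is a long-running operation: the transport hands back a raw
- # operations_pb2.Operation, and the client wraps it in an api_core
- # operation future, hence the isinstance check below. Typical use (a
- # sketch) would be:
- #
- #   lro = client.delete_data_labeling_job(request)
- #   lro.result()  # block until the server-side deletion completes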
- assert isinstance(response, future.Future) - - -def test_delete_data_labeling_job_from_dict(): - test_delete_data_labeling_job(request_type=dict) - - -def test_delete_data_labeling_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_data_labeling_job), - '__call__') as call: - client.delete_data_labeling_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteDataLabelingJobRequest() - - -@pytest.mark.asyncio -async def test_delete_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteDataLabelingJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteDataLabelingJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_data_labeling_job_async_from_dict(): - await test_delete_data_labeling_job_async(request_type=dict) - - -def test_delete_data_labeling_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteDataLabelingJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_data_labeling_job), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_data_labeling_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteDataLabelingJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(
- type(client.transport.delete_data_labeling_job),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
- await client.delete_data_labeling_job(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-def test_delete_data_labeling_job_flattened():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_data_labeling_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = operations_pb2.Operation(name='operations/op')
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.delete_data_labeling_job(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-def test_delete_data_labeling_job_flattened_error():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.delete_data_labeling_job(
- job_service.DeleteDataLabelingJobRequest(),
- name='name_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_delete_data_labeling_job_flattened_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_data_labeling_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- operations_pb2.Operation(name='operations/spam')
- )
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.delete_data_labeling_job(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_data_labeling_job_flattened_error_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.delete_data_labeling_job(
- job_service.DeleteDataLabelingJobRequest(),
- name='name_value',
- )
-
-
-def test_cancel_data_labeling_job(transport: str = 'grpc', request_type=job_service.CancelDataLabelingJobRequest):
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
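- # In proto3 every field carries a default, so request_type() yields a
- # valid, fully-defaulted message; CancelDataLabelingJobRequest(), for
- # example, simply has name == ''.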
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.cancel_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelDataLabelingJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_cancel_data_labeling_job_from_dict(): - test_cancel_data_labeling_job(request_type=dict) - - -def test_cancel_data_labeling_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_data_labeling_job), - '__call__') as call: - client.cancel_data_labeling_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelDataLabelingJobRequest() - - -@pytest.mark.asyncio -async def test_cancel_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelDataLabelingJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.cancel_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelDataLabelingJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_cancel_data_labeling_job_async_from_dict(): - await test_cancel_data_labeling_job_async(request_type=dict) - - -def test_cancel_data_labeling_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CancelDataLabelingJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_data_labeling_job), - '__call__') as call: - call.return_value = None - client.cancel_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_cancel_data_labeling_job_field_headers_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = job_service.CancelDataLabelingJobRequest()
-
- request.name = 'name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.cancel_data_labeling_job),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
- await client.cancel_data_labeling_job(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-def test_cancel_data_labeling_job_flattened():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.cancel_data_labeling_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = None
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.cancel_data_labeling_job(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-def test_cancel_data_labeling_job_flattened_error():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.cancel_data_labeling_job(
- job_service.CancelDataLabelingJobRequest(),
- name='name_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_cancel_data_labeling_job_flattened_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.cancel_data_labeling_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.cancel_data_labeling_job(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_cancel_data_labeling_job_flattened_error_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
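- # Supplying a request object and flattened fields together is ambiguous
- # (which `name` should win?), so the generated clients reject the
- # combination with ValueError instead of silently merging the two.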
- with pytest.raises(ValueError): - await client.cancel_data_labeling_job( - job_service.CancelDataLabelingJobRequest(), - name='name_value', - ) - - -def test_create_hyperparameter_tuning_job(transport: str = 'grpc', request_type=job_service.CreateHyperparameterTuningJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob( - name='name_value', - display_name='display_name_value', - max_trial_count=1609, - parallel_trial_count=2128, - max_failed_trial_count=2317, - state=job_state.JobState.JOB_STATE_QUEUED, - ) - response = client.create_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateHyperparameterTuningJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_hyperparameter_tuning_job.HyperparameterTuningJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.max_trial_count == 1609 - assert response.parallel_trial_count == 2128 - assert response.max_failed_trial_count == 2317 - assert response.state == job_state.JobState.JOB_STATE_QUEUED - - -def test_create_hyperparameter_tuning_job_from_dict(): - test_create_hyperparameter_tuning_job(request_type=dict) - - -def test_create_hyperparameter_tuning_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_hyperparameter_tuning_job), - '__call__') as call: - client.create_hyperparameter_tuning_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateHyperparameterTuningJobRequest() - - -@pytest.mark.asyncio -async def test_create_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateHyperparameterTuningJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_hyperparameter_tuning_job.HyperparameterTuningJob(
- name='name_value',
- display_name='display_name_value',
- max_trial_count=1609,
- parallel_trial_count=2128,
- max_failed_trial_count=2317,
- state=job_state.JobState.JOB_STATE_QUEUED,
- ))
- response = await client.create_hyperparameter_tuning_job(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.CreateHyperparameterTuningJobRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, gca_hyperparameter_tuning_job.HyperparameterTuningJob)
- assert response.name == 'name_value'
- assert response.display_name == 'display_name_value'
- assert response.max_trial_count == 1609
- assert response.parallel_trial_count == 2128
- assert response.max_failed_trial_count == 2317
- assert response.state == job_state.JobState.JOB_STATE_QUEUED
-
-
-@pytest.mark.asyncio
-async def test_create_hyperparameter_tuning_job_async_from_dict():
- await test_create_hyperparameter_tuning_job_async(request_type=dict)
-
-
-def test_create_hyperparameter_tuning_job_field_headers():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = job_service.CreateHyperparameterTuningJobRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_hyperparameter_tuning_job),
- '__call__') as call:
- call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob()
- client.create_hyperparameter_tuning_job(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_create_hyperparameter_tuning_job_field_headers_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = job_service.CreateHyperparameterTuningJobRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_hyperparameter_tuning_job),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_hyperparameter_tuning_job.HyperparameterTuningJob())
- await client.create_hyperparameter_tuning_job(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-def test_create_hyperparameter_tuning_job_flattened():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
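- # type(client.transport.create_hyperparameter_tuning_job) is the class of
- # the transport's wrapped gRPC callable, so patching its __call__ below
- # intercepts every invocation of this one RPC while leaving the rest of
- # the transport untouched.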
- with mock.patch.object(
- type(client.transport.create_hyperparameter_tuning_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.create_hyperparameter_tuning_job(
- parent='parent_value',
- hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'),
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
- assert args[0].hyperparameter_tuning_job == gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value')
-
-
-def test_create_hyperparameter_tuning_job_flattened_error():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.create_hyperparameter_tuning_job(
- job_service.CreateHyperparameterTuningJobRequest(),
- parent='parent_value',
- hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'),
- )
-
-
-@pytest.mark.asyncio
-async def test_create_hyperparameter_tuning_job_flattened_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_hyperparameter_tuning_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_hyperparameter_tuning_job.HyperparameterTuningJob())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.create_hyperparameter_tuning_job(
- parent='parent_value',
- hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'),
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
- assert args[0].hyperparameter_tuning_job == gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value')
-
-
-@pytest.mark.asyncio
-async def test_create_hyperparameter_tuning_job_flattened_error_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.create_hyperparameter_tuning_job(
- job_service.CreateHyperparameterTuningJobRequest(),
- parent='parent_value',
- hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'),
- )
-
-
-def test_get_hyperparameter_tuning_job(transport: str = 'grpc', request_type=job_service.GetHyperparameterTuningJobRequest):
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_hyperparameter_tuning_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob(
- name='name_value',
- display_name='display_name_value',
- max_trial_count=1609,
- parallel_trial_count=2128,
- max_failed_trial_count=2317,
- state=job_state.JobState.JOB_STATE_QUEUED,
- )
- response = client.get_hyperparameter_tuning_job(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.GetHyperparameterTuningJobRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, hyperparameter_tuning_job.HyperparameterTuningJob)
- assert response.name == 'name_value'
- assert response.display_name == 'display_name_value'
- assert response.max_trial_count == 1609
- assert response.parallel_trial_count == 2128
- assert response.max_failed_trial_count == 2317
- assert response.state == job_state.JobState.JOB_STATE_QUEUED
-
-
-def test_get_hyperparameter_tuning_job_from_dict():
- test_get_hyperparameter_tuning_job(request_type=dict)
-
-
-def test_get_hyperparameter_tuning_job_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_hyperparameter_tuning_job),
- '__call__') as call:
- client.get_hyperparameter_tuning_job()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.GetHyperparameterTuningJobRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetHyperparameterTuningJobRequest):
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_hyperparameter_tuning_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(hyperparameter_tuning_job.HyperparameterTuningJob(
- name='name_value',
- display_name='display_name_value',
- max_trial_count=1609,
- parallel_trial_count=2128,
- max_failed_trial_count=2317,
- state=job_state.JobState.JOB_STATE_QUEUED,
- ))
- response = await client.get_hyperparameter_tuning_job(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.GetHyperparameterTuningJobRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, hyperparameter_tuning_job.HyperparameterTuningJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.max_trial_count == 1609 - assert response.parallel_trial_count == 2128 - assert response.max_failed_trial_count == 2317 - assert response.state == job_state.JobState.JOB_STATE_QUEUED - - -@pytest.mark.asyncio -async def test_get_hyperparameter_tuning_job_async_from_dict(): - await test_get_hyperparameter_tuning_job_async(request_type=dict) - - -def test_get_hyperparameter_tuning_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetHyperparameterTuningJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_hyperparameter_tuning_job), - '__call__') as call: - call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob() - client.get_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_hyperparameter_tuning_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetHyperparameterTuningJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_hyperparameter_tuning_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(hyperparameter_tuning_job.HyperparameterTuningJob()) - await client.get_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_hyperparameter_tuning_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_hyperparameter_tuning_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-def test_get_hyperparameter_tuning_job_flattened_error():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.get_hyperparameter_tuning_job(
- job_service.GetHyperparameterTuningJobRequest(),
- name='name_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_get_hyperparameter_tuning_job_flattened_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_hyperparameter_tuning_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(hyperparameter_tuning_job.HyperparameterTuningJob())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.get_hyperparameter_tuning_job(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_get_hyperparameter_tuning_job_flattened_error_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.get_hyperparameter_tuning_job(
- job_service.GetHyperparameterTuningJobRequest(),
- name='name_value',
- )
-
-
-def test_list_hyperparameter_tuning_jobs(transport: str = 'grpc', request_type=job_service.ListHyperparameterTuningJobsRequest):
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_hyperparameter_tuning_jobs),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = job_service.ListHyperparameterTuningJobsResponse(
- next_page_token='next_page_token_value',
- )
- response = client.list_hyperparameter_tuning_jobs(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.ListHyperparameterTuningJobsRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ListHyperparameterTuningJobsPager)
- assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_hyperparameter_tuning_jobs_from_dict():
- test_list_hyperparameter_tuning_jobs(request_type=dict)
-
-
-def test_list_hyperparameter_tuning_jobs_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
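- # With neither a request nor flattened fields, the client manufactures a
- # default ListHyperparameterTuningJobsRequest(), so the stub still
- # receives a well-formed (if empty) request message.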
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_hyperparameter_tuning_jobs),
- '__call__') as call:
- client.list_hyperparameter_tuning_jobs()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.ListHyperparameterTuningJobsRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_hyperparameter_tuning_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListHyperparameterTuningJobsRequest):
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_hyperparameter_tuning_jobs),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListHyperparameterTuningJobsResponse(
- next_page_token='next_page_token_value',
- ))
- response = await client.list_hyperparameter_tuning_jobs(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.ListHyperparameterTuningJobsRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ListHyperparameterTuningJobsAsyncPager)
- assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_hyperparameter_tuning_jobs_async_from_dict():
- await test_list_hyperparameter_tuning_jobs_async(request_type=dict)
-
-
-def test_list_hyperparameter_tuning_jobs_field_headers():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = job_service.ListHyperparameterTuningJobsRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_hyperparameter_tuning_jobs),
- '__call__') as call:
- call.return_value = job_service.ListHyperparameterTuningJobsResponse()
- client.list_hyperparameter_tuning_jobs(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_hyperparameter_tuning_jobs_field_headers_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = job_service.ListHyperparameterTuningJobsRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_hyperparameter_tuning_jobs),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListHyperparameterTuningJobsResponse())
- await client.list_hyperparameter_tuning_jobs(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-def test_list_hyperparameter_tuning_jobs_flattened():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_hyperparameter_tuning_jobs),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = job_service.ListHyperparameterTuningJobsResponse()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.list_hyperparameter_tuning_jobs(
- parent='parent_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
-
-
-def test_list_hyperparameter_tuning_jobs_flattened_error():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.list_hyperparameter_tuning_jobs(
- job_service.ListHyperparameterTuningJobsRequest(),
- parent='parent_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_list_hyperparameter_tuning_jobs_flattened_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_hyperparameter_tuning_jobs),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = job_service.ListHyperparameterTuningJobsResponse()
-
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListHyperparameterTuningJobsResponse())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.list_hyperparameter_tuning_jobs(
- parent='parent_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_hyperparameter_tuning_jobs_flattened_error_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.list_hyperparameter_tuning_jobs(
- job_service.ListHyperparameterTuningJobsRequest(),
- parent='parent_value',
- )
-
-
-def test_list_hyperparameter_tuning_jobs_pager():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
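- # The pager returned by list_* methods fetches pages lazily and yields
- # individual resources; hypothetical application usage looks roughly like:
- #
- #   for job in client.list_hyperparameter_tuning_jobs(
- #           parent='projects/my-project/locations/us-central1'):
- #       print(job.name)
- #
- # The test below simulates that flow by feeding a fixed series of pages
- # through call.side_effect, with a trailing RuntimeError as a guard
- # against fetching more pages than were staged.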
- with mock.patch.object(
- type(client.transport.list_hyperparameter_tuning_jobs),
- '__call__') as call:
- # Set the response to a series of pages.
- call.side_effect = (
- job_service.ListHyperparameterTuningJobsResponse(
- hyperparameter_tuning_jobs=[
- hyperparameter_tuning_job.HyperparameterTuningJob(),
- hyperparameter_tuning_job.HyperparameterTuningJob(),
- hyperparameter_tuning_job.HyperparameterTuningJob(),
- ],
- next_page_token='abc',
- ),
- job_service.ListHyperparameterTuningJobsResponse(
- hyperparameter_tuning_jobs=[],
- next_page_token='def',
- ),
- job_service.ListHyperparameterTuningJobsResponse(
- hyperparameter_tuning_jobs=[
- hyperparameter_tuning_job.HyperparameterTuningJob(),
- ],
- next_page_token='ghi',
- ),
- job_service.ListHyperparameterTuningJobsResponse(
- hyperparameter_tuning_jobs=[
- hyperparameter_tuning_job.HyperparameterTuningJob(),
- hyperparameter_tuning_job.HyperparameterTuningJob(),
- ],
- ),
- RuntimeError,
- )
-
- metadata = ()
- metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata((
- ('parent', ''),
- )),
- )
- pager = client.list_hyperparameter_tuning_jobs(request={})
-
- assert pager._metadata == metadata
-
- results = [i for i in pager]
- assert len(results) == 6
- assert all(isinstance(i, hyperparameter_tuning_job.HyperparameterTuningJob)
- for i in results)
-
-def test_list_hyperparameter_tuning_jobs_pages():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_hyperparameter_tuning_jobs),
- '__call__') as call:
- # Set the response to a series of pages.
- call.side_effect = (
- job_service.ListHyperparameterTuningJobsResponse(
- hyperparameter_tuning_jobs=[
- hyperparameter_tuning_job.HyperparameterTuningJob(),
- hyperparameter_tuning_job.HyperparameterTuningJob(),
- hyperparameter_tuning_job.HyperparameterTuningJob(),
- ],
- next_page_token='abc',
- ),
- job_service.ListHyperparameterTuningJobsResponse(
- hyperparameter_tuning_jobs=[],
- next_page_token='def',
- ),
- job_service.ListHyperparameterTuningJobsResponse(
- hyperparameter_tuning_jobs=[
- hyperparameter_tuning_job.HyperparameterTuningJob(),
- ],
- next_page_token='ghi',
- ),
- job_service.ListHyperparameterTuningJobsResponse(
- hyperparameter_tuning_jobs=[
- hyperparameter_tuning_job.HyperparameterTuningJob(),
- hyperparameter_tuning_job.HyperparameterTuningJob(),
- ],
- ),
- RuntimeError,
- )
- pages = list(client.list_hyperparameter_tuning_jobs(request={}).pages)
- for page_, token in zip(pages, ['abc','def','ghi', '']):
- assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_hyperparameter_tuning_jobs_async_pager():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_hyperparameter_tuning_jobs),
- '__call__', new_callable=mock.AsyncMock) as call:
- # Set the response to a series of pages.
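- # An AsyncPager is awaited once and then consumed with `async for`;
- # a minimal usage sketch (assuming a running event loop):
- #
- #   async_pager = await client.list_hyperparameter_tuning_jobs(request={})
- #   async for job in async_pager:
- #       ...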
- call.side_effect = (
- job_service.ListHyperparameterTuningJobsResponse(
- hyperparameter_tuning_jobs=[
- hyperparameter_tuning_job.HyperparameterTuningJob(),
- hyperparameter_tuning_job.HyperparameterTuningJob(),
- hyperparameter_tuning_job.HyperparameterTuningJob(),
- ],
- next_page_token='abc',
- ),
- job_service.ListHyperparameterTuningJobsResponse(
- hyperparameter_tuning_jobs=[],
- next_page_token='def',
- ),
- job_service.ListHyperparameterTuningJobsResponse(
- hyperparameter_tuning_jobs=[
- hyperparameter_tuning_job.HyperparameterTuningJob(),
- ],
- next_page_token='ghi',
- ),
- job_service.ListHyperparameterTuningJobsResponse(
- hyperparameter_tuning_jobs=[
- hyperparameter_tuning_job.HyperparameterTuningJob(),
- hyperparameter_tuning_job.HyperparameterTuningJob(),
- ],
- ),
- RuntimeError,
- )
- async_pager = await client.list_hyperparameter_tuning_jobs(request={},)
- assert async_pager.next_page_token == 'abc'
- responses = []
- async for response in async_pager:
- responses.append(response)
-
- assert len(responses) == 6
- assert all(isinstance(i, hyperparameter_tuning_job.HyperparameterTuningJob)
- for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_hyperparameter_tuning_jobs_async_pages():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_hyperparameter_tuning_jobs),
- '__call__', new_callable=mock.AsyncMock) as call:
- # Set the response to a series of pages.
- call.side_effect = (
- job_service.ListHyperparameterTuningJobsResponse(
- hyperparameter_tuning_jobs=[
- hyperparameter_tuning_job.HyperparameterTuningJob(),
- hyperparameter_tuning_job.HyperparameterTuningJob(),
- hyperparameter_tuning_job.HyperparameterTuningJob(),
- ],
- next_page_token='abc',
- ),
- job_service.ListHyperparameterTuningJobsResponse(
- hyperparameter_tuning_jobs=[],
- next_page_token='def',
- ),
- job_service.ListHyperparameterTuningJobsResponse(
- hyperparameter_tuning_jobs=[
- hyperparameter_tuning_job.HyperparameterTuningJob(),
- ],
- next_page_token='ghi',
- ),
- job_service.ListHyperparameterTuningJobsResponse(
- hyperparameter_tuning_jobs=[
- hyperparameter_tuning_job.HyperparameterTuningJob(),
- hyperparameter_tuning_job.HyperparameterTuningJob(),
- ],
- ),
- RuntimeError,
- )
- pages = []
- async for page_ in (await client.list_hyperparameter_tuning_jobs(request={})).pages:
- pages.append(page_)
- for page_, token in zip(pages, ['abc','def','ghi', '']):
- assert page_.raw_page.next_page_token == token
-
-def test_delete_hyperparameter_tuning_job(transport: str = 'grpc', request_type=job_service.DeleteHyperparameterTuningJobRequest):
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_hyperparameter_tuning_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = operations_pb2.Operation(name='operations/spam')
- response = client.delete_hyperparameter_tuning_job(request)
-
- # Establish that the underlying gRPC stub method was called.
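- # mock_calls records each stub invocation as a (name, args, kwargs)
- # triple, so the request that was actually sent can be recovered with a
- # sketch like:
- #
- #   _, args, _ = call.mock_calls[0]
- #   sent_request = args[0]   # the proto request message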
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteHyperparameterTuningJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_hyperparameter_tuning_job_from_dict(): - test_delete_hyperparameter_tuning_job(request_type=dict) - - -def test_delete_hyperparameter_tuning_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_hyperparameter_tuning_job), - '__call__') as call: - client.delete_hyperparameter_tuning_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteHyperparameterTuningJobRequest() - - -@pytest.mark.asyncio -async def test_delete_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteHyperparameterTuningJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteHyperparameterTuningJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_hyperparameter_tuning_job_async_from_dict(): - await test_delete_hyperparameter_tuning_job_async(request_type=dict) - - -def test_delete_hyperparameter_tuning_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteHyperparameterTuningJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_hyperparameter_tuning_job), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
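- # The client derives this routing header from URI path fields on the
- # request; conceptually (a sketch of what the transport wrapper does):
- #
- #   metadata = (('x-goog-request-params', 'name={}'.format(request.name)),)
- #
- # so for request.name == 'name/value' the pair below must appear in the
- # metadata kwarg passed to the stub.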
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_hyperparameter_tuning_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteHyperparameterTuningJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_hyperparameter_tuning_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_hyperparameter_tuning_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_hyperparameter_tuning_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_delete_hyperparameter_tuning_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_hyperparameter_tuning_job( - job_service.DeleteHyperparameterTuningJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_hyperparameter_tuning_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_hyperparameter_tuning_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_delete_hyperparameter_tuning_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_hyperparameter_tuning_job( - job_service.DeleteHyperparameterTuningJobRequest(), - name='name_value', - ) - - -def test_cancel_hyperparameter_tuning_job(transport: str = 'grpc', request_type=job_service.CancelHyperparameterTuningJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.cancel_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelHyperparameterTuningJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_cancel_hyperparameter_tuning_job_from_dict(): - test_cancel_hyperparameter_tuning_job(request_type=dict) - - -def test_cancel_hyperparameter_tuning_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_hyperparameter_tuning_job), - '__call__') as call: - client.cancel_hyperparameter_tuning_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelHyperparameterTuningJobRequest() - - -@pytest.mark.asyncio -async def test_cancel_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelHyperparameterTuningJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.cancel_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelHyperparameterTuningJobRequest() - - # Establish that the response is the type that we expect. 
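- # Cancel maps to google.protobuf.Empty on the wire, which the client
- # surfaces as None. Cancelling does not delete the job; in live usage
- # one might poll afterwards (hypothetical sketch):
- #
- #   client.cancel_hyperparameter_tuning_job(name=job_name)
- #   job = client.get_hyperparameter_tuning_job(name=job_name)
- #   # job.state should eventually reach JOB_STATE_CANCELLED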
- assert response is None - - -@pytest.mark.asyncio -async def test_cancel_hyperparameter_tuning_job_async_from_dict(): - await test_cancel_hyperparameter_tuning_job_async(request_type=dict) - - -def test_cancel_hyperparameter_tuning_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CancelHyperparameterTuningJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_hyperparameter_tuning_job), - '__call__') as call: - call.return_value = None - client.cancel_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_cancel_hyperparameter_tuning_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CancelHyperparameterTuningJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_hyperparameter_tuning_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.cancel_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_cancel_hyperparameter_tuning_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.cancel_hyperparameter_tuning_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_cancel_hyperparameter_tuning_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
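- # Either calling convention is valid on its own (sketch):
- #
- #   client.cancel_hyperparameter_tuning_job(name='name_value')
- #   client.cancel_hyperparameter_tuning_job(
- #       request=job_service.CancelHyperparameterTuningJobRequest(
- #           name='name_value'))
- #
- # mixing the two in one call is what must raise ValueError below.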
- with pytest.raises(ValueError): - client.cancel_hyperparameter_tuning_job( - job_service.CancelHyperparameterTuningJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_cancel_hyperparameter_tuning_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.cancel_hyperparameter_tuning_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_cancel_hyperparameter_tuning_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.cancel_hyperparameter_tuning_job( - job_service.CancelHyperparameterTuningJobRequest(), - name='name_value', - ) - - -def test_create_batch_prediction_job(transport: str = 'grpc', request_type=job_service.CreateBatchPredictionJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_batch_prediction_job.BatchPredictionJob( - name='name_value', - display_name='display_name_value', - model='model_value', - state=job_state.JobState.JOB_STATE_QUEUED, - ) - response = client.create_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateBatchPredictionJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_batch_prediction_job.BatchPredictionJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.model == 'model_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - - -def test_create_batch_prediction_job_from_dict(): - test_create_batch_prediction_job(request_type=dict) - - -def test_create_batch_prediction_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(
- type(client.transport.create_batch_prediction_job),
- '__call__') as call:
- client.create_batch_prediction_job()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.CreateBatchPredictionJobRequest()
-
-
-@pytest.mark.asyncio
-async def test_create_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateBatchPredictionJobRequest):
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_batch_prediction_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_batch_prediction_job.BatchPredictionJob(
- name='name_value',
- display_name='display_name_value',
- model='model_value',
- state=job_state.JobState.JOB_STATE_QUEUED,
- ))
- response = await client.create_batch_prediction_job(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.CreateBatchPredictionJobRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, gca_batch_prediction_job.BatchPredictionJob)
- assert response.name == 'name_value'
- assert response.display_name == 'display_name_value'
- assert response.model == 'model_value'
- assert response.state == job_state.JobState.JOB_STATE_QUEUED
-
-
-@pytest.mark.asyncio
-async def test_create_batch_prediction_job_async_from_dict():
- await test_create_batch_prediction_job_async(request_type=dict)
-
-
-def test_create_batch_prediction_job_field_headers():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = job_service.CreateBatchPredictionJobRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_batch_prediction_job),
- '__call__') as call:
- call.return_value = gca_batch_prediction_job.BatchPredictionJob()
- client.create_batch_prediction_job(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_create_batch_prediction_job_field_headers_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = job_service.CreateBatchPredictionJobRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object( - type(client.transport.create_batch_prediction_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_batch_prediction_job.BatchPredictionJob()) - await client.create_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_batch_prediction_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_batch_prediction_job.BatchPredictionJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_batch_prediction_job( - parent='parent_value', - batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].batch_prediction_job == gca_batch_prediction_job.BatchPredictionJob(name='name_value') - - -def test_create_batch_prediction_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_batch_prediction_job( - job_service.CreateBatchPredictionJobRequest(), - parent='parent_value', - batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_batch_prediction_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_batch_prediction_job.BatchPredictionJob() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_batch_prediction_job.BatchPredictionJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_batch_prediction_job( - parent='parent_value', - batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].batch_prediction_job == gca_batch_prediction_job.BatchPredictionJob(name='name_value') - - -@pytest.mark.asyncio -async def test_create_batch_prediction_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError):
- await client.create_batch_prediction_job(
- job_service.CreateBatchPredictionJobRequest(),
- parent='parent_value',
- batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'),
- )
-
-
-def test_get_batch_prediction_job(transport: str = 'grpc', request_type=job_service.GetBatchPredictionJobRequest):
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_batch_prediction_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = batch_prediction_job.BatchPredictionJob(
- name='name_value',
- display_name='display_name_value',
- model='model_value',
- state=job_state.JobState.JOB_STATE_QUEUED,
- )
- response = client.get_batch_prediction_job(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.GetBatchPredictionJobRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, batch_prediction_job.BatchPredictionJob)
- assert response.name == 'name_value'
- assert response.display_name == 'display_name_value'
- assert response.model == 'model_value'
- assert response.state == job_state.JobState.JOB_STATE_QUEUED
-
-
-def test_get_batch_prediction_job_from_dict():
- test_get_batch_prediction_job(request_type=dict)
-
-
-def test_get_batch_prediction_job_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_batch_prediction_job),
- '__call__') as call:
- client.get_batch_prediction_job()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.GetBatchPredictionJobRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetBatchPredictionJobRequest):
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_batch_prediction_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(batch_prediction_job.BatchPredictionJob(
- name='name_value',
- display_name='display_name_value',
- model='model_value',
- state=job_state.JobState.JOB_STATE_QUEUED,
- ))
- response = await client.get_batch_prediction_job(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetBatchPredictionJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, batch_prediction_job.BatchPredictionJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.model == 'model_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - - -@pytest.mark.asyncio -async def test_get_batch_prediction_job_async_from_dict(): - await test_get_batch_prediction_job_async(request_type=dict) - - -def test_get_batch_prediction_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetBatchPredictionJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_batch_prediction_job), - '__call__') as call: - call.return_value = batch_prediction_job.BatchPredictionJob() - client.get_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_batch_prediction_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetBatchPredictionJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_batch_prediction_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(batch_prediction_job.BatchPredictionJob()) - await client.get_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_batch_prediction_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = batch_prediction_job.BatchPredictionJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_batch_prediction_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
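- # Flattened kwargs are folded into a request message before the stub is
- # invoked, so args[0] is a GetBatchPredictionJobRequest whose fields
- # mirror the kwargs; roughly:
- #
- #   client.get_batch_prediction_job(name='name_value')
- #   # == stub(job_service.GetBatchPredictionJobRequest(name='name_value'))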
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_get_batch_prediction_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_batch_prediction_job( - job_service.GetBatchPredictionJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_batch_prediction_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = batch_prediction_job.BatchPredictionJob() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(batch_prediction_job.BatchPredictionJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_batch_prediction_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_get_batch_prediction_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_batch_prediction_job( - job_service.GetBatchPredictionJobRequest(), - name='name_value', - ) - - -def test_list_batch_prediction_jobs(transport: str = 'grpc', request_type=job_service.ListBatchPredictionJobsRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_batch_prediction_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListBatchPredictionJobsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_batch_prediction_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListBatchPredictionJobsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListBatchPredictionJobsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_batch_prediction_jobs_from_dict(): - test_list_batch_prediction_jobs(request_type=dict) - - -def test_list_batch_prediction_jobs_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(
- type(client.transport.list_batch_prediction_jobs),
- '__call__') as call:
- client.list_batch_prediction_jobs()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.ListBatchPredictionJobsRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_batch_prediction_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListBatchPredictionJobsRequest):
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_batch_prediction_jobs),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListBatchPredictionJobsResponse(
- next_page_token='next_page_token_value',
- ))
- response = await client.list_batch_prediction_jobs(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.ListBatchPredictionJobsRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ListBatchPredictionJobsAsyncPager)
- assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_batch_prediction_jobs_async_from_dict():
- await test_list_batch_prediction_jobs_async(request_type=dict)
-
-
-def test_list_batch_prediction_jobs_field_headers():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = job_service.ListBatchPredictionJobsRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_batch_prediction_jobs),
- '__call__') as call:
- call.return_value = job_service.ListBatchPredictionJobsResponse()
- client.list_batch_prediction_jobs(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_batch_prediction_jobs_field_headers_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = job_service.ListBatchPredictionJobsRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_batch_prediction_jobs),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListBatchPredictionJobsResponse())
- await client.list_batch_prediction_jobs(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-def test_list_batch_prediction_jobs_flattened():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_batch_prediction_jobs),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = job_service.ListBatchPredictionJobsResponse()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.list_batch_prediction_jobs(
- parent='parent_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
-
-
-def test_list_batch_prediction_jobs_flattened_error():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.list_batch_prediction_jobs(
- job_service.ListBatchPredictionJobsRequest(),
- parent='parent_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_list_batch_prediction_jobs_flattened_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_batch_prediction_jobs),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = job_service.ListBatchPredictionJobsResponse()
-
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListBatchPredictionJobsResponse())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.list_batch_prediction_jobs(
- parent='parent_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_batch_prediction_jobs_flattened_error_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.list_batch_prediction_jobs(
- job_service.ListBatchPredictionJobsRequest(),
- parent='parent_value',
- )
-
-
-def test_list_batch_prediction_jobs_pager():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_batch_prediction_jobs),
- '__call__') as call:
- # Set the response to a series of pages.
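- # Besides per-item iteration, the pager exposes page-level access; a
- # usage sketch mirroring the *_pages tests below:
- #
- #   for page in client.list_batch_prediction_jobs(request={}).pages:
- #       token = page.raw_page.next_page_token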
- call.side_effect = (
- job_service.ListBatchPredictionJobsResponse(
- batch_prediction_jobs=[
- batch_prediction_job.BatchPredictionJob(),
- batch_prediction_job.BatchPredictionJob(),
- batch_prediction_job.BatchPredictionJob(),
- ],
- next_page_token='abc',
- ),
- job_service.ListBatchPredictionJobsResponse(
- batch_prediction_jobs=[],
- next_page_token='def',
- ),
- job_service.ListBatchPredictionJobsResponse(
- batch_prediction_jobs=[
- batch_prediction_job.BatchPredictionJob(),
- ],
- next_page_token='ghi',
- ),
- job_service.ListBatchPredictionJobsResponse(
- batch_prediction_jobs=[
- batch_prediction_job.BatchPredictionJob(),
- batch_prediction_job.BatchPredictionJob(),
- ],
- ),
- RuntimeError,
- )
-
- metadata = ()
- metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata((
- ('parent', ''),
- )),
- )
- pager = client.list_batch_prediction_jobs(request={})
-
- assert pager._metadata == metadata
-
- results = [i for i in pager]
- assert len(results) == 6
- assert all(isinstance(i, batch_prediction_job.BatchPredictionJob)
- for i in results)
-
-def test_list_batch_prediction_jobs_pages():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_batch_prediction_jobs),
- '__call__') as call:
- # Set the response to a series of pages.
- call.side_effect = (
- job_service.ListBatchPredictionJobsResponse(
- batch_prediction_jobs=[
- batch_prediction_job.BatchPredictionJob(),
- batch_prediction_job.BatchPredictionJob(),
- batch_prediction_job.BatchPredictionJob(),
- ],
- next_page_token='abc',
- ),
- job_service.ListBatchPredictionJobsResponse(
- batch_prediction_jobs=[],
- next_page_token='def',
- ),
- job_service.ListBatchPredictionJobsResponse(
- batch_prediction_jobs=[
- batch_prediction_job.BatchPredictionJob(),
- ],
- next_page_token='ghi',
- ),
- job_service.ListBatchPredictionJobsResponse(
- batch_prediction_jobs=[
- batch_prediction_job.BatchPredictionJob(),
- batch_prediction_job.BatchPredictionJob(),
- ],
- ),
- RuntimeError,
- )
- pages = list(client.list_batch_prediction_jobs(request={}).pages)
- for page_, token in zip(pages, ['abc','def','ghi', '']):
- assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_batch_prediction_jobs_async_pager():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_batch_prediction_jobs),
- '__call__', new_callable=mock.AsyncMock) as call:
- # Set the response to a series of pages.
- call.side_effect = (
- job_service.ListBatchPredictionJobsResponse(
- batch_prediction_jobs=[
- batch_prediction_job.BatchPredictionJob(),
- batch_prediction_job.BatchPredictionJob(),
- batch_prediction_job.BatchPredictionJob(),
- ],
- next_page_token='abc',
- ),
- job_service.ListBatchPredictionJobsResponse(
- batch_prediction_jobs=[],
- next_page_token='def',
- ),
- job_service.ListBatchPredictionJobsResponse(
- batch_prediction_jobs=[
- batch_prediction_job.BatchPredictionJob(),
- ],
- next_page_token='ghi',
- ),
- job_service.ListBatchPredictionJobsResponse(
- batch_prediction_jobs=[
- batch_prediction_job.BatchPredictionJob(),
- batch_prediction_job.BatchPredictionJob(),
- ],
- ),
- RuntimeError,
- )
- async_pager = await client.list_batch_prediction_jobs(request={},)
- assert async_pager.next_page_token == 'abc'
- responses = []
- async for response in async_pager:
- responses.append(response)
-
- assert len(responses) == 6
- assert all(isinstance(i, batch_prediction_job.BatchPredictionJob)
- for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_batch_prediction_jobs_async_pages():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_batch_prediction_jobs),
- '__call__', new_callable=mock.AsyncMock) as call:
- # Set the response to a series of pages.
- call.side_effect = (
- job_service.ListBatchPredictionJobsResponse(
- batch_prediction_jobs=[
- batch_prediction_job.BatchPredictionJob(),
- batch_prediction_job.BatchPredictionJob(),
- batch_prediction_job.BatchPredictionJob(),
- ],
- next_page_token='abc',
- ),
- job_service.ListBatchPredictionJobsResponse(
- batch_prediction_jobs=[],
- next_page_token='def',
- ),
- job_service.ListBatchPredictionJobsResponse(
- batch_prediction_jobs=[
- batch_prediction_job.BatchPredictionJob(),
- ],
- next_page_token='ghi',
- ),
- job_service.ListBatchPredictionJobsResponse(
- batch_prediction_jobs=[
- batch_prediction_job.BatchPredictionJob(),
- batch_prediction_job.BatchPredictionJob(),
- ],
- ),
- RuntimeError,
- )
- pages = []
- async for page_ in (await client.list_batch_prediction_jobs(request={})).pages:
- pages.append(page_)
- for page_, token in zip(pages, ['abc','def','ghi', '']):
- assert page_.raw_page.next_page_token == token
-
-def test_delete_batch_prediction_job(transport: str = 'grpc', request_type=job_service.DeleteBatchPredictionJobRequest):
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_batch_prediction_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = operations_pb2.Operation(name='operations/spam')
- response = client.delete_batch_prediction_job(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.DeleteBatchPredictionJobRequest()
-
- # Establish that the response is the type that we expect.
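- # Delete is a long-running operation: the raw stub returns an
- # operations_pb2.Operation which the client wraps in an api_core
- # operation future. In live (non-mocked) usage one would typically
- # block on completion, roughly:
- #
- #   operation = client.delete_batch_prediction_job(name=job_name)
- #   operation.result()   # waits; Delete resolves to google.protobuf.Empty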
- assert isinstance(response, future.Future) - - -def test_delete_batch_prediction_job_from_dict(): - test_delete_batch_prediction_job(request_type=dict) - - -def test_delete_batch_prediction_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_batch_prediction_job), - '__call__') as call: - client.delete_batch_prediction_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteBatchPredictionJobRequest() - - -@pytest.mark.asyncio -async def test_delete_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteBatchPredictionJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteBatchPredictionJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_batch_prediction_job_async_from_dict(): - await test_delete_batch_prediction_job_async(request_type=dict) - - -def test_delete_batch_prediction_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteBatchPredictionJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_batch_prediction_job), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_batch_prediction_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteBatchPredictionJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
-    with mock.patch.object(
-            type(client.transport.delete_batch_prediction_job),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
-        await client.delete_batch_prediction_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_delete_batch_prediction_job_flattened():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_batch_prediction_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/op')
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.delete_batch_prediction_job(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_delete_batch_prediction_job_flattened_error():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.delete_batch_prediction_job(
-            job_service.DeleteBatchPredictionJobRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_delete_batch_prediction_job_flattened_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_batch_prediction_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call; the fake call
-        # wraps the operation so the client can await it.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.delete_batch_prediction_job(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_batch_prediction_job_flattened_error_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
- with pytest.raises(ValueError): - await client.delete_batch_prediction_job( - job_service.DeleteBatchPredictionJobRequest(), - name='name_value', - ) - - -def test_cancel_batch_prediction_job(transport: str = 'grpc', request_type=job_service.CancelBatchPredictionJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.cancel_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelBatchPredictionJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_cancel_batch_prediction_job_from_dict(): - test_cancel_batch_prediction_job(request_type=dict) - - -def test_cancel_batch_prediction_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_batch_prediction_job), - '__call__') as call: - client.cancel_batch_prediction_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelBatchPredictionJobRequest() - - -@pytest.mark.asyncio -async def test_cancel_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelBatchPredictionJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.cancel_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelBatchPredictionJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_cancel_batch_prediction_job_async_from_dict(): - await test_cancel_batch_prediction_job_async(request_type=dict) - - -def test_cancel_batch_prediction_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
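-    # The request's `name` field is expected to be echoed into the
-    # x-goog-request-params metadata so the backend can route the call.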
-    request = job_service.CancelBatchPredictionJobRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_batch_prediction_job),
-            '__call__') as call:
-        call.return_value = None
-        client.cancel_batch_prediction_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_cancel_batch_prediction_job_field_headers_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = job_service.CancelBatchPredictionJobRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_batch_prediction_job),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
-        await client.cancel_batch_prediction_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_cancel_batch_prediction_job_flattened():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_batch_prediction_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = None
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.cancel_batch_prediction_job(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_cancel_batch_prediction_job_flattened_error():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.cancel_batch_prediction_job(
-            job_service.CancelBatchPredictionJobRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_cancel_batch_prediction_job_flattened_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_batch_prediction_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
- response = await client.cancel_batch_prediction_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_cancel_batch_prediction_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.cancel_batch_prediction_job( - job_service.CancelBatchPredictionJobRequest(), - name='name_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.JobServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.JobServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = JobServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.JobServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = JobServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.JobServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = JobServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.JobServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.JobServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.JobServiceGrpcTransport, - transports.JobServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.JobServiceGrpcTransport, - ) - -def test_job_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.JobServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_job_service_base_transport(): - # Instantiate the base transport. 
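-    # Patching __init__ skips credential setup so the bare transport surface
-    # can be constructed and each stub method probed directly.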
-    with mock.patch('google.cloud.aiplatform_v1.services.job_service.transports.JobServiceTransport.__init__') as Transport:
-        Transport.return_value = None
-        transport = transports.JobServiceTransport(
-            credentials=ga_credentials.AnonymousCredentials(),
-        )
-
-    # Every method on the transport should just blindly
-    # raise NotImplementedError.
-    methods = (
-        'create_custom_job',
-        'get_custom_job',
-        'list_custom_jobs',
-        'delete_custom_job',
-        'cancel_custom_job',
-        'create_data_labeling_job',
-        'get_data_labeling_job',
-        'list_data_labeling_jobs',
-        'delete_data_labeling_job',
-        'cancel_data_labeling_job',
-        'create_hyperparameter_tuning_job',
-        'get_hyperparameter_tuning_job',
-        'list_hyperparameter_tuning_jobs',
-        'delete_hyperparameter_tuning_job',
-        'cancel_hyperparameter_tuning_job',
-        'create_batch_prediction_job',
-        'get_batch_prediction_job',
-        'list_batch_prediction_jobs',
-        'delete_batch_prediction_job',
-        'cancel_batch_prediction_job',
-    )
-    for method in methods:
-        with pytest.raises(NotImplementedError):
-            getattr(transport, method)(request=object())
-
-    # Additionally, the LRO client (a property) should
-    # also raise NotImplementedError
-    with pytest.raises(NotImplementedError):
-        transport.operations_client
-
-
-@requires_google_auth_gte_1_25_0
-def test_job_service_base_transport_with_credentials_file():
-    # Instantiate the base transport with a credentials file
-    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages') as Transport:
-        Transport.return_value = None
-        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport = transports.JobServiceTransport(
-            credentials_file="credentials.json",
-            quota_project_id="octopus",
-        )
-        load_creds.assert_called_once_with(
-            "credentials.json",
-            scopes=None,
-            default_scopes=(
-                'https://www.googleapis.com/auth/cloud-platform',
-            ),
-            quota_project_id="octopus",
-        )
-
-
-@requires_google_auth_lt_1_25_0
-def test_job_service_base_transport_with_credentials_file_old_google_auth():
-    # Instantiate the base transport with a credentials file
-    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages') as Transport:
-        Transport.return_value = None
-        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport = transports.JobServiceTransport(
-            credentials_file="credentials.json",
-            quota_project_id="octopus",
-        )
-        load_creds.assert_called_once_with(
-            "credentials.json",
-            scopes=(
-                'https://www.googleapis.com/auth/cloud-platform',
-            ),
-            quota_project_id="octopus",
-        )
-
-
-def test_job_service_base_transport_with_adc():
-    # Test the default credentials are used if credentials and credentials_file are None.
-    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages') as Transport:
-        Transport.return_value = None
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport = transports.JobServiceTransport()
-        adc.assert_called_once()
-
-
-@requires_google_auth_gte_1_25_0
-def test_job_service_auth_adc():
-    # If no credentials are provided, we should use ADC credentials.
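-    # google.auth.default() implements Application Default Credentials (ADC);
-    # mocking it verifies the client falls back to ADC with the right scopes.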
-    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        JobServiceClient()
-        adc.assert_called_once_with(
-            scopes=None,
-            default_scopes=(
-                'https://www.googleapis.com/auth/cloud-platform',
-            ),
-            quota_project_id=None,
-        )
-
-
-@requires_google_auth_lt_1_25_0
-def test_job_service_auth_adc_old_google_auth():
-    # If no credentials are provided, we should use ADC credentials.
-    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        JobServiceClient()
-        adc.assert_called_once_with(
-            scopes=('https://www.googleapis.com/auth/cloud-platform',),
-            quota_project_id=None,
-        )
-
-
-@pytest.mark.parametrize(
-    "transport_class",
-    [
-        transports.JobServiceGrpcTransport,
-        transports.JobServiceGrpcAsyncIOTransport,
-    ],
-)
-@requires_google_auth_gte_1_25_0
-def test_job_service_transport_auth_adc(transport_class):
-    # If credentials and host are not provided, the transport class should use
-    # ADC credentials.
-    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport_class(quota_project_id="octopus", scopes=["1", "2"])
-        adc.assert_called_once_with(
-            scopes=["1", "2"],
-            default_scopes=('https://www.googleapis.com/auth/cloud-platform',),
-            quota_project_id="octopus",
-        )
-
-
-@pytest.mark.parametrize(
-    "transport_class",
-    [
-        transports.JobServiceGrpcTransport,
-        transports.JobServiceGrpcAsyncIOTransport,
-    ],
-)
-@requires_google_auth_lt_1_25_0
-def test_job_service_transport_auth_adc_old_google_auth(transport_class):
-    # If credentials and host are not provided, the transport class should use
-    # ADC credentials.
-    with mock.patch.object(google.auth, "default", autospec=True) as adc:
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport_class(quota_project_id="octopus")
-        adc.assert_called_once_with(
-            scopes=(
-                'https://www.googleapis.com/auth/cloud-platform',
-            ),
-            quota_project_id="octopus",
-        )
-
-
-@pytest.mark.parametrize(
-    "transport_class,grpc_helpers",
-    [
-        (transports.JobServiceGrpcTransport, grpc_helpers),
-        (transports.JobServiceGrpcAsyncIOTransport, grpc_helpers_async)
-    ],
-)
-def test_job_service_transport_create_channel(transport_class, grpc_helpers):
-    # If credentials and host are not provided, the transport class should use
-    # ADC credentials.
-    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
-        grpc_helpers, "create_channel", autospec=True
-    ) as create_channel:
-        creds = ga_credentials.AnonymousCredentials()
-        adc.return_value = (creds, None)
-        transport_class(
-            quota_project_id="octopus",
-            scopes=["1", "2"]
-        )
-
-        create_channel.assert_called_with(
-            "aiplatform.googleapis.com:443",
-            credentials=creds,
-            credentials_file=None,
-            quota_project_id="octopus",
-            default_scopes=(
-                'https://www.googleapis.com/auth/cloud-platform',
-            ),
-            scopes=["1", "2"],
-            default_host="aiplatform.googleapis.com",
-            ssl_credentials=None,
-            options=[
-                ("grpc.max_send_message_length", -1),
-                ("grpc.max_receive_message_length", -1),
-            ],
-        )
-
-
-@pytest.mark.parametrize("transport_class", [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport])
-def test_job_service_grpc_transport_client_cert_source_for_mtls(
-    transport_class
-):
-    cred = ga_credentials.AnonymousCredentials()
-
-    # Check ssl_channel_credentials is used if provided.
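-    # Explicit channel credentials should be passed through to create_channel
-    # unchanged, taking precedence over any mTLS certificate source.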
-    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
-        mock_ssl_channel_creds = mock.Mock()
-        transport_class(
-            host="squid.clam.whelk",
-            credentials=cred,
-            ssl_channel_credentials=mock_ssl_channel_creds
-        )
-        mock_create_channel.assert_called_once_with(
-            "squid.clam.whelk:443",
-            credentials=cred,
-            credentials_file=None,
-            scopes=None,
-            ssl_credentials=mock_ssl_channel_creds,
-            quota_project_id=None,
-            options=[
-                ("grpc.max_send_message_length", -1),
-                ("grpc.max_receive_message_length", -1),
-            ],
-        )
-
-    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
-    # is used.
-    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
-        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
-            transport_class(
-                credentials=cred,
-                client_cert_source_for_mtls=client_cert_source_callback
-            )
-            expected_cert, expected_key = client_cert_source_callback()
-            mock_ssl_cred.assert_called_once_with(
-                certificate_chain=expected_cert,
-                private_key=expected_key
-            )
-
-
-def test_job_service_host_no_port():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
-    )
-    assert client.transport._host == 'aiplatform.googleapis.com:443'
-
-
-def test_job_service_host_with_port():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
-    )
-    assert client.transport._host == 'aiplatform.googleapis.com:8000'
-
-def test_job_service_grpc_transport_channel():
-    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
-
-    # Check that channel is used if provided.
-    transport = transports.JobServiceGrpcTransport(
-        host="squid.clam.whelk",
-        channel=channel,
-    )
-    assert transport.grpc_channel == channel
-    assert transport._host == "squid.clam.whelk:443"
-    assert transport._ssl_channel_credentials is None
-
-
-def test_job_service_grpc_asyncio_transport_channel():
-    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
-
-    # Check that channel is used if provided.
-    transport = transports.JobServiceGrpcAsyncIOTransport(
-        host="squid.clam.whelk",
-        channel=channel,
-    )
-    assert transport.grpc_channel == channel
-    assert transport._host == "squid.clam.whelk:443"
-    assert transport._ssl_channel_credentials is None
-
-
-# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
-# removed from grpc/grpc_asyncio transport constructor.
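-# Both deprecated-argument tests below expect a DeprecationWarning while still
-# producing a working mTLS channel.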
-@pytest.mark.parametrize("transport_class", [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport]) -def test_job_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport]) -def test_job_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_job_service_grpc_lro_client(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_job_service_grpc_lro_async_client(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
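-    # The operations client is what polls long-running Operation resources
-    # returned by methods such as delete_batch_prediction_job.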
- assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_batch_prediction_job_path(): - project = "squid" - location = "clam" - batch_prediction_job = "whelk" - expected = "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format(project=project, location=location, batch_prediction_job=batch_prediction_job, ) - actual = JobServiceClient.batch_prediction_job_path(project, location, batch_prediction_job) - assert expected == actual - - -def test_parse_batch_prediction_job_path(): - expected = { - "project": "octopus", - "location": "oyster", - "batch_prediction_job": "nudibranch", - } - path = JobServiceClient.batch_prediction_job_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_batch_prediction_job_path(path) - assert expected == actual - -def test_custom_job_path(): - project = "cuttlefish" - location = "mussel" - custom_job = "winkle" - expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) - actual = JobServiceClient.custom_job_path(project, location, custom_job) - assert expected == actual - - -def test_parse_custom_job_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "custom_job": "abalone", - } - path = JobServiceClient.custom_job_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_custom_job_path(path) - assert expected == actual - -def test_data_labeling_job_path(): - project = "squid" - location = "clam" - data_labeling_job = "whelk" - expected = "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}".format(project=project, location=location, data_labeling_job=data_labeling_job, ) - actual = JobServiceClient.data_labeling_job_path(project, location, data_labeling_job) - assert expected == actual - - -def test_parse_data_labeling_job_path(): - expected = { - "project": "octopus", - "location": "oyster", - "data_labeling_job": "nudibranch", - } - path = JobServiceClient.data_labeling_job_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_data_labeling_job_path(path) - assert expected == actual - -def test_dataset_path(): - project = "cuttlefish" - location = "mussel" - dataset = "winkle" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - actual = JobServiceClient.dataset_path(project, location, dataset) - assert expected == actual - - -def test_parse_dataset_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "dataset": "abalone", - } - path = JobServiceClient.dataset_path(**expected) - - # Check that the path construction is reversible. 
- actual = JobServiceClient.parse_dataset_path(path) - assert expected == actual - -def test_hyperparameter_tuning_job_path(): - project = "squid" - location = "clam" - hyperparameter_tuning_job = "whelk" - expected = "projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}".format(project=project, location=location, hyperparameter_tuning_job=hyperparameter_tuning_job, ) - actual = JobServiceClient.hyperparameter_tuning_job_path(project, location, hyperparameter_tuning_job) - assert expected == actual - - -def test_parse_hyperparameter_tuning_job_path(): - expected = { - "project": "octopus", - "location": "oyster", - "hyperparameter_tuning_job": "nudibranch", - } - path = JobServiceClient.hyperparameter_tuning_job_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_hyperparameter_tuning_job_path(path) - assert expected == actual - -def test_model_path(): - project = "cuttlefish" - location = "mussel" - model = "winkle" - expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - actual = JobServiceClient.model_path(project, location, model) - assert expected == actual - - -def test_parse_model_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "model": "abalone", - } - path = JobServiceClient.model_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_model_path(path) - assert expected == actual - -def test_network_path(): - project = "squid" - network = "clam" - expected = "projects/{project}/global/networks/{network}".format(project=project, network=network, ) - actual = JobServiceClient.network_path(project, network) - assert expected == actual - - -def test_parse_network_path(): - expected = { - "project": "whelk", - "network": "octopus", - } - path = JobServiceClient.network_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_network_path(path) - assert expected == actual - -def test_trial_path(): - project = "oyster" - location = "nudibranch" - study = "cuttlefish" - trial = "mussel" - expected = "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, ) - actual = JobServiceClient.trial_path(project, location, study, trial) - assert expected == actual - - -def test_parse_trial_path(): - expected = { - "project": "winkle", - "location": "nautilus", - "study": "scallop", - "trial": "abalone", - } - path = JobServiceClient.trial_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_trial_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "squid" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = JobServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "clam", - } - path = JobServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. 
- actual = JobServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "whelk" - expected = "folders/{folder}".format(folder=folder, ) - actual = JobServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "octopus", - } - path = JobServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "oyster" - expected = "organizations/{organization}".format(organization=organization, ) - actual = JobServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "nudibranch", - } - path = JobServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "cuttlefish" - expected = "projects/{project}".format(project=project, ) - actual = JobServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "mussel", - } - path = JobServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "winkle" - location = "nautilus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = JobServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "scallop", - "location": "abalone", - } - path = JobServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.JobServiceTransport, '_prep_wrapped_messages') as prep: - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.JobServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = JobServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_migration_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_migration_service.py deleted file mode 100644 index 829b03be6e..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_migration_service.py +++ /dev/null @@ -1,1753 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1.services.migration_service import MigrationServiceAsyncClient -from google.cloud.aiplatform_v1.services.migration_service import MigrationServiceClient -from google.cloud.aiplatform_v1.services.migration_service import pagers -from google.cloud.aiplatform_v1.services.migration_service import transports -from google.cloud.aiplatform_v1.services.migration_service.transports.base import _GOOGLE_AUTH_VERSION -from google.cloud.aiplatform_v1.types import migratable_resource -from google.cloud.aiplatform_v1.types import migration_service -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). -requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert MigrationServiceClient._get_default_mtls_endpoint(None) is None - assert MigrationServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert MigrationServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert MigrationServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert MigrationServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert MigrationServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - MigrationServiceClient, - MigrationServiceAsyncClient, -]) -def test_migration_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("client_class", [ - MigrationServiceClient, - MigrationServiceAsyncClient, -]) -def test_migration_service_client_service_account_always_use_jwt(client_class): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.MigrationServiceGrpcTransport, "grpc"), - (transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_migration_service_client_service_account_always_use_jwt_true(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - -@pytest.mark.parametrize("client_class", [ - MigrationServiceClient, - MigrationServiceAsyncClient, -]) -def test_migration_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_migration_service_client_get_transport_class(): - transport = MigrationServiceClient.get_transport_class() - available_transports = [ - 
transports.MigrationServiceGrpcTransport, - ] - assert transport in available_transports - - transport = MigrationServiceClient.get_transport_class("grpc") - assert transport == transports.MigrationServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"), - (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(MigrationServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceClient)) -@mock.patch.object(MigrationServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceAsyncClient)) -def test_migration_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(MigrationServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(MigrationServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
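-    # Only the literal strings "true" and "false" are accepted here; any other
-    # value raises ValueError.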
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc", "true"), - (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc", "false"), - (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(MigrationServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceClient)) -@mock.patch.object(MigrationServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_migration_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"), - (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_migration_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"), - (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_migration_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_migration_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1.services.migration_service.transports.MigrationServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = MigrationServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_search_migratable_resources(transport: str = 'grpc', request_type=migration_service.SearchMigratableResourcesRequest): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = migration_service.SearchMigratableResourcesResponse( - next_page_token='next_page_token_value', - ) - response = client.search_migratable_resources(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == migration_service.SearchMigratableResourcesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.SearchMigratableResourcesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_search_migratable_resources_from_dict(): - test_search_migratable_resources(request_type=dict) - - -def test_search_migratable_resources_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - client.search_migratable_resources() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == migration_service.SearchMigratableResourcesRequest() - - -@pytest.mark.asyncio -async def test_search_migratable_resources_async(transport: str = 'grpc_asyncio', request_type=migration_service.SearchMigratableResourcesRequest): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.search_migratable_resources),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_service.SearchMigratableResourcesResponse(
-            next_page_token='next_page_token_value',
-        ))
-        response = await client.search_migratable_resources(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == migration_service.SearchMigratableResourcesRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.SearchMigratableResourcesAsyncPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_search_migratable_resources_async_from_dict():
-    await test_search_migratable_resources_async(request_type=dict)
-
-
-def test_search_migratable_resources_field_headers():
-    client = MigrationServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = migration_service.SearchMigratableResourcesRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.search_migratable_resources),
-            '__call__') as call:
-        call.return_value = migration_service.SearchMigratableResourcesResponse()
-        client.search_migratable_resources(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_search_migratable_resources_field_headers_async():
-    client = MigrationServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = migration_service.SearchMigratableResourcesRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.search_migratable_resources),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_service.SearchMigratableResourcesResponse())
-        await client.search_migratable_resources(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_search_migratable_resources_flattened():
-    client = MigrationServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.search_migratable_resources),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
- call.return_value = migration_service.SearchMigratableResourcesResponse()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.search_migratable_resources(
- parent='parent_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
-
-
-def test_search_migratable_resources_flattened_error():
- client = MigrationServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.search_migratable_resources(
- migration_service.SearchMigratableResourcesRequest(),
- parent='parent_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_search_migratable_resources_flattened_async():
- client = MigrationServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.search_migratable_resources),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_service.SearchMigratableResourcesResponse())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.search_migratable_resources(
- parent='parent_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_search_migratable_resources_flattened_error_async():
- client = MigrationServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.search_migratable_resources(
- migration_service.SearchMigratableResourcesRequest(),
- parent='parent_value',
- )
-
-
-def test_search_migratable_resources_pager():
- client = MigrationServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.search_migratable_resources),
- '__call__') as call:
- # Set the response to a series of pages.
- call.side_effect = (
- migration_service.SearchMigratableResourcesResponse(
- migratable_resources=[
- migratable_resource.MigratableResource(),
- migratable_resource.MigratableResource(),
- migratable_resource.MigratableResource(),
- ],
- next_page_token='abc',
- ),
- migration_service.SearchMigratableResourcesResponse(
- migratable_resources=[],
- next_page_token='def',
- ),
- migration_service.SearchMigratableResourcesResponse(
- migratable_resources=[
- migratable_resource.MigratableResource(),
- ],
- next_page_token='ghi',
- ),
- migration_service.SearchMigratableResourcesResponse(
- migratable_resources=[
- migratable_resource.MigratableResource(),
- migratable_resource.MigratableResource(),
- ],
- ),
- RuntimeError,
- )
-
- metadata = ()
- metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata((
- ('parent', ''),
- )),
- )
- pager = client.search_migratable_resources(request={})
-
- assert pager._metadata == metadata
-
- results = [i for i in pager]
- assert len(results) == 6
- assert all(isinstance(i, migratable_resource.MigratableResource)
- for i in results)
-
-def test_search_migratable_resources_pages():
- client = MigrationServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.search_migratable_resources),
- '__call__') as call:
- # Set the response to a series of pages.
- call.side_effect = (
- migration_service.SearchMigratableResourcesResponse(
- migratable_resources=[
- migratable_resource.MigratableResource(),
- migratable_resource.MigratableResource(),
- migratable_resource.MigratableResource(),
- ],
- next_page_token='abc',
- ),
- migration_service.SearchMigratableResourcesResponse(
- migratable_resources=[],
- next_page_token='def',
- ),
- migration_service.SearchMigratableResourcesResponse(
- migratable_resources=[
- migratable_resource.MigratableResource(),
- ],
- next_page_token='ghi',
- ),
- migration_service.SearchMigratableResourcesResponse(
- migratable_resources=[
- migratable_resource.MigratableResource(),
- migratable_resource.MigratableResource(),
- ],
- ),
- RuntimeError,
- )
- pages = list(client.search_migratable_resources(request={}).pages)
- for page_, token in zip(pages, ['abc','def','ghi', '']):
- assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_search_migratable_resources_async_pager():
- client = MigrationServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.search_migratable_resources),
- '__call__', new_callable=mock.AsyncMock) as call:
- # Set the response to a series of pages.
- call.side_effect = (
- migration_service.SearchMigratableResourcesResponse(
- migratable_resources=[
- migratable_resource.MigratableResource(),
- migratable_resource.MigratableResource(),
- migratable_resource.MigratableResource(),
- ],
- next_page_token='abc',
- ),
- migration_service.SearchMigratableResourcesResponse(
- migratable_resources=[],
- next_page_token='def',
- ),
- migration_service.SearchMigratableResourcesResponse(
- migratable_resources=[
- migratable_resource.MigratableResource(),
- ],
- next_page_token='ghi',
- ),
- migration_service.SearchMigratableResourcesResponse(
- migratable_resources=[
- migratable_resource.MigratableResource(),
- migratable_resource.MigratableResource(),
- ],
- ),
- RuntimeError,
- )
- async_pager = await client.search_migratable_resources(request={},)
- assert async_pager.next_page_token == 'abc'
- responses = []
- async for response in async_pager:
- responses.append(response)
-
- assert len(responses) == 6
- assert all(isinstance(i, migratable_resource.MigratableResource)
- for i in responses)
-
-@pytest.mark.asyncio
-async def test_search_migratable_resources_async_pages():
- client = MigrationServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.search_migratable_resources),
- '__call__', new_callable=mock.AsyncMock) as call:
- # Set the response to a series of pages.
- call.side_effect = (
- migration_service.SearchMigratableResourcesResponse(
- migratable_resources=[
- migratable_resource.MigratableResource(),
- migratable_resource.MigratableResource(),
- migratable_resource.MigratableResource(),
- ],
- next_page_token='abc',
- ),
- migration_service.SearchMigratableResourcesResponse(
- migratable_resources=[],
- next_page_token='def',
- ),
- migration_service.SearchMigratableResourcesResponse(
- migratable_resources=[
- migratable_resource.MigratableResource(),
- ],
- next_page_token='ghi',
- ),
- migration_service.SearchMigratableResourcesResponse(
- migratable_resources=[
- migratable_resource.MigratableResource(),
- migratable_resource.MigratableResource(),
- ],
- ),
- RuntimeError,
- )
- pages = []
- async for page_ in (await client.search_migratable_resources(request={})).pages:
- pages.append(page_)
- for page_, token in zip(pages, ['abc','def','ghi', '']):
- assert page_.raw_page.next_page_token == token
-
-def test_batch_migrate_resources(transport: str = 'grpc', request_type=migration_service.BatchMigrateResourcesRequest):
- client = MigrationServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.batch_migrate_resources),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = operations_pb2.Operation(name='operations/spam')
- response = client.batch_migrate_resources(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == migration_service.BatchMigrateResourcesRequest()
-
- # Establish that the response is the type that we expect.
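The pager tests above all rely on the same arithmetic: the four fake pages carry 3 + 0 + 1 + 2 items, and the pager keeps re-invoking the mocked method while next_page_token is non-empty, which is also why the trailing RuntimeError in the side_effect tuple is never reached. A toy re-implementation of that flattening loop (_ToyPager is a simplified stand-in for the generated pagers.SearchMigratableResourcesPager, not its actual code):

    class _ToyPager:
        """Simplified stand-in for a generated pager (illustrative only)."""
        def __init__(self, method, first_response):
            self._method = method            # re-invoked once per page token
            self._first_response = first_response

        @property
        def pages(self):
            response = self._first_response
            yield response
            while response['next_page_token']:
                response = self._method()    # real pagers resend request + token
                yield response

        def __iter__(self):
            for page in self.pages:
                yield from page['migratable_resources']

    fake_pages = iter([
        {'migratable_resources': [1, 2, 3], 'next_page_token': 'abc'},
        {'migratable_resources': [], 'next_page_token': 'def'},
        {'migratable_resources': [4], 'next_page_token': 'ghi'},
        {'migratable_resources': [5, 6], 'next_page_token': ''},
    ])
    pager = _ToyPager(lambda: next(fake_pages), next(fake_pages))
    assert list(pager) == [1, 2, 3, 4, 5, 6]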
- assert isinstance(response, future.Future) - - -def test_batch_migrate_resources_from_dict(): - test_batch_migrate_resources(request_type=dict) - - -def test_batch_migrate_resources_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_migrate_resources), - '__call__') as call: - client.batch_migrate_resources() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == migration_service.BatchMigrateResourcesRequest() - - -@pytest.mark.asyncio -async def test_batch_migrate_resources_async(transport: str = 'grpc_asyncio', request_type=migration_service.BatchMigrateResourcesRequest): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_migrate_resources), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.batch_migrate_resources(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == migration_service.BatchMigrateResourcesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_batch_migrate_resources_async_from_dict(): - await test_batch_migrate_resources_async(request_type=dict) - - -def test_batch_migrate_resources_field_headers(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = migration_service.BatchMigrateResourcesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_migrate_resources), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.batch_migrate_resources(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_batch_migrate_resources_field_headers_async(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = migration_service.BatchMigrateResourcesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
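The ('x-goog-request-params', 'parent=parent/value') tuples asserted in the field-header tests are routing metadata: the client URL-encodes the resource name so the backend can route the call without parsing the request body. Roughly how that header is assembled (a simplified sketch in the spirit of gapic_v1.routing_header.to_grpc_metadata, not the library's exact code):

    from urllib.parse import quote

    def to_grpc_metadata(params):
        """Simplified sketch: urlencode (key, value) pairs into one header."""
        value = '&'.join(f'{key}={quote(str(val), safe="/")}' for key, val in params)
        return ('x-goog-request-params', value)

    assert to_grpc_metadata((('parent', 'parent/value'),)) == (
        'x-goog-request-params',
        'parent=parent/value',
    )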
- with mock.patch.object(
- type(client.transport.batch_migrate_resources),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
- await client.batch_migrate_resources(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-def test_batch_migrate_resources_flattened():
- client = MigrationServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.batch_migrate_resources),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = operations_pb2.Operation(name='operations/op')
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.batch_migrate_resources(
- parent='parent_value',
- migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))],
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
- assert args[0].migrate_resource_requests == [migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))]
-
-
-def test_batch_migrate_resources_flattened_error():
- client = MigrationServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.batch_migrate_resources(
- migration_service.BatchMigrateResourcesRequest(),
- parent='parent_value',
- migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))],
- )
-
-
-@pytest.mark.asyncio
-async def test_batch_migrate_resources_flattened_async():
- client = MigrationServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.batch_migrate_resources),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- operations_pb2.Operation(name='operations/spam')
- )
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.batch_migrate_resources(
- parent='parent_value',
- migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))],
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].migrate_resource_requests == [migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))] - - -@pytest.mark.asyncio -async def test_batch_migrate_resources_flattened_error_async(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.batch_migrate_resources( - migration_service.BatchMigrateResourcesRequest(), - parent='parent_value', - migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))], - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.MigrationServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.MigrationServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = MigrationServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.MigrationServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = MigrationServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.MigrationServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = MigrationServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.MigrationServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.MigrationServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.MigrationServiceGrpcTransport, - transports.MigrationServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. 
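test_transport_adc above is the standard hermetic-ADC idiom: google.auth.default is patched so no real credential lookup can happen, and the test only verifies that the code under test asked for default credentials. The same idiom in isolation (build_credentials is a hypothetical helper, not generated code):

    from unittest import mock

    import google.auth
    from google.auth import credentials as ga_credentials

    def build_credentials():
        # Hypothetical helper standing in for "code that falls back to ADC".
        creds, _project = google.auth.default()
        return creds

    with mock.patch.object(google.auth, 'default') as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        assert isinstance(build_credentials(), ga_credentials.AnonymousCredentials)
        adc.assert_called_once()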
- client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.MigrationServiceGrpcTransport, - ) - -def test_migration_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.MigrationServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_migration_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1.services.migration_service.transports.MigrationServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.MigrationServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'search_migratable_resources', - 'batch_migrate_resources', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -@requires_google_auth_gte_1_25_0 -def test_migration_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.migration_service.transports.MigrationServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.MigrationServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@requires_google_auth_lt_1_25_0 -def test_migration_service_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.migration_service.transports.MigrationServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.MigrationServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - ), - quota_project_id="octopus", - ) - - -def test_migration_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.migration_service.transports.MigrationServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.MigrationServiceTransport() - adc.assert_called_once() - - -@requires_google_auth_gte_1_25_0 -def test_migration_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - MigrationServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@requires_google_auth_lt_1_25_0 -def test_migration_service_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - MigrationServiceClient() - adc.assert_called_once_with( - scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.MigrationServiceGrpcTransport, - transports.MigrationServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_gte_1_25_0 -def test_migration_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.MigrationServiceGrpcTransport, - transports.MigrationServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_migration_service_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with(scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.MigrationServiceGrpcTransport, grpc_helpers), - (transports.MigrationServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_migration_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
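The paired @requires_google_auth_gte_1_25_0 / @requires_google_auth_lt_1_25_0 tests exist because google.auth.default only grew a default_scopes keyword in google-auth 1.25.0; on older versions the library's default scopes must be passed as scopes instead. A sketch of that branching (the adc_kwargs helper is illustrative only, not part of the generated transport):

    import packaging.version

    _DEFAULT_SCOPES = ('https://www.googleapis.com/auth/cloud-platform',)

    def adc_kwargs(google_auth_version, user_scopes=None):
        """Illustrative: which keyword arguments reach google.auth.default."""
        if packaging.version.parse(google_auth_version) >= packaging.version.parse('1.25.0'):
            return {'scopes': user_scopes, 'default_scopes': _DEFAULT_SCOPES}
        return {'scopes': user_scopes or _DEFAULT_SCOPES}

    assert adc_kwargs('1.26.0')['default_scopes'] == _DEFAULT_SCOPES
    assert adc_kwargs('1.24.0') == {'scopes': _DEFAULT_SCOPES}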
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.MigrationServiceGrpcTransport, transports.MigrationServiceGrpcAsyncIOTransport]) -def test_migration_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_migration_service_host_no_port(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_migration_service_host_with_port(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_migration_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.MigrationServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_migration_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.MigrationServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.MigrationServiceGrpcTransport, transports.MigrationServiceGrpcAsyncIOTransport]) -def test_migration_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.MigrationServiceGrpcTransport, transports.MigrationServiceGrpcAsyncIOTransport]) -def test_migration_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_migration_service_grpc_lro_client(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
- assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_migration_service_grpc_lro_async_client(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_annotated_dataset_path(): - project = "squid" - dataset = "clam" - annotated_dataset = "whelk" - expected = "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}".format(project=project, dataset=dataset, annotated_dataset=annotated_dataset, ) - actual = MigrationServiceClient.annotated_dataset_path(project, dataset, annotated_dataset) - assert expected == actual - - -def test_parse_annotated_dataset_path(): - expected = { - "project": "octopus", - "dataset": "oyster", - "annotated_dataset": "nudibranch", - } - path = MigrationServiceClient.annotated_dataset_path(**expected) - - # Check that the path construction is reversible. - actual = MigrationServiceClient.parse_annotated_dataset_path(path) - assert expected == actual - -def test_dataset_path(): - project = "cuttlefish" - location = "mussel" - dataset = "winkle" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) - assert expected == actual - - -def test_parse_dataset_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "dataset": "abalone", - } - path = MigrationServiceClient.dataset_path(**expected) - - # Check that the path construction is reversible. - actual = MigrationServiceClient.parse_dataset_path(path) - assert expected == actual - -def test_dataset_path(): - project = "squid" - location = "clam" - dataset = "whelk" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) - assert expected == actual - - -def test_parse_dataset_path(): - expected = { - "project": "octopus", - "location": "oyster", - "dataset": "nudibranch", - } - path = MigrationServiceClient.dataset_path(**expected) - - # Check that the path construction is reversible. - actual = MigrationServiceClient.parse_dataset_path(path) - assert expected == actual - -def test_dataset_path(): - project = "cuttlefish" - dataset = "mussel" - expected = "projects/{project}/datasets/{dataset}".format(project=project, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) - assert expected == actual - - -def test_parse_dataset_path(): - expected = { - "project": "winkle", - "dataset": "nautilus", - } - path = MigrationServiceClient.dataset_path(**expected) - - # Check that the path construction is reversible. 
- actual = MigrationServiceClient.parse_dataset_path(path) - assert expected == actual - -def test_model_path(): - project = "scallop" - location = "abalone" - model = "squid" - expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - actual = MigrationServiceClient.model_path(project, location, model) - assert expected == actual - - -def test_parse_model_path(): - expected = { - "project": "clam", - "location": "whelk", - "model": "octopus", - } - path = MigrationServiceClient.model_path(**expected) - - # Check that the path construction is reversible. - actual = MigrationServiceClient.parse_model_path(path) - assert expected == actual - -def test_model_path(): - project = "oyster" - location = "nudibranch" - model = "cuttlefish" - expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - actual = MigrationServiceClient.model_path(project, location, model) - assert expected == actual - - -def test_parse_model_path(): - expected = { - "project": "mussel", - "location": "winkle", - "model": "nautilus", - } - path = MigrationServiceClient.model_path(**expected) - - # Check that the path construction is reversible. - actual = MigrationServiceClient.parse_model_path(path) - assert expected == actual - -def test_version_path(): - project = "scallop" - model = "abalone" - version = "squid" - expected = "projects/{project}/models/{model}/versions/{version}".format(project=project, model=model, version=version, ) - actual = MigrationServiceClient.version_path(project, model, version) - assert expected == actual - - -def test_parse_version_path(): - expected = { - "project": "clam", - "model": "whelk", - "version": "octopus", - } - path = MigrationServiceClient.version_path(**expected) - - # Check that the path construction is reversible. - actual = MigrationServiceClient.parse_version_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "oyster" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = MigrationServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "nudibranch", - } - path = MigrationServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = MigrationServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "cuttlefish" - expected = "folders/{folder}".format(folder=folder, ) - actual = MigrationServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "mussel", - } - path = MigrationServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = MigrationServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "winkle" - expected = "organizations/{organization}".format(organization=organization, ) - actual = MigrationServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "nautilus", - } - path = MigrationServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. 
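Every path test in this stretch checks the same round-trip property: a helper formats resource IDs into a canonical path with str.format, and the matching parse_* helper inverts it with a regex. A self-contained sketch of the pattern, using the model resource's template (the regex mirrors the style of the generated helpers):

    import re

    def model_path(project, location, model):
        # Mirrors the generated helper: plain formatting over a path template.
        return f'projects/{project}/locations/{location}/models/{model}'

    def parse_model_path(path):
        m = re.match(
            r'^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$',
            path,
        )
        return m.groupdict() if m else {}

    expected = {'project': 'squid', 'location': 'clam', 'model': 'whelk'}
    assert parse_model_path(model_path(**expected)) == expected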
- actual = MigrationServiceClient.parse_common_organization_path(path)
- assert expected == actual
-
-def test_common_project_path():
- project = "scallop"
- expected = "projects/{project}".format(project=project, )
- actual = MigrationServiceClient.common_project_path(project)
- assert expected == actual
-
-
-def test_parse_common_project_path():
- expected = {
- "project": "abalone",
- }
- path = MigrationServiceClient.common_project_path(**expected)
-
- # Check that the path construction is reversible.
- actual = MigrationServiceClient.parse_common_project_path(path)
- assert expected == actual
-
-def test_common_location_path():
- project = "squid"
- location = "clam"
- expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
- actual = MigrationServiceClient.common_location_path(project, location)
- assert expected == actual
-
-
-def test_parse_common_location_path():
- expected = {
- "project": "whelk",
- "location": "octopus",
- }
- path = MigrationServiceClient.common_location_path(**expected)
-
- # Check that the path construction is reversible.
- actual = MigrationServiceClient.parse_common_location_path(path)
- assert expected == actual
-
-
-def test_client_with_default_client_info():
- client_info = gapic_v1.client_info.ClientInfo()
-
- with mock.patch.object(transports.MigrationServiceTransport, '_prep_wrapped_messages') as prep:
- client = MigrationServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- client_info=client_info,
- )
- prep.assert_called_once_with(client_info)
-
- with mock.patch.object(transports.MigrationServiceTransport, '_prep_wrapped_messages') as prep:
- transport_class = MigrationServiceClient.get_transport_class()
- transport = transport_class(
- credentials=ga_credentials.AnonymousCredentials(),
- client_info=client_info,
- )
- prep.assert_called_once_with(client_info)
diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_model_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_model_service.py
deleted file mode 100644
index 1db06d340b..0000000000
--- a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_model_service.py
+++ /dev/null
@@ -1,4051 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1.services.model_service import ModelServiceAsyncClient -from google.cloud.aiplatform_v1.services.model_service import ModelServiceClient -from google.cloud.aiplatform_v1.services.model_service import pagers -from google.cloud.aiplatform_v1.services.model_service import transports -from google.cloud.aiplatform_v1.services.model_service.transports.base import _GOOGLE_AUTH_VERSION -from google.cloud.aiplatform_v1.types import deployed_model_ref -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import env_var -from google.cloud.aiplatform_v1.types import io -from google.cloud.aiplatform_v1.types import model -from google.cloud.aiplatform_v1.types import model as gca_model -from google.cloud.aiplatform_v1.types import model_evaluation -from google.cloud.aiplatform_v1.types import model_evaluation_slice -from google.cloud.aiplatform_v1.types import model_service -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). -requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client):
- return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
-
-
-def test__get_default_mtls_endpoint():
- api_endpoint = "example.googleapis.com"
- api_mtls_endpoint = "example.mtls.googleapis.com"
- sandbox_endpoint = "example.sandbox.googleapis.com"
- sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
- non_googleapi = "api.example.com"
-
- assert ModelServiceClient._get_default_mtls_endpoint(None) is None
- assert ModelServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
- assert ModelServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
- assert ModelServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
- assert ModelServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
- assert ModelServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
-
-
-@pytest.mark.parametrize("client_class", [
- ModelServiceClient,
- ModelServiceAsyncClient,
-])
-def test_model_service_client_from_service_account_info(client_class):
- creds = ga_credentials.AnonymousCredentials()
- with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
- factory.return_value = creds
- info = {"valid": True}
- client = client_class.from_service_account_info(info)
- assert client.transport._credentials == creds
- assert isinstance(client, client_class)
-
- assert client.transport._host == 'aiplatform.googleapis.com:443'
-
-
-@pytest.mark.parametrize("client_class", [
- ModelServiceClient,
- ModelServiceAsyncClient,
-])
-def test_model_service_client_service_account_always_use_jwt(client_class):
- with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
- creds = service_account.Credentials(None, None, None)
- client = client_class(credentials=creds)
- use_jwt.assert_not_called()
-
-
-@pytest.mark.parametrize("transport_class,transport_name", [
- (transports.ModelServiceGrpcTransport, "grpc"),
- (transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"),
-])
-def test_model_service_client_service_account_always_use_jwt_true(transport_class, transport_name):
- with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
- creds = service_account.Credentials(None, None, None)
- transport = transport_class(credentials=creds, always_use_jwt_access=True)
- use_jwt.assert_called_once_with(True)
-
-
-@pytest.mark.parametrize("client_class", [
- ModelServiceClient,
- ModelServiceAsyncClient,
-])
-def test_model_service_client_from_service_account_file(client_class):
- creds = ga_credentials.AnonymousCredentials()
- with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
- factory.return_value = creds
- client = client_class.from_service_account_file("dummy/file/path.json")
- assert client.transport._credentials == creds
- assert isinstance(client, client_class)
-
- client = client_class.from_service_account_json("dummy/file/path.json")
- assert client.transport._credentials == creds
- assert isinstance(client, client_class)
-
- assert client.transport._host == 'aiplatform.googleapis.com:443'
-
-
-def test_model_service_client_get_transport_class():
- transport = ModelServiceClient.get_transport_class()
- available_transports = [
- transports.ModelServiceGrpcTransport,
- ]
- assert transport in available_transports
-
- transport = ModelServiceClient.get_transport_class("grpc")
- assert transport == transports.ModelServiceGrpcTransport
-
-
-@pytest.mark.parametrize("client_class,transport_class,transport_name", [
- (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc"),
- (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"),
-])
-@mock.patch.object(ModelServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceClient))
-@mock.patch.object(ModelServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceAsyncClient))
-def test_model_service_client_client_options(client_class, transport_class, transport_name):
- # Check that if channel is provided we won't create a new one.
- with mock.patch.object(ModelServiceClient, 'get_transport_class') as gtc:
- transport = transport_class(
- credentials=ga_credentials.AnonymousCredentials()
- )
- client = client_class(transport=transport)
- gtc.assert_not_called()
-
- # Check that if channel is provided via str we will create a new one.
- with mock.patch.object(ModelServiceClient, 'get_transport_class') as gtc:
- client = client_class(transport=transport_name)
- gtc.assert_called()
-
- # Check the case api_endpoint is provided.
- options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
- with mock.patch.object(transport_class, '__init__') as patched:
- patched.return_value = None
- client = client_class(client_options=options)
- patched.assert_called_once_with(
- credentials=None,
- credentials_file=None,
- host="squid.clam.whelk",
- scopes=None,
- client_cert_source_for_mtls=None,
- quota_project_id=None,
- client_info=transports.base.DEFAULT_CLIENT_INFO,
- )
-
- # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
- # "never".
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
- with mock.patch.object(transport_class, '__init__') as patched:
- patched.return_value = None
- client = client_class()
- patched.assert_called_once_with(
- credentials=None,
- credentials_file=None,
- host=client.DEFAULT_ENDPOINT,
- scopes=None,
- client_cert_source_for_mtls=None,
- quota_project_id=None,
- client_info=transports.base.DEFAULT_CLIENT_INFO,
- )
-
- # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
- # "always".
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
- with mock.patch.object(transport_class, '__init__') as patched:
- patched.return_value = None
- client = client_class()
- patched.assert_called_once_with(
- credentials=None,
- credentials_file=None,
- host=client.DEFAULT_MTLS_ENDPOINT,
- scopes=None,
- client_cert_source_for_mtls=None,
- quota_project_id=None,
- client_info=transports.base.DEFAULT_CLIENT_INFO,
- )
-
- # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
- # unsupported value.
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
- with pytest.raises(MutualTLSChannelError):
- client = client_class()
-
- # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc", "true"), - (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc", "false"), - (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(ModelServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceClient)) -@mock.patch.object(ModelServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_model_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc"), - (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_model_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc"), - (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_model_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_model_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1.services.model_service.transports.ModelServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = ModelServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_upload_model(transport: str = 'grpc', request_type=model_service.UploadModelRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.upload_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.upload_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.UploadModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_upload_model_from_dict(): - test_upload_model(request_type=dict) - - -def test_upload_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.upload_model), - '__call__') as call: - client.upload_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.UploadModelRequest() - - -@pytest.mark.asyncio -async def test_upload_model_async(transport: str = 'grpc_asyncio', request_type=model_service.UploadModelRequest): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.upload_model), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.upload_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.UploadModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_upload_model_async_from_dict(): - await test_upload_model_async(request_type=dict) - - -def test_upload_model_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.UploadModelRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.upload_model), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.upload_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_upload_model_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.UploadModelRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.upload_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.upload_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_upload_model_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.upload_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.upload_model( - parent='parent_value', - model=gca_model.Model(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
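The field-header tests above look for a single ('x-goog-request-params', ...) tuple in the call metadata. That pair can be built with google.api_core's routing-header helper; a small standalone illustration (the parameter values are only examples):

    from google.api_core.gapic_v1 import routing_header

    # Yields the ('x-goog-request-params', 'parent=parent/value') tuple that
    # the tests above expect to find in kw['metadata'].
    params = (("parent", "parent/value"),)
    print(routing_header.to_grpc_metadata(params))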
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].model == gca_model.Model(name='name_value') - - -def test_upload_model_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.upload_model( - model_service.UploadModelRequest(), - parent='parent_value', - model=gca_model.Model(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_upload_model_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.upload_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.upload_model( - parent='parent_value', - model=gca_model.Model(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].model == gca_model.Model(name='name_value') - - -@pytest.mark.asyncio -async def test_upload_model_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.upload_model( - model_service.UploadModelRequest(), - parent='parent_value', - model=gca_model.Model(name='name_value'), - ) - - -def test_get_model(transport: str = 'grpc', request_type=model_service.GetModelRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model.Model( - name='name_value', - display_name='display_name_value', - description='description_value', - metadata_schema_uri='metadata_schema_uri_value', - training_pipeline='training_pipeline_value', - artifact_uri='artifact_uri_value', - supported_deployment_resources_types=[model.Model.DeploymentResourcesType.DEDICATED_RESOURCES], - supported_input_storage_formats=['supported_input_storage_formats_value'], - supported_output_storage_formats=['supported_output_storage_formats_value'], - etag='etag_value', - ) - response = client.get_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.GetModelRequest() - - # Establish that the response is the type that we expect. 
-    assert isinstance(response, model.Model)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.description == 'description_value'
-    assert response.metadata_schema_uri == 'metadata_schema_uri_value'
-    assert response.training_pipeline == 'training_pipeline_value'
-    assert response.artifact_uri == 'artifact_uri_value'
-    assert response.supported_deployment_resources_types == [model.Model.DeploymentResourcesType.DEDICATED_RESOURCES]
-    assert response.supported_input_storage_formats == ['supported_input_storage_formats_value']
-    assert response.supported_output_storage_formats == ['supported_output_storage_formats_value']
-    assert response.etag == 'etag_value'
-
-
-def test_get_model_from_dict():
-    test_get_model(request_type=dict)
-
-
-def test_get_model_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_model),
-            '__call__') as call:
-        client.get_model()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.GetModelRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_model_async(transport: str = 'grpc_asyncio', request_type=model_service.GetModelRequest):
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_model),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model(
-            name='name_value',
-            display_name='display_name_value',
-            description='description_value',
-            metadata_schema_uri='metadata_schema_uri_value',
-            training_pipeline='training_pipeline_value',
-            artifact_uri='artifact_uri_value',
-            supported_deployment_resources_types=[model.Model.DeploymentResourcesType.DEDICATED_RESOURCES],
-            supported_input_storage_formats=['supported_input_storage_formats_value'],
-            supported_output_storage_formats=['supported_output_storage_formats_value'],
-            etag='etag_value',
-        ))
-        response = await client.get_model(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.GetModelRequest()
-
-    # Establish that the response is the type that we expect.
- assert isinstance(response, model.Model) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.training_pipeline == 'training_pipeline_value' - assert response.artifact_uri == 'artifact_uri_value' - assert response.supported_deployment_resources_types == [model.Model.DeploymentResourcesType.DEDICATED_RESOURCES] - assert response.supported_input_storage_formats == ['supported_input_storage_formats_value'] - assert response.supported_output_storage_formats == ['supported_output_storage_formats_value'] - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_model_async_from_dict(): - await test_get_model_async(request_type=dict) - - -def test_get_model_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.GetModelRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - call.return_value = model.Model() - client.get_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_model_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.GetModelRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) - await client.get_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_model_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model.Model() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_get_model_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_model( - model_service.GetModelRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_model_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model.Model() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_get_model_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_model( - model_service.GetModelRequest(), - name='name_value', - ) - - -def test_list_models(transport: str = 'grpc', request_type=model_service.ListModelsRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_service.ListModelsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_models(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_models_from_dict(): - test_list_models(request_type=dict) - - -def test_list_models_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
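Every flattened/flattened_error pair above exercises the same mutual-exclusion rule in the generated clients: flattened keyword arguments may not be combined with an explicit request object. Schematically (a sketch of the guard, not the generated code verbatim):

    def get_model(request=None, *, name=None):
        # The generated surface raises before sending anything if both a
        # request object and flattened fields are supplied.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')
        return request or {'name': name}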
-    with mock.patch.object(
-            type(client.transport.list_models),
-            '__call__') as call:
-        client.list_models()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.ListModelsRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_models_async(transport: str = 'grpc_asyncio', request_type=model_service.ListModelsRequest):
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_models),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelsResponse(
-            next_page_token='next_page_token_value',
-        ))
-        response = await client.list_models(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.ListModelsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListModelsAsyncPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_models_async_from_dict():
-    await test_list_models_async(request_type=dict)
-
-
-def test_list_models_field_headers():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = model_service.ListModelsRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_models),
-            '__call__') as call:
-        call.return_value = model_service.ListModelsResponse()
-        client.list_models(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_models_field_headers_async():
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = model_service.ListModelsRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_models),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelsResponse())
-        await client.list_models(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_list_models_flattened():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_models),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = model_service.ListModelsResponse()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.list_models(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-def test_list_models_flattened_error():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.list_models(
-            model_service.ListModelsRequest(),
-            parent='parent_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_list_models_flattened_async():
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_models),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = model_service.ListModelsResponse()
-
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelsResponse())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.list_models(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_models_flattened_error_async():
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.list_models(
-            model_service.ListModelsRequest(),
-            parent='parent_value',
-        )
-
-
-def test_list_models_pager():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_models),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            model_service.ListModelsResponse(
-                models=[
-                    model.Model(),
-                    model.Model(),
-                    model.Model(),
-                ],
-                next_page_token='abc',
-            ),
-            model_service.ListModelsResponse(
-                models=[],
-                next_page_token='def',
-            ),
-            model_service.ListModelsResponse(
-                models=[
-                    model.Model(),
-                ],
-                next_page_token='ghi',
-            ),
-            model_service.ListModelsResponse(
-                models=[
-                    model.Model(),
-                    model.Model(),
-                ],
-            ),
-            RuntimeError,
-        )
-
-        metadata = ()
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ('parent', ''),
-            )),
-        )
-        pager = client.list_models(request={})
-
-        assert pager._metadata == metadata
-
-        results = [i for i in pager]
-        assert len(results) == 6
-        assert all(isinstance(i, model.Model)
-                   for i in results)
-
-def test_list_models_pages():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_models),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            model_service.ListModelsResponse(
-                models=[
-                    model.Model(),
-                    model.Model(),
-                    model.Model(),
-                ],
-                next_page_token='abc',
-            ),
-            model_service.ListModelsResponse(
-                models=[],
-                next_page_token='def',
-            ),
-            model_service.ListModelsResponse(
-                models=[
-                    model.Model(),
-                ],
-                next_page_token='ghi',
-            ),
-            model_service.ListModelsResponse(
-                models=[
-                    model.Model(),
-                    model.Model(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = list(client.list_models(request={}).pages)
-        for page_, token in zip(pages, ['abc','def','ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_models_async_pager():
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_models),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            model_service.ListModelsResponse(
-                models=[
-                    model.Model(),
-                    model.Model(),
-                    model.Model(),
-                ],
-                next_page_token='abc',
-            ),
-            model_service.ListModelsResponse(
-                models=[],
-                next_page_token='def',
-            ),
-            model_service.ListModelsResponse(
-                models=[
-                    model.Model(),
-                ],
-                next_page_token='ghi',
-            ),
-            model_service.ListModelsResponse(
-                models=[
-                    model.Model(),
-                    model.Model(),
-                ],
-            ),
-            RuntimeError,
-        )
-        async_pager = await client.list_models(request={},)
-        assert async_pager.next_page_token == 'abc'
-        responses = []
-        async for response in async_pager:
-            responses.append(response)
-
-        assert len(responses) == 6
-        assert all(isinstance(i, model.Model)
-                   for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_models_async_pages():
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_models),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
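The four pager tests above encode the api_core pager contract: iterating the pager itself yields items flattened across pages, .pages yields the raw responses, and an empty next_page_token terminates the sequence. A toy model of that contract (Page and iter_items are stand-in names, not the real classes):

    class Page:
        def __init__(self, items, token):
            self.items = items
            self.next_page_token = token

    def iter_items(pages):
        # Flattens items across pages, stopping on an empty token; this is
        # the same shape the 3 + 0 + 1 + 2 == 6 assertion relies on.
        for page in pages:
            yield from page.items
            if not page.next_page_token:
                break

    fake = [Page([1, 2, 3], 'abc'), Page([], 'def'),
            Page([4], 'ghi'), Page([5, 6], '')]
    assert list(iter_items(fake)) == [1, 2, 3, 4, 5, 6]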
- call.side_effect = ( - model_service.ListModelsResponse( - models=[ - model.Model(), - model.Model(), - model.Model(), - ], - next_page_token='abc', - ), - model_service.ListModelsResponse( - models=[], - next_page_token='def', - ), - model_service.ListModelsResponse( - models=[ - model.Model(), - ], - next_page_token='ghi', - ), - model_service.ListModelsResponse( - models=[ - model.Model(), - model.Model(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_models(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_model(transport: str = 'grpc', request_type=model_service.UpdateModelRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_model.Model( - name='name_value', - display_name='display_name_value', - description='description_value', - metadata_schema_uri='metadata_schema_uri_value', - training_pipeline='training_pipeline_value', - artifact_uri='artifact_uri_value', - supported_deployment_resources_types=[gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES], - supported_input_storage_formats=['supported_input_storage_formats_value'], - supported_output_storage_formats=['supported_output_storage_formats_value'], - etag='etag_value', - ) - response = client.update_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.UpdateModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_model.Model) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.training_pipeline == 'training_pipeline_value' - assert response.artifact_uri == 'artifact_uri_value' - assert response.supported_deployment_resources_types == [gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES] - assert response.supported_input_storage_formats == ['supported_input_storage_formats_value'] - assert response.supported_output_storage_formats == ['supported_output_storage_formats_value'] - assert response.etag == 'etag_value' - - -def test_update_model_from_dict(): - test_update_model(request_type=dict) - - -def test_update_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
-    with mock.patch.object(
-            type(client.transport.update_model),
-            '__call__') as call:
-        client.update_model()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.UpdateModelRequest()
-
-
-@pytest.mark.asyncio
-async def test_update_model_async(transport: str = 'grpc_asyncio', request_type=model_service.UpdateModelRequest):
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_model),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model(
-            name='name_value',
-            display_name='display_name_value',
-            description='description_value',
-            metadata_schema_uri='metadata_schema_uri_value',
-            training_pipeline='training_pipeline_value',
-            artifact_uri='artifact_uri_value',
-            supported_deployment_resources_types=[gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES],
-            supported_input_storage_formats=['supported_input_storage_formats_value'],
-            supported_output_storage_formats=['supported_output_storage_formats_value'],
-            etag='etag_value',
-        ))
-        response = await client.update_model(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.UpdateModelRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, gca_model.Model)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.description == 'description_value'
-    assert response.metadata_schema_uri == 'metadata_schema_uri_value'
-    assert response.training_pipeline == 'training_pipeline_value'
-    assert response.artifact_uri == 'artifact_uri_value'
-    assert response.supported_deployment_resources_types == [gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES]
-    assert response.supported_input_storage_formats == ['supported_input_storage_formats_value']
-    assert response.supported_output_storage_formats == ['supported_output_storage_formats_value']
-    assert response.etag == 'etag_value'
-
-
-@pytest.mark.asyncio
-async def test_update_model_async_from_dict():
-    await test_update_model_async(request_type=dict)
-
-
-def test_update_model_field_headers():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = model_service.UpdateModelRequest()
-
-    request.model.name = 'model.name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_model),
-            '__call__') as call:
-        call.return_value = gca_model.Model()
-        client.update_model(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'model.name=model.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_model_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.UpdateModelRequest() - - request.model.name = 'model.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model()) - await client.update_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'model.name=model.name/value', - ) in kw['metadata'] - - -def test_update_model_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_model.Model() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_model( - model=gca_model.Model(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].model == gca_model.Model(name='name_value') - assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) - - -def test_update_model_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_model( - model_service.UpdateModelRequest(), - model=gca_model.Model(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_model_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_model.Model() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_model( - model=gca_model.Model(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
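The flattened update_model calls above pair the model with a google.protobuf FieldMask selecting which fields the server should overwrite; constructing one is a single line (the path shown is just an example):

    from google.protobuf import field_mask_pb2

    # Restrict the update to display_name; unlisted fields are left untouched.
    mask = field_mask_pb2.FieldMask(paths=['display_name'])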
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].model == gca_model.Model(name='name_value') - assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) - - -@pytest.mark.asyncio -async def test_update_model_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_model( - model_service.UpdateModelRequest(), - model=gca_model.Model(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_model(transport: str = 'grpc', request_type=model_service.DeleteModelRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.DeleteModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_model_from_dict(): - test_delete_model(request_type=dict) - - -def test_delete_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - client.delete_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.DeleteModelRequest() - - -@pytest.mark.asyncio -async def test_delete_model_async(transport: str = 'grpc_asyncio', request_type=model_service.DeleteModelRequest): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.DeleteModelRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_model_async_from_dict(): - await test_delete_model_async(request_type=dict) - - -def test_delete_model_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.DeleteModelRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_model_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.DeleteModelRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_model_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_delete_model_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_model( - model_service.DeleteModelRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_model_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_delete_model_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_model( - model_service.DeleteModelRequest(), - name='name_value', - ) - - -def test_export_model(transport: str = 'grpc', request_type=model_service.ExportModelRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.export_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ExportModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_export_model_from_dict(): - test_export_model(request_type=dict) - - -def test_export_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - client.export_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ExportModelRequest() - - -@pytest.mark.asyncio -async def test_export_model_async(transport: str = 'grpc_asyncio', request_type=model_service.ExportModelRequest): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.export_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ExportModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_export_model_async_from_dict(): - await test_export_model_async(request_type=dict) - - -def test_export_model_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.ExportModelRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.export_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_export_model_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.ExportModelRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.export_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_export_model_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.export_model( - name='name_value', - output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
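upload_model, delete_model, and export_model all assert isinstance(response, future.Future) because the generated client wraps the raw longrunning Operation proto in an api_core future. A minimal sketch of that wrapping, with mock.Mock() standing in for the operations client, which is only consulted when the operation still needs polling:

    from unittest import mock

    from google.api_core import operation
    from google.longrunning import operations_pb2
    from google.protobuf import any_pb2, empty_pb2

    # A finished operation whose response unpacks to Empty; from_gapic() turns
    # it into the future-like object these tests type-check against.
    resp = any_pb2.Any()
    resp.Pack(empty_pb2.Empty())
    raw = operations_pb2.Operation(name='operations/spam', done=True,
                                   response=resp)
    fut = operation.from_gapic(raw, mock.Mock(), empty_pb2.Empty)
    print(fut.result())  # resolves immediately to Empty()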
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - assert args[0].output_config == model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value') - - -def test_export_model_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.export_model( - model_service.ExportModelRequest(), - name='name_value', - output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'), - ) - - -@pytest.mark.asyncio -async def test_export_model_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.export_model( - name='name_value', - output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - assert args[0].output_config == model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value') - - -@pytest.mark.asyncio -async def test_export_model_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.export_model( - model_service.ExportModelRequest(), - name='name_value', - output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'), - ) - - -def test_get_model_evaluation(transport: str = 'grpc', request_type=model_service.GetModelEvaluationRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_evaluation.ModelEvaluation( - name='name_value', - metrics_schema_uri='metrics_schema_uri_value', - slice_dimensions=['slice_dimensions_value'], - ) - response = client.get_model_evaluation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.GetModelEvaluationRequest() - - # Establish that the response is the type that we expect. 
-    assert isinstance(response, model_evaluation.ModelEvaluation)
-    assert response.name == 'name_value'
-    assert response.metrics_schema_uri == 'metrics_schema_uri_value'
-    assert response.slice_dimensions == ['slice_dimensions_value']
-
-
-def test_get_model_evaluation_from_dict():
-    test_get_model_evaluation(request_type=dict)
-
-
-def test_get_model_evaluation_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_model_evaluation),
-            '__call__') as call:
-        client.get_model_evaluation()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.GetModelEvaluationRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_model_evaluation_async(transport: str = 'grpc_asyncio', request_type=model_service.GetModelEvaluationRequest):
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_model_evaluation),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation(
-            name='name_value',
-            metrics_schema_uri='metrics_schema_uri_value',
-            slice_dimensions=['slice_dimensions_value'],
-        ))
-        response = await client.get_model_evaluation(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.GetModelEvaluationRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, model_evaluation.ModelEvaluation)
-    assert response.name == 'name_value'
-    assert response.metrics_schema_uri == 'metrics_schema_uri_value'
-    assert response.slice_dimensions == ['slice_dimensions_value']
-
-
-@pytest.mark.asyncio
-async def test_get_model_evaluation_async_from_dict():
-    await test_get_model_evaluation_async(request_type=dict)
-
-
-def test_get_model_evaluation_field_headers():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = model_service.GetModelEvaluationRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_model_evaluation),
-            '__call__') as call:
-        call.return_value = model_evaluation.ModelEvaluation()
-        client.get_model_evaluation(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_model_evaluation_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.GetModelEvaluationRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation()) - await client.get_model_evaluation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_model_evaluation_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_evaluation.ModelEvaluation() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_model_evaluation( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_get_model_evaluation_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_model_evaluation( - model_service.GetModelEvaluationRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_model_evaluation_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_evaluation.ModelEvaluation() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_model_evaluation( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_get_model_evaluation_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_model_evaluation( - model_service.GetModelEvaluationRequest(), - name='name_value', - ) - - -def test_list_model_evaluations(transport: str = 'grpc', request_type=model_service.ListModelEvaluationsRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_service.ListModelEvaluationsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_model_evaluations(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelEvaluationsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelEvaluationsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_model_evaluations_from_dict(): - test_list_model_evaluations(request_type=dict) - - -def test_list_model_evaluations_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - client.list_model_evaluations() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelEvaluationsRequest() - - -@pytest.mark.asyncio -async def test_list_model_evaluations_async(transport: str = 'grpc_asyncio', request_type=model_service.ListModelEvaluationsRequest): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_model_evaluations(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelEvaluationsRequest() - - # Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ListModelEvaluationsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_model_evaluations_async_from_dict(): - await test_list_model_evaluations_async(request_type=dict) - - -def test_list_model_evaluations_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.ListModelEvaluationsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - call.return_value = model_service.ListModelEvaluationsResponse() - client.list_model_evaluations(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_model_evaluations_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.ListModelEvaluationsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationsResponse()) - await client.list_model_evaluations(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_model_evaluations_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_service.ListModelEvaluationsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_model_evaluations( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -def test_list_model_evaluations_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_model_evaluations( - model_service.ListModelEvaluationsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_model_evaluations_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_service.ListModelEvaluationsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_model_evaluations( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -@pytest.mark.asyncio -async def test_list_model_evaluations_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_model_evaluations( - model_service.ListModelEvaluationsRequest(), - parent='parent_value', - ) - - -def test_list_model_evaluations_pager(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - next_page_token='abc', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[], - next_page_token='def', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - ], - next_page_token='ghi', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_model_evaluations(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, model_evaluation.ModelEvaluation) - for i in results) - -def test_list_model_evaluations_pages(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - next_page_token='abc', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[], - next_page_token='def', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - ], - next_page_token='ghi', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - ), - RuntimeError, - ) - pages = list(client.list_model_evaluations(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_model_evaluations_async_pager(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - next_page_token='abc', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[], - next_page_token='def', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - ], - next_page_token='ghi', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_model_evaluations(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, model_evaluation.ModelEvaluation) - for i in responses) - -@pytest.mark.asyncio -async def test_list_model_evaluations_async_pages(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - next_page_token='abc', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[], - next_page_token='def', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - ], - next_page_token='ghi', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_model_evaluations(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_get_model_evaluation_slice(transport: str = 'grpc', request_type=model_service.GetModelEvaluationSliceRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation_slice), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_evaluation_slice.ModelEvaluationSlice( - name='name_value', - metrics_schema_uri='metrics_schema_uri_value', - ) - response = client.get_model_evaluation_slice(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.GetModelEvaluationSliceRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, model_evaluation_slice.ModelEvaluationSlice) - assert response.name == 'name_value' - assert response.metrics_schema_uri == 'metrics_schema_uri_value' - - -def test_get_model_evaluation_slice_from_dict(): - test_get_model_evaluation_slice(request_type=dict) - - -def test_get_model_evaluation_slice_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation_slice), - '__call__') as call: - client.get_model_evaluation_slice() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.GetModelEvaluationSliceRequest() - - -@pytest.mark.asyncio -async def test_get_model_evaluation_slice_async(transport: str = 'grpc_asyncio', request_type=model_service.GetModelEvaluationSliceRequest): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_model_evaluation_slice), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation_slice.ModelEvaluationSlice( - name='name_value', - metrics_schema_uri='metrics_schema_uri_value', - )) - response = await client.get_model_evaluation_slice(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.GetModelEvaluationSliceRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, model_evaluation_slice.ModelEvaluationSlice) - assert response.name == 'name_value' - assert response.metrics_schema_uri == 'metrics_schema_uri_value' - - -@pytest.mark.asyncio -async def test_get_model_evaluation_slice_async_from_dict(): - await test_get_model_evaluation_slice_async(request_type=dict) - - -def test_get_model_evaluation_slice_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.GetModelEvaluationSliceRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation_slice), - '__call__') as call: - call.return_value = model_evaluation_slice.ModelEvaluationSlice() - client.get_model_evaluation_slice(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_model_evaluation_slice_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.GetModelEvaluationSliceRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation_slice), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation_slice.ModelEvaluationSlice()) - await client.get_model_evaluation_slice(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_model_evaluation_slice_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation_slice), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_evaluation_slice.ModelEvaluationSlice() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method.
- client.get_model_evaluation_slice( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_get_model_evaluation_slice_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_model_evaluation_slice( - model_service.GetModelEvaluationSliceRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_model_evaluation_slice_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation_slice), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_evaluation_slice.ModelEvaluationSlice() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation_slice.ModelEvaluationSlice()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_model_evaluation_slice( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_get_model_evaluation_slice_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_model_evaluation_slice( - model_service.GetModelEvaluationSliceRequest(), - name='name_value', - ) - - -def test_list_model_evaluation_slices(transport: str = 'grpc', request_type=model_service.ListModelEvaluationSlicesRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_service.ListModelEvaluationSlicesResponse( - next_page_token='next_page_token_value', - ) - response = client.list_model_evaluation_slices(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelEvaluationSlicesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelEvaluationSlicesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_model_evaluation_slices_from_dict(): - test_list_model_evaluation_slices(request_type=dict) - - -def test_list_model_evaluation_slices_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work.
- client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - client.list_model_evaluation_slices() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelEvaluationSlicesRequest() - - -@pytest.mark.asyncio -async def test_list_model_evaluation_slices_async(transport: str = 'grpc_asyncio', request_type=model_service.ListModelEvaluationSlicesRequest): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationSlicesResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_model_evaluation_slices(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelEvaluationSlicesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelEvaluationSlicesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_model_evaluation_slices_async_from_dict(): - await test_list_model_evaluation_slices_async(request_type=dict) - - -def test_list_model_evaluation_slices_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.ListModelEvaluationSlicesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - call.return_value = model_service.ListModelEvaluationSlicesResponse() - client.list_model_evaluation_slices(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_model_evaluation_slices_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.ListModelEvaluationSlicesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationSlicesResponse()) - await client.list_model_evaluation_slices(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_model_evaluation_slices_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_service.ListModelEvaluationSlicesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_model_evaluation_slices( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -def test_list_model_evaluation_slices_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_model_evaluation_slices( - model_service.ListModelEvaluationSlicesRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_model_evaluation_slices_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_service.ListModelEvaluationSlicesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationSlicesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_model_evaluation_slices( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -@pytest.mark.asyncio -async def test_list_model_evaluation_slices_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_model_evaluation_slices( - model_service.ListModelEvaluationSlicesRequest(), - parent='parent_value', - ) - - -def test_list_model_evaluation_slices_pager(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='abc', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[], - next_page_token='def', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='ghi', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_model_evaluation_slices(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, model_evaluation_slice.ModelEvaluationSlice) - for i in results) - -def test_list_model_evaluation_slices_pages(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='abc', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[], - next_page_token='def', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='ghi', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - ), - RuntimeError, - ) - pages = list(client.list_model_evaluation_slices(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_model_evaluation_slices_async_pager(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='abc', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[], - next_page_token='def', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='ghi', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_model_evaluation_slices(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, model_evaluation_slice.ModelEvaluationSlice) - for i in responses) - -@pytest.mark.asyncio -async def test_list_model_evaluation_slices_async_pages(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='abc', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[], - next_page_token='def', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='ghi', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_model_evaluation_slices(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.ModelServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.ModelServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = ModelServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. 
- transport = transports.ModelServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = ModelServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.ModelServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = ModelServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.ModelServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.ModelServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.ModelServiceGrpcTransport, - transports.ModelServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.ModelServiceGrpcTransport, - ) - -def test_model_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.ModelServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_model_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1.services.model_service.transports.ModelServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.ModelServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
- methods = ( - 'upload_model', - 'get_model', - 'list_models', - 'update_model', - 'delete_model', - 'export_model', - 'get_model_evaluation', - 'list_model_evaluations', - 'get_model_evaluation_slice', - 'list_model_evaluation_slices', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -@requires_google_auth_gte_1_25_0 -def test_model_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.model_service.transports.ModelServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.ModelServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@requires_google_auth_lt_1_25_0 -def test_model_service_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.model_service.transports.ModelServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.ModelServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - ), - quota_project_id="octopus", - ) - - -def test_model_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.model_service.transports.ModelServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.ModelServiceTransport() - adc.assert_called_once() - - -@requires_google_auth_gte_1_25_0 -def test_model_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - ModelServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@requires_google_auth_lt_1_25_0 -def test_model_service_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - ModelServiceClient() - adc.assert_called_once_with( - scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.ModelServiceGrpcTransport, - transports.ModelServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_gte_1_25_0 -def test_model_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.ModelServiceGrpcTransport, - transports.ModelServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_model_service_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with(scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.ModelServiceGrpcTransport, grpc_helpers), - (transports.ModelServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_model_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport]) -def test_model_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. 
- with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_model_service_host_no_port(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_model_service_host_with_port(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_model_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.ModelServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials is None - - -def test_model_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.ModelServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials is None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor.
-@pytest.mark.parametrize("transport_class", [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport]) -def test_model_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport]) -def test_model_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_model_service_grpc_lro_client(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have an api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_model_service_grpc_lro_async_client(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have an api-core operations client.
- assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_endpoint_path(): - project = "squid" - location = "clam" - endpoint = "whelk" - expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - actual = ModelServiceClient.endpoint_path(project, location, endpoint) - assert expected == actual - - -def test_parse_endpoint_path(): - expected = { - "project": "octopus", - "location": "oyster", - "endpoint": "nudibranch", - } - path = ModelServiceClient.endpoint_path(**expected) - - # Check that the path construction is reversible. - actual = ModelServiceClient.parse_endpoint_path(path) - assert expected == actual - -def test_model_path(): - project = "cuttlefish" - location = "mussel" - model = "winkle" - expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - actual = ModelServiceClient.model_path(project, location, model) - assert expected == actual - - -def test_parse_model_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "model": "abalone", - } - path = ModelServiceClient.model_path(**expected) - - # Check that the path construction is reversible. - actual = ModelServiceClient.parse_model_path(path) - assert expected == actual - -def test_model_evaluation_path(): - project = "squid" - location = "clam" - model = "whelk" - evaluation = "octopus" - expected = "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}".format(project=project, location=location, model=model, evaluation=evaluation, ) - actual = ModelServiceClient.model_evaluation_path(project, location, model, evaluation) - assert expected == actual - - -def test_parse_model_evaluation_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - "model": "cuttlefish", - "evaluation": "mussel", - } - path = ModelServiceClient.model_evaluation_path(**expected) - - # Check that the path construction is reversible. - actual = ModelServiceClient.parse_model_evaluation_path(path) - assert expected == actual - -def test_model_evaluation_slice_path(): - project = "winkle" - location = "nautilus" - model = "scallop" - evaluation = "abalone" - slice = "squid" - expected = "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}".format(project=project, location=location, model=model, evaluation=evaluation, slice=slice, ) - actual = ModelServiceClient.model_evaluation_slice_path(project, location, model, evaluation, slice) - assert expected == actual - - -def test_parse_model_evaluation_slice_path(): - expected = { - "project": "clam", - "location": "whelk", - "model": "octopus", - "evaluation": "oyster", - "slice": "nudibranch", - } - path = ModelServiceClient.model_evaluation_slice_path(**expected) - - # Check that the path construction is reversible. 
- actual = ModelServiceClient.parse_model_evaluation_slice_path(path) - assert expected == actual - -def test_training_pipeline_path(): - project = "cuttlefish" - location = "mussel" - training_pipeline = "winkle" - expected = "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, ) - actual = ModelServiceClient.training_pipeline_path(project, location, training_pipeline) - assert expected == actual - - -def test_parse_training_pipeline_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "training_pipeline": "abalone", - } - path = ModelServiceClient.training_pipeline_path(**expected) - - # Check that the path construction is reversible. - actual = ModelServiceClient.parse_training_pipeline_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "squid" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = ModelServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "clam", - } - path = ModelServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = ModelServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "whelk" - expected = "folders/{folder}".format(folder=folder, ) - actual = ModelServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "octopus", - } - path = ModelServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = ModelServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "oyster" - expected = "organizations/{organization}".format(organization=organization, ) - actual = ModelServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "nudibranch", - } - path = ModelServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = ModelServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "cuttlefish" - expected = "projects/{project}".format(project=project, ) - actual = ModelServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "mussel", - } - path = ModelServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = ModelServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "winkle" - location = "nautilus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = ModelServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "scallop", - "location": "abalone", - } - path = ModelServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = ModelServiceClient.parse_common_location_path(path)
- assert expected == actual
-
-
-def test_client_withDEFAULT_CLIENT_INFO():
- client_info = gapic_v1.client_info.ClientInfo()
-
- with mock.patch.object(transports.ModelServiceTransport, '_prep_wrapped_messages') as prep:
- client = ModelServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- client_info=client_info,
- )
- prep.assert_called_once_with(client_info)
-
- with mock.patch.object(transports.ModelServiceTransport, '_prep_wrapped_messages') as prep:
- transport_class = ModelServiceClient.get_transport_class()
- transport = transport_class(
- credentials=ga_credentials.AnonymousCredentials(),
- client_info=client_info,
- )
- prep.assert_called_once_with(client_info)
diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py
deleted file mode 100644
index d4e51cd143..0000000000
--- a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py
+++ /dev/null
@@ -1,3916 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os
-import mock
-import packaging.version
-
-import grpc
-from grpc.experimental import aio
-import math
-import pytest
-from proto.marshal.rules.dates import DurationRule, TimestampRule
-
-
-from google.api_core import client_options
-from google.api_core import exceptions as core_exceptions
-from google.api_core import future
-from google.api_core import gapic_v1
-from google.api_core import grpc_helpers
-from google.api_core import grpc_helpers_async
-from google.api_core import operation_async # type: ignore
-from google.api_core import operations_v1
-from google.auth import credentials as ga_credentials
-from google.auth.exceptions import MutualTLSChannelError
-from google.cloud.aiplatform_v1.services.pipeline_service import PipelineServiceAsyncClient
-from google.cloud.aiplatform_v1.services.pipeline_service import PipelineServiceClient
-from google.cloud.aiplatform_v1.services.pipeline_service import pagers
-from google.cloud.aiplatform_v1.services.pipeline_service import transports
-from google.cloud.aiplatform_v1.services.pipeline_service.transports.base import _GOOGLE_AUTH_VERSION
-from google.cloud.aiplatform_v1.types import artifact
-from google.cloud.aiplatform_v1.types import context
-from google.cloud.aiplatform_v1.types import deployed_model_ref
-from google.cloud.aiplatform_v1.types import encryption_spec
-from google.cloud.aiplatform_v1.types import env_var
-from google.cloud.aiplatform_v1.types import execution
-from google.cloud.aiplatform_v1.types import io
-from google.cloud.aiplatform_v1.types import model
-from google.cloud.aiplatform_v1.types import operation as gca_operation
-from google.cloud.aiplatform_v1.types import pipeline_job
-from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job
-from google.cloud.aiplatform_v1.types import pipeline_service
-from google.cloud.aiplatform_v1.types import pipeline_state
-from google.cloud.aiplatform_v1.types import training_pipeline
-from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline
-from google.cloud.aiplatform_v1.types import value
-from google.longrunning import operations_pb2
-from google.oauth2 import service_account
-from google.protobuf import any_pb2 # type: ignore
-from google.protobuf import field_mask_pb2 # type: ignore
-from google.protobuf import struct_pb2 # type: ignore
-from google.protobuf import timestamp_pb2 # type: ignore
-from google.rpc import status_pb2 # type: ignore
-import google.auth
-
-
-# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
-# through google-api-core:
-# - Delete the auth "less than" test cases
-# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
-requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
- packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
- reason="This test requires google-auth < 1.25.0",
-)
-requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
- packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
- reason="This test requires google-auth >= 1.25.0",
-)
-
-def client_cert_source_callback():
- return b"cert bytes", b"key bytes"
-
-
-# If default endpoint is localhost, then default mtls endpoint will be the same.
-# This method modifies the default endpoint so the client can produce a different
-# mtls endpoint for endpoint testing purposes.
-def modify_default_endpoint(client):
- return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
-
-
-def test__get_default_mtls_endpoint():
- api_endpoint = "example.googleapis.com"
- api_mtls_endpoint = "example.mtls.googleapis.com"
- sandbox_endpoint = "example.sandbox.googleapis.com"
- sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
- non_googleapi = "api.example.com"
-
- assert PipelineServiceClient._get_default_mtls_endpoint(None) is None
- assert PipelineServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
- assert PipelineServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
- assert PipelineServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
- assert PipelineServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
- assert PipelineServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
-
-
-@pytest.mark.parametrize("client_class", [
- PipelineServiceClient,
- PipelineServiceAsyncClient,
-])
-def test_pipeline_service_client_from_service_account_info(client_class):
- creds = ga_credentials.AnonymousCredentials()
- with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
- factory.return_value = creds
- info = {"valid": True}
- client = client_class.from_service_account_info(info)
- assert client.transport._credentials == creds
- assert isinstance(client, client_class)
-
- assert client.transport._host == 'aiplatform.googleapis.com:443'
-
-
-@pytest.mark.parametrize("client_class", [
- PipelineServiceClient,
- PipelineServiceAsyncClient,
-])
-def test_pipeline_service_client_service_account_always_use_jwt(client_class):
- with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
- creds = service_account.Credentials(None, None, None)
- client = client_class(credentials=creds)
- use_jwt.assert_not_called()
-
-
-@pytest.mark.parametrize("transport_class,transport_name", [
- (transports.PipelineServiceGrpcTransport, "grpc"),
- (transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"),
-])
-def test_pipeline_service_client_service_account_always_use_jwt_true(transport_class, transport_name):
- with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
- creds = service_account.Credentials(None, None, None)
- transport = transport_class(credentials=creds, always_use_jwt_access=True)
- use_jwt.assert_called_once_with(True)
-
-
-@pytest.mark.parametrize("client_class", [
- PipelineServiceClient,
- PipelineServiceAsyncClient,
-])
-def test_pipeline_service_client_from_service_account_file(client_class):
- creds = ga_credentials.AnonymousCredentials()
- with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
- factory.return_value = creds
- client = client_class.from_service_account_file("dummy/file/path.json")
- assert client.transport._credentials == creds
- assert isinstance(client, client_class)
-
- client = client_class.from_service_account_json("dummy/file/path.json")
- assert client.transport._credentials == creds
- assert isinstance(client, client_class)
-
- assert client.transport._host == 'aiplatform.googleapis.com:443'
-
-
-def test_pipeline_service_client_get_transport_class():
- transport = PipelineServiceClient.get_transport_class()
- available_transports = [
- transports.PipelineServiceGrpcTransport,
- ]
- assert transport in available_transports
-
- transport = PipelineServiceClient.get_transport_class("grpc")
- assert transport == transports.PipelineServiceGrpcTransport
-
-
-@pytest.mark.parametrize("client_class,transport_class,transport_name", [
- (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc"),
- (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"),
-])
-@mock.patch.object(PipelineServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceClient))
-@mock.patch.object(PipelineServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceAsyncClient))
-def test_pipeline_service_client_client_options(client_class, transport_class, transport_name):
- # Check that if channel is provided we won't create a new one.
- with mock.patch.object(PipelineServiceClient, 'get_transport_class') as gtc:
- transport = transport_class(
- credentials=ga_credentials.AnonymousCredentials()
- )
- client = client_class(transport=transport)
- gtc.assert_not_called()
-
- # Check that if channel is provided via str we will create a new one.
- with mock.patch.object(PipelineServiceClient, 'get_transport_class') as gtc:
- client = client_class(transport=transport_name)
- gtc.assert_called()
-
- # Check the case api_endpoint is provided.
- options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
- with mock.patch.object(transport_class, '__init__') as patched:
- patched.return_value = None
- client = client_class(client_options=options)
- patched.assert_called_once_with(
- credentials=None,
- credentials_file=None,
- host="squid.clam.whelk",
- scopes=None,
- client_cert_source_for_mtls=None,
- quota_project_id=None,
- client_info=transports.base.DEFAULT_CLIENT_INFO,
- )
-
- # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
- # "never".
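The cases that follow pivot on the GOOGLE_API_USE_MTLS_ENDPOINT variable: "never" keeps the default endpoint, "always" forces the mTLS endpoint, "auto" chooses based on certificate availability, and any other value is rejected (the real client raises MutualTLSChannelError; the sketch below substitutes ValueError). A compact model of that selection logic:

    import os

    def select_endpoint(default: str, mtls: str, have_cert: bool) -> str:
        # Simplified model of the endpoint switch exercised below.
        use_mtls = os.environ.get("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
        if use_mtls == "never":
            return default
        if use_mtls == "always":
            return mtls
        if use_mtls == "auto":
            return mtls if have_cert else default
        # The real client raises MutualTLSChannelError here.
        raise ValueError("Unsupported GOOGLE_API_USE_MTLS_ENDPOINT: " + use_mtls)

    os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "never"
    assert select_endpoint("aiplatform.googleapis.com",
                           "aiplatform.mtls.googleapis.com",
                           have_cert=True) == "aiplatform.googleapis.com"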
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc", "true"), - (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc", "false"), - (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(PipelineServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceClient)) -@mock.patch.object(PipelineServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_pipeline_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
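This parametrization pins down that a supplied client_cert_source is only honored when GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". The expected behavior reduces to a small gate (an illustrative helper, not library code):

    import os

    def effective_cert_source(cert_source):
        # The cert callback is honored only with an explicit opt-in.
        if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true":
            return cert_source
        return None

    def cb():
        return b"cert bytes", b"key bytes"

    os.environ["GOOGLE_API_USE_CLIENT_CERTIFICATE"] = "false"
    assert effective_cert_source(cb) is None
    os.environ["GOOGLE_API_USE_CLIENT_CERTIFICATE"] = "true"
    assert effective_cert_source(cb) is cb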
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc"), - (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_pipeline_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. 
- options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc"), - (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_pipeline_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. - options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_pipeline_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1.services.pipeline_service.transports.PipelineServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = PipelineServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_create_training_pipeline(transport: str = 'grpc', request_type=pipeline_service.CreateTrainingPipelineRequest): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_training_pipeline.TrainingPipeline( - name='name_value', - display_name='display_name_value', - training_task_definition='training_task_definition_value', - state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, - ) - response = client.create_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CreateTrainingPipelineRequest() - - # Establish that the response is the type that we expect. 
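The test above, like every RPC test in this file, fakes the wire call by patching __call__ on the *type* of the bound multicallable, so all client-side request plumbing still runs while nothing touches the network. A standalone demonstration of the trick with toy classes; because a MagicMock is not a descriptor, the patched __call__ receives only the request, which is why these tests can assert args[0] == request:

    from unittest import mock

    class Stub:
        def __call__(self, request):
            raise RuntimeError("would hit the network")

    class Transport:
        def __init__(self):
            self.create_thing = Stub()

    transport = Transport()
    # Special methods are looked up on the type, so patch the type's __call__.
    with mock.patch.object(type(transport.create_thing), "__call__") as call:
        call.return_value = {"name": "name_value"}
        response = transport.create_thing({"parent": "parent_value"})

    assert response == {"name": "name_value"}
    # No self is bound to the MagicMock, so the first argument is the request.
    call.assert_called_once_with({"parent": "parent_value"})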
- assert isinstance(response, gca_training_pipeline.TrainingPipeline) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.training_task_definition == 'training_task_definition_value' - assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED - - -def test_create_training_pipeline_from_dict(): - test_create_training_pipeline(request_type=dict) - - -def test_create_training_pipeline_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_training_pipeline), - '__call__') as call: - client.create_training_pipeline() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CreateTrainingPipelineRequest() - - -@pytest.mark.asyncio -async def test_create_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CreateTrainingPipelineRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_training_pipeline.TrainingPipeline( - name='name_value', - display_name='display_name_value', - training_task_definition='training_task_definition_value', - state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, - )) - response = await client.create_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CreateTrainingPipelineRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_training_pipeline.TrainingPipeline) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.training_task_definition == 'training_task_definition_value' - assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED - - -@pytest.mark.asyncio -async def test_create_training_pipeline_async_from_dict(): - await test_create_training_pipeline_async(request_type=dict) - - -def test_create_training_pipeline_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.CreateTrainingPipelineRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_training_pipeline), - '__call__') as call: - call.return_value = gca_training_pipeline.TrainingPipeline() - client.create_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. 
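In the async variant above, the canned return value is wrapped in grpc_helpers_async.FakeUnaryUnaryCall so that `await client.create_training_pipeline(request)` has something awaitable to resolve. A rough stand-in that captures just the awaitable contract (the real helper models a full gRPC call object):

    import asyncio

    class FakeCall:
        # Awaitable stand-in for grpc_helpers_async.FakeUnaryUnaryCall.
        def __init__(self, response):
            self._response = response

        def __await__(self):
            async def _resolve():
                return self._response
            return _resolve().__await__()

    async def main():
        response = await FakeCall({"name": "name_value"})
        assert response == {"name": "name_value"}

    asyncio.run(main())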
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_training_pipeline_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.CreateTrainingPipelineRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_training_pipeline), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_training_pipeline.TrainingPipeline()) - await client.create_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_training_pipeline_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_training_pipeline.TrainingPipeline() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_training_pipeline( - parent='parent_value', - training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].training_pipeline == gca_training_pipeline.TrainingPipeline(name='name_value') - - -def test_create_training_pipeline_flattened_error(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_training_pipeline( - pipeline_service.CreateTrainingPipelineRequest(), - parent='parent_value', - training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_training_pipeline_flattened_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_training_pipeline.TrainingPipeline() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_training_pipeline.TrainingPipeline()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
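Flattened keyword arguments and an explicit request object are mutually exclusive; that is what the *_flattened_error tests lock in. A sketch of the guard the generated clients are expected to apply inside each method (a hypothetical standalone function, not the client's actual code):

    def create_training_pipeline(request=None, *, parent=None, training_pipeline=None):
        # Reject mixing a request object with flattened fields.
        has_flattened = any(p is not None for p in (parent, training_pipeline))
        if request is not None and has_flattened:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")
        return request or {"parent": parent, "training_pipeline": training_pipeline}

    assert create_training_pipeline(parent="parent_value")["parent"] == "parent_value"
    try:
        create_training_pipeline({"parent": "p"}, parent="parent_value")
    except ValueError:
        pass
    else:
        raise AssertionError("expected ValueError")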
- response = await client.create_training_pipeline( - parent='parent_value', - training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].training_pipeline == gca_training_pipeline.TrainingPipeline(name='name_value') - - -@pytest.mark.asyncio -async def test_create_training_pipeline_flattened_error_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_training_pipeline( - pipeline_service.CreateTrainingPipelineRequest(), - parent='parent_value', - training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'), - ) - - -def test_get_training_pipeline(transport: str = 'grpc', request_type=pipeline_service.GetTrainingPipelineRequest): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = training_pipeline.TrainingPipeline( - name='name_value', - display_name='display_name_value', - training_task_definition='training_task_definition_value', - state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, - ) - response = client.get_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.GetTrainingPipelineRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, training_pipeline.TrainingPipeline) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.training_task_definition == 'training_task_definition_value' - assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED - - -def test_get_training_pipeline_from_dict(): - test_get_training_pipeline(request_type=dict) - - -def test_get_training_pipeline_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_training_pipeline), - '__call__') as call: - client.get_training_pipeline() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.GetTrainingPipelineRequest() - - -@pytest.mark.asyncio -async def test_get_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.GetTrainingPipelineRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(training_pipeline.TrainingPipeline( - name='name_value', - display_name='display_name_value', - training_task_definition='training_task_definition_value', - state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, - )) - response = await client.get_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.GetTrainingPipelineRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, training_pipeline.TrainingPipeline) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.training_task_definition == 'training_task_definition_value' - assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED - - -@pytest.mark.asyncio -async def test_get_training_pipeline_async_from_dict(): - await test_get_training_pipeline_async(request_type=dict) - - -def test_get_training_pipeline_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.GetTrainingPipelineRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_training_pipeline), - '__call__') as call: - call.return_value = training_pipeline.TrainingPipeline() - client.get_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_training_pipeline_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.GetTrainingPipelineRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_training_pipeline), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(training_pipeline.TrainingPipeline()) - await client.get_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_training_pipeline_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = training_pipeline.TrainingPipeline() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_training_pipeline( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_get_training_pipeline_flattened_error(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_training_pipeline( - pipeline_service.GetTrainingPipelineRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_training_pipeline_flattened_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = training_pipeline.TrainingPipeline() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(training_pipeline.TrainingPipeline()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_training_pipeline( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_get_training_pipeline_flattened_error_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_training_pipeline( - pipeline_service.GetTrainingPipelineRequest(), - name='name_value', - ) - - -def test_list_training_pipelines(transport: str = 'grpc', request_type=pipeline_service.ListTrainingPipelinesRequest): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
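Because every proto3 field carries a zero default, `request_type()` with no arguments is a complete, valid request, and the fixtures lean on this throughout. A tiny stand-in using a dataclass to make the point (the real types are proto-plus messages):

    from dataclasses import dataclass

    @dataclass
    class FakeRequest:
        # Every field has a proto3-style zero default.
        parent: str = ""
        page_size: int = 0
        page_token: str = ""

    # An "empty" request is fully constructible and equals explicit defaults.
    assert FakeRequest() == FakeRequest(parent="", page_size=0, page_token="")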
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = pipeline_service.ListTrainingPipelinesResponse( - next_page_token='next_page_token_value', - ) - response = client.list_training_pipelines(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.ListTrainingPipelinesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTrainingPipelinesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_training_pipelines_from_dict(): - test_list_training_pipelines(request_type=dict) - - -def test_list_training_pipelines_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__') as call: - client.list_training_pipelines() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.ListTrainingPipelinesRequest() - - -@pytest.mark.asyncio -async def test_list_training_pipelines_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.ListTrainingPipelinesRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListTrainingPipelinesResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_training_pipelines(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.ListTrainingPipelinesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTrainingPipelinesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_training_pipelines_async_from_dict(): - await test_list_training_pipelines_async(request_type=dict) - - -def test_list_training_pipelines_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.ListTrainingPipelinesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__') as call: - call.return_value = pipeline_service.ListTrainingPipelinesResponse() - client.list_training_pipelines(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_training_pipelines_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.ListTrainingPipelinesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListTrainingPipelinesResponse()) - await client.list_training_pipelines(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_training_pipelines_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = pipeline_service.ListTrainingPipelinesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_training_pipelines( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -def test_list_training_pipelines_flattened_error(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_training_pipelines( - pipeline_service.ListTrainingPipelinesRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_training_pipelines_flattened_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = pipeline_service.ListTrainingPipelinesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListTrainingPipelinesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.list_training_pipelines( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -@pytest.mark.asyncio -async def test_list_training_pipelines_flattened_error_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_training_pipelines( - pipeline_service.ListTrainingPipelinesRequest(), - parent='parent_value', - ) - - -def test_list_training_pipelines_pager(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - ], - next_page_token='abc', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[], - next_page_token='def', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - ], - next_page_token='ghi', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_training_pipelines(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, training_pipeline.TrainingPipeline) - for i in results) - -def test_list_training_pipelines_pages(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__') as call: - # Set the response to a series of pages. 
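The pager tests feed a sequence of fake pages through side_effect and expect iteration to flatten 3 + 0 + 1 + 2 = 6 items, stopping once the token comes back empty. The same shape with plain dicts and no gapic types:

    from unittest import mock

    pages = [
        {"items": ["a", "b", "c"], "next_page_token": "abc"},
        {"items": [], "next_page_token": "def"},
        {"items": ["d"], "next_page_token": "ghi"},
        {"items": ["e", "f"], "next_page_token": ""},
    ]
    fetch = mock.Mock(side_effect=pages)

    def iterate(fetch):
        # Request pages until the token comes back empty, yielding items.
        token = None
        while True:
            page = fetch(page_token=token)
            yield from page["items"]
            token = page["next_page_token"]
            if not token:
                return

    assert len(list(iterate(fetch))) == 6  # 3 + 0 + 1 + 2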
- call.side_effect = ( - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - ], - next_page_token='abc', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[], - next_page_token='def', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - ], - next_page_token='ghi', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - ], - ), - RuntimeError, - ) - pages = list(client.list_training_pipelines(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_training_pipelines_async_pager(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - ], - next_page_token='abc', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[], - next_page_token='def', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - ], - next_page_token='ghi', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_training_pipelines(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, training_pipeline.TrainingPipeline) - for i in responses) - -@pytest.mark.asyncio -async def test_list_training_pipelines_async_pages(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
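The async variants patch __call__ with new_callable=mock.AsyncMock so each stubbed call returns an awaitable automatically. An equivalent toy loop (requires Python 3.8+ for AsyncMock):

    import asyncio
    from unittest import mock

    pages = [
        {"items": ["a", "b", "c"], "next_page_token": "abc"},
        {"items": ["d", "e", "f"], "next_page_token": ""},
    ]
    fetch = mock.AsyncMock(side_effect=pages)

    async def collect():
        results, token = [], None
        while True:
            page = await fetch(page_token=token)  # each call resolves to the next page
            results.extend(page["items"])
            token = page["next_page_token"]
            if not token:
                return results

    assert asyncio.run(collect()) == ["a", "b", "c", "d", "e", "f"]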
- call.side_effect = ( - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - ], - next_page_token='abc', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[], - next_page_token='def', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - ], - next_page_token='ghi', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_training_pipelines(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_training_pipeline(transport: str = 'grpc', request_type=pipeline_service.DeleteTrainingPipelineRequest): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.DeleteTrainingPipelineRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_training_pipeline_from_dict(): - test_delete_training_pipeline(request_type=dict) - - -def test_delete_training_pipeline_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_training_pipeline), - '__call__') as call: - client.delete_training_pipeline() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.DeleteTrainingPipelineRequest() - - -@pytest.mark.asyncio -async def test_delete_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.DeleteTrainingPipelineRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.DeleteTrainingPipelineRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_training_pipeline_async_from_dict(): - await test_delete_training_pipeline_async(request_type=dict) - - -def test_delete_training_pipeline_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.DeleteTrainingPipelineRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_training_pipeline), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_training_pipeline_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.DeleteTrainingPipelineRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_training_pipeline), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_training_pipeline_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_training_pipeline( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_delete_training_pipeline_flattened_error():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.delete_training_pipeline(
-            pipeline_service.DeleteTrainingPipelineRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_delete_training_pipeline_flattened_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_training_pipeline),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.delete_training_pipeline(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_training_pipeline_flattened_error_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.delete_training_pipeline(
-            pipeline_service.DeleteTrainingPipelineRequest(),
-            name='name_value',
-        )
-
-
-def test_cancel_training_pipeline(transport: str = 'grpc', request_type=pipeline_service.CancelTrainingPipelineRequest):
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_training_pipeline),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = None
-        response = client.cancel_training_pipeline(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.CancelTrainingPipelineRequest()
-
-    # Establish that the response is the type that we expect.
-    assert response is None
-
-
-def test_cancel_training_pipeline_from_dict():
-    test_cancel_training_pipeline(request_type=dict)
-
-
-def test_cancel_training_pipeline_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object( - type(client.transport.cancel_training_pipeline), - '__call__') as call: - client.cancel_training_pipeline() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CancelTrainingPipelineRequest() - - -@pytest.mark.asyncio -async def test_cancel_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CancelTrainingPipelineRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.cancel_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CancelTrainingPipelineRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_cancel_training_pipeline_async_from_dict(): - await test_cancel_training_pipeline_async(request_type=dict) - - -def test_cancel_training_pipeline_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.CancelTrainingPipelineRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_training_pipeline), - '__call__') as call: - call.return_value = None - client.cancel_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_cancel_training_pipeline_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.CancelTrainingPipelineRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_training_pipeline), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.cancel_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_cancel_training_pipeline_flattened():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_training_pipeline),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = None
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.cancel_training_pipeline(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_cancel_training_pipeline_flattened_error():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.cancel_training_pipeline(
-            pipeline_service.CancelTrainingPipelineRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_cancel_training_pipeline_flattened_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_training_pipeline),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.cancel_training_pipeline(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_cancel_training_pipeline_flattened_error_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.cancel_training_pipeline(
-            pipeline_service.CancelTrainingPipelineRequest(),
-            name='name_value',
-        )
-
-
-def test_create_pipeline_job(transport: str = 'grpc', request_type=pipeline_service.CreatePipelineJobRequest):
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_pipeline_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = gca_pipeline_job.PipelineJob(
-            name='name_value',
-            display_name='display_name_value',
-            state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
-            service_account='service_account_value',
-            network='network_value',
-        )
-        response = client.create_pipeline_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.CreatePipelineJobRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, gca_pipeline_job.PipelineJob)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
-    assert response.service_account == 'service_account_value'
-    assert response.network == 'network_value'
-
-
-def test_create_pipeline_job_from_dict():
-    test_create_pipeline_job(request_type=dict)
-
-
-def test_create_pipeline_job_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_pipeline_job),
-            '__call__') as call:
-        client.create_pipeline_job()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.CreatePipelineJobRequest()
-
-
-@pytest.mark.asyncio
-async def test_create_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CreatePipelineJobRequest):
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_pipeline_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_pipeline_job.PipelineJob(
-            name='name_value',
-            display_name='display_name_value',
-            state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
-            service_account='service_account_value',
-            network='network_value',
-        ))
-        response = await client.create_pipeline_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.CreatePipelineJobRequest()
-
-    # Establish that the response is the type that we expect.
- assert isinstance(response, gca_pipeline_job.PipelineJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED - assert response.service_account == 'service_account_value' - assert response.network == 'network_value' - - -@pytest.mark.asyncio -async def test_create_pipeline_job_async_from_dict(): - await test_create_pipeline_job_async(request_type=dict) - - -def test_create_pipeline_job_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.CreatePipelineJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_pipeline_job), - '__call__') as call: - call.return_value = gca_pipeline_job.PipelineJob() - client.create_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_pipeline_job_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.CreatePipelineJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_pipeline_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_pipeline_job.PipelineJob()) - await client.create_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_pipeline_job_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_pipeline_job.PipelineJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_pipeline_job( - parent='parent_value', - pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'), - pipeline_job_id='pipeline_job_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-        assert args[0].pipeline_job == gca_pipeline_job.PipelineJob(name='name_value')
-        assert args[0].pipeline_job_id == 'pipeline_job_id_value'
-
-
-def test_create_pipeline_job_flattened_error():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.create_pipeline_job(
-            pipeline_service.CreatePipelineJobRequest(),
-            parent='parent_value',
-            pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'),
-            pipeline_job_id='pipeline_job_id_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_create_pipeline_job_flattened_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_pipeline_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_pipeline_job.PipelineJob())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.create_pipeline_job(
-            parent='parent_value',
-            pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'),
-            pipeline_job_id='pipeline_job_id_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-        assert args[0].pipeline_job == gca_pipeline_job.PipelineJob(name='name_value')
-        assert args[0].pipeline_job_id == 'pipeline_job_id_value'
-
-
-@pytest.mark.asyncio
-async def test_create_pipeline_job_flattened_error_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.create_pipeline_job(
-            pipeline_service.CreatePipelineJobRequest(),
-            parent='parent_value',
-            pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'),
-            pipeline_job_id='pipeline_job_id_value',
-        )
-
-
-def test_get_pipeline_job(transport: str = 'grpc', request_type=pipeline_service.GetPipelineJobRequest):
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_pipeline_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = pipeline_job.PipelineJob(
-            name='name_value',
-            display_name='display_name_value',
-            state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
-            service_account='service_account_value',
-            network='network_value',
-        )
-        response = client.get_pipeline_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.GetPipelineJobRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pipeline_job.PipelineJob)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
-    assert response.service_account == 'service_account_value'
-    assert response.network == 'network_value'
-
-
-def test_get_pipeline_job_from_dict():
-    test_get_pipeline_job(request_type=dict)
-
-
-def test_get_pipeline_job_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_pipeline_job),
-            '__call__') as call:
-        client.get_pipeline_job()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.GetPipelineJobRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.GetPipelineJobRequest):
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_pipeline_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_job.PipelineJob(
-            name='name_value',
-            display_name='display_name_value',
-            state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
-            service_account='service_account_value',
-            network='network_value',
-        ))
-        response = await client.get_pipeline_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.GetPipelineJobRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pipeline_job.PipelineJob)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
-    assert response.service_account == 'service_account_value'
-    assert response.network == 'network_value'
-
-
-@pytest.mark.asyncio
-async def test_get_pipeline_job_async_from_dict():
-    await test_get_pipeline_job_async(request_type=dict)
-
-
-def test_get_pipeline_job_field_headers():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = pipeline_service.GetPipelineJobRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_pipeline_job),
-            '__call__') as call:
-        call.return_value = pipeline_job.PipelineJob()
-        client.get_pipeline_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_get_pipeline_job_field_headers_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = pipeline_service.GetPipelineJobRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_pipeline_job),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_job.PipelineJob())
-        await client.get_pipeline_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_get_pipeline_job_flattened():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_pipeline_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = pipeline_job.PipelineJob()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.get_pipeline_job(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_get_pipeline_job_flattened_error():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.get_pipeline_job(
-            pipeline_service.GetPipelineJobRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_get_pipeline_job_flattened_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_pipeline_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_job.PipelineJob())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.get_pipeline_job(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_get_pipeline_job_flattened_error_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.get_pipeline_job(
-            pipeline_service.GetPipelineJobRequest(),
-            name='name_value',
-        )
-
-
-def test_list_pipeline_jobs(transport: str = 'grpc', request_type=pipeline_service.ListPipelineJobsRequest):
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_pipeline_jobs),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = pipeline_service.ListPipelineJobsResponse(
-            next_page_token='next_page_token_value',
-        )
-        response = client.list_pipeline_jobs(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.ListPipelineJobsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListPipelineJobsPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_pipeline_jobs_from_dict():
-    test_list_pipeline_jobs(request_type=dict)
-
-
-def test_list_pipeline_jobs_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_pipeline_jobs),
-            '__call__') as call:
-        client.list_pipeline_jobs()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.ListPipelineJobsRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_pipeline_jobs_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.ListPipelineJobsRequest):
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_pipeline_jobs),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListPipelineJobsResponse(
-            next_page_token='next_page_token_value',
-        ))
-        response = await client.list_pipeline_jobs(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.ListPipelineJobsRequest()
-
-    # Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ListPipelineJobsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_pipeline_jobs_async_from_dict(): - await test_list_pipeline_jobs_async(request_type=dict) - - -def test_list_pipeline_jobs_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.ListPipelineJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - call.return_value = pipeline_service.ListPipelineJobsResponse() - client.list_pipeline_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_pipeline_jobs_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.ListPipelineJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListPipelineJobsResponse()) - await client.list_pipeline_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_pipeline_jobs_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = pipeline_service.ListPipelineJobsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_pipeline_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -def test_list_pipeline_jobs_flattened_error(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
-    with pytest.raises(ValueError):
-        client.list_pipeline_jobs(
-            pipeline_service.ListPipelineJobsRequest(),
-            parent='parent_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_list_pipeline_jobs_flattened_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_pipeline_jobs),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListPipelineJobsResponse())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.list_pipeline_jobs(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_pipeline_jobs_flattened_error_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.list_pipeline_jobs(
-            pipeline_service.ListPipelineJobsRequest(),
-            parent='parent_value',
-        )
-
-
-def test_list_pipeline_jobs_pager():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_pipeline_jobs),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            pipeline_service.ListPipelineJobsResponse(
-                pipeline_jobs=[
-                    pipeline_job.PipelineJob(),
-                    pipeline_job.PipelineJob(),
-                    pipeline_job.PipelineJob(),
-                ],
-                next_page_token='abc',
-            ),
-            pipeline_service.ListPipelineJobsResponse(
-                pipeline_jobs=[],
-                next_page_token='def',
-            ),
-            pipeline_service.ListPipelineJobsResponse(
-                pipeline_jobs=[
-                    pipeline_job.PipelineJob(),
-                ],
-                next_page_token='ghi',
-            ),
-            pipeline_service.ListPipelineJobsResponse(
-                pipeline_jobs=[
-                    pipeline_job.PipelineJob(),
-                    pipeline_job.PipelineJob(),
-                ],
-            ),
-            RuntimeError,
-        )
-
-        metadata = ()
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ('parent', ''),
-            )),
-        )
-        pager = client.list_pipeline_jobs(request={})
-
-        assert pager._metadata == metadata
-
-        results = [i for i in pager]
-        assert len(results) == 6
-        assert all(isinstance(i, pipeline_job.PipelineJob)
-                   for i in results)
-
-def test_list_pipeline_jobs_pages():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_pipeline_jobs),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            pipeline_service.ListPipelineJobsResponse(
-                pipeline_jobs=[
-                    pipeline_job.PipelineJob(),
-                    pipeline_job.PipelineJob(),
-                    pipeline_job.PipelineJob(),
-                ],
-                next_page_token='abc',
-            ),
-            pipeline_service.ListPipelineJobsResponse(
-                pipeline_jobs=[],
-                next_page_token='def',
-            ),
-            pipeline_service.ListPipelineJobsResponse(
-                pipeline_jobs=[
-                    pipeline_job.PipelineJob(),
-                ],
-                next_page_token='ghi',
-            ),
-            pipeline_service.ListPipelineJobsResponse(
-                pipeline_jobs=[
-                    pipeline_job.PipelineJob(),
-                    pipeline_job.PipelineJob(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = list(client.list_pipeline_jobs(request={}).pages)
-        for page_, token in zip(pages, ['abc','def','ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_pipeline_jobs_async_pager():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_pipeline_jobs),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            pipeline_service.ListPipelineJobsResponse(
-                pipeline_jobs=[
-                    pipeline_job.PipelineJob(),
-                    pipeline_job.PipelineJob(),
-                    pipeline_job.PipelineJob(),
-                ],
-                next_page_token='abc',
-            ),
-            pipeline_service.ListPipelineJobsResponse(
-                pipeline_jobs=[],
-                next_page_token='def',
-            ),
-            pipeline_service.ListPipelineJobsResponse(
-                pipeline_jobs=[
-                    pipeline_job.PipelineJob(),
-                ],
-                next_page_token='ghi',
-            ),
-            pipeline_service.ListPipelineJobsResponse(
-                pipeline_jobs=[
-                    pipeline_job.PipelineJob(),
-                    pipeline_job.PipelineJob(),
-                ],
-            ),
-            RuntimeError,
-        )
-        async_pager = await client.list_pipeline_jobs(request={},)
-        assert async_pager.next_page_token == 'abc'
-        responses = []
-        async for response in async_pager:
-            responses.append(response)
-
-        assert len(responses) == 6
-        assert all(isinstance(i, pipeline_job.PipelineJob)
-                   for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_pipeline_jobs_async_pages():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_pipeline_jobs),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
- call.side_effect = ( - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - ], - next_page_token='abc', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[], - next_page_token='def', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - ], - next_page_token='ghi', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_pipeline_jobs(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_pipeline_job(transport: str = 'grpc', request_type=pipeline_service.DeletePipelineJobRequest): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.DeletePipelineJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_pipeline_job_from_dict(): - test_delete_pipeline_job(request_type=dict) - - -def test_delete_pipeline_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_pipeline_job), - '__call__') as call: - client.delete_pipeline_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.DeletePipelineJobRequest() - - -@pytest.mark.asyncio -async def test_delete_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.DeletePipelineJobRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.DeletePipelineJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_pipeline_job_async_from_dict(): - await test_delete_pipeline_job_async(request_type=dict) - - -def test_delete_pipeline_job_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.DeletePipelineJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_pipeline_job), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_pipeline_job_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.DeletePipelineJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_pipeline_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_pipeline_job_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_pipeline_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_delete_pipeline_job_flattened_error(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
-    with pytest.raises(ValueError):
-        client.delete_pipeline_job(
-            pipeline_service.DeletePipelineJobRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_delete_pipeline_job_flattened_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_pipeline_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.delete_pipeline_job(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_pipeline_job_flattened_error_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.delete_pipeline_job(
-            pipeline_service.DeletePipelineJobRequest(),
-            name='name_value',
-        )
-
-
-def test_cancel_pipeline_job(transport: str = 'grpc', request_type=pipeline_service.CancelPipelineJobRequest):
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_pipeline_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = None
-        response = client.cancel_pipeline_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.CancelPipelineJobRequest()
-
-    # Establish that the response is the type that we expect.
-    assert response is None
-
-
-def test_cancel_pipeline_job_from_dict():
-    test_cancel_pipeline_job(request_type=dict)
-
-
-def test_cancel_pipeline_job_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object( - type(client.transport.cancel_pipeline_job), - '__call__') as call: - client.cancel_pipeline_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CancelPipelineJobRequest() - - -@pytest.mark.asyncio -async def test_cancel_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CancelPipelineJobRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.cancel_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CancelPipelineJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_cancel_pipeline_job_async_from_dict(): - await test_cancel_pipeline_job_async(request_type=dict) - - -def test_cancel_pipeline_job_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.CancelPipelineJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_pipeline_job), - '__call__') as call: - call.return_value = None - client.cancel_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_cancel_pipeline_job_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.CancelPipelineJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_pipeline_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.cancel_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_cancel_pipeline_job_flattened():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_pipeline_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = None
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.cancel_pipeline_job(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_cancel_pipeline_job_flattened_error():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.cancel_pipeline_job(
-            pipeline_service.CancelPipelineJobRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_cancel_pipeline_job_flattened_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_pipeline_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.cancel_pipeline_job(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_cancel_pipeline_job_flattened_error_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.cancel_pipeline_job(
-            pipeline_service.CancelPipelineJobRequest(),
-            name='name_value',
-        )
-
-
-def test_credentials_transport_error():
-    # It is an error to provide credentials and a transport instance.
-    transport = transports.PipelineServiceGrpcTransport(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-    with pytest.raises(ValueError):
-        client = PipelineServiceClient(
-            credentials=ga_credentials.AnonymousCredentials(),
-            transport=transport,
-        )
-
-    # It is an error to provide a credentials file and a transport instance.
-    transport = transports.PipelineServiceGrpcTransport(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-    with pytest.raises(ValueError):
-        client = PipelineServiceClient(
-            client_options={"credentials_file": "credentials.json"},
-            transport=transport,
-        )
-
-    # It is an error to provide scopes and a transport instance.
- transport = transports.PipelineServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = PipelineServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.PipelineServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = PipelineServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.PipelineServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.PipelineServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.PipelineServiceGrpcTransport, - transports.PipelineServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.PipelineServiceGrpcTransport, - ) - -def test_pipeline_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.PipelineServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_pipeline_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1.services.pipeline_service.transports.PipelineServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.PipelineServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
- methods = ( - 'create_training_pipeline', - 'get_training_pipeline', - 'list_training_pipelines', - 'delete_training_pipeline', - 'cancel_training_pipeline', - 'create_pipeline_job', - 'get_pipeline_job', - 'list_pipeline_jobs', - 'delete_pipeline_job', - 'cancel_pipeline_job', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -@requires_google_auth_gte_1_25_0 -def test_pipeline_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.pipeline_service.transports.PipelineServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.PipelineServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@requires_google_auth_lt_1_25_0 -def test_pipeline_service_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.pipeline_service.transports.PipelineServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.PipelineServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - ), - quota_project_id="octopus", - ) - - -def test_pipeline_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.pipeline_service.transports.PipelineServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.PipelineServiceTransport() - adc.assert_called_once() - - -@requires_google_auth_gte_1_25_0 -def test_pipeline_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - PipelineServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@requires_google_auth_lt_1_25_0 -def test_pipeline_service_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - PipelineServiceClient() - adc.assert_called_once_with( - scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.PipelineServiceGrpcTransport, - transports.PipelineServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_gte_1_25_0 -def test_pipeline_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.PipelineServiceGrpcTransport, - transports.PipelineServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_pipeline_service_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with(scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.PipelineServiceGrpcTransport, grpc_helpers), - (transports.PipelineServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_pipeline_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.PipelineServiceGrpcTransport, transports.PipelineServiceGrpcAsyncIOTransport]) -def test_pipeline_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. 
-    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
-        mock_ssl_channel_creds = mock.Mock()
-        transport_class(
-            host="squid.clam.whelk",
-            credentials=cred,
-            ssl_channel_credentials=mock_ssl_channel_creds
-        )
-        mock_create_channel.assert_called_once_with(
-            "squid.clam.whelk:443",
-            credentials=cred,
-            credentials_file=None,
-            scopes=None,
-            ssl_credentials=mock_ssl_channel_creds,
-            quota_project_id=None,
-            options=[
-                ("grpc.max_send_message_length", -1),
-                ("grpc.max_receive_message_length", -1),
-            ],
-        )
-
-    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
-    # is used.
-    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
-        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
-            transport_class(
-                credentials=cred,
-                client_cert_source_for_mtls=client_cert_source_callback
-            )
-            expected_cert, expected_key = client_cert_source_callback()
-            mock_ssl_cred.assert_called_once_with(
-                certificate_chain=expected_cert,
-                private_key=expected_key
-            )
-
-
-def test_pipeline_service_host_no_port():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
-    )
-    assert client.transport._host == 'aiplatform.googleapis.com:443'
-
-
-def test_pipeline_service_host_with_port():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
-    )
-    assert client.transport._host == 'aiplatform.googleapis.com:8000'
-
-def test_pipeline_service_grpc_transport_channel():
-    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
-
-    # Check that channel is used if provided.
-    transport = transports.PipelineServiceGrpcTransport(
-        host="squid.clam.whelk",
-        channel=channel,
-    )
-    assert transport.grpc_channel == channel
-    assert transport._host == "squid.clam.whelk:443"
-    assert transport._ssl_channel_credentials is None
-
-
-def test_pipeline_service_grpc_asyncio_transport_channel():
-    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
-
-    # Check that channel is used if provided.
-    transport = transports.PipelineServiceGrpcAsyncIOTransport(
-        host="squid.clam.whelk",
-        channel=channel,
-    )
-    assert transport.grpc_channel == channel
-    assert transport._host == "squid.clam.whelk:443"
-    assert transport._ssl_channel_credentials is None
-
-
-# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
-# removed from grpc/grpc_asyncio transport constructor.
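As context for the deprecation note above: the supported replacement for the deprecated arguments is the client_cert_source_for_mtls constructor argument already exercised by test_pipeline_service_grpc_transport_client_cert_source_for_mtls. A minimal sketch of that wiring, reusing only names defined in this test module (the anonymous credentials and the transport choice are illustrative):

    # Sketch only: supply the client certificate via client_cert_source_for_mtls
    # instead of the deprecated api_mtls_endpoint/client_cert_source arguments.
    transport = transports.PipelineServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
        client_cert_source_for_mtls=client_cert_source_callback,
    )
    client = PipelineServiceClient(transport=transport)
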
-@pytest.mark.parametrize("transport_class", [transports.PipelineServiceGrpcTransport, transports.PipelineServiceGrpcAsyncIOTransport])
-def test_pipeline_service_transport_channel_mtls_with_client_cert_source(
-    transport_class
-):
-    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
-        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
-            mock_ssl_cred = mock.Mock()
-            grpc_ssl_channel_cred.return_value = mock_ssl_cred
-
-            mock_grpc_channel = mock.Mock()
-            grpc_create_channel.return_value = mock_grpc_channel
-
-            cred = ga_credentials.AnonymousCredentials()
-            with pytest.warns(DeprecationWarning):
-                with mock.patch.object(google.auth, 'default') as adc:
-                    adc.return_value = (cred, None)
-                    transport = transport_class(
-                        host="squid.clam.whelk",
-                        api_mtls_endpoint="mtls.squid.clam.whelk",
-                        client_cert_source=client_cert_source_callback,
-                    )
-                    adc.assert_called_once()
-
-            grpc_ssl_channel_cred.assert_called_once_with(
-                certificate_chain=b"cert bytes", private_key=b"key bytes"
-            )
-            grpc_create_channel.assert_called_once_with(
-                "mtls.squid.clam.whelk:443",
-                credentials=cred,
-                credentials_file=None,
-                scopes=None,
-                ssl_credentials=mock_ssl_cred,
-                quota_project_id=None,
-                options=[
-                    ("grpc.max_send_message_length", -1),
-                    ("grpc.max_receive_message_length", -1),
-                ],
-            )
-            assert transport.grpc_channel == mock_grpc_channel
-            assert transport._ssl_channel_credentials == mock_ssl_cred
-
-
-# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
-# removed from grpc/grpc_asyncio transport constructor.
-@pytest.mark.parametrize("transport_class", [transports.PipelineServiceGrpcTransport, transports.PipelineServiceGrpcAsyncIOTransport])
-def test_pipeline_service_transport_channel_mtls_with_adc(
-    transport_class
-):
-    mock_ssl_cred = mock.Mock()
-    with mock.patch.multiple(
-        "google.auth.transport.grpc.SslCredentials",
-        __init__=mock.Mock(return_value=None),
-        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
-    ):
-        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
-            mock_grpc_channel = mock.Mock()
-            grpc_create_channel.return_value = mock_grpc_channel
-            mock_cred = mock.Mock()
-
-            with pytest.warns(DeprecationWarning):
-                transport = transport_class(
-                    host="squid.clam.whelk",
-                    credentials=mock_cred,
-                    api_mtls_endpoint="mtls.squid.clam.whelk",
-                    client_cert_source=None,
-                )
-
-            grpc_create_channel.assert_called_once_with(
-                "mtls.squid.clam.whelk:443",
-                credentials=mock_cred,
-                credentials_file=None,
-                scopes=None,
-                ssl_credentials=mock_ssl_cred,
-                quota_project_id=None,
-                options=[
-                    ("grpc.max_send_message_length", -1),
-                    ("grpc.max_receive_message_length", -1),
-                ],
-            )
-            assert transport.grpc_channel == mock_grpc_channel
-
-
-def test_pipeline_service_grpc_lro_client():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-    transport = client.transport
-
-    # Ensure that we have an api-core operations client.
-    assert isinstance(
-        transport.operations_client,
-        operations_v1.OperationsClient,
-    )
-
-    # Ensure that subsequent calls to the property send the exact same object.
-    assert transport.operations_client is transport.operations_client
-
-
-def test_pipeline_service_grpc_lro_async_client():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc_asyncio',
-    )
-    transport = client.transport
-
-    # Ensure that we have an api-core operations client.
-    assert isinstance(
-        transport.operations_client,
-        operations_v1.OperationsAsyncClient,
-    )
-
-    # Ensure that subsequent calls to the property send the exact same object.
-    assert transport.operations_client is transport.operations_client
-
-
-def test_artifact_path():
-    project = "squid"
-    location = "clam"
-    metadata_store = "whelk"
-    artifact = "octopus"
-    expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(project=project, location=location, metadata_store=metadata_store, artifact=artifact, )
-    actual = PipelineServiceClient.artifact_path(project, location, metadata_store, artifact)
-    assert expected == actual
-
-
-def test_parse_artifact_path():
-    expected = {
-        "project": "oyster",
-        "location": "nudibranch",
-        "metadata_store": "cuttlefish",
-        "artifact": "mussel",
-    }
-    path = PipelineServiceClient.artifact_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = PipelineServiceClient.parse_artifact_path(path)
-    assert expected == actual
-
-def test_context_path():
-    project = "winkle"
-    location = "nautilus"
-    metadata_store = "scallop"
-    context = "abalone"
-    expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(project=project, location=location, metadata_store=metadata_store, context=context, )
-    actual = PipelineServiceClient.context_path(project, location, metadata_store, context)
-    assert expected == actual
-
-
-def test_parse_context_path():
-    expected = {
-        "project": "squid",
-        "location": "clam",
-        "metadata_store": "whelk",
-        "context": "octopus",
-    }
-    path = PipelineServiceClient.context_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = PipelineServiceClient.parse_context_path(path)
-    assert expected == actual
-
-def test_custom_job_path():
-    project = "oyster"
-    location = "nudibranch"
-    custom_job = "cuttlefish"
-    expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, )
-    actual = PipelineServiceClient.custom_job_path(project, location, custom_job)
-    assert expected == actual
-
-
-def test_parse_custom_job_path():
-    expected = {
-        "project": "mussel",
-        "location": "winkle",
-        "custom_job": "nautilus",
-    }
-    path = PipelineServiceClient.custom_job_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = PipelineServiceClient.parse_custom_job_path(path)
-    assert expected == actual
-
-def test_endpoint_path():
-    project = "scallop"
-    location = "abalone"
-    endpoint = "squid"
-    expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, )
-    actual = PipelineServiceClient.endpoint_path(project, location, endpoint)
-    assert expected == actual
-
-
-def test_parse_endpoint_path():
-    expected = {
-        "project": "clam",
-        "location": "whelk",
-        "endpoint": "octopus",
-    }
-    path = PipelineServiceClient.endpoint_path(**expected)
-
-    # Check that the path construction is reversible.
- actual = PipelineServiceClient.parse_endpoint_path(path) - assert expected == actual - -def test_execution_path(): - project = "oyster" - location = "nudibranch" - metadata_store = "cuttlefish" - execution = "mussel" - expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(project=project, location=location, metadata_store=metadata_store, execution=execution, ) - actual = PipelineServiceClient.execution_path(project, location, metadata_store, execution) - assert expected == actual - - -def test_parse_execution_path(): - expected = { - "project": "winkle", - "location": "nautilus", - "metadata_store": "scallop", - "execution": "abalone", - } - path = PipelineServiceClient.execution_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_execution_path(path) - assert expected == actual - -def test_model_path(): - project = "squid" - location = "clam" - model = "whelk" - expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - actual = PipelineServiceClient.model_path(project, location, model) - assert expected == actual - - -def test_parse_model_path(): - expected = { - "project": "octopus", - "location": "oyster", - "model": "nudibranch", - } - path = PipelineServiceClient.model_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_model_path(path) - assert expected == actual - -def test_network_path(): - project = "cuttlefish" - network = "mussel" - expected = "projects/{project}/global/networks/{network}".format(project=project, network=network, ) - actual = PipelineServiceClient.network_path(project, network) - assert expected == actual - - -def test_parse_network_path(): - expected = { - "project": "winkle", - "network": "nautilus", - } - path = PipelineServiceClient.network_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_network_path(path) - assert expected == actual - -def test_pipeline_job_path(): - project = "scallop" - location = "abalone" - pipeline_job = "squid" - expected = "projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}".format(project=project, location=location, pipeline_job=pipeline_job, ) - actual = PipelineServiceClient.pipeline_job_path(project, location, pipeline_job) - assert expected == actual - - -def test_parse_pipeline_job_path(): - expected = { - "project": "clam", - "location": "whelk", - "pipeline_job": "octopus", - } - path = PipelineServiceClient.pipeline_job_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_pipeline_job_path(path) - assert expected == actual - -def test_training_pipeline_path(): - project = "oyster" - location = "nudibranch" - training_pipeline = "cuttlefish" - expected = "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, ) - actual = PipelineServiceClient.training_pipeline_path(project, location, training_pipeline) - assert expected == actual - - -def test_parse_training_pipeline_path(): - expected = { - "project": "mussel", - "location": "winkle", - "training_pipeline": "nautilus", - } - path = PipelineServiceClient.training_pipeline_path(**expected) - - # Check that the path construction is reversible. 
- actual = PipelineServiceClient.parse_training_pipeline_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "scallop" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = PipelineServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "abalone", - } - path = PipelineServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "squid" - expected = "folders/{folder}".format(folder=folder, ) - actual = PipelineServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "clam", - } - path = PipelineServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "whelk" - expected = "organizations/{organization}".format(organization=organization, ) - actual = PipelineServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "octopus", - } - path = PipelineServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "oyster" - expected = "projects/{project}".format(project=project, ) - actual = PipelineServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "nudibranch", - } - path = PipelineServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "cuttlefish" - location = "mussel" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = PipelineServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "winkle", - "location": "nautilus", - } - path = PipelineServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = PipelineServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.PipelineServiceTransport, '_prep_wrapped_messages') as prep: - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.PipelineServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = PipelineServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_prediction_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_prediction_service.py deleted file mode 100644 index d0f9db1820..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_prediction_service.py +++ /dev/null @@ -1,1175 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1.services.prediction_service import PredictionServiceAsyncClient -from google.cloud.aiplatform_v1.services.prediction_service import PredictionServiceClient -from google.cloud.aiplatform_v1.services.prediction_service import transports -from google.cloud.aiplatform_v1.services.prediction_service.transports.base import _GOOGLE_AUTH_VERSION -from google.cloud.aiplatform_v1.types import prediction_service -from google.oauth2 import service_account -from google.protobuf import struct_pb2 # type: ignore -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). 
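The two markers defined below implement the TODO above as plain pytest.mark.skipif gates keyed on the installed google-auth version. Applied to a test, the pattern looks roughly like this (the test name and body are illustrative, not taken from this file):

    # Sketch only: a test gated on google-auth >= 1.25.0; with an older
    # version installed, pytest reports it as skipped with the skipif reason.
    @requires_google_auth_gte_1_25_0
    def test_something_requiring_new_auth():
        ...
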
-requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
-    packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
-    reason="This test requires google-auth < 1.25.0",
-)
-requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
-    packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
-    reason="This test requires google-auth >= 1.25.0",
-)
-
-def client_cert_source_callback():
-    return b"cert bytes", b"key bytes"
-
-
-# If default endpoint is localhost, then default mtls endpoint will be the same.
-# This method modifies the default endpoint so the client can produce a different
-# mtls endpoint for endpoint testing purposes.
-def modify_default_endpoint(client):
-    return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
-
-
-def test__get_default_mtls_endpoint():
-    api_endpoint = "example.googleapis.com"
-    api_mtls_endpoint = "example.mtls.googleapis.com"
-    sandbox_endpoint = "example.sandbox.googleapis.com"
-    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
-    non_googleapi = "api.example.com"
-
-    assert PredictionServiceClient._get_default_mtls_endpoint(None) is None
-    assert PredictionServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
-    assert PredictionServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
-    assert PredictionServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
-    assert PredictionServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
-    assert PredictionServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
-
-
-@pytest.mark.parametrize("client_class", [
-    PredictionServiceClient,
-    PredictionServiceAsyncClient,
-])
-def test_prediction_service_client_from_service_account_info(client_class):
-    creds = ga_credentials.AnonymousCredentials()
-    with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
-        factory.return_value = creds
-        info = {"valid": True}
-        client = client_class.from_service_account_info(info)
-        assert client.transport._credentials == creds
-        assert isinstance(client, client_class)
-
-        assert client.transport._host == 'aiplatform.googleapis.com:443'
-
-
-@pytest.mark.parametrize("client_class", [
-    PredictionServiceClient,
-    PredictionServiceAsyncClient,
-])
-def test_prediction_service_client_service_account_always_use_jwt(client_class):
-    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
-        creds = service_account.Credentials(None, None, None)
-        client = client_class(credentials=creds)
-        use_jwt.assert_not_called()
-
-
-@pytest.mark.parametrize("transport_class,transport_name", [
-    (transports.PredictionServiceGrpcTransport, "grpc"),
-    (transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"),
-])
-def test_prediction_service_client_service_account_always_use_jwt_true(transport_class, transport_name):
-    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
-        creds = service_account.Credentials(None, None, None)
-        transport = transport_class(credentials=creds, always_use_jwt_access=True)
-        use_jwt.assert_called_once_with(True)
-
-
-@pytest.mark.parametrize("client_class", [
-    PredictionServiceClient,
-    PredictionServiceAsyncClient,
-])
-def test_prediction_service_client_from_service_account_file(client_class):
-    creds = ga_credentials.AnonymousCredentials()
-    with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
-        factory.return_value = creds
-        client = client_class.from_service_account_file("dummy/file/path.json")
-        assert client.transport._credentials == creds
-        assert isinstance(client, client_class)
-
-        client = client_class.from_service_account_json("dummy/file/path.json")
-        assert client.transport._credentials == creds
-        assert isinstance(client, client_class)
-
-        assert client.transport._host == 'aiplatform.googleapis.com:443'
-
-
-def test_prediction_service_client_get_transport_class():
-    transport = PredictionServiceClient.get_transport_class()
-    available_transports = [
-        transports.PredictionServiceGrpcTransport,
-    ]
-    assert transport in available_transports
-
-    transport = PredictionServiceClient.get_transport_class("grpc")
-    assert transport == transports.PredictionServiceGrpcTransport
-
-
-@pytest.mark.parametrize("client_class,transport_class,transport_name", [
-    (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"),
-    (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"),
-])
-@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient))
-@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient))
-def test_prediction_service_client_client_options(client_class, transport_class, transport_name):
-    # Check that if channel is provided we won't create a new one.
-    with mock.patch.object(PredictionServiceClient, 'get_transport_class') as gtc:
-        transport = transport_class(
-            credentials=ga_credentials.AnonymousCredentials()
-        )
-        client = client_class(transport=transport)
-        gtc.assert_not_called()
-
-    # Check that if channel is provided via str we will create a new one.
-    with mock.patch.object(PredictionServiceClient, 'get_transport_class') as gtc:
-        client = client_class(transport=transport_name)
-        gtc.assert_called()
-
-    # Check the case api_endpoint is provided.
-    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
-    with mock.patch.object(transport_class, '__init__') as patched:
-        patched.return_value = None
-        client = client_class(client_options=options)
-        patched.assert_called_once_with(
-            credentials=None,
-            credentials_file=None,
-            host="squid.clam.whelk",
-            scopes=None,
-            client_cert_source_for_mtls=None,
-            quota_project_id=None,
-            client_info=transports.base.DEFAULT_CLIENT_INFO,
-        )
-
-    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
-    # "never".
-    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
-        with mock.patch.object(transport_class, '__init__') as patched:
-            patched.return_value = None
-            client = client_class()
-            patched.assert_called_once_with(
-                credentials=None,
-                credentials_file=None,
-                host=client.DEFAULT_ENDPOINT,
-                scopes=None,
-                client_cert_source_for_mtls=None,
-                quota_project_id=None,
-                client_info=transports.base.DEFAULT_CLIENT_INFO,
-            )
-
-    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
-    # "always".
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", "true"), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", "false"), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) -@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_prediction_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_prediction_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. 
- options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_prediction_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. - options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_prediction_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1.services.prediction_service.transports.PredictionServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = PredictionServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_predict(transport: str = 'grpc', request_type=prediction_service.PredictRequest): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = prediction_service.PredictResponse( - deployed_model_id='deployed_model_id_value', - ) - response = client.predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.PredictRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, prediction_service.PredictResponse) - assert response.deployed_model_id == 'deployed_model_id_value' - - -def test_predict_from_dict(): - test_predict(request_type=dict) - - -def test_predict_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
-    with mock.patch.object(
-            type(client.transport.predict),
-            '__call__') as call:
-        client.predict()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == prediction_service.PredictRequest()
-
-
-@pytest.mark.asyncio
-async def test_predict_async(transport: str = 'grpc_asyncio', request_type=prediction_service.PredictRequest):
-    client = PredictionServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.predict),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse(
-            deployed_model_id='deployed_model_id_value',
-        ))
-        response = await client.predict(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == prediction_service.PredictRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, prediction_service.PredictResponse)
-    assert response.deployed_model_id == 'deployed_model_id_value'
-
-
-@pytest.mark.asyncio
-async def test_predict_async_from_dict():
-    await test_predict_async(request_type=dict)
-
-
-def test_predict_field_headers():
-    client = PredictionServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = prediction_service.PredictRequest()
-
-    request.endpoint = 'endpoint/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.predict),
-            '__call__') as call:
-        call.return_value = prediction_service.PredictResponse()
-        client.predict(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'endpoint=endpoint/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_predict_field_headers_async():
-    client = PredictionServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = prediction_service.PredictRequest()
-
-    request.endpoint = 'endpoint/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.predict),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse())
-        await client.predict(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'endpoint=endpoint/value',
-    ) in kw['metadata']
-
-
-def test_predict_flattened():
-    client = PredictionServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.predict),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = prediction_service.PredictResponse()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.predict(
-            endpoint='endpoint_value',
-            parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE),
-            instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].endpoint == 'endpoint_value'
-        assert args[0].parameters == struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)
-        assert args[0].instances == [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)]
-
-
-def test_predict_flattened_error():
-    client = PredictionServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.predict(
-            prediction_service.PredictRequest(),
-            endpoint='endpoint_value',
-            parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE),
-            instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
-        )
-
-
-@pytest.mark.asyncio
-async def test_predict_flattened_async():
-    client = PredictionServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.predict),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.predict(
-            endpoint='endpoint_value',
-            parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE),
-            instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].endpoint == 'endpoint_value'
-        assert args[0].parameters == struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)
-        assert args[0].instances == [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)]
-
-
-@pytest.mark.asyncio
-async def test_predict_flattened_error_async():
-    client = PredictionServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
- with pytest.raises(ValueError): - await client.predict( - prediction_service.PredictRequest(), - endpoint='endpoint_value', - parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), - instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = PredictionServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = PredictionServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = PredictionServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.PredictionServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.PredictionServiceGrpcTransport, - transports.PredictionServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.PredictionServiceGrpcTransport, - ) - -def test_prediction_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.PredictionServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_prediction_service_base_transport(): - # Instantiate the base transport. 
- with mock.patch('google.cloud.aiplatform_v1.services.prediction_service.transports.PredictionServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.PredictionServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'predict', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - -@requires_google_auth_gte_1_25_0 -def test_prediction_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.PredictionServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@requires_google_auth_lt_1_25_0 -def test_prediction_service_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.PredictionServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - ), - quota_project_id="octopus", - ) - - -def test_prediction_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.PredictionServiceTransport() - adc.assert_called_once() - - -@requires_google_auth_gte_1_25_0 -def test_prediction_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - PredictionServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@requires_google_auth_lt_1_25_0 -def test_prediction_service_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - PredictionServiceClient() - adc.assert_called_once_with( - scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.PredictionServiceGrpcTransport, - transports.PredictionServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_gte_1_25_0 -def test_prediction_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.PredictionServiceGrpcTransport, - transports.PredictionServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_prediction_service_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with(scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.PredictionServiceGrpcTransport, grpc_helpers), - (transports.PredictionServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_prediction_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) -def test_prediction_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. 
- with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_prediction_service_host_no_port(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_prediction_service_host_with_port(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_prediction_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.PredictionServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_prediction_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.PredictionServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. 
-@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) -def test_prediction_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) -def test_prediction_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_endpoint_path(): - project = "squid" - location = "clam" - endpoint = "whelk" - expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - actual = PredictionServiceClient.endpoint_path(project, location, endpoint) - assert expected == actual - - -def test_parse_endpoint_path(): - expected = { - "project": "octopus", - "location": "oyster", - "endpoint": "nudibranch", - } - path = PredictionServiceClient.endpoint_path(**expected) - - # Check that the path construction is reversible. 
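# ---------------------------------------------------------------------------
# [editor's note -- illustrative sketch, not part of the deleted file]
# The *_path/parse_*_path helpers exercised in the path tests here are plain
# format/regex pairs, so the round-trip the tests assert falls out directly.
# A generic re-implementation (function names are illustrative):
import re

def endpoint_path(project, location, endpoint):
    return f"projects/{project}/locations/{location}/endpoints/{endpoint}"

def parse_endpoint_path(path):
    m = re.match(
        r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)"
        r"/endpoints/(?P<endpoint>.+?)$",
        path,
    )
    return m.groupdict() if m else {}

expected = {"project": "octopus", "location": "oyster", "endpoint": "nudibranch"}
assert parse_endpoint_path(endpoint_path(**expected)) == expected
# ---------------------------------------------------------------------------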
- actual = PredictionServiceClient.parse_endpoint_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "cuttlefish" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = PredictionServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "mussel", - } - path = PredictionServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = PredictionServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "winkle" - expected = "folders/{folder}".format(folder=folder, ) - actual = PredictionServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "nautilus", - } - path = PredictionServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = PredictionServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "scallop" - expected = "organizations/{organization}".format(organization=organization, ) - actual = PredictionServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "abalone", - } - path = PredictionServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = PredictionServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "squid" - expected = "projects/{project}".format(project=project, ) - actual = PredictionServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "clam", - } - path = PredictionServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = PredictionServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "whelk" - location = "octopus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = PredictionServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - } - path = PredictionServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = PredictionServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.PredictionServiceTransport, '_prep_wrapped_messages') as prep: - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.PredictionServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = PredictionServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py deleted file mode 100644 index 09242fe3e6..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py +++ /dev/null @@ -1,2346 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1.services.specialist_pool_service import SpecialistPoolServiceAsyncClient -from google.cloud.aiplatform_v1.services.specialist_pool_service import SpecialistPoolServiceClient -from google.cloud.aiplatform_v1.services.specialist_pool_service import pagers -from google.cloud.aiplatform_v1.services.specialist_pool_service import transports -from google.cloud.aiplatform_v1.services.specialist_pool_service.transports.base import _GOOGLE_AUTH_VERSION -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.cloud.aiplatform_v1.types import specialist_pool -from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool -from google.cloud.aiplatform_v1.types import specialist_pool_service -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers 
(Make the "greater than or equal to" tests the default). -requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert SpecialistPoolServiceClient._get_default_mtls_endpoint(None) is None - assert SpecialistPoolServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert SpecialistPoolServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert SpecialistPoolServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert SpecialistPoolServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert SpecialistPoolServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - SpecialistPoolServiceClient, - SpecialistPoolServiceAsyncClient, -]) -def test_specialist_pool_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("client_class", [ - SpecialistPoolServiceClient, - SpecialistPoolServiceAsyncClient, -]) -def test_specialist_pool_service_client_service_account_always_use_jwt(client_class): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.SpecialistPoolServiceGrpcTransport, "grpc"), - (transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_specialist_pool_service_client_service_account_always_use_jwt_true(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - -@pytest.mark.parametrize("client_class", [ - SpecialistPoolServiceClient, - SpecialistPoolServiceAsyncClient, -]) -def 
test_specialist_pool_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_specialist_pool_service_client_get_transport_class(): - transport = SpecialistPoolServiceClient.get_transport_class() - available_transports = [ - transports.SpecialistPoolServiceGrpcTransport, - ] - assert transport in available_transports - - transport = SpecialistPoolServiceClient.get_transport_class("grpc") - assert transport == transports.SpecialistPoolServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc"), - (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(SpecialistPoolServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceClient)) -@mock.patch.object(SpecialistPoolServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceAsyncClient)) -def test_specialist_pool_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(SpecialistPoolServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(SpecialistPoolServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc", "true"), - (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc", "false"), - (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(SpecialistPoolServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceClient)) -@mock.patch.object(SpecialistPoolServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_specialist_pool_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
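# ---------------------------------------------------------------------------
# [editor's note -- illustrative sketch, not part of the deleted file]
# A condensed re-implementation of the endpoint-selection rules that the
# GOOGLE_API_USE_MTLS_ENDPOINT cases above walk through (the real client
# raises MutualTLSChannelError for unsupported values; ValueError stands in
# here to keep the sketch dependency-free):
def pick_endpoint(use_mtls_env, have_client_cert,
                  default="aiplatform.googleapis.com",
                  mtls="aiplatform.mtls.googleapis.com"):
    if use_mtls_env == "never":
        return default
    if use_mtls_env == "always":
        return mtls
    if use_mtls_env == "auto":
        return mtls if have_client_cert else default
    raise ValueError(f"Unsupported value: {use_mtls_env}")

assert pick_endpoint("auto", have_client_cert=True) == "aiplatform.mtls.googleapis.com"
assert pick_endpoint("auto", have_client_cert=False) == "aiplatform.googleapis.com"
# ---------------------------------------------------------------------------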
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc"), - (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_specialist_pool_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. 
- options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc"), - (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_specialist_pool_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. - options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_specialist_pool_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1.services.specialist_pool_service.transports.SpecialistPoolServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = SpecialistPoolServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_create_specialist_pool(transport: str = 'grpc', request_type=specialist_pool_service.CreateSpecialistPoolRequest): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.CreateSpecialistPoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_specialist_pool_from_dict(): - test_create_specialist_pool(request_type=dict) - - -def test_create_specialist_pool_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_specialist_pool), - '__call__') as call: - client.create_specialist_pool() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.CreateSpecialistPoolRequest() - - -@pytest.mark.asyncio -async def test_create_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.CreateSpecialistPoolRequest): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.CreateSpecialistPoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_specialist_pool_async_from_dict(): - await test_create_specialist_pool_async(request_type=dict) - - -def test_create_specialist_pool_field_headers(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.CreateSpecialistPoolRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_specialist_pool), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_specialist_pool_field_headers_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.CreateSpecialistPoolRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_specialist_pool), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. 
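# ---------------------------------------------------------------------------
# [editor's note -- illustrative sketch, not part of the deleted file]
# The recurring mocking idiom in every RPC test here: patch __call__ on
# *type(stub)* so that invoking the stub returns a canned response while the
# outgoing request stays inspectable. Toy stand-in class:
import mock

class _FakeRpc:
    def __call__(self, request):
        raise RuntimeError("would hit the network")

rpc = _FakeRpc()
with mock.patch.object(type(rpc), "__call__") as call:
    call.return_value = "canned-response"
    assert rpc("my-request") == "canned-response"
    # __call__ was replaced on the type with a non-descriptor mock, so the
    # instance is not prepended and args[0] is the request itself -- the same
    # unpacking the tests above use.
    _, args, _ = call.mock_calls[0]
    assert args[0] == "my-request"
# ---------------------------------------------------------------------------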
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_specialist_pool_flattened(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_specialist_pool( - parent='parent_value', - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].specialist_pool == gca_specialist_pool.SpecialistPool(name='name_value') - - -def test_create_specialist_pool_flattened_error(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_specialist_pool( - specialist_pool_service.CreateSpecialistPoolRequest(), - parent='parent_value', - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_specialist_pool_flattened_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_specialist_pool( - parent='parent_value', - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].specialist_pool == gca_specialist_pool.SpecialistPool(name='name_value') - - -@pytest.mark.asyncio -async def test_create_specialist_pool_flattened_error_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_specialist_pool( - specialist_pool_service.CreateSpecialistPoolRequest(), - parent='parent_value', - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - ) - - -def test_get_specialist_pool(transport: str = 'grpc', request_type=specialist_pool_service.GetSpecialistPoolRequest): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = specialist_pool.SpecialistPool( - name='name_value', - display_name='display_name_value', - specialist_managers_count=2662, - specialist_manager_emails=['specialist_manager_emails_value'], - pending_data_labeling_jobs=['pending_data_labeling_jobs_value'], - ) - response = client.get_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.GetSpecialistPoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, specialist_pool.SpecialistPool) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.specialist_managers_count == 2662 - assert response.specialist_manager_emails == ['specialist_manager_emails_value'] - assert response.pending_data_labeling_jobs == ['pending_data_labeling_jobs_value'] - - -def test_get_specialist_pool_from_dict(): - test_get_specialist_pool(request_type=dict) - - -def test_get_specialist_pool_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_specialist_pool), - '__call__') as call: - client.get_specialist_pool() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.GetSpecialistPoolRequest() - - -@pytest.mark.asyncio -async def test_get_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.GetSpecialistPoolRequest): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. 
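# ---------------------------------------------------------------------------
# [editor's note -- illustrative sketch, not part of the deleted file]
# The *_flattened_error tests encode one client rule: an RPC accepts a request
# object OR flattened keyword fields, never both. A hypothetical condensed
# version of that guard:
def call_rpc(request=None, *, name=None):
    if request is not None and name is not None:
        raise ValueError("If the `request` argument is set, then none of "
                         "the individual field arguments should be set.")
    return request if request is not None else {"name": name}

assert call_rpc(name="name_value") == {"name": "name_value"}
try:
    call_rpc(request={"name": "x"}, name="y")
except ValueError:
    pass  # both provided -> rejected, exactly what the tests assert
# ---------------------------------------------------------------------------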
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool.SpecialistPool( - name='name_value', - display_name='display_name_value', - specialist_managers_count=2662, - specialist_manager_emails=['specialist_manager_emails_value'], - pending_data_labeling_jobs=['pending_data_labeling_jobs_value'], - )) - response = await client.get_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.GetSpecialistPoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, specialist_pool.SpecialistPool) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.specialist_managers_count == 2662 - assert response.specialist_manager_emails == ['specialist_manager_emails_value'] - assert response.pending_data_labeling_jobs == ['pending_data_labeling_jobs_value'] - - -@pytest.mark.asyncio -async def test_get_specialist_pool_async_from_dict(): - await test_get_specialist_pool_async(request_type=dict) - - -def test_get_specialist_pool_field_headers(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.GetSpecialistPoolRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_specialist_pool), - '__call__') as call: - call.return_value = specialist_pool.SpecialistPool() - client.get_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_specialist_pool_field_headers_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.GetSpecialistPoolRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_specialist_pool), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool.SpecialistPool()) - await client.get_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_specialist_pool_flattened(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. 
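# ---------------------------------------------------------------------------
# [editor's note -- illustrative sketch, not part of the deleted file]
# The async variants above wrap canned responses in
# grpc_helpers_async.FakeUnaryUnaryCall so that `await client.method(...)`
# works. A dependency-free toy with the same awaitable shape:
import asyncio

class FakeCall:
    def __init__(self, response):
        self._response = response

    def __await__(self):
        async def _deliver():
            return self._response
        return _deliver().__await__()

async def _demo():
    assert await FakeCall("specialist-pool") == "specialist-pool"

asyncio.run(_demo())
# ---------------------------------------------------------------------------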
- call.return_value = specialist_pool.SpecialistPool() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_specialist_pool( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_get_specialist_pool_flattened_error(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_specialist_pool( - specialist_pool_service.GetSpecialistPoolRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_specialist_pool_flattened_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = specialist_pool.SpecialistPool() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool.SpecialistPool()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_specialist_pool( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_get_specialist_pool_flattened_error_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_specialist_pool( - specialist_pool_service.GetSpecialistPoolRequest(), - name='name_value', - ) - - -def test_list_specialist_pools(transport: str = 'grpc', request_type=specialist_pool_service.ListSpecialistPoolsRequest): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = specialist_pool_service.ListSpecialistPoolsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_specialist_pools(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.ListSpecialistPoolsRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListSpecialistPoolsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_specialist_pools_from_dict(): - test_list_specialist_pools(request_type=dict) - - -def test_list_specialist_pools_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - client.list_specialist_pools() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.ListSpecialistPoolsRequest() - - -@pytest.mark.asyncio -async def test_list_specialist_pools_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.ListSpecialistPoolsRequest): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool_service.ListSpecialistPoolsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_specialist_pools(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.ListSpecialistPoolsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListSpecialistPoolsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_specialist_pools_async_from_dict(): - await test_list_specialist_pools_async(request_type=dict) - - -def test_list_specialist_pools_field_headers(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.ListSpecialistPoolsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - call.return_value = specialist_pool_service.ListSpecialistPoolsResponse() - client.list_specialist_pools(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
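# ---------------------------------------------------------------------------
# [editor's note -- illustrative sketch, not part of the deleted file]
# The field-header assertions check that resource names from the request are
# mirrored into gRPC metadata. The tests use gapic_v1.routing_header; the same
# header can be built by hand (slashes stay literal, matching the
# 'parent=parent/value' strings asserted above):
from urllib.parse import urlencode

def to_routing_metadata(params):
    return ("x-goog-request-params", urlencode(params, safe="/"))

assert to_routing_metadata({"parent": "parent/value"}) == (
    "x-goog-request-params",
    "parent=parent/value",
)
# ---------------------------------------------------------------------------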
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_specialist_pools_field_headers_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.ListSpecialistPoolsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool_service.ListSpecialistPoolsResponse()) - await client.list_specialist_pools(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_specialist_pools_flattened(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = specialist_pool_service.ListSpecialistPoolsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_specialist_pools( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -def test_list_specialist_pools_flattened_error(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_specialist_pools( - specialist_pool_service.ListSpecialistPoolsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_specialist_pools_flattened_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = specialist_pool_service.ListSpecialistPoolsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool_service.ListSpecialistPoolsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_specialist_pools( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -@pytest.mark.asyncio -async def test_list_specialist_pools_flattened_error_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_specialist_pools( - specialist_pool_service.ListSpecialistPoolsRequest(), - parent='parent_value', - ) - - -def test_list_specialist_pools_pager(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - ], - next_page_token='abc', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[], - next_page_token='def', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - ], - next_page_token='ghi', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_specialist_pools(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, specialist_pool.SpecialistPool) - for i in results) - -def test_list_specialist_pools_pages(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - ], - next_page_token='abc', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[], - next_page_token='def', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - ], - next_page_token='ghi', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - ], - ), - RuntimeError, - ) - pages = list(client.list_specialist_pools(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_specialist_pools_async_pager(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - ], - next_page_token='abc', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[], - next_page_token='def', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - ], - next_page_token='ghi', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_specialist_pools(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, specialist_pool.SpecialistPool) - for i in responses) - -@pytest.mark.asyncio -async def test_list_specialist_pools_async_pages(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - ], - next_page_token='abc', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[], - next_page_token='def', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - ], - next_page_token='ghi', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_specialist_pools(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_specialist_pool(transport: str = 'grpc', request_type=specialist_pool_service.DeleteSpecialistPoolRequest): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.DeleteSpecialistPoolRequest() - - # Establish that the response is the type that we expect. 
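# ---------------------------------------------------------------------------
# [editor's note -- illustrative sketch, not part of the deleted file]
# The pager tests feed four pages (3 + 0 + 1 + 2 items) via call.side_effect
# and expect six results. A toy pager showing why: iteration follows
# next_page_token until it is empty. Hypothetical types:
class Page:
    def __init__(self, items, token=""):
        self.items, self.next_page_token = items, token

def iterate(pages_by_token):
    token = None
    while True:
        page = pages_by_token[token]
        yield from page.items
        token = page.next_page_token
        if not token:
            return

pages = {
    None: Page([1, 2, 3], "abc"),
    "abc": Page([], "def"),
    "def": Page([4], "ghi"),
    "ghi": Page([5, 6]),  # no token -> last page
}
assert list(iterate(pages)) == [1, 2, 3, 4, 5, 6]
# ---------------------------------------------------------------------------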
- assert isinstance(response, future.Future) - - -def test_delete_specialist_pool_from_dict(): - test_delete_specialist_pool(request_type=dict) - - -def test_delete_specialist_pool_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_specialist_pool), - '__call__') as call: - client.delete_specialist_pool() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.DeleteSpecialistPoolRequest() - - -@pytest.mark.asyncio -async def test_delete_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.DeleteSpecialistPoolRequest): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.DeleteSpecialistPoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_specialist_pool_async_from_dict(): - await test_delete_specialist_pool_async(request_type=dict) - - -def test_delete_specialist_pool_field_headers(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.DeleteSpecialistPoolRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_specialist_pool), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_specialist_pool_field_headers_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = specialist_pool_service.DeleteSpecialistPoolRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_specialist_pool), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_specialist_pool_flattened(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_specialist_pool( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_delete_specialist_pool_flattened_error(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_specialist_pool( - specialist_pool_service.DeleteSpecialistPoolRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_specialist_pool_flattened_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_specialist_pool( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_delete_specialist_pool_flattened_error_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
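The field-header tests assert that the resource name in the request is mirrored into an 'x-goog-request-params' metadata entry, which Google frontends use to route the call. A small sketch of how that pair is produced, using the same google.api_core helper the tests themselves call (the parameter value is arbitrary):

.. code-block:: python

    from google.api_core import gapic_v1

    # Builds the ('x-goog-request-params', ...) metadata pair asserted above.
    metadata = gapic_v1.routing_header.to_grpc_metadata((("name", "name/value"),))
    print(metadata)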
- with pytest.raises(ValueError): - await client.delete_specialist_pool( - specialist_pool_service.DeleteSpecialistPoolRequest(), - name='name_value', - ) - - -def test_update_specialist_pool(transport: str = 'grpc', request_type=specialist_pool_service.UpdateSpecialistPoolRequest): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.update_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.UpdateSpecialistPoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_update_specialist_pool_from_dict(): - test_update_specialist_pool(request_type=dict) - - -def test_update_specialist_pool_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_specialist_pool), - '__call__') as call: - client.update_specialist_pool() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.UpdateSpecialistPoolRequest() - - -@pytest.mark.asyncio -async def test_update_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.UpdateSpecialistPoolRequest): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.update_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.UpdateSpecialistPoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_update_specialist_pool_async_from_dict(): - await test_update_specialist_pool_async(request_type=dict) - - -def test_update_specialist_pool_field_headers(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. 
Set these to a non-empty value. - request = specialist_pool_service.UpdateSpecialistPoolRequest() - - request.specialist_pool.name = 'specialist_pool.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_specialist_pool), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.update_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'specialist_pool.name=specialist_pool.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_specialist_pool_field_headers_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.UpdateSpecialistPoolRequest() - - request.specialist_pool.name = 'specialist_pool.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_specialist_pool), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.update_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'specialist_pool.name=specialist_pool.name/value', - ) in kw['metadata'] - - -def test_update_specialist_pool_flattened(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_specialist_pool( - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].specialist_pool == gca_specialist_pool.SpecialistPool(name='name_value') - assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) - - -def test_update_specialist_pool_flattened_error(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
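These flattened-call tests pin down the client's two calling conventions: pass a fully formed request object, or pass individual fields as keyword arguments, never both at once. A hedged sketch of both styles for update_specialist_pool, with placeholder resource names and field values:

.. code-block:: python

    from google.cloud import aiplatform_v1
    from google.protobuf import field_mask_pb2

    client = aiplatform_v1.SpecialistPoolServiceClient()
    pool = aiplatform_v1.SpecialistPool(
        name="projects/my-project/locations/us-central1/specialistPools/my-pool",
        display_name="relabeled pool",  # placeholder value
    )
    mask = field_mask_pb2.FieldMask(paths=["display_name"])

    # Style 1: a request object.
    request = aiplatform_v1.UpdateSpecialistPoolRequest(
        specialist_pool=pool,
        update_mask=mask,
    )
    operation = client.update_specialist_pool(request=request)

    # Style 2: flattened keyword arguments (mixing both styles raises ValueError).
    operation = client.update_specialist_pool(specialist_pool=pool, update_mask=mask)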
- with pytest.raises(ValueError): - client.update_specialist_pool( - specialist_pool_service.UpdateSpecialistPoolRequest(), - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_specialist_pool_flattened_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_specialist_pool( - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].specialist_pool == gca_specialist_pool.SpecialistPool(name='name_value') - assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) - - -@pytest.mark.asyncio -async def test_update_specialist_pool_flattened_error_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_specialist_pool( - specialist_pool_service.UpdateSpecialistPoolRequest(), - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.SpecialistPoolServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.SpecialistPoolServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = SpecialistPoolServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.SpecialistPoolServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = SpecialistPoolServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.SpecialistPoolServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = SpecialistPoolServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. 
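test_transport_instance and test_credentials_transport_error together show that a pre-built transport may be injected, but not combined with separate credentials, a credentials file, or scopes, since the transport already carries its own credentials. A sketch of the injection pattern, assuming ADC is available:

.. code-block:: python

    import google.auth
    from google.cloud.aiplatform_v1.services.specialist_pool_service import (
        SpecialistPoolServiceClient,
        transports,
    )

    credentials, _ = google.auth.default()
    transport = transports.SpecialistPoolServiceGrpcTransport(credentials=credentials)

    # Note: also passing credentials to the client here would raise ValueError.
    client = SpecialistPoolServiceClient(transport=transport)
    assert client.transport is transport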
- transport = transports.SpecialistPoolServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.SpecialistPoolServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.SpecialistPoolServiceGrpcTransport, - transports.SpecialistPoolServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.SpecialistPoolServiceGrpcTransport, - ) - -def test_specialist_pool_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.SpecialistPoolServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_specialist_pool_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.SpecialistPoolServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
- methods = ( - 'create_specialist_pool', - 'get_specialist_pool', - 'list_specialist_pools', - 'delete_specialist_pool', - 'update_specialist_pool', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -@requires_google_auth_gte_1_25_0 -def test_specialist_pool_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.SpecialistPoolServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@requires_google_auth_lt_1_25_0 -def test_specialist_pool_service_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.SpecialistPoolServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - ), - quota_project_id="octopus", - ) - - -def test_specialist_pool_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.SpecialistPoolServiceTransport() - adc.assert_called_once() - - -@requires_google_auth_gte_1_25_0 -def test_specialist_pool_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - SpecialistPoolServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@requires_google_auth_lt_1_25_0 -def test_specialist_pool_service_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. 
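The ADC and credentials-file tests above reduce to one behavior: when no credentials are passed explicitly, google.auth.default() is consulted, with the cloud-platform scope applied as the default scope. A brief sketch of the two everyday options the tests simulate (the key-file path is a placeholder):

.. code-block:: python

    from google.cloud import aiplatform_v1

    # 1. Application Default Credentials, as exercised by the *_adc tests.
    client = aiplatform_v1.SpecialistPoolServiceClient()

    # 2. An explicit service-account key file, as exercised by the
    #    *_credentials_file tests.
    client = aiplatform_v1.SpecialistPoolServiceClient(
        client_options={"credentials_file": "credentials.json"},
    )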
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - SpecialistPoolServiceClient() - adc.assert_called_once_with( - scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.SpecialistPoolServiceGrpcTransport, - transports.SpecialistPoolServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_gte_1_25_0 -def test_specialist_pool_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.SpecialistPoolServiceGrpcTransport, - transports.SpecialistPoolServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_specialist_pool_service_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with(scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.SpecialistPoolServiceGrpcTransport, grpc_helpers), - (transports.SpecialistPoolServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_specialist_pool_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.SpecialistPoolServiceGrpcTransport, transports.SpecialistPoolServiceGrpcAsyncIOTransport]) -def test_specialist_pool_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. 
- with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_specialist_pool_service_host_no_port(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_specialist_pool_service_host_with_port(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_specialist_pool_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.SpecialistPoolServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials is None - - -def test_specialist_pool_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.SpecialistPoolServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials is None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor.
-@pytest.mark.parametrize("transport_class", [transports.SpecialistPoolServiceGrpcTransport, transports.SpecialistPoolServiceGrpcAsyncIOTransport]) -def test_specialist_pool_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.SpecialistPoolServiceGrpcTransport, transports.SpecialistPoolServiceGrpcAsyncIOTransport]) -def test_specialist_pool_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_specialist_pool_service_grpc_lro_client(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. 
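The LRO tests here and below verify that the mutating methods share a single google.api_core operations client. From the caller's side this surfaces as a returned operation object whose result() blocks until the server finishes; a sketch with a placeholder resource name:

.. code-block:: python

    from google.cloud import aiplatform_v1

    client = aiplatform_v1.SpecialistPoolServiceClient()
    operation = client.delete_specialist_pool(
        name="projects/my-project/locations/us-central1/specialistPools/my-pool",
    )
    operation.result(timeout=300)  # polls the LRO until the deletion completes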
- assert transport.operations_client is transport.operations_client - - -def test_specialist_pool_service_grpc_lro_async_client(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have an api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_specialist_pool_path(): - project = "squid" - location = "clam" - specialist_pool = "whelk" - expected = "projects/{project}/locations/{location}/specialistPools/{specialist_pool}".format(project=project, location=location, specialist_pool=specialist_pool, ) - actual = SpecialistPoolServiceClient.specialist_pool_path(project, location, specialist_pool) - assert expected == actual - - -def test_parse_specialist_pool_path(): - expected = { - "project": "octopus", - "location": "oyster", - "specialist_pool": "nudibranch", - } - path = SpecialistPoolServiceClient.specialist_pool_path(**expected) - - # Check that the path construction is reversible. - actual = SpecialistPoolServiceClient.parse_specialist_pool_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "cuttlefish" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = SpecialistPoolServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "mussel", - } - path = SpecialistPoolServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = SpecialistPoolServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "winkle" - expected = "folders/{folder}".format(folder=folder, ) - actual = SpecialistPoolServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "nautilus", - } - path = SpecialistPoolServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = SpecialistPoolServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "scallop" - expected = "organizations/{organization}".format(organization=organization, ) - actual = SpecialistPoolServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "abalone", - } - path = SpecialistPoolServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = SpecialistPoolServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "squid" - expected = "projects/{project}".format(project=project, ) - actual = SpecialistPoolServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "clam", - } - path = SpecialistPoolServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible.
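The path-helper tests around this point all check one round trip: compose a resource name from its components, then parse it back into a dict. In application code the same classmethods avoid hand-formatting resource names; the identifiers below are placeholders:

.. code-block:: python

    from google.cloud import aiplatform_v1

    path = aiplatform_v1.SpecialistPoolServiceClient.specialist_pool_path(
        "my-project", "us-central1", "my-pool",
    )
    # -> 'projects/my-project/locations/us-central1/specialistPools/my-pool'

    parsed = aiplatform_v1.SpecialistPoolServiceClient.parse_specialist_pool_path(path)
    # -> {'project': 'my-project', 'location': 'us-central1', 'specialist_pool': 'my-pool'}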
- actual = SpecialistPoolServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "whelk" - location = "octopus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = SpecialistPoolServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - } - path = SpecialistPoolServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. - actual = SpecialistPoolServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.SpecialistPoolServiceTransport, '_prep_wrapped_messages') as prep: - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.SpecialistPoolServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = SpecialistPoolServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1/tests/unit/gapic/definition_v1/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/definition_v1/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/definition_v1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/tests/unit/gapic/instance_v1/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/instance_v1/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/instance_v1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# diff --git a/owl-bot-staging/v1/tests/unit/gapic/params_v1/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/params_v1/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/params_v1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/tests/unit/gapic/prediction_v1/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/prediction_v1/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/prediction_v1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/.coveragerc b/owl-bot-staging/v1beta1/.coveragerc deleted file mode 100644 index a328166917..0000000000 --- a/owl-bot-staging/v1beta1/.coveragerc +++ /dev/null @@ -1,17 +0,0 @@ -[run] -branch = True - -[report] -show_missing = True -omit = - google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py -exclude_lines = - # Re-enable the standard pragma - pragma: NO COVER - # Ignore debug-only repr - def __repr__ - # Ignore pkg_resources exceptions. - # This is added at the module level as a safeguard for if someone - # generates the code and tries to run it without pip installing. This - # makes it virtually impossible to test properly. - except pkg_resources.DistributionNotFound diff --git a/owl-bot-staging/v1beta1/MANIFEST.in b/owl-bot-staging/v1beta1/MANIFEST.in deleted file mode 100644 index e386e05fec..0000000000 --- a/owl-bot-staging/v1beta1/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -recursive-include google/cloud/aiplatform/v1beta1/schema/trainingjob/definition *.py -recursive-include google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1 *.py diff --git a/owl-bot-staging/v1beta1/README.rst b/owl-bot-staging/v1beta1/README.rst deleted file mode 100644 index c0e4d26d4d..0000000000 --- a/owl-bot-staging/v1beta1/README.rst +++ /dev/null @@ -1,49 +0,0 @@ -Python Client for Google Cloud Aiplatform V1beta1 Schema Trainingjob Definition API -==================================================================================== - -Quick Start ------------ - -In order to use this library, you first need to go through the following steps: - -1. `Select or create a Cloud Platform project.`_ -2. `Enable billing for your project.`_ -3.
Enable the Google Cloud Aiplatform V1beta1 Schema Trainingjob Definition API. -4. `Setup Authentication.`_ - -.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project -.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project -.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html - -Installation -~~~~~~~~~~~~ - -Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to -create isolated Python environments. The basic problem it addresses is one of -dependencies and versions, and indirectly permissions. - -With `virtualenv`_, it's possible to install this library without needing system -install permissions, and without clashing with the installed system -dependencies. - -.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ - - -Mac/Linux -^^^^^^^^^ - -.. code-block:: console - - python3 -m venv <your-env> - source <your-env>/bin/activate - <your-env>/bin/pip install /path/to/library - - -Windows -^^^^^^^ - -.. code-block:: console - - python3 -m venv <your-env> - <your-env>\Scripts\activate - <your-env>\Scripts\pip.exe install \path\to\library diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/dataset_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/dataset_service.rst deleted file mode 100644 index 43fad30e55..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/dataset_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -DatasetService -------------------------------- - -.. automodule:: google.cloud.aiplatform_v1beta1.services.dataset_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1beta1.services.dataset_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/endpoint_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/endpoint_service.rst deleted file mode 100644 index 022799a059..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/endpoint_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -EndpointService --------------------------------- - -.. automodule:: google.cloud.aiplatform_v1beta1.services.endpoint_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1beta1.services.endpoint_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_online_serving_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_online_serving_service.rst deleted file mode 100644 index 21013eb751..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_online_serving_service.rst +++ /dev/null @@ -1,6 +0,0 @@ -FeaturestoreOnlineServingService -------------------------------------------------- - -.. automodule:: google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_service.rst deleted file mode 100644 index 8d2f33039e..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -FeaturestoreService ------------------------------------- - -.. automodule:: google.cloud.aiplatform_v1beta1.services.featurestore_service - :members: - :inherited-members: - -..
automodule:: google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_endpoint_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_endpoint_service.rst deleted file mode 100644 index 65c910142e..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_endpoint_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -IndexEndpointService --------------------------------------- - -.. automodule:: google.cloud.aiplatform_v1beta1.services.index_endpoint_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1beta1.services.index_endpoint_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_service.rst deleted file mode 100644 index 96afb58594..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -IndexService ------------------------------- - -.. automodule:: google.cloud.aiplatform_v1beta1.services.index_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1beta1.services.index_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/job_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/job_service.rst deleted file mode 100644 index 46b1268166..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/job_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -JobService ----------------------------- - -.. automodule:: google.cloud.aiplatform_v1beta1.services.job_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1beta1.services.job_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/metadata_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/metadata_service.rst deleted file mode 100644 index 3c07725687..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/metadata_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -MetadataService ---------------------------------- - -.. automodule:: google.cloud.aiplatform_v1beta1.services.metadata_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1beta1.services.metadata_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/migration_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/migration_service.rst deleted file mode 100644 index be164d59ba..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/migration_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -MigrationService ----------------------------------- - -.. automodule:: google.cloud.aiplatform_v1beta1.services.migration_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1beta1.services.migration_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/model_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/model_service.rst deleted file mode 100644 index be68f796b0..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/model_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -ModelService ------------------------------- - -.. automodule:: google.cloud.aiplatform_v1beta1.services.model_service - :members: - :inherited-members: - -.. 
automodule:: google.cloud.aiplatform_v1beta1.services.model_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/pipeline_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/pipeline_service.rst deleted file mode 100644 index 1180370863..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/pipeline_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -PipelineService ---------------------------------- - -.. automodule:: google.cloud.aiplatform_v1beta1.services.pipeline_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/prediction_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/prediction_service.rst deleted file mode 100644 index 03c1150df0..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/prediction_service.rst +++ /dev/null @@ -1,6 +0,0 @@ -PredictionService ------------------------------------ - -.. automodule:: google.cloud.aiplatform_v1beta1.services.prediction_service - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/services.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/services.rst deleted file mode 100644 index 490112c7d9..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/services.rst +++ /dev/null @@ -1,20 +0,0 @@ -Services for Google Cloud Aiplatform v1beta1 API -================================================ -.. toctree:: - :maxdepth: 2 - - dataset_service - endpoint_service - featurestore_online_serving_service - featurestore_service - index_endpoint_service - index_service - job_service - metadata_service - migration_service - model_service - pipeline_service - prediction_service - specialist_pool_service - tensorboard_service - vizier_service diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/specialist_pool_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/specialist_pool_service.rst deleted file mode 100644 index 2f13b68844..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/specialist_pool_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -SpecialistPoolService ---------------------------------------- - -.. automodule:: google.cloud.aiplatform_v1beta1.services.specialist_pool_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1beta1.services.specialist_pool_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/tensorboard_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/tensorboard_service.rst deleted file mode 100644 index 97d94feedc..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/tensorboard_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -TensorboardService ------------------------------------- - -.. automodule:: google.cloud.aiplatform_v1beta1.services.tensorboard_service - :members: - :inherited-members: - -.. 
automodule:: google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/types.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/types.rst deleted file mode 100644 index 770675f8ea..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Cloud Aiplatform v1beta1 API -============================================= - -.. automodule:: google.cloud.aiplatform_v1beta1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/vizier_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/vizier_service.rst deleted file mode 100644 index 8cad590f6c..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/vizier_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -VizierService -------------------------------- - -.. automodule:: google.cloud.aiplatform_v1beta1.services.vizier_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1beta1.services.vizier_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/conf.py b/owl-bot-staging/v1beta1/docs/conf.py deleted file mode 100644 index 7a174dcfc0..0000000000 --- a/owl-bot-staging/v1beta1/docs/conf.py +++ /dev/null @@ -1,376 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# -# google-cloud-aiplatform-v1beta1-schema-trainingjob-definition documentation build configuration file -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys -import os -import shlex - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath("..")) - -__version__ = "0.1.0" - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "1.6.3" - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.autosummary", - "sphinx.ext.intersphinx", - "sphinx.ext.coverage", - "sphinx.ext.napoleon", - "sphinx.ext.todo", - "sphinx.ext.viewcode", -] - -# autodoc/autosummary flags -autoclass_content = "both" -autodoc_default_flags = ["members"] -autosummary_generate = True - - -# Add any paths that contain templates here, relative to this directory. 
-templates_path = ["_templates"] - -# Allow markdown includes (so releases.md can include CHANGLEOG.md) -# http://www.sphinx-doc.org/en/master/markdown.html -source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -source_suffix = [".rst", ".md"] - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = "index" - -# General information about the project. -project = u"google-cloud-aiplatform-v1beta1-schema-trainingjob-definition" -copyright = u"2020, Google, LLC" -author = u"Google APIs" # TODO: autogenerate this bit - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The full version, including alpha/beta/rc tags. -release = __version__ -# The short X.Y version. -version = ".".join(release.split(".")[0:2]) - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ["_build"] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = True - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = "alabaster" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = { - "description": "Google Cloud Aiplatform V1beta1 Schema Trainingjob Client Libraries for Python", - "github_user": "googleapis", - "github_repo": "google-cloud-python", - "github_banner": True, - "font_family": "'Roboto', Georgia, sans", - "head_font_family": "'Roboto', Georgia, serif", - "code_font_family": "'Roboto Mono', 'Consolas', monospace", -} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". 
-# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a <link> tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. -# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -# html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# Now only 'ja' uses this config value -# html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -# html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = "google-cloud-aiplatform-v1beta1-schema-trainingjob-definition-doc" - -# -- Options for warnings ------------------------------------------------------ - - -suppress_warnings = [ - # Temporarily suppress this to avoid "more than one target found for - # cross-reference" warnings, which are intractable for us to avoid while in - # a mono-repo.
- # See https://github.com/sphinx-doc/sphinx/blob - # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 - "ref.python" -] - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - # 'preamble': '', - # Latex figure (float) alignment - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ( - master_doc, - "google-cloud-aiplatform-v1beta1-schema-trainingjob-definition.tex", - u"google-cloud-aiplatform-v1beta1-schema-trainingjob-definition Documentation", - author, - "manual", - ) -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ( - master_doc, - "google-cloud-aiplatform-v1beta1-schema-trainingjob-definition", - u"Google Cloud Aiplatform V1beta1 Schema Trainingjob Definition Documentation", - [author], - 1, - ) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - master_doc, - "google-cloud-aiplatform-v1beta1-schema-trainingjob-definition", - u"google-cloud-aiplatform-v1beta1-schema-trainingjob-definition Documentation", - author, - "google-cloud-aiplatform-v1beta1-schema-trainingjob-definition", - "GAPIC library for Google Cloud Aiplatform V1beta1 Schema Trainingjob Definition API", - "APIs", - ) -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - - -# Example configuration for intersphinx: refer to the Python standard library. 
-intersphinx_mapping = { - "python": ("http://python.readthedocs.org/en/latest/", None), - "gax": ("https://gax-python.readthedocs.org/en/latest/", None), - "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), - "grpc": ("https://grpc.io/grpc/python/", None), - "requests": ("http://requests.kennethreitz.org/en/stable/", None), - "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), - "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), -} - - -# Napoleon settings -napoleon_google_docstring = True -napoleon_numpy_docstring = True -napoleon_include_private_with_doc = False -napoleon_include_special_with_doc = True -napoleon_use_admonition_for_examples = False -napoleon_use_admonition_for_notes = False -napoleon_use_admonition_for_references = False -napoleon_use_ivar = False -napoleon_use_param = True -napoleon_use_rtype = True diff --git a/owl-bot-staging/v1beta1/docs/definition_v1beta1/services.rst b/owl-bot-staging/v1beta1/docs/definition_v1beta1/services.rst deleted file mode 100644 index 5f1ed5f2b7..0000000000 --- a/owl-bot-staging/v1beta1/docs/definition_v1beta1/services.rst +++ /dev/null @@ -1,4 +0,0 @@ -Services for Google Cloud Aiplatform V1beta1 Schema Trainingjob Definition v1beta1 API -====================================================================================== -.. toctree:: - :maxdepth: 2 diff --git a/owl-bot-staging/v1beta1/docs/definition_v1beta1/types.rst b/owl-bot-staging/v1beta1/docs/definition_v1beta1/types.rst deleted file mode 100644 index f4fe7a5301..0000000000 --- a/owl-bot-staging/v1beta1/docs/definition_v1beta1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Cloud Aiplatform V1beta1 Schema Trainingjob Definition v1beta1 API -=================================================================================== - -.. automodule:: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1beta1/docs/index.rst b/owl-bot-staging/v1beta1/docs/index.rst deleted file mode 100644 index ec6c42c2ed..0000000000 --- a/owl-bot-staging/v1beta1/docs/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -API Reference -------------- -.. toctree:: - :maxdepth: 2 - - definition_v1beta1/services - definition_v1beta1/types diff --git a/owl-bot-staging/v1beta1/docs/instance_v1beta1/services.rst b/owl-bot-staging/v1beta1/docs/instance_v1beta1/services.rst deleted file mode 100644 index 941dbcca59..0000000000 --- a/owl-bot-staging/v1beta1/docs/instance_v1beta1/services.rst +++ /dev/null @@ -1,4 +0,0 @@ -Services for Google Cloud Aiplatform V1beta1 Schema Predict Instance v1beta1 API -================================================================================ -.. toctree:: - :maxdepth: 2 diff --git a/owl-bot-staging/v1beta1/docs/instance_v1beta1/types.rst b/owl-bot-staging/v1beta1/docs/instance_v1beta1/types.rst deleted file mode 100644 index 7caa088065..0000000000 --- a/owl-bot-staging/v1beta1/docs/instance_v1beta1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Cloud Aiplatform V1beta1 Schema Predict Instance v1beta1 API -============================================================================= - -.. 
automodule:: google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1beta1/docs/params_v1beta1/services.rst b/owl-bot-staging/v1beta1/docs/params_v1beta1/services.rst deleted file mode 100644 index b3b897a0f4..0000000000 --- a/owl-bot-staging/v1beta1/docs/params_v1beta1/services.rst +++ /dev/null @@ -1,4 +0,0 @@ -Services for Google Cloud Aiplatform V1beta1 Schema Predict Params v1beta1 API -============================================================================== -.. toctree:: - :maxdepth: 2 diff --git a/owl-bot-staging/v1beta1/docs/params_v1beta1/types.rst b/owl-bot-staging/v1beta1/docs/params_v1beta1/types.rst deleted file mode 100644 index 722a1d8ba0..0000000000 --- a/owl-bot-staging/v1beta1/docs/params_v1beta1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Cloud Aiplatform V1beta1 Schema Predict Params v1beta1 API -=========================================================================== - -.. automodule:: google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1beta1/docs/prediction_v1beta1/services.rst b/owl-bot-staging/v1beta1/docs/prediction_v1beta1/services.rst deleted file mode 100644 index 6de5e17520..0000000000 --- a/owl-bot-staging/v1beta1/docs/prediction_v1beta1/services.rst +++ /dev/null @@ -1,4 +0,0 @@ -Services for Google Cloud Aiplatform V1beta1 Schema Predict Prediction v1beta1 API -================================================================================== -.. toctree:: - :maxdepth: 2 diff --git a/owl-bot-staging/v1beta1/docs/prediction_v1beta1/types.rst b/owl-bot-staging/v1beta1/docs/prediction_v1beta1/types.rst deleted file mode 100644 index b14182d6d7..0000000000 --- a/owl-bot-staging/v1beta1/docs/prediction_v1beta1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Cloud Aiplatform V1beta1 Schema Predict Prediction v1beta1 API -=============================================================================== - -.. automodule:: google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/__init__.py deleted file mode 100644 index 009ab39ee1..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/__init__.py +++ /dev/null @@ -1,875 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from google.cloud.aiplatform_v1beta1.services.dataset_service.client import DatasetServiceClient -from google.cloud.aiplatform_v1beta1.services.dataset_service.async_client import DatasetServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.endpoint_service.client import EndpointServiceClient -from google.cloud.aiplatform_v1beta1.services.endpoint_service.async_client import EndpointServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.client import FeaturestoreOnlineServingServiceClient -from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.async_client import FeaturestoreOnlineServingServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.featurestore_service.client import FeaturestoreServiceClient -from google.cloud.aiplatform_v1beta1.services.featurestore_service.async_client import FeaturestoreServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.index_endpoint_service.client import IndexEndpointServiceClient -from google.cloud.aiplatform_v1beta1.services.index_endpoint_service.async_client import IndexEndpointServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.index_service.client import IndexServiceClient -from google.cloud.aiplatform_v1beta1.services.index_service.async_client import IndexServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.job_service.client import JobServiceClient -from google.cloud.aiplatform_v1beta1.services.job_service.async_client import JobServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.metadata_service.client import MetadataServiceClient -from google.cloud.aiplatform_v1beta1.services.metadata_service.async_client import MetadataServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.migration_service.client import MigrationServiceClient -from google.cloud.aiplatform_v1beta1.services.migration_service.async_client import MigrationServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.model_service.client import ModelServiceClient -from google.cloud.aiplatform_v1beta1.services.model_service.async_client import ModelServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.pipeline_service.client import PipelineServiceClient -from google.cloud.aiplatform_v1beta1.services.pipeline_service.async_client import PipelineServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.prediction_service.client import PredictionServiceClient -from google.cloud.aiplatform_v1beta1.services.prediction_service.async_client import PredictionServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.specialist_pool_service.client import SpecialistPoolServiceClient -from google.cloud.aiplatform_v1beta1.services.specialist_pool_service.async_client import SpecialistPoolServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.tensorboard_service.client import TensorboardServiceClient -from google.cloud.aiplatform_v1beta1.services.tensorboard_service.async_client import TensorboardServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.vizier_service.client import VizierServiceClient -from google.cloud.aiplatform_v1beta1.services.vizier_service.async_client import VizierServiceAsyncClient - -from google.cloud.aiplatform_v1beta1.types.accelerator_type import AcceleratorType -from google.cloud.aiplatform_v1beta1.types.annotation import Annotation -from google.cloud.aiplatform_v1beta1.types.annotation_spec import AnnotationSpec -from 
google.cloud.aiplatform_v1beta1.types.artifact import Artifact -from google.cloud.aiplatform_v1beta1.types.batch_prediction_job import BatchPredictionJob -from google.cloud.aiplatform_v1beta1.types.completion_stats import CompletionStats -from google.cloud.aiplatform_v1beta1.types.context import Context -from google.cloud.aiplatform_v1beta1.types.custom_job import ContainerSpec -from google.cloud.aiplatform_v1beta1.types.custom_job import CustomJob -from google.cloud.aiplatform_v1beta1.types.custom_job import CustomJobSpec -from google.cloud.aiplatform_v1beta1.types.custom_job import PythonPackageSpec -from google.cloud.aiplatform_v1beta1.types.custom_job import Scheduling -from google.cloud.aiplatform_v1beta1.types.custom_job import WorkerPoolSpec -from google.cloud.aiplatform_v1beta1.types.data_item import DataItem -from google.cloud.aiplatform_v1beta1.types.data_labeling_job import ActiveLearningConfig -from google.cloud.aiplatform_v1beta1.types.data_labeling_job import DataLabelingJob -from google.cloud.aiplatform_v1beta1.types.data_labeling_job import SampleConfig -from google.cloud.aiplatform_v1beta1.types.data_labeling_job import TrainingConfig -from google.cloud.aiplatform_v1beta1.types.dataset import Dataset -from google.cloud.aiplatform_v1beta1.types.dataset import ExportDataConfig -from google.cloud.aiplatform_v1beta1.types.dataset import ImportDataConfig -from google.cloud.aiplatform_v1beta1.types.dataset_service import CreateDatasetOperationMetadata -from google.cloud.aiplatform_v1beta1.types.dataset_service import CreateDatasetRequest -from google.cloud.aiplatform_v1beta1.types.dataset_service import DeleteDatasetRequest -from google.cloud.aiplatform_v1beta1.types.dataset_service import ExportDataOperationMetadata -from google.cloud.aiplatform_v1beta1.types.dataset_service import ExportDataRequest -from google.cloud.aiplatform_v1beta1.types.dataset_service import ExportDataResponse -from google.cloud.aiplatform_v1beta1.types.dataset_service import GetAnnotationSpecRequest -from google.cloud.aiplatform_v1beta1.types.dataset_service import GetDatasetRequest -from google.cloud.aiplatform_v1beta1.types.dataset_service import ImportDataOperationMetadata -from google.cloud.aiplatform_v1beta1.types.dataset_service import ImportDataRequest -from google.cloud.aiplatform_v1beta1.types.dataset_service import ImportDataResponse -from google.cloud.aiplatform_v1beta1.types.dataset_service import ListAnnotationsRequest -from google.cloud.aiplatform_v1beta1.types.dataset_service import ListAnnotationsResponse -from google.cloud.aiplatform_v1beta1.types.dataset_service import ListDataItemsRequest -from google.cloud.aiplatform_v1beta1.types.dataset_service import ListDataItemsResponse -from google.cloud.aiplatform_v1beta1.types.dataset_service import ListDatasetsRequest -from google.cloud.aiplatform_v1beta1.types.dataset_service import ListDatasetsResponse -from google.cloud.aiplatform_v1beta1.types.dataset_service import UpdateDatasetRequest -from google.cloud.aiplatform_v1beta1.types.deployed_index_ref import DeployedIndexRef -from google.cloud.aiplatform_v1beta1.types.deployed_model_ref import DeployedModelRef -from google.cloud.aiplatform_v1beta1.types.encryption_spec import EncryptionSpec -from google.cloud.aiplatform_v1beta1.types.endpoint import DeployedModel -from google.cloud.aiplatform_v1beta1.types.endpoint import Endpoint -from google.cloud.aiplatform_v1beta1.types.endpoint_service import CreateEndpointOperationMetadata -from google.cloud.aiplatform_v1beta1.types.endpoint_service 
import CreateEndpointRequest -from google.cloud.aiplatform_v1beta1.types.endpoint_service import DeleteEndpointRequest -from google.cloud.aiplatform_v1beta1.types.endpoint_service import DeployModelOperationMetadata -from google.cloud.aiplatform_v1beta1.types.endpoint_service import DeployModelRequest -from google.cloud.aiplatform_v1beta1.types.endpoint_service import DeployModelResponse -from google.cloud.aiplatform_v1beta1.types.endpoint_service import GetEndpointRequest -from google.cloud.aiplatform_v1beta1.types.endpoint_service import ListEndpointsRequest -from google.cloud.aiplatform_v1beta1.types.endpoint_service import ListEndpointsResponse -from google.cloud.aiplatform_v1beta1.types.endpoint_service import UndeployModelOperationMetadata -from google.cloud.aiplatform_v1beta1.types.endpoint_service import UndeployModelRequest -from google.cloud.aiplatform_v1beta1.types.endpoint_service import UndeployModelResponse -from google.cloud.aiplatform_v1beta1.types.endpoint_service import UpdateEndpointRequest -from google.cloud.aiplatform_v1beta1.types.entity_type import EntityType -from google.cloud.aiplatform_v1beta1.types.env_var import EnvVar -from google.cloud.aiplatform_v1beta1.types.event import Event -from google.cloud.aiplatform_v1beta1.types.execution import Execution -from google.cloud.aiplatform_v1beta1.types.explanation import Attribution -from google.cloud.aiplatform_v1beta1.types.explanation import Explanation -from google.cloud.aiplatform_v1beta1.types.explanation import ExplanationMetadataOverride -from google.cloud.aiplatform_v1beta1.types.explanation import ExplanationParameters -from google.cloud.aiplatform_v1beta1.types.explanation import ExplanationSpec -from google.cloud.aiplatform_v1beta1.types.explanation import ExplanationSpecOverride -from google.cloud.aiplatform_v1beta1.types.explanation import FeatureNoiseSigma -from google.cloud.aiplatform_v1beta1.types.explanation import IntegratedGradientsAttribution -from google.cloud.aiplatform_v1beta1.types.explanation import ModelExplanation -from google.cloud.aiplatform_v1beta1.types.explanation import SampledShapleyAttribution -from google.cloud.aiplatform_v1beta1.types.explanation import SmoothGradConfig -from google.cloud.aiplatform_v1beta1.types.explanation import XraiAttribution -from google.cloud.aiplatform_v1beta1.types.explanation_metadata import ExplanationMetadata -from google.cloud.aiplatform_v1beta1.types.feature import Feature -from google.cloud.aiplatform_v1beta1.types.feature_monitoring_stats import FeatureStatsAnomaly -from google.cloud.aiplatform_v1beta1.types.feature_selector import FeatureSelector -from google.cloud.aiplatform_v1beta1.types.feature_selector import IdMatcher -from google.cloud.aiplatform_v1beta1.types.featurestore import Featurestore -from google.cloud.aiplatform_v1beta1.types.featurestore_monitoring import FeaturestoreMonitoringConfig -from google.cloud.aiplatform_v1beta1.types.featurestore_online_service import FeatureValue -from google.cloud.aiplatform_v1beta1.types.featurestore_online_service import FeatureValueList -from google.cloud.aiplatform_v1beta1.types.featurestore_online_service import ReadFeatureValuesRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_online_service import ReadFeatureValuesResponse -from google.cloud.aiplatform_v1beta1.types.featurestore_online_service import StreamingReadFeatureValuesRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import BatchCreateFeaturesOperationMetadata -from 
google.cloud.aiplatform_v1beta1.types.featurestore_service import BatchCreateFeaturesRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import BatchCreateFeaturesResponse -from google.cloud.aiplatform_v1beta1.types.featurestore_service import BatchReadFeatureValuesOperationMetadata -from google.cloud.aiplatform_v1beta1.types.featurestore_service import BatchReadFeatureValuesRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import BatchReadFeatureValuesResponse -from google.cloud.aiplatform_v1beta1.types.featurestore_service import CreateEntityTypeOperationMetadata -from google.cloud.aiplatform_v1beta1.types.featurestore_service import CreateEntityTypeRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import CreateFeatureOperationMetadata -from google.cloud.aiplatform_v1beta1.types.featurestore_service import CreateFeatureRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import CreateFeaturestoreOperationMetadata -from google.cloud.aiplatform_v1beta1.types.featurestore_service import CreateFeaturestoreRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import DeleteEntityTypeRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import DeleteFeatureRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import DeleteFeaturestoreRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import DestinationFeatureSetting -from google.cloud.aiplatform_v1beta1.types.featurestore_service import ExportFeatureValuesOperationMetadata -from google.cloud.aiplatform_v1beta1.types.featurestore_service import ExportFeatureValuesRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import ExportFeatureValuesResponse -from google.cloud.aiplatform_v1beta1.types.featurestore_service import FeatureValueDestination -from google.cloud.aiplatform_v1beta1.types.featurestore_service import GetEntityTypeRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import GetFeatureRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import GetFeaturestoreRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import ImportFeatureValuesOperationMetadata -from google.cloud.aiplatform_v1beta1.types.featurestore_service import ImportFeatureValuesRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import ImportFeatureValuesResponse -from google.cloud.aiplatform_v1beta1.types.featurestore_service import ListEntityTypesRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import ListEntityTypesResponse -from google.cloud.aiplatform_v1beta1.types.featurestore_service import ListFeaturesRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import ListFeaturesResponse -from google.cloud.aiplatform_v1beta1.types.featurestore_service import ListFeaturestoresRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import ListFeaturestoresResponse -from google.cloud.aiplatform_v1beta1.types.featurestore_service import SearchFeaturesRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import SearchFeaturesResponse -from google.cloud.aiplatform_v1beta1.types.featurestore_service import UpdateEntityTypeRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import UpdateFeatureRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import UpdateFeaturestoreOperationMetadata -from 
google.cloud.aiplatform_v1beta1.types.featurestore_service import UpdateFeaturestoreRequest -from google.cloud.aiplatform_v1beta1.types.hyperparameter_tuning_job import HyperparameterTuningJob -from google.cloud.aiplatform_v1beta1.types.index import Index -from google.cloud.aiplatform_v1beta1.types.index_endpoint import DeployedIndex -from google.cloud.aiplatform_v1beta1.types.index_endpoint import DeployedIndexAuthConfig -from google.cloud.aiplatform_v1beta1.types.index_endpoint import IndexEndpoint -from google.cloud.aiplatform_v1beta1.types.index_endpoint import IndexPrivateEndpoints -from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import CreateIndexEndpointOperationMetadata -from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import CreateIndexEndpointRequest -from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import DeleteIndexEndpointRequest -from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import DeployIndexOperationMetadata -from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import DeployIndexRequest -from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import DeployIndexResponse -from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import GetIndexEndpointRequest -from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import ListIndexEndpointsRequest -from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import ListIndexEndpointsResponse -from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import UndeployIndexOperationMetadata -from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import UndeployIndexRequest -from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import UndeployIndexResponse -from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import UpdateIndexEndpointRequest -from google.cloud.aiplatform_v1beta1.types.index_service import CreateIndexOperationMetadata -from google.cloud.aiplatform_v1beta1.types.index_service import CreateIndexRequest -from google.cloud.aiplatform_v1beta1.types.index_service import DeleteIndexRequest -from google.cloud.aiplatform_v1beta1.types.index_service import GetIndexRequest -from google.cloud.aiplatform_v1beta1.types.index_service import ListIndexesRequest -from google.cloud.aiplatform_v1beta1.types.index_service import ListIndexesResponse -from google.cloud.aiplatform_v1beta1.types.index_service import NearestNeighborSearchOperationMetadata -from google.cloud.aiplatform_v1beta1.types.index_service import UpdateIndexOperationMetadata -from google.cloud.aiplatform_v1beta1.types.index_service import UpdateIndexRequest -from google.cloud.aiplatform_v1beta1.types.io import AvroSource -from google.cloud.aiplatform_v1beta1.types.io import BigQueryDestination -from google.cloud.aiplatform_v1beta1.types.io import BigQuerySource -from google.cloud.aiplatform_v1beta1.types.io import ContainerRegistryDestination -from google.cloud.aiplatform_v1beta1.types.io import CsvDestination -from google.cloud.aiplatform_v1beta1.types.io import CsvSource -from google.cloud.aiplatform_v1beta1.types.io import GcsDestination -from google.cloud.aiplatform_v1beta1.types.io import GcsSource -from google.cloud.aiplatform_v1beta1.types.io import TFRecordDestination -from google.cloud.aiplatform_v1beta1.types.job_service import CancelBatchPredictionJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import CancelCustomJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service 
import CancelDataLabelingJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import CancelHyperparameterTuningJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import CreateBatchPredictionJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import CreateCustomJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import CreateDataLabelingJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import CreateHyperparameterTuningJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import CreateModelDeploymentMonitoringJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import DeleteBatchPredictionJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import DeleteCustomJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import DeleteDataLabelingJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import DeleteHyperparameterTuningJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import DeleteModelDeploymentMonitoringJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import GetBatchPredictionJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import GetCustomJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import GetDataLabelingJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import GetHyperparameterTuningJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import GetModelDeploymentMonitoringJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import ListBatchPredictionJobsRequest -from google.cloud.aiplatform_v1beta1.types.job_service import ListBatchPredictionJobsResponse -from google.cloud.aiplatform_v1beta1.types.job_service import ListCustomJobsRequest -from google.cloud.aiplatform_v1beta1.types.job_service import ListCustomJobsResponse -from google.cloud.aiplatform_v1beta1.types.job_service import ListDataLabelingJobsRequest -from google.cloud.aiplatform_v1beta1.types.job_service import ListDataLabelingJobsResponse -from google.cloud.aiplatform_v1beta1.types.job_service import ListHyperparameterTuningJobsRequest -from google.cloud.aiplatform_v1beta1.types.job_service import ListHyperparameterTuningJobsResponse -from google.cloud.aiplatform_v1beta1.types.job_service import ListModelDeploymentMonitoringJobsRequest -from google.cloud.aiplatform_v1beta1.types.job_service import ListModelDeploymentMonitoringJobsResponse -from google.cloud.aiplatform_v1beta1.types.job_service import PauseModelDeploymentMonitoringJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import ResumeModelDeploymentMonitoringJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import SearchModelDeploymentMonitoringStatsAnomaliesRequest -from google.cloud.aiplatform_v1beta1.types.job_service import SearchModelDeploymentMonitoringStatsAnomaliesResponse -from google.cloud.aiplatform_v1beta1.types.job_service import UpdateModelDeploymentMonitoringJobOperationMetadata -from google.cloud.aiplatform_v1beta1.types.job_service import UpdateModelDeploymentMonitoringJobRequest -from google.cloud.aiplatform_v1beta1.types.job_state import JobState -from google.cloud.aiplatform_v1beta1.types.lineage_subgraph import LineageSubgraph -from google.cloud.aiplatform_v1beta1.types.machine_resources import AutomaticResources -from google.cloud.aiplatform_v1beta1.types.machine_resources import AutoscalingMetricSpec -from 
google.cloud.aiplatform_v1beta1.types.machine_resources import BatchDedicatedResources -from google.cloud.aiplatform_v1beta1.types.machine_resources import DedicatedResources -from google.cloud.aiplatform_v1beta1.types.machine_resources import DiskSpec -from google.cloud.aiplatform_v1beta1.types.machine_resources import MachineSpec -from google.cloud.aiplatform_v1beta1.types.machine_resources import ResourcesConsumed -from google.cloud.aiplatform_v1beta1.types.manual_batch_tuning_parameters import ManualBatchTuningParameters -from google.cloud.aiplatform_v1beta1.types.metadata_schema import MetadataSchema -from google.cloud.aiplatform_v1beta1.types.metadata_service import AddContextArtifactsAndExecutionsRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import AddContextArtifactsAndExecutionsResponse -from google.cloud.aiplatform_v1beta1.types.metadata_service import AddContextChildrenRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import AddContextChildrenResponse -from google.cloud.aiplatform_v1beta1.types.metadata_service import AddExecutionEventsRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import AddExecutionEventsResponse -from google.cloud.aiplatform_v1beta1.types.metadata_service import CreateArtifactRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import CreateContextRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import CreateExecutionRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import CreateMetadataSchemaRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import CreateMetadataStoreOperationMetadata -from google.cloud.aiplatform_v1beta1.types.metadata_service import CreateMetadataStoreRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import DeleteContextRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import DeleteMetadataStoreOperationMetadata -from google.cloud.aiplatform_v1beta1.types.metadata_service import DeleteMetadataStoreRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import GetArtifactRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import GetContextRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import GetExecutionRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import GetMetadataSchemaRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import GetMetadataStoreRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import ListArtifactsRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import ListArtifactsResponse -from google.cloud.aiplatform_v1beta1.types.metadata_service import ListContextsRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import ListContextsResponse -from google.cloud.aiplatform_v1beta1.types.metadata_service import ListExecutionsRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import ListExecutionsResponse -from google.cloud.aiplatform_v1beta1.types.metadata_service import ListMetadataSchemasRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import ListMetadataSchemasResponse -from google.cloud.aiplatform_v1beta1.types.metadata_service import ListMetadataStoresRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import ListMetadataStoresResponse -from google.cloud.aiplatform_v1beta1.types.metadata_service import QueryArtifactLineageSubgraphRequest -from 
google.cloud.aiplatform_v1beta1.types.metadata_service import QueryContextLineageSubgraphRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import QueryExecutionInputsAndOutputsRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import UpdateArtifactRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import UpdateContextRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import UpdateExecutionRequest -from google.cloud.aiplatform_v1beta1.types.metadata_store import MetadataStore -from google.cloud.aiplatform_v1beta1.types.migratable_resource import MigratableResource -from google.cloud.aiplatform_v1beta1.types.migration_service import BatchMigrateResourcesOperationMetadata -from google.cloud.aiplatform_v1beta1.types.migration_service import BatchMigrateResourcesRequest -from google.cloud.aiplatform_v1beta1.types.migration_service import BatchMigrateResourcesResponse -from google.cloud.aiplatform_v1beta1.types.migration_service import MigrateResourceRequest -from google.cloud.aiplatform_v1beta1.types.migration_service import MigrateResourceResponse -from google.cloud.aiplatform_v1beta1.types.migration_service import SearchMigratableResourcesRequest -from google.cloud.aiplatform_v1beta1.types.migration_service import SearchMigratableResourcesResponse -from google.cloud.aiplatform_v1beta1.types.model import Model -from google.cloud.aiplatform_v1beta1.types.model import ModelContainerSpec -from google.cloud.aiplatform_v1beta1.types.model import Port -from google.cloud.aiplatform_v1beta1.types.model import PredictSchemata -from google.cloud.aiplatform_v1beta1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringBigQueryTable -from google.cloud.aiplatform_v1beta1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringJob -from google.cloud.aiplatform_v1beta1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringObjectiveConfig -from google.cloud.aiplatform_v1beta1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringScheduleConfig -from google.cloud.aiplatform_v1beta1.types.model_deployment_monitoring_job import ModelMonitoringStatsAnomalies -from google.cloud.aiplatform_v1beta1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringObjectiveType -from google.cloud.aiplatform_v1beta1.types.model_evaluation import ModelEvaluation -from google.cloud.aiplatform_v1beta1.types.model_evaluation_slice import ModelEvaluationSlice -from google.cloud.aiplatform_v1beta1.types.model_monitoring import ModelMonitoringAlertConfig -from google.cloud.aiplatform_v1beta1.types.model_monitoring import ModelMonitoringObjectiveConfig -from google.cloud.aiplatform_v1beta1.types.model_monitoring import SamplingStrategy -from google.cloud.aiplatform_v1beta1.types.model_monitoring import ThresholdConfig -from google.cloud.aiplatform_v1beta1.types.model_service import DeleteModelRequest -from google.cloud.aiplatform_v1beta1.types.model_service import ExportModelOperationMetadata -from google.cloud.aiplatform_v1beta1.types.model_service import ExportModelRequest -from google.cloud.aiplatform_v1beta1.types.model_service import ExportModelResponse -from google.cloud.aiplatform_v1beta1.types.model_service import GetModelEvaluationRequest -from google.cloud.aiplatform_v1beta1.types.model_service import GetModelEvaluationSliceRequest -from google.cloud.aiplatform_v1beta1.types.model_service import GetModelRequest -from google.cloud.aiplatform_v1beta1.types.model_service import 
ListModelEvaluationSlicesRequest -from google.cloud.aiplatform_v1beta1.types.model_service import ListModelEvaluationSlicesResponse -from google.cloud.aiplatform_v1beta1.types.model_service import ListModelEvaluationsRequest -from google.cloud.aiplatform_v1beta1.types.model_service import ListModelEvaluationsResponse -from google.cloud.aiplatform_v1beta1.types.model_service import ListModelsRequest -from google.cloud.aiplatform_v1beta1.types.model_service import ListModelsResponse -from google.cloud.aiplatform_v1beta1.types.model_service import UpdateModelRequest -from google.cloud.aiplatform_v1beta1.types.model_service import UploadModelOperationMetadata -from google.cloud.aiplatform_v1beta1.types.model_service import UploadModelRequest -from google.cloud.aiplatform_v1beta1.types.model_service import UploadModelResponse -from google.cloud.aiplatform_v1beta1.types.operation import DeleteOperationMetadata -from google.cloud.aiplatform_v1beta1.types.operation import GenericOperationMetadata -from google.cloud.aiplatform_v1beta1.types.pipeline_job import PipelineJob -from google.cloud.aiplatform_v1beta1.types.pipeline_job import PipelineJobDetail -from google.cloud.aiplatform_v1beta1.types.pipeline_job import PipelineTaskDetail -from google.cloud.aiplatform_v1beta1.types.pipeline_job import PipelineTaskExecutorDetail -from google.cloud.aiplatform_v1beta1.types.pipeline_service import CancelPipelineJobRequest -from google.cloud.aiplatform_v1beta1.types.pipeline_service import CancelTrainingPipelineRequest -from google.cloud.aiplatform_v1beta1.types.pipeline_service import CreatePipelineJobRequest -from google.cloud.aiplatform_v1beta1.types.pipeline_service import CreateTrainingPipelineRequest -from google.cloud.aiplatform_v1beta1.types.pipeline_service import DeletePipelineJobRequest -from google.cloud.aiplatform_v1beta1.types.pipeline_service import DeleteTrainingPipelineRequest -from google.cloud.aiplatform_v1beta1.types.pipeline_service import GetPipelineJobRequest -from google.cloud.aiplatform_v1beta1.types.pipeline_service import GetTrainingPipelineRequest -from google.cloud.aiplatform_v1beta1.types.pipeline_service import ListPipelineJobsRequest -from google.cloud.aiplatform_v1beta1.types.pipeline_service import ListPipelineJobsResponse -from google.cloud.aiplatform_v1beta1.types.pipeline_service import ListTrainingPipelinesRequest -from google.cloud.aiplatform_v1beta1.types.pipeline_service import ListTrainingPipelinesResponse -from google.cloud.aiplatform_v1beta1.types.pipeline_state import PipelineState -from google.cloud.aiplatform_v1beta1.types.prediction_service import ExplainRequest -from google.cloud.aiplatform_v1beta1.types.prediction_service import ExplainResponse -from google.cloud.aiplatform_v1beta1.types.prediction_service import PredictRequest -from google.cloud.aiplatform_v1beta1.types.prediction_service import PredictResponse -from google.cloud.aiplatform_v1beta1.types.specialist_pool import SpecialistPool -from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import CreateSpecialistPoolOperationMetadata -from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import CreateSpecialistPoolRequest -from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import DeleteSpecialistPoolRequest -from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import GetSpecialistPoolRequest -from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import ListSpecialistPoolsRequest -from 
google.cloud.aiplatform_v1beta1.types.specialist_pool_service import ListSpecialistPoolsResponse -from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import UpdateSpecialistPoolOperationMetadata -from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import UpdateSpecialistPoolRequest -from google.cloud.aiplatform_v1beta1.types.study import Measurement -from google.cloud.aiplatform_v1beta1.types.study import Study -from google.cloud.aiplatform_v1beta1.types.study import StudySpec -from google.cloud.aiplatform_v1beta1.types.study import Trial -from google.cloud.aiplatform_v1beta1.types.tensorboard import Tensorboard -from google.cloud.aiplatform_v1beta1.types.tensorboard_data import Scalar -from google.cloud.aiplatform_v1beta1.types.tensorboard_data import TensorboardBlob -from google.cloud.aiplatform_v1beta1.types.tensorboard_data import TensorboardBlobSequence -from google.cloud.aiplatform_v1beta1.types.tensorboard_data import TensorboardTensor -from google.cloud.aiplatform_v1beta1.types.tensorboard_data import TimeSeriesData -from google.cloud.aiplatform_v1beta1.types.tensorboard_data import TimeSeriesDataPoint -from google.cloud.aiplatform_v1beta1.types.tensorboard_experiment import TensorboardExperiment -from google.cloud.aiplatform_v1beta1.types.tensorboard_run import TensorboardRun -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import CreateTensorboardExperimentRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import CreateTensorboardOperationMetadata -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import CreateTensorboardRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import CreateTensorboardRunRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import CreateTensorboardTimeSeriesRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import DeleteTensorboardExperimentRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import DeleteTensorboardRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import DeleteTensorboardRunRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import DeleteTensorboardTimeSeriesRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ExportTensorboardTimeSeriesDataRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ExportTensorboardTimeSeriesDataResponse -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import GetTensorboardExperimentRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import GetTensorboardRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import GetTensorboardRunRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import GetTensorboardTimeSeriesRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardExperimentsRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardExperimentsResponse -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardRunsRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardRunsResponse -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardsRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardsResponse -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import 
ListTensorboardTimeSeriesRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardTimeSeriesResponse -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ReadTensorboardBlobDataRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ReadTensorboardBlobDataResponse -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ReadTensorboardTimeSeriesDataRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ReadTensorboardTimeSeriesDataResponse -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import UpdateTensorboardExperimentRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import UpdateTensorboardOperationMetadata -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import UpdateTensorboardRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import UpdateTensorboardRunRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import UpdateTensorboardTimeSeriesRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import WriteTensorboardRunDataRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import WriteTensorboardRunDataResponse -from google.cloud.aiplatform_v1beta1.types.tensorboard_time_series import TensorboardTimeSeries -from google.cloud.aiplatform_v1beta1.types.training_pipeline import FilterSplit -from google.cloud.aiplatform_v1beta1.types.training_pipeline import FractionSplit -from google.cloud.aiplatform_v1beta1.types.training_pipeline import InputDataConfig -from google.cloud.aiplatform_v1beta1.types.training_pipeline import PredefinedSplit -from google.cloud.aiplatform_v1beta1.types.training_pipeline import TimestampSplit -from google.cloud.aiplatform_v1beta1.types.training_pipeline import TrainingPipeline -from google.cloud.aiplatform_v1beta1.types.types import BoolArray -from google.cloud.aiplatform_v1beta1.types.types import DoubleArray -from google.cloud.aiplatform_v1beta1.types.types import Int64Array -from google.cloud.aiplatform_v1beta1.types.types import StringArray -from google.cloud.aiplatform_v1beta1.types.user_action_reference import UserActionReference -from google.cloud.aiplatform_v1beta1.types.value import Value -from google.cloud.aiplatform_v1beta1.types.vizier_service import AddTrialMeasurementRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import CheckTrialEarlyStoppingStateMetatdata -from google.cloud.aiplatform_v1beta1.types.vizier_service import CheckTrialEarlyStoppingStateRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import CheckTrialEarlyStoppingStateResponse -from google.cloud.aiplatform_v1beta1.types.vizier_service import CompleteTrialRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import CreateStudyRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import CreateTrialRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import DeleteStudyRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import DeleteTrialRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import GetStudyRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import GetTrialRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import ListOptimalTrialsRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import ListOptimalTrialsResponse -from google.cloud.aiplatform_v1beta1.types.vizier_service import 
ListStudiesRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import ListStudiesResponse -from google.cloud.aiplatform_v1beta1.types.vizier_service import ListTrialsRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import ListTrialsResponse -from google.cloud.aiplatform_v1beta1.types.vizier_service import LookupStudyRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import StopTrialRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import SuggestTrialsMetadata -from google.cloud.aiplatform_v1beta1.types.vizier_service import SuggestTrialsRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import SuggestTrialsResponse - -__all__ = ('DatasetServiceClient', - 'DatasetServiceAsyncClient', - 'EndpointServiceClient', - 'EndpointServiceAsyncClient', - 'FeaturestoreOnlineServingServiceClient', - 'FeaturestoreOnlineServingServiceAsyncClient', - 'FeaturestoreServiceClient', - 'FeaturestoreServiceAsyncClient', - 'IndexEndpointServiceClient', - 'IndexEndpointServiceAsyncClient', - 'IndexServiceClient', - 'IndexServiceAsyncClient', - 'JobServiceClient', - 'JobServiceAsyncClient', - 'MetadataServiceClient', - 'MetadataServiceAsyncClient', - 'MigrationServiceClient', - 'MigrationServiceAsyncClient', - 'ModelServiceClient', - 'ModelServiceAsyncClient', - 'PipelineServiceClient', - 'PipelineServiceAsyncClient', - 'PredictionServiceClient', - 'PredictionServiceAsyncClient', - 'SpecialistPoolServiceClient', - 'SpecialistPoolServiceAsyncClient', - 'TensorboardServiceClient', - 'TensorboardServiceAsyncClient', - 'VizierServiceClient', - 'VizierServiceAsyncClient', - 'AcceleratorType', - 'Annotation', - 'AnnotationSpec', - 'Artifact', - 'BatchPredictionJob', - 'CompletionStats', - 'Context', - 'ContainerSpec', - 'CustomJob', - 'CustomJobSpec', - 'PythonPackageSpec', - 'Scheduling', - 'WorkerPoolSpec', - 'DataItem', - 'ActiveLearningConfig', - 'DataLabelingJob', - 'SampleConfig', - 'TrainingConfig', - 'Dataset', - 'ExportDataConfig', - 'ImportDataConfig', - 'CreateDatasetOperationMetadata', - 'CreateDatasetRequest', - 'DeleteDatasetRequest', - 'ExportDataOperationMetadata', - 'ExportDataRequest', - 'ExportDataResponse', - 'GetAnnotationSpecRequest', - 'GetDatasetRequest', - 'ImportDataOperationMetadata', - 'ImportDataRequest', - 'ImportDataResponse', - 'ListAnnotationsRequest', - 'ListAnnotationsResponse', - 'ListDataItemsRequest', - 'ListDataItemsResponse', - 'ListDatasetsRequest', - 'ListDatasetsResponse', - 'UpdateDatasetRequest', - 'DeployedIndexRef', - 'DeployedModelRef', - 'EncryptionSpec', - 'DeployedModel', - 'Endpoint', - 'CreateEndpointOperationMetadata', - 'CreateEndpointRequest', - 'DeleteEndpointRequest', - 'DeployModelOperationMetadata', - 'DeployModelRequest', - 'DeployModelResponse', - 'GetEndpointRequest', - 'ListEndpointsRequest', - 'ListEndpointsResponse', - 'UndeployModelOperationMetadata', - 'UndeployModelRequest', - 'UndeployModelResponse', - 'UpdateEndpointRequest', - 'EntityType', - 'EnvVar', - 'Event', - 'Execution', - 'Attribution', - 'Explanation', - 'ExplanationMetadataOverride', - 'ExplanationParameters', - 'ExplanationSpec', - 'ExplanationSpecOverride', - 'FeatureNoiseSigma', - 'IntegratedGradientsAttribution', - 'ModelExplanation', - 'SampledShapleyAttribution', - 'SmoothGradConfig', - 'XraiAttribution', - 'ExplanationMetadata', - 'Feature', - 'FeatureStatsAnomaly', - 'FeatureSelector', - 'IdMatcher', - 'Featurestore', - 'FeaturestoreMonitoringConfig', - 'FeatureValue', - 'FeatureValueList', - 
'ReadFeatureValuesRequest', - 'ReadFeatureValuesResponse', - 'StreamingReadFeatureValuesRequest', - 'BatchCreateFeaturesOperationMetadata', - 'BatchCreateFeaturesRequest', - 'BatchCreateFeaturesResponse', - 'BatchReadFeatureValuesOperationMetadata', - 'BatchReadFeatureValuesRequest', - 'BatchReadFeatureValuesResponse', - 'CreateEntityTypeOperationMetadata', - 'CreateEntityTypeRequest', - 'CreateFeatureOperationMetadata', - 'CreateFeatureRequest', - 'CreateFeaturestoreOperationMetadata', - 'CreateFeaturestoreRequest', - 'DeleteEntityTypeRequest', - 'DeleteFeatureRequest', - 'DeleteFeaturestoreRequest', - 'DestinationFeatureSetting', - 'ExportFeatureValuesOperationMetadata', - 'ExportFeatureValuesRequest', - 'ExportFeatureValuesResponse', - 'FeatureValueDestination', - 'GetEntityTypeRequest', - 'GetFeatureRequest', - 'GetFeaturestoreRequest', - 'ImportFeatureValuesOperationMetadata', - 'ImportFeatureValuesRequest', - 'ImportFeatureValuesResponse', - 'ListEntityTypesRequest', - 'ListEntityTypesResponse', - 'ListFeaturesRequest', - 'ListFeaturesResponse', - 'ListFeaturestoresRequest', - 'ListFeaturestoresResponse', - 'SearchFeaturesRequest', - 'SearchFeaturesResponse', - 'UpdateEntityTypeRequest', - 'UpdateFeatureRequest', - 'UpdateFeaturestoreOperationMetadata', - 'UpdateFeaturestoreRequest', - 'HyperparameterTuningJob', - 'Index', - 'DeployedIndex', - 'DeployedIndexAuthConfig', - 'IndexEndpoint', - 'IndexPrivateEndpoints', - 'CreateIndexEndpointOperationMetadata', - 'CreateIndexEndpointRequest', - 'DeleteIndexEndpointRequest', - 'DeployIndexOperationMetadata', - 'DeployIndexRequest', - 'DeployIndexResponse', - 'GetIndexEndpointRequest', - 'ListIndexEndpointsRequest', - 'ListIndexEndpointsResponse', - 'UndeployIndexOperationMetadata', - 'UndeployIndexRequest', - 'UndeployIndexResponse', - 'UpdateIndexEndpointRequest', - 'CreateIndexOperationMetadata', - 'CreateIndexRequest', - 'DeleteIndexRequest', - 'GetIndexRequest', - 'ListIndexesRequest', - 'ListIndexesResponse', - 'NearestNeighborSearchOperationMetadata', - 'UpdateIndexOperationMetadata', - 'UpdateIndexRequest', - 'AvroSource', - 'BigQueryDestination', - 'BigQuerySource', - 'ContainerRegistryDestination', - 'CsvDestination', - 'CsvSource', - 'GcsDestination', - 'GcsSource', - 'TFRecordDestination', - 'CancelBatchPredictionJobRequest', - 'CancelCustomJobRequest', - 'CancelDataLabelingJobRequest', - 'CancelHyperparameterTuningJobRequest', - 'CreateBatchPredictionJobRequest', - 'CreateCustomJobRequest', - 'CreateDataLabelingJobRequest', - 'CreateHyperparameterTuningJobRequest', - 'CreateModelDeploymentMonitoringJobRequest', - 'DeleteBatchPredictionJobRequest', - 'DeleteCustomJobRequest', - 'DeleteDataLabelingJobRequest', - 'DeleteHyperparameterTuningJobRequest', - 'DeleteModelDeploymentMonitoringJobRequest', - 'GetBatchPredictionJobRequest', - 'GetCustomJobRequest', - 'GetDataLabelingJobRequest', - 'GetHyperparameterTuningJobRequest', - 'GetModelDeploymentMonitoringJobRequest', - 'ListBatchPredictionJobsRequest', - 'ListBatchPredictionJobsResponse', - 'ListCustomJobsRequest', - 'ListCustomJobsResponse', - 'ListDataLabelingJobsRequest', - 'ListDataLabelingJobsResponse', - 'ListHyperparameterTuningJobsRequest', - 'ListHyperparameterTuningJobsResponse', - 'ListModelDeploymentMonitoringJobsRequest', - 'ListModelDeploymentMonitoringJobsResponse', - 'PauseModelDeploymentMonitoringJobRequest', - 'ResumeModelDeploymentMonitoringJobRequest', - 'SearchModelDeploymentMonitoringStatsAnomaliesRequest', - 
'SearchModelDeploymentMonitoringStatsAnomaliesResponse', - 'UpdateModelDeploymentMonitoringJobOperationMetadata', - 'UpdateModelDeploymentMonitoringJobRequest', - 'JobState', - 'LineageSubgraph', - 'AutomaticResources', - 'AutoscalingMetricSpec', - 'BatchDedicatedResources', - 'DedicatedResources', - 'DiskSpec', - 'MachineSpec', - 'ResourcesConsumed', - 'ManualBatchTuningParameters', - 'MetadataSchema', - 'AddContextArtifactsAndExecutionsRequest', - 'AddContextArtifactsAndExecutionsResponse', - 'AddContextChildrenRequest', - 'AddContextChildrenResponse', - 'AddExecutionEventsRequest', - 'AddExecutionEventsResponse', - 'CreateArtifactRequest', - 'CreateContextRequest', - 'CreateExecutionRequest', - 'CreateMetadataSchemaRequest', - 'CreateMetadataStoreOperationMetadata', - 'CreateMetadataStoreRequest', - 'DeleteContextRequest', - 'DeleteMetadataStoreOperationMetadata', - 'DeleteMetadataStoreRequest', - 'GetArtifactRequest', - 'GetContextRequest', - 'GetExecutionRequest', - 'GetMetadataSchemaRequest', - 'GetMetadataStoreRequest', - 'ListArtifactsRequest', - 'ListArtifactsResponse', - 'ListContextsRequest', - 'ListContextsResponse', - 'ListExecutionsRequest', - 'ListExecutionsResponse', - 'ListMetadataSchemasRequest', - 'ListMetadataSchemasResponse', - 'ListMetadataStoresRequest', - 'ListMetadataStoresResponse', - 'QueryArtifactLineageSubgraphRequest', - 'QueryContextLineageSubgraphRequest', - 'QueryExecutionInputsAndOutputsRequest', - 'UpdateArtifactRequest', - 'UpdateContextRequest', - 'UpdateExecutionRequest', - 'MetadataStore', - 'MigratableResource', - 'BatchMigrateResourcesOperationMetadata', - 'BatchMigrateResourcesRequest', - 'BatchMigrateResourcesResponse', - 'MigrateResourceRequest', - 'MigrateResourceResponse', - 'SearchMigratableResourcesRequest', - 'SearchMigratableResourcesResponse', - 'Model', - 'ModelContainerSpec', - 'Port', - 'PredictSchemata', - 'ModelDeploymentMonitoringBigQueryTable', - 'ModelDeploymentMonitoringJob', - 'ModelDeploymentMonitoringObjectiveConfig', - 'ModelDeploymentMonitoringScheduleConfig', - 'ModelMonitoringStatsAnomalies', - 'ModelDeploymentMonitoringObjectiveType', - 'ModelEvaluation', - 'ModelEvaluationSlice', - 'ModelMonitoringAlertConfig', - 'ModelMonitoringObjectiveConfig', - 'SamplingStrategy', - 'ThresholdConfig', - 'DeleteModelRequest', - 'ExportModelOperationMetadata', - 'ExportModelRequest', - 'ExportModelResponse', - 'GetModelEvaluationRequest', - 'GetModelEvaluationSliceRequest', - 'GetModelRequest', - 'ListModelEvaluationSlicesRequest', - 'ListModelEvaluationSlicesResponse', - 'ListModelEvaluationsRequest', - 'ListModelEvaluationsResponse', - 'ListModelsRequest', - 'ListModelsResponse', - 'UpdateModelRequest', - 'UploadModelOperationMetadata', - 'UploadModelRequest', - 'UploadModelResponse', - 'DeleteOperationMetadata', - 'GenericOperationMetadata', - 'PipelineJob', - 'PipelineJobDetail', - 'PipelineTaskDetail', - 'PipelineTaskExecutorDetail', - 'CancelPipelineJobRequest', - 'CancelTrainingPipelineRequest', - 'CreatePipelineJobRequest', - 'CreateTrainingPipelineRequest', - 'DeletePipelineJobRequest', - 'DeleteTrainingPipelineRequest', - 'GetPipelineJobRequest', - 'GetTrainingPipelineRequest', - 'ListPipelineJobsRequest', - 'ListPipelineJobsResponse', - 'ListTrainingPipelinesRequest', - 'ListTrainingPipelinesResponse', - 'PipelineState', - 'ExplainRequest', - 'ExplainResponse', - 'PredictRequest', - 'PredictResponse', - 'SpecialistPool', - 'CreateSpecialistPoolOperationMetadata', - 'CreateSpecialistPoolRequest', - 
'DeleteSpecialistPoolRequest', - 'GetSpecialistPoolRequest', - 'ListSpecialistPoolsRequest', - 'ListSpecialistPoolsResponse', - 'UpdateSpecialistPoolOperationMetadata', - 'UpdateSpecialistPoolRequest', - 'Measurement', - 'Study', - 'StudySpec', - 'Trial', - 'Tensorboard', - 'Scalar', - 'TensorboardBlob', - 'TensorboardBlobSequence', - 'TensorboardTensor', - 'TimeSeriesData', - 'TimeSeriesDataPoint', - 'TensorboardExperiment', - 'TensorboardRun', - 'CreateTensorboardExperimentRequest', - 'CreateTensorboardOperationMetadata', - 'CreateTensorboardRequest', - 'CreateTensorboardRunRequest', - 'CreateTensorboardTimeSeriesRequest', - 'DeleteTensorboardExperimentRequest', - 'DeleteTensorboardRequest', - 'DeleteTensorboardRunRequest', - 'DeleteTensorboardTimeSeriesRequest', - 'ExportTensorboardTimeSeriesDataRequest', - 'ExportTensorboardTimeSeriesDataResponse', - 'GetTensorboardExperimentRequest', - 'GetTensorboardRequest', - 'GetTensorboardRunRequest', - 'GetTensorboardTimeSeriesRequest', - 'ListTensorboardExperimentsRequest', - 'ListTensorboardExperimentsResponse', - 'ListTensorboardRunsRequest', - 'ListTensorboardRunsResponse', - 'ListTensorboardsRequest', - 'ListTensorboardsResponse', - 'ListTensorboardTimeSeriesRequest', - 'ListTensorboardTimeSeriesResponse', - 'ReadTensorboardBlobDataRequest', - 'ReadTensorboardBlobDataResponse', - 'ReadTensorboardTimeSeriesDataRequest', - 'ReadTensorboardTimeSeriesDataResponse', - 'UpdateTensorboardExperimentRequest', - 'UpdateTensorboardOperationMetadata', - 'UpdateTensorboardRequest', - 'UpdateTensorboardRunRequest', - 'UpdateTensorboardTimeSeriesRequest', - 'WriteTensorboardRunDataRequest', - 'WriteTensorboardRunDataResponse', - 'TensorboardTimeSeries', - 'FilterSplit', - 'FractionSplit', - 'InputDataConfig', - 'PredefinedSplit', - 'TimestampSplit', - 'TrainingPipeline', - 'BoolArray', - 'DoubleArray', - 'Int64Array', - 'StringArray', - 'UserActionReference', - 'Value', - 'AddTrialMeasurementRequest', - 'CheckTrialEarlyStoppingStateMetatdata', - 'CheckTrialEarlyStoppingStateRequest', - 'CheckTrialEarlyStoppingStateResponse', - 'CompleteTrialRequest', - 'CreateStudyRequest', - 'CreateTrialRequest', - 'DeleteStudyRequest', - 'DeleteTrialRequest', - 'GetStudyRequest', - 'GetTrialRequest', - 'ListOptimalTrialsRequest', - 'ListOptimalTrialsResponse', - 'ListStudiesRequest', - 'ListStudiesResponse', - 'ListTrialsRequest', - 'ListTrialsResponse', - 'LookupStudyRequest', - 'StopTrialRequest', - 'SuggestTrialsMetadata', - 'SuggestTrialsRequest', - 'SuggestTrialsResponse', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/py.typed deleted file mode 100644 index 228f1c51c6..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py deleted file mode 100644 index 5f9e065de0..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
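For context on the export surface deleted above: this staging copy of google/cloud/aiplatform/__init__.py re-exports every v1beta1 client and message in one flat namespace. A minimal usage sketch, assuming the staging package is importable as google.cloud.aiplatform; the project and location below are placeholders, and the names used come straight from the __all__ tuple above:

    from google.cloud import aiplatform

    # One import point for clients and their request messages.
    client = aiplatform.DatasetServiceClient()
    request = aiplatform.ListDatasetsRequest(
        parent="projects/my-project/locations/us-central1",
    )
    for dataset in client.list_datasets(request=request):
        print(dataset.display_name)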
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_classification import ImageClassificationPredictionInstance -from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_object_detection import ImageObjectDetectionPredictionInstance -from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_segmentation import ImageSegmentationPredictionInstance -from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.text_classification import TextClassificationPredictionInstance -from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.text_extraction import TextExtractionPredictionInstance -from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.text_sentiment import TextSentimentPredictionInstance -from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.video_action_recognition import VideoActionRecognitionPredictionInstance -from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.video_classification import VideoClassificationPredictionInstance -from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.video_object_tracking import VideoObjectTrackingPredictionInstance - -__all__ = ('ImageClassificationPredictionInstance', - 'ImageObjectDetectionPredictionInstance', - 'ImageSegmentationPredictionInstance', - 'TextClassificationPredictionInstance', - 'TextExtractionPredictionInstance', - 'TextSentimentPredictionInstance', - 'VideoActionRecognitionPredictionInstance', - 'VideoClassificationPredictionInstance', - 'VideoObjectTrackingPredictionInstance', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed deleted file mode 100644 index 46ccbaf568..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1beta1-schema-predict-instance package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py deleted file mode 100644 index 41ab5407a7..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# - - -from .types.image_classification import ImageClassificationPredictionInstance -from .types.image_object_detection import ImageObjectDetectionPredictionInstance -from .types.image_segmentation import ImageSegmentationPredictionInstance -from .types.text_classification import TextClassificationPredictionInstance -from .types.text_extraction import TextExtractionPredictionInstance -from .types.text_sentiment import TextSentimentPredictionInstance -from .types.video_action_recognition import VideoActionRecognitionPredictionInstance -from .types.video_classification import VideoClassificationPredictionInstance -from .types.video_object_tracking import VideoObjectTrackingPredictionInstance - -__all__ = ( -'ImageClassificationPredictionInstance', -'ImageObjectDetectionPredictionInstance', -'ImageSegmentationPredictionInstance', -'TextClassificationPredictionInstance', -'TextExtractionPredictionInstance', -'TextSentimentPredictionInstance', -'VideoActionRecognitionPredictionInstance', -'VideoClassificationPredictionInstance', -'VideoObjectTrackingPredictionInstance', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_metadata.json deleted file mode 100644 index 38379e8208..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_metadata.json +++ /dev/null @@ -1,7 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1", - "protoPackage": "google.cloud.aiplatform.v1beta1.schema.predict.instance", - "schema": "1.0" -} diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed deleted file mode 100644 index 46ccbaf568..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1beta1-schema-predict-instance package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/services/__init__.py deleted file mode 100644 index 4de65971c2..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
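The pair of __init__.py files above publish the same nine instance types twice: once under the unversioned alias google.cloud.aiplatform.v1beta1.schema.predict.instance, and once under the versioned package named in gapic_metadata.json. A minimal sketch using the versioned path:

    from google.cloud.aiplatform.v1beta1.schema.predict import instance_v1beta1

    # Any of the nine types in the __all__ above can be constructed directly.
    instance = instance_v1beta1.TextSentimentPredictionInstance(
        content="The service was excellent.",
        mime_type="text/plain",
    )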
-# diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py deleted file mode 100644 index 80a5332604..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .image_classification import ( - ImageClassificationPredictionInstance, -) -from .image_object_detection import ( - ImageObjectDetectionPredictionInstance, -) -from .image_segmentation import ( - ImageSegmentationPredictionInstance, -) -from .text_classification import ( - TextClassificationPredictionInstance, -) -from .text_extraction import ( - TextExtractionPredictionInstance, -) -from .text_sentiment import ( - TextSentimentPredictionInstance, -) -from .video_action_recognition import ( - VideoActionRecognitionPredictionInstance, -) -from .video_classification import ( - VideoClassificationPredictionInstance, -) -from .video_object_tracking import ( - VideoObjectTrackingPredictionInstance, -) - -__all__ = ( - 'ImageClassificationPredictionInstance', - 'ImageObjectDetectionPredictionInstance', - 'ImageSegmentationPredictionInstance', - 'TextClassificationPredictionInstance', - 'TextExtractionPredictionInstance', - 'TextSentimentPredictionInstance', - 'VideoActionRecognitionPredictionInstance', - 'VideoClassificationPredictionInstance', - 'VideoObjectTrackingPredictionInstance', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py deleted file mode 100644 index aac9e2bc91..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py +++ /dev/null @@ -1,55 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.instance', - manifest={ - 'ImageClassificationPredictionInstance', - }, -) - - -class ImageClassificationPredictionInstance(proto.Message): - r"""Prediction input format for Image Classification. 
- Attributes: - content (str): - The image bytes or Cloud Storage URI to make - the prediction on. - mime_type (str): - The MIME type of the content of the image. - Only the images in below listed MIME types are - supported. - image/jpeg - - image/gif - - image/png - - image/webp - - image/bmp - - image/tiff - - image/vnd.microsoft.icon - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py deleted file mode 100644 index 80a5d797a1..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py +++ /dev/null @@ -1,55 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.instance', - manifest={ - 'ImageObjectDetectionPredictionInstance', - }, -) - - -class ImageObjectDetectionPredictionInstance(proto.Message): - r"""Prediction input format for Image Object Detection. - Attributes: - content (str): - The image bytes or Cloud Storage URI to make - the prediction on. - mime_type (str): - The MIME type of the content of the image. - Only the images in below listed MIME types are - supported. - image/jpeg - - image/gif - - image/png - - image/webp - - image/bmp - - image/tiff - - image/vnd.microsoft.icon - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py deleted file mode 100644 index e1b5cfc21f..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py +++ /dev/null @@ -1,49 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
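A short sketch of building the image instances defined above. The docstrings allow either raw image bytes or a Cloud Storage URI in `content`; sending the bytes base64-encoded, as below, is an assumption of this sketch, and `cat.jpg` is a placeholder file:

    import base64

    from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1 import (
        ImageClassificationPredictionInstance,
    )

    with open("cat.jpg", "rb") as f:
        encoded = base64.b64encode(f.read()).decode("utf-8")

    instance = ImageClassificationPredictionInstance(
        content=encoded,         # image bytes (base64) or a gs:// URI
        mime_type="image/jpeg",  # one of the MIME types listed above
    )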
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.instance', - manifest={ - 'ImageSegmentationPredictionInstance', - }, -) - - -class ImageSegmentationPredictionInstance(proto.Message): - r"""Prediction input format for Image Segmentation. - Attributes: - content (str): - The image bytes to make the predictions on. - mime_type (str): - The MIME type of the content of the image. - Only the images in below listed MIME types are - supported. - image/jpeg - - image/png - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py deleted file mode 100644 index 0c1ea43a72..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py +++ /dev/null @@ -1,48 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.instance', - manifest={ - 'TextClassificationPredictionInstance', - }, -) - - -class TextClassificationPredictionInstance(proto.Message): - r"""Prediction input format for Text Classification. - Attributes: - content (str): - The text snippet to make the predictions on. - mime_type (str): - The MIME type of the text snippet. The - supported MIME types are listed below. - - text/plain - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py deleted file mode 100644 index 8f47b27080..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.instance', - manifest={ - 'TextExtractionPredictionInstance', - }, -) - - -class TextExtractionPredictionInstance(proto.Message): - r"""Prediction input format for Text Extraction. - Attributes: - content (str): - The text snippet to make the predictions on. - mime_type (str): - The MIME type of the text snippet. The - supported MIME types are listed below. - - text/plain - key (str): - This field is only used for batch prediction. - If a key is provided, the batch prediction - result will by mapped to this key. If omitted, - then the batch prediction result will contain - the entire input instance. Vertex AI will not - check if keys in the request are duplicates, so - it is up to the caller to ensure the keys are - unique. - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - key = proto.Field( - proto.STRING, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py deleted file mode 100644 index ab416779b6..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py +++ /dev/null @@ -1,48 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.instance', - manifest={ - 'TextSentimentPredictionInstance', - }, -) - - -class TextSentimentPredictionInstance(proto.Message): - r"""Prediction input format for Text Sentiment. - Attributes: - content (str): - The text snippet to make the predictions on. - mime_type (str): - The MIME type of the text snippet. The - supported MIME types are listed below. - - text/plain - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py deleted file mode 100644 index c7a76efda2..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py +++ /dev/null @@ -1,72 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
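The `key` field documented above exists only for batch prediction, where it lets each result be mapped back to its input. A minimal sketch with placeholder document IDs; as the docstring warns, keeping keys unique is the caller's responsibility:

    from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1 import (
        TextExtractionPredictionInstance,
    )

    documents = {
        "doc-1": "Alice flew to Paris on Monday.",
        "doc-2": "Bob works for Example Corp in Berlin.",
    }

    # Keyed batch instances: results are mapped back via `key`.
    instances = [
        TextExtractionPredictionInstance(content=text, mime_type="text/plain", key=key)
        for key, text in documents.items()
    ]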
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.instance', - manifest={ - 'VideoActionRecognitionPredictionInstance', - }, -) - - -class VideoActionRecognitionPredictionInstance(proto.Message): - r"""Prediction input format for Video Action Recognition. - Attributes: - content (str): - The Google Cloud Storage location of the - video on which to perform the prediction. - mime_type (str): - The MIME type of the content of the video. - Only the following are supported: video/mp4 - video/avi video/quicktime - time_segment_start (str): - The beginning, inclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision. - time_segment_end (str): - The end, exclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision, and "inf" or "Infinity" - is allowed, which means the end of the video. - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.STRING, - number=3, - ) - time_segment_end = proto.Field( - proto.STRING, - number=4, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py deleted file mode 100644 index 56d662ef88..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py +++ /dev/null @@ -1,72 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.instance', - manifest={ - 'VideoClassificationPredictionInstance', - }, -) - - -class VideoClassificationPredictionInstance(proto.Message): - r"""Prediction input format for Video Classification. - Attributes: - content (str): - The Google Cloud Storage location of the - video on which to perform the prediction. - mime_type (str): - The MIME type of the content of the video. 
- Only the following are supported: video/mp4 - video/avi video/quicktime - time_segment_start (str): - The beginning, inclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision. - time_segment_end (str): - The end, exclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision, and "inf" or "Infinity" - is allowed, which means the end of the video. - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.STRING, - number=3, - ) - time_segment_end = proto.Field( - proto.STRING, - number=4, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py deleted file mode 100644 index 7344d419a8..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py +++ /dev/null @@ -1,72 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.instance', - manifest={ - 'VideoObjectTrackingPredictionInstance', - }, -) - - -class VideoObjectTrackingPredictionInstance(proto.Message): - r"""Prediction input format for Video Object Tracking. - Attributes: - content (str): - The Google Cloud Storage location of the - video on which to perform the prediction. - mime_type (str): - The MIME type of the content of the video. - Only the following are supported: video/mp4 - video/avi video/quicktime - time_segment_start (str): - The beginning, inclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision. - time_segment_end (str): - The end, exclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision, and "inf" or "Infinity" - is allowed, which means the end of the video. 
- """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.STRING, - number=3, - ) - time_segment_end = proto.Field( - proto.STRING, - number=4, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py deleted file mode 100644 index 464c39f26c..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_classification import ImageClassificationPredictionParams -from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_object_detection import ImageObjectDetectionPredictionParams -from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_segmentation import ImageSegmentationPredictionParams -from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.video_action_recognition import VideoActionRecognitionPredictionParams -from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.video_classification import VideoClassificationPredictionParams -from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.video_object_tracking import VideoObjectTrackingPredictionParams - -__all__ = ('ImageClassificationPredictionParams', - 'ImageObjectDetectionPredictionParams', - 'ImageSegmentationPredictionParams', - 'VideoActionRecognitionPredictionParams', - 'VideoClassificationPredictionParams', - 'VideoObjectTrackingPredictionParams', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed deleted file mode 100644 index acdcd7bc60..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1beta1-schema-predict-params package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py deleted file mode 100644 index 91b718b437..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -from .types.image_classification import ImageClassificationPredictionParams -from .types.image_object_detection import ImageObjectDetectionPredictionParams -from .types.image_segmentation import ImageSegmentationPredictionParams -from .types.video_action_recognition import VideoActionRecognitionPredictionParams -from .types.video_classification import VideoClassificationPredictionParams -from .types.video_object_tracking import VideoObjectTrackingPredictionParams - -__all__ = ( -'ImageClassificationPredictionParams', -'ImageObjectDetectionPredictionParams', -'ImageSegmentationPredictionParams', -'VideoActionRecognitionPredictionParams', -'VideoClassificationPredictionParams', -'VideoObjectTrackingPredictionParams', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_metadata.json deleted file mode 100644 index 6b925dd9dc..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_metadata.json +++ /dev/null @@ -1,7 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1", - "protoPackage": "google.cloud.aiplatform.v1beta1.schema.predict.params", - "schema": "1.0" -} diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed deleted file mode 100644 index acdcd7bc60..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1beta1-schema-predict-params package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/services/__init__.py deleted file mode 100644 index 4de65971c2..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py deleted file mode 100644 index 70a92bb59c..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py +++ /dev/null @@ -1,42 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .image_classification import ( - ImageClassificationPredictionParams, -) -from .image_object_detection import ( - ImageObjectDetectionPredictionParams, -) -from .image_segmentation import ( - ImageSegmentationPredictionParams, -) -from .video_action_recognition import ( - VideoActionRecognitionPredictionParams, -) -from .video_classification import ( - VideoClassificationPredictionParams, -) -from .video_object_tracking import ( - VideoObjectTrackingPredictionParams, -) - -__all__ = ( - 'ImageClassificationPredictionParams', - 'ImageObjectDetectionPredictionParams', - 'ImageSegmentationPredictionParams', - 'VideoActionRecognitionPredictionParams', - 'VideoClassificationPredictionParams', - 'VideoObjectTrackingPredictionParams', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py deleted file mode 100644 index 67c5453a93..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py +++ /dev/null @@ -1,51 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.params', - manifest={ - 'ImageClassificationPredictionParams', - }, -) - - -class ImageClassificationPredictionParams(proto.Message): - r"""Prediction model parameters for Image Classification. - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. Default value is - 0.0 - max_predictions (int): - The Model only returns up to that many top, - by confidence score, predictions per instance. - If this number is very high, the Model may - return fewer predictions. Default value is 10. 
- """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py deleted file mode 100644 index baed8905ee..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.params', - manifest={ - 'ImageObjectDetectionPredictionParams', - }, -) - - -class ImageObjectDetectionPredictionParams(proto.Message): - r"""Prediction model parameters for Image Object Detection. - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. Default value is - 0.0 - max_predictions (int): - The Model only returns up to that many top, - by confidence score, predictions per instance. - Note that number of returned predictions is also - limited by metadata's predictionsLimit. Default - value is 10. - """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py deleted file mode 100644 index 8a5e999504..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py +++ /dev/null @@ -1,44 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.params', - manifest={ - 'ImageSegmentationPredictionParams', - }, -) - - -class ImageSegmentationPredictionParams(proto.Message): - r"""Prediction model parameters for Image Segmentation. 
- Attributes: - confidence_threshold (float): - When the model predicts category of pixels of - the image, it will only provide predictions for - pixels that it is at least this much confident - about. All other pixels will be classified as - background. Default value is 0.5. - """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py deleted file mode 100644 index 37a8c2bc9c..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.params', - manifest={ - 'VideoActionRecognitionPredictionParams', - }, -) - - -class VideoActionRecognitionPredictionParams(proto.Message): - r"""Prediction model parameters for Video Action Recognition. - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. Default value is - 0.0 - max_predictions (int): - The model only returns up to that many top, - by confidence score, predictions per frame of - the video. If this number is very high, the - Model may return fewer predictions per frame. - Default value is 50. - """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py deleted file mode 100644 index ff7bbd1b86..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py +++ /dev/null @@ -1,94 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.params', - manifest={ - 'VideoClassificationPredictionParams', - }, -) - - -class VideoClassificationPredictionParams(proto.Message): - r"""Prediction model parameters for Video Classification. - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. Default value is - 0.0 - max_predictions (int): - The Model only returns up to that many top, - by confidence score, predictions per instance. - If this number is very high, the Model may - return fewer predictions. Default value is - 10,000. - segment_classification (bool): - Set to true to request segment-level - classification. Vertex AI returns labels and - their confidence scores for the entire time - segment of the video that user specified in the - input instance. Default value is true - shot_classification (bool): - Set to true to request shot-level - classification. Vertex AI determines the - boundaries for each camera shot in the entire - time segment of the video that user specified in - the input instance. Vertex AI then returns - labels and their confidence scores for each - detected shot, along with the start and end time - of the shot. - WARNING: Model evaluation is not done for this - classification type, the quality of it depends - on the training data, but there are no metrics - provided to describe that quality. - Default value is false - one_sec_interval_classification (bool): - Set to true to request classification for a - video at one-second intervals. Vertex AI returns - labels and their confidence scores for each - second of the entire time segment of the video - that user specified in the input WARNING: Model - evaluation is not done for this classification - type, the quality of it depends on the training - data, but there are no metrics provided to - describe that quality. Default value is false - """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - segment_classification = proto.Field( - proto.BOOL, - number=3, - ) - shot_classification = proto.Field( - proto.BOOL, - number=4, - ) - one_sec_interval_classification = proto.Field( - proto.BOOL, - number=5, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py deleted file mode 100644 index 4e0e97f8d6..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
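The video classification params above select among three labeling granularities; per the docstrings, shot-level and one-second-interval output carry no model-evaluation metrics, while segment-level classification is the default. A minimal sketch enabling segment and shot labels:

    from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1 import (
        VideoClassificationPredictionParams,
    )

    params = VideoClassificationPredictionParams(
        confidence_threshold=0.3,
        max_predictions=100,                    # documented default is 10,000
        segment_classification=True,            # default is true
        shot_classification=True,               # per-shot labels; no eval metrics
        one_sec_interval_classification=False,  # per-second labels; no eval metrics
    )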
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.params', - manifest={ - 'VideoObjectTrackingPredictionParams', - }, -) - - -class VideoObjectTrackingPredictionParams(proto.Message): - r"""Prediction model parameters for Video Object Tracking. - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. Default value is - 0.0 - max_predictions (int): - The model only returns up to that many top, - by confidence score, predictions per frame of - the video. If this number is very high, the - Model may return fewer predictions per frame. - Default value is 50. - min_bounding_box_size (float): - Only bounding boxes with shortest edge at - least that long as a relative value of video - frame size are returned. Default value is 0.0. - """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - min_bounding_box_size = proto.Field( - proto.FLOAT, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py deleted file mode 100644 index 0b54451ca0..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
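A minimal sketch of the tracking params defined above; min_bounding_box_size is relative to the frame, so 0.05 discards boxes whose shortest edge is under 5% of the frame size:

    from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1 import (
        VideoObjectTrackingPredictionParams,
    )

    params = VideoObjectTrackingPredictionParams(
        confidence_threshold=0.4,
        max_predictions=50,          # documented default per frame
        min_bounding_box_size=0.05,  # shortest edge >= 5% of frame size
    )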
-# - - -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.classification import ClassificationPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.image_object_detection import ImageObjectDetectionPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.image_segmentation import ImageSegmentationPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.tabular_classification import TabularClassificationPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.tabular_regression import TabularRegressionPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.text_extraction import TextExtractionPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.text_sentiment import TextSentimentPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.time_series_forecasting import TimeSeriesForecastingPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.video_action_recognition import VideoActionRecognitionPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.video_classification import VideoClassificationPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.video_object_tracking import VideoObjectTrackingPredictionResult - -__all__ = ('ClassificationPredictionResult', - 'ImageObjectDetectionPredictionResult', - 'ImageSegmentationPredictionResult', - 'TabularClassificationPredictionResult', - 'TabularRegressionPredictionResult', - 'TextExtractionPredictionResult', - 'TextSentimentPredictionResult', - 'TimeSeriesForecastingPredictionResult', - 'VideoActionRecognitionPredictionResult', - 'VideoClassificationPredictionResult', - 'VideoObjectTrackingPredictionResult', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed deleted file mode 100644 index 8cf97d7107..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1beta1-schema-predict-prediction package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py deleted file mode 100644 index 495759c24b..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py +++ /dev/null @@ -1,42 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - - -from .types.classification import ClassificationPredictionResult -from .types.image_object_detection import ImageObjectDetectionPredictionResult -from .types.image_segmentation import ImageSegmentationPredictionResult -from .types.tabular_classification import TabularClassificationPredictionResult -from .types.tabular_regression import TabularRegressionPredictionResult -from .types.text_extraction import TextExtractionPredictionResult -from .types.text_sentiment import TextSentimentPredictionResult -from .types.time_series_forecasting import TimeSeriesForecastingPredictionResult -from .types.video_action_recognition import VideoActionRecognitionPredictionResult -from .types.video_classification import VideoClassificationPredictionResult -from .types.video_object_tracking import VideoObjectTrackingPredictionResult - -__all__ = ( -'ClassificationPredictionResult', -'ImageObjectDetectionPredictionResult', -'ImageSegmentationPredictionResult', -'TabularClassificationPredictionResult', -'TabularRegressionPredictionResult', -'TextExtractionPredictionResult', -'TextSentimentPredictionResult', -'TimeSeriesForecastingPredictionResult', -'VideoActionRecognitionPredictionResult', -'VideoClassificationPredictionResult', -'VideoObjectTrackingPredictionResult', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_metadata.json deleted file mode 100644 index 99d3dc6402..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_metadata.json +++ /dev/null @@ -1,7 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1", - "protoPackage": "google.cloud.aiplatform.v1beta1.schema.predict.prediction", - "schema": "1.0" -} diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed deleted file mode 100644 index 8cf97d7107..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1beta1-schema-predict-prediction package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/services/__init__.py deleted file mode 100644 index 4de65971c2..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py deleted file mode 100644 index f3b70f66dd..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py +++ /dev/null @@ -1,62 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .classification import ( - ClassificationPredictionResult, -) -from .image_object_detection import ( - ImageObjectDetectionPredictionResult, -) -from .image_segmentation import ( - ImageSegmentationPredictionResult, -) -from .tabular_classification import ( - TabularClassificationPredictionResult, -) -from .tabular_regression import ( - TabularRegressionPredictionResult, -) -from .text_extraction import ( - TextExtractionPredictionResult, -) -from .text_sentiment import ( - TextSentimentPredictionResult, -) -from .time_series_forecasting import ( - TimeSeriesForecastingPredictionResult, -) -from .video_action_recognition import ( - VideoActionRecognitionPredictionResult, -) -from .video_classification import ( - VideoClassificationPredictionResult, -) -from .video_object_tracking import ( - VideoObjectTrackingPredictionResult, -) - -__all__ = ( - 'ClassificationPredictionResult', - 'ImageObjectDetectionPredictionResult', - 'ImageSegmentationPredictionResult', - 'TabularClassificationPredictionResult', - 'TabularRegressionPredictionResult', - 'TextExtractionPredictionResult', - 'TextSentimentPredictionResult', - 'TimeSeriesForecastingPredictionResult', - 'VideoActionRecognitionPredictionResult', - 'VideoClassificationPredictionResult', - 'VideoObjectTrackingPredictionResult', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py deleted file mode 100644 index ecd16c7250..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-#
-import proto # type: ignore
-
-
-__protobuf__ = proto.module(
-    package='google.cloud.aiplatform.v1beta1.schema.predict.prediction',
-    manifest={
-        'ClassificationPredictionResult',
-    },
-)
-
-
-class ClassificationPredictionResult(proto.Message):
-    r"""Prediction output format for Image and Text Classification.
-    Attributes:
-        ids (Sequence[int]):
-            The resource IDs of the AnnotationSpecs that
-            had been identified.
-        display_names (Sequence[str]):
-            The display names of the AnnotationSpecs that
-            had been identified; order matches the IDs.
-        confidences (Sequence[float]):
-            The Model's confidences in the correctness of
-            the predicted IDs; a higher value means higher
-            confidence. Order matches the IDs.
-    """
-
-    ids = proto.RepeatedField(
-        proto.INT64,
-        number=1,
-    )
-    display_names = proto.RepeatedField(
-        proto.STRING,
-        number=2,
-    )
-    confidences = proto.RepeatedField(
-        proto.FLOAT,
-        number=3,
-    )
-
-
-__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py
deleted file mode 100644
index d787871e99..0000000000
--- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import proto # type: ignore
-
-from google.protobuf import struct_pb2 # type: ignore
-
-
-__protobuf__ = proto.module(
-    package='google.cloud.aiplatform.v1beta1.schema.predict.prediction',
-    manifest={
-        'ImageObjectDetectionPredictionResult',
-    },
-)
-
-
-class ImageObjectDetectionPredictionResult(proto.Message):
-    r"""Prediction output format for Image Object Detection.
-    Attributes:
-        ids (Sequence[int]):
-            The resource IDs of the AnnotationSpecs that
-            had been identified, ordered by confidence
-            score, descending.
-        display_names (Sequence[str]):
-            The display names of the AnnotationSpecs that
-            had been identified; order matches the IDs.
-        confidences (Sequence[float]):
-            The Model's confidences in the correctness of
-            the predicted IDs; a higher value means higher
-            confidence. Order matches the IDs.
-        bboxes (Sequence[google.protobuf.struct_pb2.ListValue]):
-            Bounding boxes, i.e. the rectangles over the image, that
-            pinpoint the found AnnotationSpecs. Given in order that
-            matches the IDs. Each bounding box is an array of 4 numbers
-            ``xMin``, ``xMax``, ``yMin``, and ``yMax``, which represent
-            the extremal coordinates of the box. They are relative to
-            the image size, and the point 0,0 is in the top left of the
-            image.
- """ - - ids = proto.RepeatedField( - proto.INT64, - number=1, - ) - display_names = proto.RepeatedField( - proto.STRING, - number=2, - ) - confidences = proto.RepeatedField( - proto.FLOAT, - number=3, - ) - bboxes = proto.RepeatedField( - proto.MESSAGE, - number=4, - message=struct_pb2.ListValue, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py deleted file mode 100644 index 92cc20720c..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', - manifest={ - 'ImageSegmentationPredictionResult', - }, -) - - -class ImageSegmentationPredictionResult(proto.Message): - r"""Prediction output format for Image Segmentation. - Attributes: - category_mask (str): - A PNG image where each pixel in the mask - represents the category in which the pixel in - the original image was predicted to belong to. - The size of this image will be the same as the - original image. The mapping between the - AnntoationSpec and the color can be found in - model's metadata. The model will choose the most - likely category and if none of the categories - reach the confidence threshold, the pixel will - be marked as background. - confidence_mask (str): - A one channel image which is encoded as an - 8bit lossless PNG. The size of the image will be - the same as the original image. For a specific - pixel, darker color means less confidence in - correctness of the cateogry in the categoryMask - for the corresponding pixel. Black means no - confidence and white means complete confidence. - """ - - category_mask = proto.Field( - proto.STRING, - number=1, - ) - confidence_mask = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py deleted file mode 100644 index 8a437022fd..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py +++ /dev/null @@ -1,51 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import proto # type: ignore
-
-
-__protobuf__ = proto.module(
-    package='google.cloud.aiplatform.v1beta1.schema.predict.prediction',
-    manifest={
-        'TabularClassificationPredictionResult',
-    },
-)
-
-
-class TabularClassificationPredictionResult(proto.Message):
-    r"""Prediction output format for Tabular Classification.
-    Attributes:
-        classes (Sequence[str]):
-            The names of the classes being classified;
-            contains all possible values of the target
-            column.
-        scores (Sequence[float]):
-            The Model's confidence in each class being
-            correct; a higher value means higher confidence.
-            The N-th score corresponds to the N-th class in
-            classes.
-    """
-
-    classes = proto.RepeatedField(
-        proto.STRING,
-        number=1,
-    )
-    scores = proto.RepeatedField(
-        proto.FLOAT,
-        number=2,
-    )
-
-
-__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py
deleted file mode 100644
index a49f6f55ce..0000000000
--- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import proto # type: ignore
-
-
-__protobuf__ = proto.module(
-    package='google.cloud.aiplatform.v1beta1.schema.predict.prediction',
-    manifest={
-        'TabularRegressionPredictionResult',
-    },
-)
-
-
-class TabularRegressionPredictionResult(proto.Message):
-    r"""Prediction output format for Tabular Regression.
-    Attributes:
-        value (float):
-            The regression value.
-        lower_bound (float):
-            The lower bound of the prediction interval.
-        upper_bound (float):
-            The upper bound of the prediction interval.
- """ - - value = proto.Field( - proto.FLOAT, - number=1, - ) - lower_bound = proto.Field( - proto.FLOAT, - number=2, - ) - upper_bound = proto.Field( - proto.FLOAT, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py deleted file mode 100644 index a92d9caefa..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py +++ /dev/null @@ -1,77 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', - manifest={ - 'TextExtractionPredictionResult', - }, -) - - -class TextExtractionPredictionResult(proto.Message): - r"""Prediction output format for Text Extraction. - Attributes: - ids (Sequence[int]): - The resource IDs of the AnnotationSpecs that - had been identified, ordered by the confidence - score descendingly. - display_names (Sequence[str]): - The display names of the AnnotationSpecs that - had been identified, order matches the IDs. - text_segment_start_offsets (Sequence[int]): - The start offsets, inclusive, of the text - segment in which the AnnotationSpec has been - identified. Expressed as a zero-based number of - characters as measured from the start of the - text snippet. - text_segment_end_offsets (Sequence[int]): - The end offsets, inclusive, of the text - segment in which the AnnotationSpec has been - identified. Expressed as a zero-based number of - characters as measured from the start of the - text snippet. - confidences (Sequence[float]): - The Model's confidences in correctness of the - predicted IDs, higher value means higher - confidence. Order matches the Ids. 
- """ - - ids = proto.RepeatedField( - proto.INT64, - number=1, - ) - display_names = proto.RepeatedField( - proto.STRING, - number=2, - ) - text_segment_start_offsets = proto.RepeatedField( - proto.INT64, - number=3, - ) - text_segment_end_offsets = proto.RepeatedField( - proto.INT64, - number=4, - ) - confidences = proto.RepeatedField( - proto.FLOAT, - number=5, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py deleted file mode 100644 index 4967b02aae..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py +++ /dev/null @@ -1,47 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', - manifest={ - 'TextSentimentPredictionResult', - }, -) - - -class TextSentimentPredictionResult(proto.Message): - r"""Prediction output format for Text Sentiment - Attributes: - sentiment (int): - The integer sentiment labels between 0 - (inclusive) and sentimentMax label (inclusive), - while 0 maps to the least positive sentiment and - sentimentMax maps to the most positive one. The - higher the score is, the more positive the - sentiment in the text snippet is. Note: - sentimentMax is an integer value between 1 - (inclusive) and 10 (inclusive). - """ - - sentiment = proto.Field( - proto.INT32, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py deleted file mode 100644 index 67a3cd9dff..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-#
-import proto # type: ignore
-
-
-__protobuf__ = proto.module(
-    package='google.cloud.aiplatform.v1beta1.schema.predict.prediction',
-    manifest={
-        'TimeSeriesForecastingPredictionResult',
-    },
-)
-
-
-class TimeSeriesForecastingPredictionResult(proto.Message):
-    r"""Prediction output format for Time Series Forecasting.
-    Attributes:
-        value (float):
-            The regression value.
-    """
-
-    value = proto.Field(
-        proto.FLOAT,
-        number=1,
-    )
-
-
-__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py
deleted file mode 100644
index bc53328da4..0000000000
--- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import proto # type: ignore
-
-from google.protobuf import duration_pb2 # type: ignore
-from google.protobuf import wrappers_pb2 # type: ignore
-
-
-__protobuf__ = proto.module(
-    package='google.cloud.aiplatform.v1beta1.schema.predict.prediction',
-    manifest={
-        'VideoActionRecognitionPredictionResult',
-    },
-)
-
-
-class VideoActionRecognitionPredictionResult(proto.Message):
-    r"""Prediction output format for Video Action Recognition.
-    Attributes:
-        id (str):
-            The resource ID of the AnnotationSpec that
-            had been identified.
-        display_name (str):
-            The display name of the AnnotationSpec that
-            had been identified.
-        time_segment_start (google.protobuf.duration_pb2.Duration):
-            The beginning, inclusive, of the video's time
-            segment in which the AnnotationSpec has been
-            identified. Expressed as a number of seconds as
-            measured from the start of the video, with
-            fractions up to a microsecond precision, and
-            with "s" appended at the end.
-        time_segment_end (google.protobuf.duration_pb2.Duration):
-            The end, exclusive, of the video's time
-            segment in which the AnnotationSpec has been
-            identified. Expressed as a number of seconds as
-            measured from the start of the video, with
-            fractions up to a microsecond precision, and
-            with "s" appended at the end.
-        confidence (google.protobuf.wrappers_pb2.FloatValue):
-            The Model's confidence in the correctness of
-            this prediction; a higher value means higher
-            confidence.
- """ - - id = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.MESSAGE, - number=4, - message=duration_pb2.Duration, - ) - time_segment_end = proto.Field( - proto.MESSAGE, - number=5, - message=duration_pb2.Duration, - ) - confidence = proto.Field( - proto.MESSAGE, - number=6, - message=wrappers_pb2.FloatValue, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py deleted file mode 100644 index 95439add5e..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py +++ /dev/null @@ -1,102 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', - manifest={ - 'VideoClassificationPredictionResult', - }, -) - - -class VideoClassificationPredictionResult(proto.Message): - r"""Prediction output format for Video Classification. - Attributes: - id (str): - The resource ID of the AnnotationSpec that - had been identified. - display_name (str): - The display name of the AnnotationSpec that - had been identified. - type_ (str): - The type of the prediction. The requested - types can be configured via parameters. This - will be one of - segment-classification - - shot-classification - - one-sec-interval-classification - time_segment_start (google.protobuf.duration_pb2.Duration): - The beginning, inclusive, of the video's time - segment in which the AnnotationSpec has been - identified. Expressed as a number of seconds as - measured from the start of the video, with - fractions up to a microsecond precision, and - with "s" appended at the end. Note that for - 'segment-classification' prediction type, this - equals the original 'timeSegmentStart' from the - input instance, for other types it is the start - of a shot or a 1 second interval respectively. - time_segment_end (google.protobuf.duration_pb2.Duration): - The end, exclusive, of the video's time - segment in which the AnnotationSpec has been - identified. Expressed as a number of seconds as - measured from the start of the video, with - fractions up to a microsecond precision, and - with "s" appended at the end. Note that for - 'segment-classification' prediction type, this - equals the original 'timeSegmentEnd' from the - input instance, for other types it is the end of - a shot or a 1 second interval respectively. 
-        confidence (google.protobuf.wrappers_pb2.FloatValue):
-            The Model's confidence in the correctness of
-            this prediction; a higher value means higher
-            confidence.
-    """
-
-    id = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-    display_name = proto.Field(
-        proto.STRING,
-        number=2,
-    )
-    type_ = proto.Field(
-        proto.STRING,
-        number=3,
-    )
-    time_segment_start = proto.Field(
-        proto.MESSAGE,
-        number=4,
-        message=duration_pb2.Duration,
-    )
-    time_segment_end = proto.Field(
-        proto.MESSAGE,
-        number=5,
-        message=duration_pb2.Duration,
-    )
-    confidence = proto.Field(
-        proto.MESSAGE,
-        number=6,
-        message=wrappers_pb2.FloatValue,
-    )
-
-
-__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py
deleted file mode 100644
index 34cf7ab1b9..0000000000
--- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py
+++ /dev/null
@@ -1,144 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import proto # type: ignore
-
-from google.protobuf import duration_pb2 # type: ignore
-from google.protobuf import wrappers_pb2 # type: ignore
-
-
-__protobuf__ = proto.module(
-    package='google.cloud.aiplatform.v1beta1.schema.predict.prediction',
-    manifest={
-        'VideoObjectTrackingPredictionResult',
-    },
-)
-
-
-class VideoObjectTrackingPredictionResult(proto.Message):
-    r"""Prediction output format for Video Object Tracking.
-    Attributes:
-        id (str):
-            The resource ID of the AnnotationSpec that
-            had been identified.
-        display_name (str):
-            The display name of the AnnotationSpec that
-            had been identified.
-        time_segment_start (google.protobuf.duration_pb2.Duration):
-            The beginning, inclusive, of the video's time
-            segment in which the object instance has been
-            detected. Expressed as a number of seconds as
-            measured from the start of the video, with
-            fractions up to a microsecond precision, and
-            with "s" appended at the end.
-        time_segment_end (google.protobuf.duration_pb2.Duration):
-            The end, inclusive, of the video's time
-            segment in which the object instance has been
-            detected. Expressed as a number of seconds as
-            measured from the start of the video, with
-            fractions up to a microsecond precision, and
-            with "s" appended at the end.
-        confidence (google.protobuf.wrappers_pb2.FloatValue):
-            The Model's confidence in the correctness of
-            this prediction; a higher value means higher
-            confidence.
-        frames (Sequence[google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.VideoObjectTrackingPredictionResult.Frame]):
-            All of the frames of the video in which a
-            single object instance has been detected. The
-            bounding boxes in the frames identify the same
-            object.
- """ - - class Frame(proto.Message): - r"""The fields ``xMin``, ``xMax``, ``yMin``, and ``yMax`` refer to a - bounding box, i.e. the rectangle over the video frame pinpointing - the found AnnotationSpec. The coordinates are relative to the frame - size, and the point 0,0 is in the top left of the frame. - - Attributes: - time_offset (google.protobuf.duration_pb2.Duration): - A time (frame) of a video in which the object - has been detected. Expressed as a number of - seconds as measured from the start of the video, - with fractions up to a microsecond precision, - and with "s" appended at the end. - x_min (google.protobuf.wrappers_pb2.FloatValue): - The leftmost coordinate of the bounding box. - x_max (google.protobuf.wrappers_pb2.FloatValue): - The rightmost coordinate of the bounding box. - y_min (google.protobuf.wrappers_pb2.FloatValue): - The topmost coordinate of the bounding box. - y_max (google.protobuf.wrappers_pb2.FloatValue): - The bottommost coordinate of the bounding - box. - """ - - time_offset = proto.Field( - proto.MESSAGE, - number=1, - message=duration_pb2.Duration, - ) - x_min = proto.Field( - proto.MESSAGE, - number=2, - message=wrappers_pb2.FloatValue, - ) - x_max = proto.Field( - proto.MESSAGE, - number=3, - message=wrappers_pb2.FloatValue, - ) - y_min = proto.Field( - proto.MESSAGE, - number=4, - message=wrappers_pb2.FloatValue, - ) - y_max = proto.Field( - proto.MESSAGE, - number=5, - message=wrappers_pb2.FloatValue, - ) - - id = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.MESSAGE, - number=3, - message=duration_pb2.Duration, - ) - time_segment_end = proto.Field( - proto.MESSAGE, - number=4, - message=duration_pb2.Duration, - ) - confidence = proto.Field( - proto.MESSAGE, - number=5, - message=wrappers_pb2.FloatValue, - ) - frames = proto.RepeatedField( - proto.MESSAGE, - number=6, - message=Frame, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py deleted file mode 100644 index b31ea1309d..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py +++ /dev/null @@ -1,75 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-#
-
-
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_classification import AutoMlImageClassification
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_classification import AutoMlImageClassificationInputs
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_classification import AutoMlImageClassificationMetadata
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_object_detection import AutoMlImageObjectDetection
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_object_detection import AutoMlImageObjectDetectionInputs
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_object_detection import AutoMlImageObjectDetectionMetadata
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_segmentation import AutoMlImageSegmentation
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_segmentation import AutoMlImageSegmentationInputs
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_segmentation import AutoMlImageSegmentationMetadata
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_tables import AutoMlTables
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_tables import AutoMlTablesInputs
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_tables import AutoMlTablesMetadata
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_classification import AutoMlTextClassification
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_classification import AutoMlTextClassificationInputs
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_extraction import AutoMlTextExtraction
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_extraction import AutoMlTextExtractionInputs
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_sentiment import AutoMlTextSentiment
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_sentiment import AutoMlTextSentimentInputs
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_time_series_forecasting import AutoMlForecasting
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_time_series_forecasting import AutoMlForecastingInputs
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_time_series_forecasting import AutoMlForecastingMetadata
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_action_recognition import AutoMlVideoActionRecognition
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_action_recognition import AutoMlVideoActionRecognitionInputs
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_classification import AutoMlVideoClassification
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_classification import AutoMlVideoClassificationInputs
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_object_tracking import AutoMlVideoObjectTracking
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_object_tracking import AutoMlVideoObjectTrackingInputs
-from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.export_evaluated_data_items_config import ExportEvaluatedDataItemsConfig
-
-__all__ = ('AutoMlImageClassification',
-    'AutoMlImageClassificationInputs',
-    'AutoMlImageClassificationMetadata',
-    'AutoMlImageObjectDetection',
-    'AutoMlImageObjectDetectionInputs',
-    'AutoMlImageObjectDetectionMetadata',
-    'AutoMlImageSegmentation',
-    'AutoMlImageSegmentationInputs',
-    'AutoMlImageSegmentationMetadata',
-    'AutoMlTables',
-    'AutoMlTablesInputs',
-    'AutoMlTablesMetadata',
-    'AutoMlTextClassification',
-    'AutoMlTextClassificationInputs',
-    'AutoMlTextExtraction',
-    'AutoMlTextExtractionInputs',
-    'AutoMlTextSentiment',
-    'AutoMlTextSentimentInputs',
-    'AutoMlForecasting',
-    'AutoMlForecastingInputs',
-    'AutoMlForecastingMetadata',
-    'AutoMlVideoActionRecognition',
-    'AutoMlVideoActionRecognitionInputs',
-    'AutoMlVideoClassification',
-    'AutoMlVideoClassificationInputs',
-    'AutoMlVideoObjectTracking',
-    'AutoMlVideoObjectTrackingInputs',
-    'ExportEvaluatedDataItemsConfig',
-)
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/py.typed
deleted file mode 100644
index 98af260cd7..0000000000
--- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/py.typed
+++ /dev/null
@@ -1,2 +0,0 @@
-# Marker file for PEP 561.
-# The google-cloud-aiplatform-v1beta1-schema-trainingjob-definition package uses inline types.
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py
deleted file mode 100644
index 83f5b0d33f..0000000000
--- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
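The unversioned definition package above simply re-exports the versioned definition_v1beta1 types, so both import paths name the same classes. A quick, hypothetical check of that aliasing, assuming the staged packages are importable:

from google.cloud.aiplatform.v1beta1.schema.trainingjob import definition
from google.cloud.aiplatform.v1beta1.schema.trainingjob import definition_v1beta1

# Both paths resolve to the class defined in definition_v1beta1.types, so
# object identity (not just equality) should hold.
assert definition.AutoMlForecasting is definition_v1beta1.AutoMlForecasting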
-#
-
-
-from .types.automl_image_classification import AutoMlImageClassification
-from .types.automl_image_classification import AutoMlImageClassificationInputs
-from .types.automl_image_classification import AutoMlImageClassificationMetadata
-from .types.automl_image_object_detection import AutoMlImageObjectDetection
-from .types.automl_image_object_detection import AutoMlImageObjectDetectionInputs
-from .types.automl_image_object_detection import AutoMlImageObjectDetectionMetadata
-from .types.automl_image_segmentation import AutoMlImageSegmentation
-from .types.automl_image_segmentation import AutoMlImageSegmentationInputs
-from .types.automl_image_segmentation import AutoMlImageSegmentationMetadata
-from .types.automl_tables import AutoMlTables
-from .types.automl_tables import AutoMlTablesInputs
-from .types.automl_tables import AutoMlTablesMetadata
-from .types.automl_text_classification import AutoMlTextClassification
-from .types.automl_text_classification import AutoMlTextClassificationInputs
-from .types.automl_text_extraction import AutoMlTextExtraction
-from .types.automl_text_extraction import AutoMlTextExtractionInputs
-from .types.automl_text_sentiment import AutoMlTextSentiment
-from .types.automl_text_sentiment import AutoMlTextSentimentInputs
-from .types.automl_time_series_forecasting import AutoMlForecasting
-from .types.automl_time_series_forecasting import AutoMlForecastingInputs
-from .types.automl_time_series_forecasting import AutoMlForecastingMetadata
-from .types.automl_video_action_recognition import AutoMlVideoActionRecognition
-from .types.automl_video_action_recognition import AutoMlVideoActionRecognitionInputs
-from .types.automl_video_classification import AutoMlVideoClassification
-from .types.automl_video_classification import AutoMlVideoClassificationInputs
-from .types.automl_video_object_tracking import AutoMlVideoObjectTracking
-from .types.automl_video_object_tracking import AutoMlVideoObjectTrackingInputs
-from .types.export_evaluated_data_items_config import ExportEvaluatedDataItemsConfig
-
-__all__ = (
-'AutoMlForecasting',
-'AutoMlForecastingInputs',
-'AutoMlForecastingMetadata',
-'AutoMlImageClassification',
-'AutoMlImageClassificationInputs',
-'AutoMlImageClassificationMetadata',
-'AutoMlImageObjectDetection',
-'AutoMlImageObjectDetectionInputs',
-'AutoMlImageObjectDetectionMetadata',
-'AutoMlImageSegmentation',
-'AutoMlImageSegmentationInputs',
-'AutoMlImageSegmentationMetadata',
-'AutoMlTables',
-'AutoMlTablesInputs',
-'AutoMlTablesMetadata',
-'AutoMlTextClassification',
-'AutoMlTextClassificationInputs',
-'AutoMlTextExtraction',
-'AutoMlTextExtractionInputs',
-'AutoMlTextSentiment',
-'AutoMlTextSentimentInputs',
-'AutoMlVideoActionRecognition',
-'AutoMlVideoActionRecognitionInputs',
-'AutoMlVideoClassification',
-'AutoMlVideoClassificationInputs',
-'AutoMlVideoObjectTracking',
-'AutoMlVideoObjectTrackingInputs',
-'ExportEvaluatedDataItemsConfig',
-)
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_metadata.json
deleted file mode 100644
index 6de794c90a..0000000000
--- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_metadata.json
+++ /dev/null
@@ -1,7 +0,0 @@
- {
-  "comment": "This file maps proto services/RPCs to the corresponding library clients/methods",
-  "language": "python",
-  "libraryPackage": "google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1",
-  "protoPackage": "google.cloud.aiplatform.v1beta1.schema.trainingjob.definition",
-  "schema": "1.0"
-}
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/py.typed
deleted file mode 100644
index 98af260cd7..0000000000
--- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/py.typed
+++ /dev/null
@@ -1,2 +0,0 @@
-# Marker file for PEP 561.
-# The google-cloud-aiplatform-v1beta1-schema-trainingjob-definition package uses inline types.
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/services/__init__.py
deleted file mode 100644
index 4de65971c2..0000000000
--- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/services/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py
deleted file mode 100644
index c120afb8d9..0000000000
--- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# -from .automl_image_classification import ( - AutoMlImageClassification, - AutoMlImageClassificationInputs, - AutoMlImageClassificationMetadata, -) -from .automl_image_object_detection import ( - AutoMlImageObjectDetection, - AutoMlImageObjectDetectionInputs, - AutoMlImageObjectDetectionMetadata, -) -from .automl_image_segmentation import ( - AutoMlImageSegmentation, - AutoMlImageSegmentationInputs, - AutoMlImageSegmentationMetadata, -) -from .automl_tables import ( - AutoMlTables, - AutoMlTablesInputs, - AutoMlTablesMetadata, -) -from .automl_text_classification import ( - AutoMlTextClassification, - AutoMlTextClassificationInputs, -) -from .automl_text_extraction import ( - AutoMlTextExtraction, - AutoMlTextExtractionInputs, -) -from .automl_text_sentiment import ( - AutoMlTextSentiment, - AutoMlTextSentimentInputs, -) -from .automl_time_series_forecasting import ( - AutoMlForecasting, - AutoMlForecastingInputs, - AutoMlForecastingMetadata, -) -from .automl_video_action_recognition import ( - AutoMlVideoActionRecognition, - AutoMlVideoActionRecognitionInputs, -) -from .automl_video_classification import ( - AutoMlVideoClassification, - AutoMlVideoClassificationInputs, -) -from .automl_video_object_tracking import ( - AutoMlVideoObjectTracking, - AutoMlVideoObjectTrackingInputs, -) -from .export_evaluated_data_items_config import ( - ExportEvaluatedDataItemsConfig, -) - -__all__ = ( - 'AutoMlImageClassification', - 'AutoMlImageClassificationInputs', - 'AutoMlImageClassificationMetadata', - 'AutoMlImageObjectDetection', - 'AutoMlImageObjectDetectionInputs', - 'AutoMlImageObjectDetectionMetadata', - 'AutoMlImageSegmentation', - 'AutoMlImageSegmentationInputs', - 'AutoMlImageSegmentationMetadata', - 'AutoMlTables', - 'AutoMlTablesInputs', - 'AutoMlTablesMetadata', - 'AutoMlTextClassification', - 'AutoMlTextClassificationInputs', - 'AutoMlTextExtraction', - 'AutoMlTextExtractionInputs', - 'AutoMlTextSentiment', - 'AutoMlTextSentimentInputs', - 'AutoMlForecasting', - 'AutoMlForecastingInputs', - 'AutoMlForecastingMetadata', - 'AutoMlVideoActionRecognition', - 'AutoMlVideoActionRecognitionInputs', - 'AutoMlVideoClassification', - 'AutoMlVideoClassificationInputs', - 'AutoMlVideoObjectTracking', - 'AutoMlVideoObjectTrackingInputs', - 'ExportEvaluatedDataItemsConfig', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py deleted file mode 100644 index 70afa83c40..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py +++ /dev/null @@ -1,156 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
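The budget fields in the AutoML image training inputs below are denominated in milli node hours. As a sanity check on the defaults quoted in the docstrings that follow (this sketch is not part of the patch), the arithmetic works out as:

# 1,000 milli node hours == 1 node hour.
# Image Classification default (cloud model type): 192,000 milli node hours
# = 192 node hours = 24 wall-clock hours across 8 nodes.
assert 192_000 // 1_000 == 24 * 8
# Image Object Detection default (cloud model types): 216,000 milli node
# hours = 216 node hours = 24 wall-clock hours across 9 nodes.
assert 216_000 // 1_000 == 24 * 9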
-#
-import proto # type: ignore
-
-
-__protobuf__ = proto.module(
-    package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition',
-    manifest={
-        'AutoMlImageClassification',
-        'AutoMlImageClassificationInputs',
-        'AutoMlImageClassificationMetadata',
-    },
-)
-
-
-class AutoMlImageClassification(proto.Message):
-    r"""A TrainingJob that trains and uploads an AutoML Image
-    Classification Model.
-
-    Attributes:
-        inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageClassificationInputs):
-            The input parameters of this TrainingJob.
-        metadata (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageClassificationMetadata):
-            The metadata information.
-    """
-
-    inputs = proto.Field(
-        proto.MESSAGE,
-        number=1,
-        message='AutoMlImageClassificationInputs',
-    )
-    metadata = proto.Field(
-        proto.MESSAGE,
-        number=2,
-        message='AutoMlImageClassificationMetadata',
-    )
-
-
-class AutoMlImageClassificationInputs(proto.Message):
-    r"""
-    Attributes:
-        model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageClassificationInputs.ModelType):
-
-        base_model_id (str):
-            The ID of the ``base`` model. If it is specified, the new
-            model will be trained based on the ``base`` model.
-            Otherwise, the new model will be trained from scratch. The
-            ``base`` model must be in the same Project and Location as
-            the new Model to train, and have the same modelType.
-        budget_milli_node_hours (int):
-            The training budget of creating this model, expressed in
-            milli node hours, i.e., a value of 1,000 in this field
-            means 1 node hour. The actual metadata.costMilliNodeHours
-            will be equal to or less than this value. If further model
-            training ceases to provide any improvements, it will stop
-            without using the full budget and the
-            metadata.successfulStopReason will be ``model-converged``.
-            Note, node_hour = actual_hour \* number_of_nodes_involved.
-            For modelType ``cloud``\ (default), the budget must be
-            between 8,000 and 800,000 milli node hours, inclusive. The
-            default value is 192,000, which represents one day in wall
-            time, assuming 8 nodes are used. For model types
-            ``mobile-tf-low-latency-1``, ``mobile-tf-versatile-1``,
-            ``mobile-tf-high-accuracy-1``, the training budget must be
-            between 1,000 and 100,000 milli node hours, inclusive. The
-            default value is 24,000, which represents one day in wall
-            time on the single node that is used.
-        disable_early_stopping (bool):
-            Use the entire training budget. This disables
-            the early stopping feature. When false, the
-            early stopping feature is enabled, which means
-            that AutoML Image Classification might stop
-            training before the entire training budget has
-            been used.
-        multi_label (bool):
-            If false, a single-label (multi-class) Model
-            will be trained (i.e. assuming that for each
-            image just up to one annotation may be
-            applicable). If true, a multi-label Model will
-            be trained (i.e. assuming that for each image
-            multiple annotations may be applicable).
- """ - class ModelType(proto.Enum): - r"""""" - MODEL_TYPE_UNSPECIFIED = 0 - CLOUD = 1 - MOBILE_TF_LOW_LATENCY_1 = 2 - MOBILE_TF_VERSATILE_1 = 3 - MOBILE_TF_HIGH_ACCURACY_1 = 4 - - model_type = proto.Field( - proto.ENUM, - number=1, - enum=ModelType, - ) - base_model_id = proto.Field( - proto.STRING, - number=2, - ) - budget_milli_node_hours = proto.Field( - proto.INT64, - number=3, - ) - disable_early_stopping = proto.Field( - proto.BOOL, - number=4, - ) - multi_label = proto.Field( - proto.BOOL, - number=5, - ) - - -class AutoMlImageClassificationMetadata(proto.Message): - r""" - Attributes: - cost_milli_node_hours (int): - The actual training cost of creating this - model, expressed in milli node hours, i.e. 1,000 - value in this field means 1 node hour. - Guaranteed to not exceed - inputs.budgetMilliNodeHours. - successful_stop_reason (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageClassificationMetadata.SuccessfulStopReason): - For successful job completions, this is the - reason why the job has finished. - """ - class SuccessfulStopReason(proto.Enum): - r"""""" - SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 - BUDGET_REACHED = 1 - MODEL_CONVERGED = 2 - - cost_milli_node_hours = proto.Field( - proto.INT64, - number=1, - ) - successful_stop_reason = proto.Field( - proto.ENUM, - number=2, - enum=SuccessfulStopReason, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py deleted file mode 100644 index eba2aa5fce..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py +++ /dev/null @@ -1,137 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', - manifest={ - 'AutoMlImageObjectDetection', - 'AutoMlImageObjectDetectionInputs', - 'AutoMlImageObjectDetectionMetadata', - }, -) - - -class AutoMlImageObjectDetection(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Image Object - Detection Model. - - Attributes: - inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageObjectDetectionInputs): - The input parameters of this TrainingJob. 
- metadata (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageObjectDetectionMetadata): - The metadata information - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlImageObjectDetectionInputs', - ) - metadata = proto.Field( - proto.MESSAGE, - number=2, - message='AutoMlImageObjectDetectionMetadata', - ) - - -class AutoMlImageObjectDetectionInputs(proto.Message): - r""" - Attributes: - model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageObjectDetectionInputs.ModelType): - - budget_milli_node_hours (int): - The training budget of creating this model, expressed in - milli node hours i.e. 1,000 value in this field means 1 node - hour. The actual metadata.costMilliNodeHours will be equal - or less than this value. If further model training ceases to - provide any improvements, it will stop without using the - full budget and the metadata.successfulStopReason will be - ``model-converged``. Note, node_hour = actual_hour \* - number_of_nodes_involved. For modelType - ``cloud``\ (default), the budget must be between 20,000 and - 900,000 milli node hours, inclusive. The default value is - 216,000 which represents one day in wall time, considering 9 - nodes are used. For model types ``mobile-tf-low-latency-1``, - ``mobile-tf-versatile-1``, ``mobile-tf-high-accuracy-1`` the - training budget must be between 1,000 and 100,000 milli node - hours, inclusive. The default value is 24,000 which - represents one day in wall time on a single node that is - used. - disable_early_stopping (bool): - Use the entire training budget. This disables - the early stopping feature. When false the early - stopping feature is enabled, which means that - AutoML Image Object Detection might stop - training before the entire training budget has - been used. - """ - class ModelType(proto.Enum): - r"""""" - MODEL_TYPE_UNSPECIFIED = 0 - CLOUD_HIGH_ACCURACY_1 = 1 - CLOUD_LOW_LATENCY_1 = 2 - MOBILE_TF_LOW_LATENCY_1 = 3 - MOBILE_TF_VERSATILE_1 = 4 - MOBILE_TF_HIGH_ACCURACY_1 = 5 - - model_type = proto.Field( - proto.ENUM, - number=1, - enum=ModelType, - ) - budget_milli_node_hours = proto.Field( - proto.INT64, - number=2, - ) - disable_early_stopping = proto.Field( - proto.BOOL, - number=3, - ) - - -class AutoMlImageObjectDetectionMetadata(proto.Message): - r""" - Attributes: - cost_milli_node_hours (int): - The actual training cost of creating this - model, expressed in milli node hours, i.e. 1,000 - value in this field means 1 node hour. - Guaranteed to not exceed - inputs.budgetMilliNodeHours. - successful_stop_reason (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageObjectDetectionMetadata.SuccessfulStopReason): - For successful job completions, this is the - reason why the job has finished. 
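A comparable sketch for the object-detection inputs above, under the same package-layout assumption (the budget shown is the documented default of one wall-clock day on 9 nodes):

# Illustrative only: build AutoMlImageObjectDetectionInputs as documented above.
from google.cloud.aiplatform.v1beta1.schema.trainingjob import definition_v1beta1

iod_inputs = definition_v1beta1.AutoMlImageObjectDetectionInputs(
    model_type=definition_v1beta1.AutoMlImageObjectDetectionInputs.ModelType.CLOUD_HIGH_ACCURACY_1,
    budget_milli_node_hours=216_000,  # documented default for the cloud model types
)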
- """
- class SuccessfulStopReason(proto.Enum):
- r""""""
- SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0
- BUDGET_REACHED = 1
- MODEL_CONVERGED = 2
-
- cost_milli_node_hours = proto.Field(
- proto.INT64,
- number=1,
- )
- successful_stop_reason = proto.Field(
- proto.ENUM,
- number=2,
- enum=SuccessfulStopReason,
- )
-
-
-__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py
deleted file mode 100644
index 1bf67523b2..0000000000
--- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import proto # type: ignore
-
-
-__protobuf__ = proto.module(
- package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition',
- manifest={
- 'AutoMlImageSegmentation',
- 'AutoMlImageSegmentationInputs',
- 'AutoMlImageSegmentationMetadata',
- },
-)
-
-
-class AutoMlImageSegmentation(proto.Message):
- r"""A TrainingJob that trains and uploads an AutoML Image
- Segmentation Model.
-
- Attributes:
- inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageSegmentationInputs):
- The input parameters of this TrainingJob.
- metadata (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageSegmentationMetadata):
- The metadata information.
- """
-
- inputs = proto.Field(
- proto.MESSAGE,
- number=1,
- message='AutoMlImageSegmentationInputs',
- )
- metadata = proto.Field(
- proto.MESSAGE,
- number=2,
- message='AutoMlImageSegmentationMetadata',
- )
-
-
-class AutoMlImageSegmentationInputs(proto.Message):
- r"""
- Attributes:
- model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageSegmentationInputs.ModelType):
-
- budget_milli_node_hours (int):
- The training budget of creating this model, expressed in
- milli node hours i.e. 1,000 value in this field means 1 node
- hour. The actual metadata.costMilliNodeHours will be equal
- or less than this value. If further model training ceases to
- provide any improvements, it will stop without using the
- full budget and the metadata.successfulStopReason will be
- ``model-converged``. Note, node_hour = actual_hour \*
- number_of_nodes_involved. Or actual_wall_clock_hours =
- train_budget_milli_node_hours / (number_of_nodes_involved \*
- 1000). For modelType ``cloud-high-accuracy-1``\ (default),
- the budget must be between 20,000 and 2,000,000 milli node
- hours, inclusive. The default value is 192,000 which
- represents one day in wall time (1000 milli \* 24 hours \* 8
- nodes).
- base_model_id (str):
- The ID of the ``base`` model.
If it is specified, the new - model will be trained based on the ``base`` model. - Otherwise, the new model will be trained from scratch. The - ``base`` model must be in the same Project and Location as - the new Model to train, and have the same modelType. - """ - class ModelType(proto.Enum): - r"""""" - MODEL_TYPE_UNSPECIFIED = 0 - CLOUD_HIGH_ACCURACY_1 = 1 - CLOUD_LOW_ACCURACY_1 = 2 - MOBILE_TF_LOW_LATENCY_1 = 3 - - model_type = proto.Field( - proto.ENUM, - number=1, - enum=ModelType, - ) - budget_milli_node_hours = proto.Field( - proto.INT64, - number=2, - ) - base_model_id = proto.Field( - proto.STRING, - number=3, - ) - - -class AutoMlImageSegmentationMetadata(proto.Message): - r""" - Attributes: - cost_milli_node_hours (int): - The actual training cost of creating this - model, expressed in milli node hours, i.e. 1,000 - value in this field means 1 node hour. - Guaranteed to not exceed - inputs.budgetMilliNodeHours. - successful_stop_reason (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageSegmentationMetadata.SuccessfulStopReason): - For successful job completions, this is the - reason why the job has finished. - """ - class SuccessfulStopReason(proto.Enum): - r"""""" - SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 - BUDGET_REACHED = 1 - MODEL_CONVERGED = 2 - - cost_milli_node_hours = proto.Field( - proto.INT64, - number=1, - ) - successful_stop_reason = proto.Field( - proto.ENUM, - number=2, - enum=SuccessfulStopReason, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py deleted file mode 100644 index 23d1375aea..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py +++ /dev/null @@ -1,499 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types import export_evaluated_data_items_config as gcastd_export_evaluated_data_items_config - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', - manifest={ - 'AutoMlTables', - 'AutoMlTablesInputs', - 'AutoMlTablesMetadata', - }, -) - - -class AutoMlTables(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Tables Model. - Attributes: - inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs): - The input parameters of this TrainingJob. - metadata (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesMetadata): - The metadata information. 
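Since one headline feature of this change is the new additional_experiments field on AutoMlTablesInputs, a hedged sketch of how a caller might populate these inputs, using the Transformation messages documented further down (the column name and experiment flag are hypothetical placeholders, not real values):

# Illustrative only: build AutoMlTablesInputs as documented in this file.
from google.cloud.aiplatform.v1beta1.schema.trainingjob import definition_v1beta1

tables_inputs = definition_v1beta1.AutoMlTablesInputs(
    prediction_type="classification",
    target_column="label",                 # hypothetical column name
    transformations=[
        definition_v1beta1.AutoMlTablesInputs.Transformation(
            auto=definition_v1beta1.AutoMlTablesInputs.Transformation.AutoTransformation(
                column_name="label",       # let the pipeline infer the transformation
            ),
        ),
    ],
    train_budget_milli_node_hours=1_000,   # documented minimum
    additional_experiments=["placeholder_experiment_flag"],  # hypothetical flag; opaque to the client
)
tables_job = definition_v1beta1.AutoMlTables(inputs=tables_inputs)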
- """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlTablesInputs', - ) - metadata = proto.Field( - proto.MESSAGE, - number=2, - message='AutoMlTablesMetadata', - ) - - -class AutoMlTablesInputs(proto.Message): - r""" - Attributes: - optimization_objective_recall_value (float): - Required when optimization_objective is - "maximize-precision-at-recall". Must be between 0 and 1, - inclusive. - optimization_objective_precision_value (float): - Required when optimization_objective is - "maximize-recall-at-precision". Must be between 0 and 1, - inclusive. - prediction_type (str): - The type of prediction the Model is to - produce. "classification" - Predict one out of - multiple target values is - picked for each row. - "regression" - Predict a value based on its - relation to other values. This - type is available only to columns that contain - semantically numeric values, i.e. integers or - floating point number, even if - stored as e.g. strings. - target_column (str): - The column name of the target column that the - model is to predict. - transformations (Sequence[google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation]): - Each transformation will apply transform - function to given input column. And the result - will be used for training. When creating - transformation for BigQuery Struct column, the - column should be flattened using "." as the - delimiter. - optimization_objective (str): - Objective function the model is optimizing - towards. The training process creates a model - that maximizes/minimizes the value of the - objective function over the validation set. - - The supported optimization objectives depend on - the prediction type. If the field is not set, a - default objective function is used. - classification (binary): - "maximize-au-roc" (default) - Maximize the - area under the receiver - operating characteristic (ROC) curve. - "minimize-log-loss" - Minimize log loss. - "maximize-au-prc" - Maximize the area under - the precision-recall curve. "maximize- - precision-at-recall" - Maximize precision for a - specified - recall value. "maximize-recall-at-precision" - - Maximize recall for a specified - precision value. - classification (multi-class): - "minimize-log-loss" (default) - Minimize log - loss. - regression: - "minimize-rmse" (default) - Minimize root- - mean-squared error (RMSE). "minimize-mae" - - Minimize mean-absolute error (MAE). "minimize- - rmsle" - Minimize root-mean-squared log error - (RMSLE). - train_budget_milli_node_hours (int): - Required. The train budget of creating this - model, expressed in milli node hours i.e. 1,000 - value in this field means 1 node hour. - The training cost of the model will not exceed - this budget. The final cost will be attempted to - be close to the budget, though may end up being - (even) noticeably smaller - at the backend's - discretion. This especially may happen when - further model training ceases to provide any - improvements. - If the budget is set to a value known to be - insufficient to train a model for the given - dataset, the training won't be attempted and - will error. - - The train budget must be between 1,000 and - 72,000 milli node hours, inclusive. - disable_early_stopping (bool): - Use the entire training budget. This disables - the early stopping feature. By default, the - early stopping feature is enabled, which means - that AutoML Tables might stop training before - the entire training budget has been used. 
- weight_column_name (str):
- Column name that should be used as the weight
- column. Higher values in this column give more
- importance to the row during model training. The
- column must have numeric values between 0 and
- 10000 inclusively; 0 means the row is ignored
- for training. If weight column field is not set,
- then all rows are assumed to have equal weight
- of 1.
- export_evaluated_data_items_config (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.ExportEvaluatedDataItemsConfig):
- Configuration for exporting test set
- predictions to a BigQuery table. If this
- configuration is absent, then the export is not
- performed.
- additional_experiments (Sequence[str]):
- Additional experiment flags for the Tables
- training pipeline.
- """
-
- class Transformation(proto.Message):
- r"""
- Attributes:
- auto (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.AutoTransformation):
-
- numeric (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.NumericTransformation):
-
- categorical (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.CategoricalTransformation):
-
- timestamp (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.TimestampTransformation):
-
- text (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.TextTransformation):
-
- repeated_numeric (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.NumericArrayTransformation):
-
- repeated_categorical (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.CategoricalArrayTransformation):
-
- repeated_text (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.TextArrayTransformation):
-
- """
-
- class AutoTransformation(proto.Message):
- r"""Training pipeline will infer the proper transformation based
- on the statistics of the dataset.
-
- Attributes:
- column_name (str):
-
- """
-
- column_name = proto.Field(
- proto.STRING,
- number=1,
- )
-
- class NumericTransformation(proto.Message):
- r"""Training pipeline will perform following transformation functions.
-
- - The value converted to float32.
- - The z_score of the value.
- - log(value+1) when the value is greater than or equal to 0.
- Otherwise, this transformation is not applied and the value is
- considered a missing value.
- - z_score of log(value+1) when the value is greater than or equal
- to 0. Otherwise, this transformation is not applied and the value
- is considered a missing value.
- - A boolean value that indicates whether the value is valid.
-
- Attributes:
- column_name (str):
-
- invalid_values_allowed (bool):
- If invalid values are allowed, the training
- pipeline will create a boolean feature that
- indicates whether the value is valid. Otherwise,
- the training pipeline will discard the input row
- from training data.
- """
-
- column_name = proto.Field(
- proto.STRING,
- number=1,
- )
- invalid_values_allowed = proto.Field(
- proto.BOOL,
- number=2,
- )
-
- class CategoricalTransformation(proto.Message):
- r"""Training pipeline will perform following transformation functions.
-
- - The categorical string as is--no change to case, punctuation,
- spelling, tense, and so on.
- - Convert the category name to a dictionary lookup index and
- generate an embedding for each index.
- - Categories that appear less than 5 times in the training dataset
- are treated as the "unknown" category. The "unknown" category
- gets its own special lookup index and resulting embedding.
-
- Attributes:
- column_name (str):
-
- """
-
- column_name = proto.Field(
- proto.STRING,
- number=1,
- )
-
- class TimestampTransformation(proto.Message):
- r"""Training pipeline will perform following transformation functions.
-
- - Apply the transformation functions for Numerical columns.
- - Determine the year, month, day, and weekday. Treat each value from
- the timestamp as a Categorical column.
- - Invalid numerical values (for example, values that fall outside
- of a typical timestamp range, or are extreme values) receive no
- special treatment and are not removed.
-
- Attributes:
- column_name (str):
-
- time_format (str):
- The format in which that time field is expressed. The
- time_format must either be one of:
-
- - ``unix-seconds``
- - ``unix-milliseconds``
- - ``unix-microseconds``
- - ``unix-nanoseconds`` (for respectively number of seconds,
- milliseconds, microseconds and nanoseconds since start of
- the Unix epoch); or be written in ``strftime`` syntax. If
- time_format is not set, then the default format is RFC
- 3339 ``date-time`` format, where ``time-offset`` =
- ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z)
- invalid_values_allowed (bool):
- If invalid values are allowed, the training
- pipeline will create a boolean feature that
- indicates whether the value is valid. Otherwise,
- the training pipeline will discard the input row
- from training data.
- """
-
- column_name = proto.Field(
- proto.STRING,
- number=1,
- )
- time_format = proto.Field(
- proto.STRING,
- number=2,
- )
- invalid_values_allowed = proto.Field(
- proto.BOOL,
- number=3,
- )
-
- class TextTransformation(proto.Message):
- r"""Training pipeline will perform following transformation functions.
-
- - The text as is--no change to case, punctuation, spelling, tense,
- and so on.
- - Tokenize text to words. Convert each word to a dictionary lookup
- index and generate an embedding for each index. Combine the
- embedding of all elements into a single embedding using the mean.
- - Tokenization is based on unicode script boundaries.
- - Missing values get their own lookup index and resulting
- embedding.
- - Stop-words receive no special treatment and are not removed.
-
- Attributes:
- column_name (str):
-
- """
-
- column_name = proto.Field(
- proto.STRING,
- number=1,
- )
-
- class NumericArrayTransformation(proto.Message):
- r"""Treats the column as numerical array and performs following
- transformation functions.
-
- - All transformations for Numerical types applied to the average of
- all the elements.
- - The average of empty arrays is treated as zero.
-
- Attributes:
- column_name (str):
-
- invalid_values_allowed (bool):
- If invalid values are allowed, the training
- pipeline will create a boolean feature that
- indicates whether the value is valid. Otherwise,
- the training pipeline will discard the input row
- from training data.
- """
-
- column_name = proto.Field(
- proto.STRING,
- number=1,
- )
- invalid_values_allowed = proto.Field(
- proto.BOOL,
- number=2,
- )
-
- class CategoricalArrayTransformation(proto.Message):
- r"""Treats the column as categorical array and performs following
- transformation functions.
- - - For each element in the array, convert the category name to a - dictionary lookup index and generate an embedding for each index. - Combine the embedding of all elements into a single embedding - using the mean. - - Empty arrays treated as an embedding of zeroes. - - Attributes: - column_name (str): - - """ - - column_name = proto.Field( - proto.STRING, - number=1, - ) - - class TextArrayTransformation(proto.Message): - r"""Treats the column as text array and performs following - transformation functions. - - - Concatenate all text values in the array into a single text value - using a space (" ") as a delimiter, and then treat the result as - a single text value. Apply the transformations for Text columns. - - Empty arrays treated as an empty text. - - Attributes: - column_name (str): - - """ - - column_name = proto.Field( - proto.STRING, - number=1, - ) - - auto = proto.Field( - proto.MESSAGE, - number=1, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.AutoTransformation', - ) - numeric = proto.Field( - proto.MESSAGE, - number=2, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.NumericTransformation', - ) - categorical = proto.Field( - proto.MESSAGE, - number=3, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.CategoricalTransformation', - ) - timestamp = proto.Field( - proto.MESSAGE, - number=4, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.TimestampTransformation', - ) - text = proto.Field( - proto.MESSAGE, - number=5, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.TextTransformation', - ) - repeated_numeric = proto.Field( - proto.MESSAGE, - number=6, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.NumericArrayTransformation', - ) - repeated_categorical = proto.Field( - proto.MESSAGE, - number=7, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.CategoricalArrayTransformation', - ) - repeated_text = proto.Field( - proto.MESSAGE, - number=8, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.TextArrayTransformation', - ) - - optimization_objective_recall_value = proto.Field( - proto.FLOAT, - number=5, - oneof='additional_optimization_objective_config', - ) - optimization_objective_precision_value = proto.Field( - proto.FLOAT, - number=6, - oneof='additional_optimization_objective_config', - ) - prediction_type = proto.Field( - proto.STRING, - number=1, - ) - target_column = proto.Field( - proto.STRING, - number=2, - ) - transformations = proto.RepeatedField( - proto.MESSAGE, - number=3, - message=Transformation, - ) - optimization_objective = proto.Field( - proto.STRING, - number=4, - ) - train_budget_milli_node_hours = proto.Field( - proto.INT64, - number=7, - ) - disable_early_stopping = proto.Field( - proto.BOOL, - number=8, - ) - weight_column_name = proto.Field( - proto.STRING, - number=9, - ) - export_evaluated_data_items_config = proto.Field( - proto.MESSAGE, - number=10, - message=gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig, - ) - additional_experiments = proto.RepeatedField( - proto.STRING, - number=11, - ) - - -class AutoMlTablesMetadata(proto.Message): - r"""Model metadata specific to AutoML Tables. - Attributes: - train_cost_milli_node_hours (int): - Output only. The actual training cost of the - model, expressed in milli node hours, i.e. 1,000 - value in this field means 1 node hour. 
- Guaranteed to not exceed the train budget. - """ - - train_cost_milli_node_hours = proto.Field( - proto.INT64, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py deleted file mode 100644 index 6844219d37..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', - manifest={ - 'AutoMlTextClassification', - 'AutoMlTextClassificationInputs', - }, -) - - -class AutoMlTextClassification(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Text - Classification Model. - - Attributes: - inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTextClassificationInputs): - The input parameters of this TrainingJob. - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlTextClassificationInputs', - ) - - -class AutoMlTextClassificationInputs(proto.Message): - r""" - Attributes: - multi_label (bool): - - """ - - multi_label = proto.Field( - proto.BOOL, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py deleted file mode 100644 index 0f03e2f581..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py +++ /dev/null @@ -1,48 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
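The text-classification inputs above reduce to a single flag; a one-line sketch under the same package-layout assumption:

# Illustrative only: build AutoMlTextClassificationInputs as documented above.
from google.cloud.aiplatform.v1beta1.schema.trainingjob import definition_v1beta1

tcn_inputs = definition_v1beta1.AutoMlTextClassificationInputs(
    multi_label=True,  # allow multiple applicable labels per document
)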
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', - manifest={ - 'AutoMlTextExtraction', - 'AutoMlTextExtractionInputs', - }, -) - - -class AutoMlTextExtraction(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Text - Extraction Model. - - Attributes: - inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTextExtractionInputs): - The input parameters of this TrainingJob. - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlTextExtractionInputs', - ) - - -class AutoMlTextExtractionInputs(proto.Message): - r""" """ - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py deleted file mode 100644 index 1b5505b69d..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py +++ /dev/null @@ -1,66 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', - manifest={ - 'AutoMlTextSentiment', - 'AutoMlTextSentimentInputs', - }, -) - - -class AutoMlTextSentiment(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Text - Sentiment Model. - - Attributes: - inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTextSentimentInputs): - The input parameters of this TrainingJob. - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlTextSentimentInputs', - ) - - -class AutoMlTextSentimentInputs(proto.Message): - r""" - Attributes: - sentiment_max (int): - A sentiment is expressed as an integer - ordinal, where higher value means a more - positive sentiment. The range of sentiments that - will be used is between 0 and sentimentMax - (inclusive on both ends), and all the values in - the range must be represented in the dataset - before a model can be created. - Only the Annotations with this sentimentMax will - be used for training. sentimentMax value must be - between 1 and 10 (inclusive). 
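A short sketch of the sentiment inputs above (assuming the same package layout; the scale choice is illustrative):

# Illustrative only: a 0..4 sentiment scale; every score must appear in the dataset.
from google.cloud.aiplatform.v1beta1.schema.trainingjob import definition_v1beta1

sentiment_inputs = definition_v1beta1.AutoMlTextSentimentInputs(
    sentiment_max=4,  # must be between 1 and 10, inclusive
)
sentiment_job = definition_v1beta1.AutoMlTextSentiment(inputs=sentiment_inputs)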
- """ - - sentiment_max = proto.Field( - proto.INT32, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py deleted file mode 100644 index 89caa6da88..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py +++ /dev/null @@ -1,477 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types import export_evaluated_data_items_config as gcastd_export_evaluated_data_items_config - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', - manifest={ - 'AutoMlForecasting', - 'AutoMlForecastingInputs', - 'AutoMlForecastingMetadata', - }, -) - - -class AutoMlForecasting(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Forecasting - Model. - - Attributes: - inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs): - The input parameters of this TrainingJob. - metadata (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingMetadata): - The metadata information. - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlForecastingInputs', - ) - metadata = proto.Field( - proto.MESSAGE, - number=2, - message='AutoMlForecastingMetadata', - ) - - -class AutoMlForecastingInputs(proto.Message): - r""" - Attributes: - target_column (str): - The name of the column that the model is to - predict. - time_series_identifier_column (str): - The name of the column that identifies the - time series. - time_column (str): - The name of the column that identifies time - order in the time series. - transformations (Sequence[google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation]): - Each transformation will apply transform - function to given input column. And the result - will be used for training. When creating - transformation for BigQuery Struct column, the - column should be flattened using "." as the - delimiter. - optimization_objective (str): - Objective function the model is optimizing towards. The - training process creates a model that optimizes the value of - the objective function over the validation set. - - The supported optimization objectives: - - - "minimize-rmse" (default) - Minimize root-mean-squared - error (RMSE). - - - "minimize-mae" - Minimize mean-absolute error (MAE). - - - "minimize-rmsle" - Minimize root-mean-squared log error - (RMSLE). 
- - "minimize-rmspe" - Minimize root-mean-squared percentage
- error (RMSPE).
-
- - "minimize-wape-mae" - Minimize the combination of
- weighted absolute percentage error (WAPE) and
- mean-absolute-error (MAE).
-
- - "minimize-quantile-loss" - Minimize the quantile loss at
- the quantiles defined in ``quantiles``.
- train_budget_milli_node_hours (int):
- Required. The train budget of creating this
- model, expressed in milli node hours i.e. 1,000
- value in this field means 1 node hour.
- The training cost of the model will not exceed
- this budget. The final cost will be attempted to
- be close to the budget, though may end up being
- (even) noticeably smaller - at the backend's
- discretion. This especially may happen when
- further model training ceases to provide any
- improvements.
- If the budget is set to a value known to be
- insufficient to train a model for the given
- dataset, the training won't be attempted and
- will error.
-
- The train budget must be between 1,000 and
- 72,000 milli node hours, inclusive.
- weight_column (str):
- Column name that should be used as the weight
- column. Higher values in this column give more
- importance to the row during model training. The
- column must have numeric values between 0 and
- 10000 inclusively; 0 means the row is ignored
- for training. If weight column field is not set,
- then all rows are assumed to have equal weight
- of 1.
- time_series_attribute_columns (Sequence[str]):
- Column names that should be used as attribute
- columns. The value of these columns does not
- vary as a function of time. For example, store
- ID or item color.
- unavailable_at_forecast_columns (Sequence[str]):
- Names of columns that are unavailable when a forecast is
- requested. These columns contain information for the given
- entity (identified by the time_series_identifier_column)
- that is unknown before the forecast. For example, actual
- weather on a given day.
- available_at_forecast_columns (Sequence[str]):
- Names of columns that are available and provided when a
- forecast is requested. These columns contain information for
- the given entity (identified by the
- time_series_identifier_column column) that is known at
- forecast. For example, predicted weather for a specific day.
- data_granularity (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Granularity):
- Expected difference in time granularity
- between rows in the data.
- forecast_horizon (int):
- The amount of time into the future for which forecasted
- values for the target are returned. Expressed in number of
- units defined by the ``data_granularity`` field.
- context_window (int):
- The amount of time into the past training and prediction
- data is used for model training and prediction respectively.
- Expressed in number of units defined by the
- ``data_granularity`` field.
- export_evaluated_data_items_config (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.ExportEvaluatedDataItemsConfig):
- Configuration for exporting test set
- predictions to a BigQuery table. If this
- configuration is absent, then the export is not
- performed.
- quantiles (Sequence[float]):
- Quantiles to use for minimize-quantile-loss
- ``optimization_objective``. Up to 5 quantiles are allowed of
- values between 0 and 1, exclusive. Required if the value of
- optimization_objective is minimize-quantile-loss. Represents
- the percent quantiles to use for that objective. Quantiles
- must be unique.
- validation_options (str):
- Validation options for the data validation component. The
- available options are:
-
- - "fail-pipeline" - default, will validate against the
- validation and fail the pipeline if it fails.
-
- - "ignore-validation" - ignore the results of the
- validation and continue.
- additional_experiments (Sequence[str]):
- Additional experiment flags for the time
- series forecasting training.
- """
-
- class Transformation(proto.Message):
- r"""
- Attributes:
- auto (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.AutoTransformation):
-
- numeric (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.NumericTransformation):
-
- categorical (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.CategoricalTransformation):
-
- timestamp (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.TimestampTransformation):
-
- text (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.TextTransformation):
-
- """
-
- class AutoTransformation(proto.Message):
- r"""Training pipeline will infer the proper transformation based
- on the statistics of the dataset.
-
- Attributes:
- column_name (str):
-
- """
-
- column_name = proto.Field(
- proto.STRING,
- number=1,
- )
-
- class NumericTransformation(proto.Message):
- r"""Training pipeline will perform following transformation functions.
-
- - The value converted to float32.
-
- - The z_score of the value.
-
- - log(value+1) when the value is greater than or equal to 0.
- Otherwise, this transformation is not applied and the value is
- considered a missing value.
-
- - z_score of log(value+1) when the value is greater than or equal
- to 0. Otherwise, this transformation is not applied and the value
- is considered a missing value.
-
- - A boolean value that indicates whether the value is valid.
-
- Attributes:
- column_name (str):
-
- """
-
- column_name = proto.Field(
- proto.STRING,
- number=1,
- )
-
- class CategoricalTransformation(proto.Message):
- r"""Training pipeline will perform following transformation functions.
-
- - The categorical string as is--no change to case, punctuation,
- spelling, tense, and so on.
-
- - Convert the category name to a dictionary lookup index and
- generate an embedding for each index.
-
- - Categories that appear less than 5 times in the training dataset
- are treated as the "unknown" category. The "unknown" category
- gets its own special lookup index and resulting embedding.
-
- Attributes:
- column_name (str):
-
- """
-
- column_name = proto.Field(
- proto.STRING,
- number=1,
- )
-
- class TimestampTransformation(proto.Message):
- r"""Training pipeline will perform following transformation functions.
-
- - Apply the transformation functions for Numerical columns.
-
- - Determine the year, month, day, and weekday. Treat each value from
- the timestamp as a Categorical column.
-
- - Invalid numerical values (for example, values that fall outside
- of a typical timestamp range, or are extreme values) receive no
- special treatment and are not removed.
-
- Attributes:
- column_name (str):
-
- time_format (str):
- The format in which that time field is expressed.
The - time_format must either be one of: - - - ``unix-seconds`` - - - ``unix-milliseconds`` - - - ``unix-microseconds`` - - - ``unix-nanoseconds`` - - (for respectively number of seconds, milliseconds, - microseconds and nanoseconds since start of the Unix epoch); - - or be written in ``strftime`` syntax. - - If time_format is not set, then the default format is RFC - 3339 ``date-time`` format, where ``time-offset`` = ``"Z"`` - (e.g. 1985-04-12T23:20:50.52Z) - """ - - column_name = proto.Field( - proto.STRING, - number=1, - ) - time_format = proto.Field( - proto.STRING, - number=2, - ) - - class TextTransformation(proto.Message): - r"""Training pipeline will perform following transformation functions. - - - The text as is--no change to case, punctuation, spelling, tense, - and so on. - - - Convert the category name to a dictionary lookup index and - generate an embedding for each index. - - Attributes: - column_name (str): - - """ - - column_name = proto.Field( - proto.STRING, - number=1, - ) - - auto = proto.Field( - proto.MESSAGE, - number=1, - oneof='transformation_detail', - message='AutoMlForecastingInputs.Transformation.AutoTransformation', - ) - numeric = proto.Field( - proto.MESSAGE, - number=2, - oneof='transformation_detail', - message='AutoMlForecastingInputs.Transformation.NumericTransformation', - ) - categorical = proto.Field( - proto.MESSAGE, - number=3, - oneof='transformation_detail', - message='AutoMlForecastingInputs.Transformation.CategoricalTransformation', - ) - timestamp = proto.Field( - proto.MESSAGE, - number=4, - oneof='transformation_detail', - message='AutoMlForecastingInputs.Transformation.TimestampTransformation', - ) - text = proto.Field( - proto.MESSAGE, - number=5, - oneof='transformation_detail', - message='AutoMlForecastingInputs.Transformation.TextTransformation', - ) - - class Granularity(proto.Message): - r"""A duration of time expressed in time granularity units. - Attributes: - unit (str): - The time granularity unit of this time period. The supported - units are: - - - "minute" - - - "hour" - - - "day" - - - "week" - - - "month" - - - "year". - quantity (int): - The number of granularity_units between data points in the - training data. If ``granularity_unit`` is ``minute``, can be - 1, 5, 10, 15, or 30. For all other values of - ``granularity_unit``, must be 1. 
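Pulling the pieces above together, a hedged sketch of AutoMlForecastingInputs, including the additional_experiments field this change introduces (column names and the experiment flag are hypothetical placeholders):

# Illustrative only: build AutoMlForecastingInputs as documented in this file.
from google.cloud.aiplatform.v1beta1.schema.trainingjob import definition_v1beta1

forecasting_inputs = definition_v1beta1.AutoMlForecastingInputs(
    target_column="sales",                      # hypothetical column names
    time_column="date",
    time_series_identifier_column="store_id",
    optimization_objective="minimize-quantile-loss",
    quantiles=[0.1, 0.5, 0.9],                  # up to 5 unique values in (0, 1); required for this objective
    data_granularity=definition_v1beta1.AutoMlForecastingInputs.Granularity(
        unit="day",
        quantity=1,                             # must be 1 for any unit other than "minute"
    ),
    forecast_horizon=30,                        # predict 30 granularity units ahead
    context_window=90,                          # look back 90 granularity units
    train_budget_milli_node_hours=1_000,        # documented minimum
    additional_experiments=["placeholder_experiment_flag"],  # hypothetical flag; opaque to the client
)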
- """ - - unit = proto.Field( - proto.STRING, - number=1, - ) - quantity = proto.Field( - proto.INT64, - number=2, - ) - - target_column = proto.Field( - proto.STRING, - number=1, - ) - time_series_identifier_column = proto.Field( - proto.STRING, - number=2, - ) - time_column = proto.Field( - proto.STRING, - number=3, - ) - transformations = proto.RepeatedField( - proto.MESSAGE, - number=4, - message=Transformation, - ) - optimization_objective = proto.Field( - proto.STRING, - number=5, - ) - train_budget_milli_node_hours = proto.Field( - proto.INT64, - number=6, - ) - weight_column = proto.Field( - proto.STRING, - number=7, - ) - time_series_attribute_columns = proto.RepeatedField( - proto.STRING, - number=19, - ) - unavailable_at_forecast_columns = proto.RepeatedField( - proto.STRING, - number=20, - ) - available_at_forecast_columns = proto.RepeatedField( - proto.STRING, - number=21, - ) - data_granularity = proto.Field( - proto.MESSAGE, - number=22, - message=Granularity, - ) - forecast_horizon = proto.Field( - proto.INT64, - number=23, - ) - context_window = proto.Field( - proto.INT64, - number=24, - ) - export_evaluated_data_items_config = proto.Field( - proto.MESSAGE, - number=15, - message=gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig, - ) - quantiles = proto.RepeatedField( - proto.DOUBLE, - number=16, - ) - validation_options = proto.Field( - proto.STRING, - number=17, - ) - additional_experiments = proto.RepeatedField( - proto.STRING, - number=25, - ) - - -class AutoMlForecastingMetadata(proto.Message): - r"""Model metadata specific to AutoML Forecasting. - Attributes: - train_cost_milli_node_hours (int): - Output only. The actual training cost of the - model, expressed in milli node hours, i.e. 1,000 - value in this field means 1 node hour. - Guaranteed to not exceed the train budget. - """ - - train_cost_milli_node_hours = proto.Field( - proto.INT64, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py deleted file mode 100644 index b9cce5bbe9..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py +++ /dev/null @@ -1,65 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', - manifest={ - 'AutoMlVideoActionRecognition', - 'AutoMlVideoActionRecognitionInputs', - }, -) - - -class AutoMlVideoActionRecognition(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Video Action - Recognition Model. 
- - Attributes: - inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoActionRecognitionInputs): - The input parameters of this TrainingJob. - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlVideoActionRecognitionInputs', - ) - - -class AutoMlVideoActionRecognitionInputs(proto.Message): - r""" - Attributes: - model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoActionRecognitionInputs.ModelType): - - """ - class ModelType(proto.Enum): - r"""""" - MODEL_TYPE_UNSPECIFIED = 0 - CLOUD = 1 - MOBILE_VERSATILE_1 = 2 - MOBILE_JETSON_VERSATILE_1 = 3 - MOBILE_CORAL_VERSATILE_1 = 4 - - model_type = proto.Field( - proto.ENUM, - number=1, - enum=ModelType, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py deleted file mode 100644 index e2f0bc89e3..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py +++ /dev/null @@ -1,64 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', - manifest={ - 'AutoMlVideoClassification', - 'AutoMlVideoClassificationInputs', - }, -) - - -class AutoMlVideoClassification(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Video - Classification Model. - - Attributes: - inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoClassificationInputs): - The input parameters of this TrainingJob. 
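A minimal sketch of the action-recognition inputs defined above, whose ModelType enum is the one this change's commit message calls out (package path as in this diff):

# Illustrative only: select a model type for video action recognition.
from google.cloud.aiplatform.v1beta1.schema.trainingjob import definition_v1beta1

var_inputs = definition_v1beta1.AutoMlVideoActionRecognitionInputs(
    model_type=definition_v1beta1.AutoMlVideoActionRecognitionInputs.ModelType.CLOUD,
)
var_job = definition_v1beta1.AutoMlVideoActionRecognition(inputs=var_inputs)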
- """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlVideoClassificationInputs', - ) - - -class AutoMlVideoClassificationInputs(proto.Message): - r""" - Attributes: - model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoClassificationInputs.ModelType): - - """ - class ModelType(proto.Enum): - r"""""" - MODEL_TYPE_UNSPECIFIED = 0 - CLOUD = 1 - MOBILE_VERSATILE_1 = 2 - MOBILE_JETSON_VERSATILE_1 = 3 - - model_type = proto.Field( - proto.ENUM, - number=1, - enum=ModelType, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py deleted file mode 100644 index 91f4d9d82a..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py +++ /dev/null @@ -1,67 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', - manifest={ - 'AutoMlVideoObjectTracking', - 'AutoMlVideoObjectTrackingInputs', - }, -) - - -class AutoMlVideoObjectTracking(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Video - ObjectTracking Model. - - Attributes: - inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoObjectTrackingInputs): - The input parameters of this TrainingJob. 
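The video-classification inputs above follow the same single-field pattern; a sketch under the same assumptions:

# Illustrative only: select a model type for video classification.
from google.cloud.aiplatform.v1beta1.schema.trainingjob import definition_v1beta1

vcn_inputs = definition_v1beta1.AutoMlVideoClassificationInputs(
    model_type=definition_v1beta1.AutoMlVideoClassificationInputs.ModelType.MOBILE_VERSATILE_1,
)
vcn_job = definition_v1beta1.AutoMlVideoClassification(inputs=vcn_inputs)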
- """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlVideoObjectTrackingInputs', - ) - - -class AutoMlVideoObjectTrackingInputs(proto.Message): - r""" - Attributes: - model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoObjectTrackingInputs.ModelType): - - """ - class ModelType(proto.Enum): - r"""""" - MODEL_TYPE_UNSPECIFIED = 0 - CLOUD = 1 - MOBILE_VERSATILE_1 = 2 - MOBILE_CORAL_VERSATILE_1 = 3 - MOBILE_CORAL_LOW_LATENCY_1 = 4 - MOBILE_JETSON_VERSATILE_1 = 5 - MOBILE_JETSON_LOW_LATENCY_1 = 6 - - model_type = proto.Field( - proto.ENUM, - number=1, - enum=ModelType, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py deleted file mode 100644 index f2f42e233b..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', - manifest={ - 'ExportEvaluatedDataItemsConfig', - }, -) - - -class ExportEvaluatedDataItemsConfig(proto.Message): - r"""Configuration for exporting test set predictions to a - BigQuery table. - - Attributes: - destination_bigquery_uri (str): - URI of desired destination BigQuery table. Expected format: - bq://:: - - If not specified, then results are exported to the following - auto-created BigQuery table: - :export_evaluated_examples__.evaluated_examples - override_existing_table (bool): - If true and an export destination is - specified, then the contents of the destination - are overwritten. Otherwise, if the export - destination already exists, then the export - operation fails. - """ - - destination_bigquery_uri = proto.Field( - proto.STRING, - number=1, - ) - override_existing_table = proto.Field( - proto.BOOL, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/__init__.py deleted file mode 100644 index d356a80d31..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/__init__.py +++ /dev/null @@ -1,876 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
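ExportEvaluatedDataItemsConfig above plugs into both the Tables and Forecasting inputs; a hedged sketch (the URI is a hypothetical placeholder, and the colon-separated shape is inferred from the "bq://::" placeholder in the docstring above, whose angle-bracketed segments were lost in generation):

# Illustrative only: export test-set predictions to a BigQuery table.
from google.cloud.aiplatform.v1beta1.schema.trainingjob import definition_v1beta1

export_config = definition_v1beta1.ExportEvaluatedDataItemsConfig(
    destination_bigquery_uri="bq://my-project:my_dataset:eval_items",  # hypothetical URI
    override_existing_table=False,  # fail rather than overwrite an existing table
)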
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -from .services.dataset_service import DatasetServiceClient -from .services.dataset_service import DatasetServiceAsyncClient -from .services.endpoint_service import EndpointServiceClient -from .services.endpoint_service import EndpointServiceAsyncClient -from .services.featurestore_online_serving_service import FeaturestoreOnlineServingServiceClient -from .services.featurestore_online_serving_service import FeaturestoreOnlineServingServiceAsyncClient -from .services.featurestore_service import FeaturestoreServiceClient -from .services.featurestore_service import FeaturestoreServiceAsyncClient -from .services.index_endpoint_service import IndexEndpointServiceClient -from .services.index_endpoint_service import IndexEndpointServiceAsyncClient -from .services.index_service import IndexServiceClient -from .services.index_service import IndexServiceAsyncClient -from .services.job_service import JobServiceClient -from .services.job_service import JobServiceAsyncClient -from .services.metadata_service import MetadataServiceClient -from .services.metadata_service import MetadataServiceAsyncClient -from .services.migration_service import MigrationServiceClient -from .services.migration_service import MigrationServiceAsyncClient -from .services.model_service import ModelServiceClient -from .services.model_service import ModelServiceAsyncClient -from .services.pipeline_service import PipelineServiceClient -from .services.pipeline_service import PipelineServiceAsyncClient -from .services.prediction_service import PredictionServiceClient -from .services.prediction_service import PredictionServiceAsyncClient -from .services.specialist_pool_service import SpecialistPoolServiceClient -from .services.specialist_pool_service import SpecialistPoolServiceAsyncClient -from .services.tensorboard_service import TensorboardServiceClient -from .services.tensorboard_service import TensorboardServiceAsyncClient -from .services.vizier_service import VizierServiceClient -from .services.vizier_service import VizierServiceAsyncClient - -from .types.accelerator_type import AcceleratorType -from .types.annotation import Annotation -from .types.annotation_spec import AnnotationSpec -from .types.artifact import Artifact -from .types.batch_prediction_job import BatchPredictionJob -from .types.completion_stats import CompletionStats -from .types.context import Context -from .types.custom_job import ContainerSpec -from .types.custom_job import CustomJob -from .types.custom_job import CustomJobSpec -from .types.custom_job import PythonPackageSpec -from .types.custom_job import Scheduling -from .types.custom_job import WorkerPoolSpec -from .types.data_item import DataItem -from .types.data_labeling_job import ActiveLearningConfig -from .types.data_labeling_job import DataLabelingJob -from .types.data_labeling_job import SampleConfig -from .types.data_labeling_job import TrainingConfig -from .types.dataset import Dataset -from .types.dataset import ExportDataConfig -from .types.dataset import ImportDataConfig -from .types.dataset_service import CreateDatasetOperationMetadata -from .types.dataset_service import 
CreateDatasetRequest -from .types.dataset_service import DeleteDatasetRequest -from .types.dataset_service import ExportDataOperationMetadata -from .types.dataset_service import ExportDataRequest -from .types.dataset_service import ExportDataResponse -from .types.dataset_service import GetAnnotationSpecRequest -from .types.dataset_service import GetDatasetRequest -from .types.dataset_service import ImportDataOperationMetadata -from .types.dataset_service import ImportDataRequest -from .types.dataset_service import ImportDataResponse -from .types.dataset_service import ListAnnotationsRequest -from .types.dataset_service import ListAnnotationsResponse -from .types.dataset_service import ListDataItemsRequest -from .types.dataset_service import ListDataItemsResponse -from .types.dataset_service import ListDatasetsRequest -from .types.dataset_service import ListDatasetsResponse -from .types.dataset_service import UpdateDatasetRequest -from .types.deployed_index_ref import DeployedIndexRef -from .types.deployed_model_ref import DeployedModelRef -from .types.encryption_spec import EncryptionSpec -from .types.endpoint import DeployedModel -from .types.endpoint import Endpoint -from .types.endpoint_service import CreateEndpointOperationMetadata -from .types.endpoint_service import CreateEndpointRequest -from .types.endpoint_service import DeleteEndpointRequest -from .types.endpoint_service import DeployModelOperationMetadata -from .types.endpoint_service import DeployModelRequest -from .types.endpoint_service import DeployModelResponse -from .types.endpoint_service import GetEndpointRequest -from .types.endpoint_service import ListEndpointsRequest -from .types.endpoint_service import ListEndpointsResponse -from .types.endpoint_service import UndeployModelOperationMetadata -from .types.endpoint_service import UndeployModelRequest -from .types.endpoint_service import UndeployModelResponse -from .types.endpoint_service import UpdateEndpointRequest -from .types.entity_type import EntityType -from .types.env_var import EnvVar -from .types.event import Event -from .types.execution import Execution -from .types.explanation import Attribution -from .types.explanation import Explanation -from .types.explanation import ExplanationMetadataOverride -from .types.explanation import ExplanationParameters -from .types.explanation import ExplanationSpec -from .types.explanation import ExplanationSpecOverride -from .types.explanation import FeatureNoiseSigma -from .types.explanation import IntegratedGradientsAttribution -from .types.explanation import ModelExplanation -from .types.explanation import SampledShapleyAttribution -from .types.explanation import SmoothGradConfig -from .types.explanation import XraiAttribution -from .types.explanation_metadata import ExplanationMetadata -from .types.feature import Feature -from .types.feature_monitoring_stats import FeatureStatsAnomaly -from .types.feature_selector import FeatureSelector -from .types.feature_selector import IdMatcher -from .types.featurestore import Featurestore -from .types.featurestore_monitoring import FeaturestoreMonitoringConfig -from .types.featurestore_online_service import FeatureValue -from .types.featurestore_online_service import FeatureValueList -from .types.featurestore_online_service import ReadFeatureValuesRequest -from .types.featurestore_online_service import ReadFeatureValuesResponse -from .types.featurestore_online_service import StreamingReadFeatureValuesRequest -from .types.featurestore_service import 
BatchCreateFeaturesOperationMetadata -from .types.featurestore_service import BatchCreateFeaturesRequest -from .types.featurestore_service import BatchCreateFeaturesResponse -from .types.featurestore_service import BatchReadFeatureValuesOperationMetadata -from .types.featurestore_service import BatchReadFeatureValuesRequest -from .types.featurestore_service import BatchReadFeatureValuesResponse -from .types.featurestore_service import CreateEntityTypeOperationMetadata -from .types.featurestore_service import CreateEntityTypeRequest -from .types.featurestore_service import CreateFeatureOperationMetadata -from .types.featurestore_service import CreateFeatureRequest -from .types.featurestore_service import CreateFeaturestoreOperationMetadata -from .types.featurestore_service import CreateFeaturestoreRequest -from .types.featurestore_service import DeleteEntityTypeRequest -from .types.featurestore_service import DeleteFeatureRequest -from .types.featurestore_service import DeleteFeaturestoreRequest -from .types.featurestore_service import DestinationFeatureSetting -from .types.featurestore_service import ExportFeatureValuesOperationMetadata -from .types.featurestore_service import ExportFeatureValuesRequest -from .types.featurestore_service import ExportFeatureValuesResponse -from .types.featurestore_service import FeatureValueDestination -from .types.featurestore_service import GetEntityTypeRequest -from .types.featurestore_service import GetFeatureRequest -from .types.featurestore_service import GetFeaturestoreRequest -from .types.featurestore_service import ImportFeatureValuesOperationMetadata -from .types.featurestore_service import ImportFeatureValuesRequest -from .types.featurestore_service import ImportFeatureValuesResponse -from .types.featurestore_service import ListEntityTypesRequest -from .types.featurestore_service import ListEntityTypesResponse -from .types.featurestore_service import ListFeaturesRequest -from .types.featurestore_service import ListFeaturesResponse -from .types.featurestore_service import ListFeaturestoresRequest -from .types.featurestore_service import ListFeaturestoresResponse -from .types.featurestore_service import SearchFeaturesRequest -from .types.featurestore_service import SearchFeaturesResponse -from .types.featurestore_service import UpdateEntityTypeRequest -from .types.featurestore_service import UpdateFeatureRequest -from .types.featurestore_service import UpdateFeaturestoreOperationMetadata -from .types.featurestore_service import UpdateFeaturestoreRequest -from .types.hyperparameter_tuning_job import HyperparameterTuningJob -from .types.index import Index -from .types.index_endpoint import DeployedIndex -from .types.index_endpoint import DeployedIndexAuthConfig -from .types.index_endpoint import IndexEndpoint -from .types.index_endpoint import IndexPrivateEndpoints -from .types.index_endpoint_service import CreateIndexEndpointOperationMetadata -from .types.index_endpoint_service import CreateIndexEndpointRequest -from .types.index_endpoint_service import DeleteIndexEndpointRequest -from .types.index_endpoint_service import DeployIndexOperationMetadata -from .types.index_endpoint_service import DeployIndexRequest -from .types.index_endpoint_service import DeployIndexResponse -from .types.index_endpoint_service import GetIndexEndpointRequest -from .types.index_endpoint_service import ListIndexEndpointsRequest -from .types.index_endpoint_service import ListIndexEndpointsResponse -from .types.index_endpoint_service import UndeployIndexOperationMetadata 
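The __init__.py being deleted in this hunk mirrors the package root of google.cloud.aiplatform_v1beta1, where every service client and request/response type is re-exported. A minimal sketch of that surface in use, assuming the published library and Application Default Credentials (the project and location are hypothetical):

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.DatasetServiceClient()
    request = aiplatform_v1beta1.ListDatasetsRequest(
        parent="projects/my-project/locations/us-central1",
    )
    # list_datasets returns a pager that fetches further pages lazily.
    for dataset in client.list_datasets(request=request):
        print(dataset.display_name)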
-from .types.index_endpoint_service import UndeployIndexRequest -from .types.index_endpoint_service import UndeployIndexResponse -from .types.index_endpoint_service import UpdateIndexEndpointRequest -from .types.index_service import CreateIndexOperationMetadata -from .types.index_service import CreateIndexRequest -from .types.index_service import DeleteIndexRequest -from .types.index_service import GetIndexRequest -from .types.index_service import ListIndexesRequest -from .types.index_service import ListIndexesResponse -from .types.index_service import NearestNeighborSearchOperationMetadata -from .types.index_service import UpdateIndexOperationMetadata -from .types.index_service import UpdateIndexRequest -from .types.io import AvroSource -from .types.io import BigQueryDestination -from .types.io import BigQuerySource -from .types.io import ContainerRegistryDestination -from .types.io import CsvDestination -from .types.io import CsvSource -from .types.io import GcsDestination -from .types.io import GcsSource -from .types.io import TFRecordDestination -from .types.job_service import CancelBatchPredictionJobRequest -from .types.job_service import CancelCustomJobRequest -from .types.job_service import CancelDataLabelingJobRequest -from .types.job_service import CancelHyperparameterTuningJobRequest -from .types.job_service import CreateBatchPredictionJobRequest -from .types.job_service import CreateCustomJobRequest -from .types.job_service import CreateDataLabelingJobRequest -from .types.job_service import CreateHyperparameterTuningJobRequest -from .types.job_service import CreateModelDeploymentMonitoringJobRequest -from .types.job_service import DeleteBatchPredictionJobRequest -from .types.job_service import DeleteCustomJobRequest -from .types.job_service import DeleteDataLabelingJobRequest -from .types.job_service import DeleteHyperparameterTuningJobRequest -from .types.job_service import DeleteModelDeploymentMonitoringJobRequest -from .types.job_service import GetBatchPredictionJobRequest -from .types.job_service import GetCustomJobRequest -from .types.job_service import GetDataLabelingJobRequest -from .types.job_service import GetHyperparameterTuningJobRequest -from .types.job_service import GetModelDeploymentMonitoringJobRequest -from .types.job_service import ListBatchPredictionJobsRequest -from .types.job_service import ListBatchPredictionJobsResponse -from .types.job_service import ListCustomJobsRequest -from .types.job_service import ListCustomJobsResponse -from .types.job_service import ListDataLabelingJobsRequest -from .types.job_service import ListDataLabelingJobsResponse -from .types.job_service import ListHyperparameterTuningJobsRequest -from .types.job_service import ListHyperparameterTuningJobsResponse -from .types.job_service import ListModelDeploymentMonitoringJobsRequest -from .types.job_service import ListModelDeploymentMonitoringJobsResponse -from .types.job_service import PauseModelDeploymentMonitoringJobRequest -from .types.job_service import ResumeModelDeploymentMonitoringJobRequest -from .types.job_service import SearchModelDeploymentMonitoringStatsAnomaliesRequest -from .types.job_service import SearchModelDeploymentMonitoringStatsAnomaliesResponse -from .types.job_service import UpdateModelDeploymentMonitoringJobOperationMetadata -from .types.job_service import UpdateModelDeploymentMonitoringJobRequest -from .types.job_state import JobState -from .types.lineage_subgraph import LineageSubgraph -from .types.machine_resources import AutomaticResources -from 
.types.machine_resources import AutoscalingMetricSpec -from .types.machine_resources import BatchDedicatedResources -from .types.machine_resources import DedicatedResources -from .types.machine_resources import DiskSpec -from .types.machine_resources import MachineSpec -from .types.machine_resources import ResourcesConsumed -from .types.manual_batch_tuning_parameters import ManualBatchTuningParameters -from .types.metadata_schema import MetadataSchema -from .types.metadata_service import AddContextArtifactsAndExecutionsRequest -from .types.metadata_service import AddContextArtifactsAndExecutionsResponse -from .types.metadata_service import AddContextChildrenRequest -from .types.metadata_service import AddContextChildrenResponse -from .types.metadata_service import AddExecutionEventsRequest -from .types.metadata_service import AddExecutionEventsResponse -from .types.metadata_service import CreateArtifactRequest -from .types.metadata_service import CreateContextRequest -from .types.metadata_service import CreateExecutionRequest -from .types.metadata_service import CreateMetadataSchemaRequest -from .types.metadata_service import CreateMetadataStoreOperationMetadata -from .types.metadata_service import CreateMetadataStoreRequest -from .types.metadata_service import DeleteContextRequest -from .types.metadata_service import DeleteMetadataStoreOperationMetadata -from .types.metadata_service import DeleteMetadataStoreRequest -from .types.metadata_service import GetArtifactRequest -from .types.metadata_service import GetContextRequest -from .types.metadata_service import GetExecutionRequest -from .types.metadata_service import GetMetadataSchemaRequest -from .types.metadata_service import GetMetadataStoreRequest -from .types.metadata_service import ListArtifactsRequest -from .types.metadata_service import ListArtifactsResponse -from .types.metadata_service import ListContextsRequest -from .types.metadata_service import ListContextsResponse -from .types.metadata_service import ListExecutionsRequest -from .types.metadata_service import ListExecutionsResponse -from .types.metadata_service import ListMetadataSchemasRequest -from .types.metadata_service import ListMetadataSchemasResponse -from .types.metadata_service import ListMetadataStoresRequest -from .types.metadata_service import ListMetadataStoresResponse -from .types.metadata_service import QueryArtifactLineageSubgraphRequest -from .types.metadata_service import QueryContextLineageSubgraphRequest -from .types.metadata_service import QueryExecutionInputsAndOutputsRequest -from .types.metadata_service import UpdateArtifactRequest -from .types.metadata_service import UpdateContextRequest -from .types.metadata_service import UpdateExecutionRequest -from .types.metadata_store import MetadataStore -from .types.migratable_resource import MigratableResource -from .types.migration_service import BatchMigrateResourcesOperationMetadata -from .types.migration_service import BatchMigrateResourcesRequest -from .types.migration_service import BatchMigrateResourcesResponse -from .types.migration_service import MigrateResourceRequest -from .types.migration_service import MigrateResourceResponse -from .types.migration_service import SearchMigratableResourcesRequest -from .types.migration_service import SearchMigratableResourcesResponse -from .types.model import Model -from .types.model import ModelContainerSpec -from .types.model import Port -from .types.model import PredictSchemata -from .types.model_deployment_monitoring_job import 
ModelDeploymentMonitoringBigQueryTable -from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringJob -from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringObjectiveConfig -from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringScheduleConfig -from .types.model_deployment_monitoring_job import ModelMonitoringStatsAnomalies -from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringObjectiveType -from .types.model_evaluation import ModelEvaluation -from .types.model_evaluation_slice import ModelEvaluationSlice -from .types.model_monitoring import ModelMonitoringAlertConfig -from .types.model_monitoring import ModelMonitoringObjectiveConfig -from .types.model_monitoring import SamplingStrategy -from .types.model_monitoring import ThresholdConfig -from .types.model_service import DeleteModelRequest -from .types.model_service import ExportModelOperationMetadata -from .types.model_service import ExportModelRequest -from .types.model_service import ExportModelResponse -from .types.model_service import GetModelEvaluationRequest -from .types.model_service import GetModelEvaluationSliceRequest -from .types.model_service import GetModelRequest -from .types.model_service import ListModelEvaluationSlicesRequest -from .types.model_service import ListModelEvaluationSlicesResponse -from .types.model_service import ListModelEvaluationsRequest -from .types.model_service import ListModelEvaluationsResponse -from .types.model_service import ListModelsRequest -from .types.model_service import ListModelsResponse -from .types.model_service import UpdateModelRequest -from .types.model_service import UploadModelOperationMetadata -from .types.model_service import UploadModelRequest -from .types.model_service import UploadModelResponse -from .types.operation import DeleteOperationMetadata -from .types.operation import GenericOperationMetadata -from .types.pipeline_job import PipelineJob -from .types.pipeline_job import PipelineJobDetail -from .types.pipeline_job import PipelineTaskDetail -from .types.pipeline_job import PipelineTaskExecutorDetail -from .types.pipeline_service import CancelPipelineJobRequest -from .types.pipeline_service import CancelTrainingPipelineRequest -from .types.pipeline_service import CreatePipelineJobRequest -from .types.pipeline_service import CreateTrainingPipelineRequest -from .types.pipeline_service import DeletePipelineJobRequest -from .types.pipeline_service import DeleteTrainingPipelineRequest -from .types.pipeline_service import GetPipelineJobRequest -from .types.pipeline_service import GetTrainingPipelineRequest -from .types.pipeline_service import ListPipelineJobsRequest -from .types.pipeline_service import ListPipelineJobsResponse -from .types.pipeline_service import ListTrainingPipelinesRequest -from .types.pipeline_service import ListTrainingPipelinesResponse -from .types.pipeline_state import PipelineState -from .types.prediction_service import ExplainRequest -from .types.prediction_service import ExplainResponse -from .types.prediction_service import PredictRequest -from .types.prediction_service import PredictResponse -from .types.specialist_pool import SpecialistPool -from .types.specialist_pool_service import CreateSpecialistPoolOperationMetadata -from .types.specialist_pool_service import CreateSpecialistPoolRequest -from .types.specialist_pool_service import DeleteSpecialistPoolRequest -from .types.specialist_pool_service import GetSpecialistPoolRequest -from .types.specialist_pool_service 
import ListSpecialistPoolsRequest -from .types.specialist_pool_service import ListSpecialistPoolsResponse -from .types.specialist_pool_service import UpdateSpecialistPoolOperationMetadata -from .types.specialist_pool_service import UpdateSpecialistPoolRequest -from .types.study import Measurement -from .types.study import Study -from .types.study import StudySpec -from .types.study import Trial -from .types.tensorboard import Tensorboard -from .types.tensorboard_data import Scalar -from .types.tensorboard_data import TensorboardBlob -from .types.tensorboard_data import TensorboardBlobSequence -from .types.tensorboard_data import TensorboardTensor -from .types.tensorboard_data import TimeSeriesData -from .types.tensorboard_data import TimeSeriesDataPoint -from .types.tensorboard_experiment import TensorboardExperiment -from .types.tensorboard_run import TensorboardRun -from .types.tensorboard_service import CreateTensorboardExperimentRequest -from .types.tensorboard_service import CreateTensorboardOperationMetadata -from .types.tensorboard_service import CreateTensorboardRequest -from .types.tensorboard_service import CreateTensorboardRunRequest -from .types.tensorboard_service import CreateTensorboardTimeSeriesRequest -from .types.tensorboard_service import DeleteTensorboardExperimentRequest -from .types.tensorboard_service import DeleteTensorboardRequest -from .types.tensorboard_service import DeleteTensorboardRunRequest -from .types.tensorboard_service import DeleteTensorboardTimeSeriesRequest -from .types.tensorboard_service import ExportTensorboardTimeSeriesDataRequest -from .types.tensorboard_service import ExportTensorboardTimeSeriesDataResponse -from .types.tensorboard_service import GetTensorboardExperimentRequest -from .types.tensorboard_service import GetTensorboardRequest -from .types.tensorboard_service import GetTensorboardRunRequest -from .types.tensorboard_service import GetTensorboardTimeSeriesRequest -from .types.tensorboard_service import ListTensorboardExperimentsRequest -from .types.tensorboard_service import ListTensorboardExperimentsResponse -from .types.tensorboard_service import ListTensorboardRunsRequest -from .types.tensorboard_service import ListTensorboardRunsResponse -from .types.tensorboard_service import ListTensorboardsRequest -from .types.tensorboard_service import ListTensorboardsResponse -from .types.tensorboard_service import ListTensorboardTimeSeriesRequest -from .types.tensorboard_service import ListTensorboardTimeSeriesResponse -from .types.tensorboard_service import ReadTensorboardBlobDataRequest -from .types.tensorboard_service import ReadTensorboardBlobDataResponse -from .types.tensorboard_service import ReadTensorboardTimeSeriesDataRequest -from .types.tensorboard_service import ReadTensorboardTimeSeriesDataResponse -from .types.tensorboard_service import UpdateTensorboardExperimentRequest -from .types.tensorboard_service import UpdateTensorboardOperationMetadata -from .types.tensorboard_service import UpdateTensorboardRequest -from .types.tensorboard_service import UpdateTensorboardRunRequest -from .types.tensorboard_service import UpdateTensorboardTimeSeriesRequest -from .types.tensorboard_service import WriteTensorboardRunDataRequest -from .types.tensorboard_service import WriteTensorboardRunDataResponse -from .types.tensorboard_time_series import TensorboardTimeSeries -from .types.training_pipeline import FilterSplit -from .types.training_pipeline import FractionSplit -from .types.training_pipeline import InputDataConfig -from 
.types.training_pipeline import PredefinedSplit -from .types.training_pipeline import TimestampSplit -from .types.training_pipeline import TrainingPipeline -from .types.types import BoolArray -from .types.types import DoubleArray -from .types.types import Int64Array -from .types.types import StringArray -from .types.user_action_reference import UserActionReference -from .types.value import Value -from .types.vizier_service import AddTrialMeasurementRequest -from .types.vizier_service import CheckTrialEarlyStoppingStateMetatdata -from .types.vizier_service import CheckTrialEarlyStoppingStateRequest -from .types.vizier_service import CheckTrialEarlyStoppingStateResponse -from .types.vizier_service import CompleteTrialRequest -from .types.vizier_service import CreateStudyRequest -from .types.vizier_service import CreateTrialRequest -from .types.vizier_service import DeleteStudyRequest -from .types.vizier_service import DeleteTrialRequest -from .types.vizier_service import GetStudyRequest -from .types.vizier_service import GetTrialRequest -from .types.vizier_service import ListOptimalTrialsRequest -from .types.vizier_service import ListOptimalTrialsResponse -from .types.vizier_service import ListStudiesRequest -from .types.vizier_service import ListStudiesResponse -from .types.vizier_service import ListTrialsRequest -from .types.vizier_service import ListTrialsResponse -from .types.vizier_service import LookupStudyRequest -from .types.vizier_service import StopTrialRequest -from .types.vizier_service import SuggestTrialsMetadata -from .types.vizier_service import SuggestTrialsRequest -from .types.vizier_service import SuggestTrialsResponse - -__all__ = ( - 'DatasetServiceAsyncClient', - 'EndpointServiceAsyncClient', - 'FeaturestoreOnlineServingServiceAsyncClient', - 'FeaturestoreServiceAsyncClient', - 'IndexEndpointServiceAsyncClient', - 'IndexServiceAsyncClient', - 'JobServiceAsyncClient', - 'MetadataServiceAsyncClient', - 'MigrationServiceAsyncClient', - 'ModelServiceAsyncClient', - 'PipelineServiceAsyncClient', - 'PredictionServiceAsyncClient', - 'SpecialistPoolServiceAsyncClient', - 'TensorboardServiceAsyncClient', - 'VizierServiceAsyncClient', -'AcceleratorType', -'ActiveLearningConfig', -'AddContextArtifactsAndExecutionsRequest', -'AddContextArtifactsAndExecutionsResponse', -'AddContextChildrenRequest', -'AddContextChildrenResponse', -'AddExecutionEventsRequest', -'AddExecutionEventsResponse', -'AddTrialMeasurementRequest', -'Annotation', -'AnnotationSpec', -'Artifact', -'Attribution', -'AutomaticResources', -'AutoscalingMetricSpec', -'AvroSource', -'BatchCreateFeaturesOperationMetadata', -'BatchCreateFeaturesRequest', -'BatchCreateFeaturesResponse', -'BatchDedicatedResources', -'BatchMigrateResourcesOperationMetadata', -'BatchMigrateResourcesRequest', -'BatchMigrateResourcesResponse', -'BatchPredictionJob', -'BatchReadFeatureValuesOperationMetadata', -'BatchReadFeatureValuesRequest', -'BatchReadFeatureValuesResponse', -'BigQueryDestination', -'BigQuerySource', -'BoolArray', -'CancelBatchPredictionJobRequest', -'CancelCustomJobRequest', -'CancelDataLabelingJobRequest', -'CancelHyperparameterTuningJobRequest', -'CancelPipelineJobRequest', -'CancelTrainingPipelineRequest', -'CheckTrialEarlyStoppingStateMetatdata', -'CheckTrialEarlyStoppingStateRequest', -'CheckTrialEarlyStoppingStateResponse', -'CompleteTrialRequest', -'CompletionStats', -'ContainerRegistryDestination', -'ContainerSpec', -'Context', -'CreateArtifactRequest', -'CreateBatchPredictionJobRequest', -'CreateContextRequest', 
-'CreateCustomJobRequest', -'CreateDataLabelingJobRequest', -'CreateDatasetOperationMetadata', -'CreateDatasetRequest', -'CreateEndpointOperationMetadata', -'CreateEndpointRequest', -'CreateEntityTypeOperationMetadata', -'CreateEntityTypeRequest', -'CreateExecutionRequest', -'CreateFeatureOperationMetadata', -'CreateFeatureRequest', -'CreateFeaturestoreOperationMetadata', -'CreateFeaturestoreRequest', -'CreateHyperparameterTuningJobRequest', -'CreateIndexEndpointOperationMetadata', -'CreateIndexEndpointRequest', -'CreateIndexOperationMetadata', -'CreateIndexRequest', -'CreateMetadataSchemaRequest', -'CreateMetadataStoreOperationMetadata', -'CreateMetadataStoreRequest', -'CreateModelDeploymentMonitoringJobRequest', -'CreatePipelineJobRequest', -'CreateSpecialistPoolOperationMetadata', -'CreateSpecialistPoolRequest', -'CreateStudyRequest', -'CreateTensorboardExperimentRequest', -'CreateTensorboardOperationMetadata', -'CreateTensorboardRequest', -'CreateTensorboardRunRequest', -'CreateTensorboardTimeSeriesRequest', -'CreateTrainingPipelineRequest', -'CreateTrialRequest', -'CsvDestination', -'CsvSource', -'CustomJob', -'CustomJobSpec', -'DataItem', -'DataLabelingJob', -'Dataset', -'DatasetServiceClient', -'DedicatedResources', -'DeleteBatchPredictionJobRequest', -'DeleteContextRequest', -'DeleteCustomJobRequest', -'DeleteDataLabelingJobRequest', -'DeleteDatasetRequest', -'DeleteEndpointRequest', -'DeleteEntityTypeRequest', -'DeleteFeatureRequest', -'DeleteFeaturestoreRequest', -'DeleteHyperparameterTuningJobRequest', -'DeleteIndexEndpointRequest', -'DeleteIndexRequest', -'DeleteMetadataStoreOperationMetadata', -'DeleteMetadataStoreRequest', -'DeleteModelDeploymentMonitoringJobRequest', -'DeleteModelRequest', -'DeleteOperationMetadata', -'DeletePipelineJobRequest', -'DeleteSpecialistPoolRequest', -'DeleteStudyRequest', -'DeleteTensorboardExperimentRequest', -'DeleteTensorboardRequest', -'DeleteTensorboardRunRequest', -'DeleteTensorboardTimeSeriesRequest', -'DeleteTrainingPipelineRequest', -'DeleteTrialRequest', -'DeployIndexOperationMetadata', -'DeployIndexRequest', -'DeployIndexResponse', -'DeployModelOperationMetadata', -'DeployModelRequest', -'DeployModelResponse', -'DeployedIndex', -'DeployedIndexAuthConfig', -'DeployedIndexRef', -'DeployedModel', -'DeployedModelRef', -'DestinationFeatureSetting', -'DiskSpec', -'DoubleArray', -'EncryptionSpec', -'Endpoint', -'EndpointServiceClient', -'EntityType', -'EnvVar', -'Event', -'Execution', -'ExplainRequest', -'ExplainResponse', -'Explanation', -'ExplanationMetadata', -'ExplanationMetadataOverride', -'ExplanationParameters', -'ExplanationSpec', -'ExplanationSpecOverride', -'ExportDataConfig', -'ExportDataOperationMetadata', -'ExportDataRequest', -'ExportDataResponse', -'ExportFeatureValuesOperationMetadata', -'ExportFeatureValuesRequest', -'ExportFeatureValuesResponse', -'ExportModelOperationMetadata', -'ExportModelRequest', -'ExportModelResponse', -'ExportTensorboardTimeSeriesDataRequest', -'ExportTensorboardTimeSeriesDataResponse', -'Feature', -'FeatureNoiseSigma', -'FeatureSelector', -'FeatureStatsAnomaly', -'FeatureValue', -'FeatureValueDestination', -'FeatureValueList', -'Featurestore', -'FeaturestoreMonitoringConfig', -'FeaturestoreOnlineServingServiceClient', -'FeaturestoreServiceClient', -'FilterSplit', -'FractionSplit', -'GcsDestination', -'GcsSource', -'GenericOperationMetadata', -'GetAnnotationSpecRequest', -'GetArtifactRequest', -'GetBatchPredictionJobRequest', -'GetContextRequest', -'GetCustomJobRequest', -'GetDataLabelingJobRequest', 
-'GetDatasetRequest', -'GetEndpointRequest', -'GetEntityTypeRequest', -'GetExecutionRequest', -'GetFeatureRequest', -'GetFeaturestoreRequest', -'GetHyperparameterTuningJobRequest', -'GetIndexEndpointRequest', -'GetIndexRequest', -'GetMetadataSchemaRequest', -'GetMetadataStoreRequest', -'GetModelDeploymentMonitoringJobRequest', -'GetModelEvaluationRequest', -'GetModelEvaluationSliceRequest', -'GetModelRequest', -'GetPipelineJobRequest', -'GetSpecialistPoolRequest', -'GetStudyRequest', -'GetTensorboardExperimentRequest', -'GetTensorboardRequest', -'GetTensorboardRunRequest', -'GetTensorboardTimeSeriesRequest', -'GetTrainingPipelineRequest', -'GetTrialRequest', -'HyperparameterTuningJob', -'IdMatcher', -'ImportDataConfig', -'ImportDataOperationMetadata', -'ImportDataRequest', -'ImportDataResponse', -'ImportFeatureValuesOperationMetadata', -'ImportFeatureValuesRequest', -'ImportFeatureValuesResponse', -'Index', -'IndexEndpoint', -'IndexEndpointServiceClient', -'IndexPrivateEndpoints', -'IndexServiceClient', -'InputDataConfig', -'Int64Array', -'IntegratedGradientsAttribution', -'JobServiceClient', -'JobState', -'LineageSubgraph', -'ListAnnotationsRequest', -'ListAnnotationsResponse', -'ListArtifactsRequest', -'ListArtifactsResponse', -'ListBatchPredictionJobsRequest', -'ListBatchPredictionJobsResponse', -'ListContextsRequest', -'ListContextsResponse', -'ListCustomJobsRequest', -'ListCustomJobsResponse', -'ListDataItemsRequest', -'ListDataItemsResponse', -'ListDataLabelingJobsRequest', -'ListDataLabelingJobsResponse', -'ListDatasetsRequest', -'ListDatasetsResponse', -'ListEndpointsRequest', -'ListEndpointsResponse', -'ListEntityTypesRequest', -'ListEntityTypesResponse', -'ListExecutionsRequest', -'ListExecutionsResponse', -'ListFeaturesRequest', -'ListFeaturesResponse', -'ListFeaturestoresRequest', -'ListFeaturestoresResponse', -'ListHyperparameterTuningJobsRequest', -'ListHyperparameterTuningJobsResponse', -'ListIndexEndpointsRequest', -'ListIndexEndpointsResponse', -'ListIndexesRequest', -'ListIndexesResponse', -'ListMetadataSchemasRequest', -'ListMetadataSchemasResponse', -'ListMetadataStoresRequest', -'ListMetadataStoresResponse', -'ListModelDeploymentMonitoringJobsRequest', -'ListModelDeploymentMonitoringJobsResponse', -'ListModelEvaluationSlicesRequest', -'ListModelEvaluationSlicesResponse', -'ListModelEvaluationsRequest', -'ListModelEvaluationsResponse', -'ListModelsRequest', -'ListModelsResponse', -'ListOptimalTrialsRequest', -'ListOptimalTrialsResponse', -'ListPipelineJobsRequest', -'ListPipelineJobsResponse', -'ListSpecialistPoolsRequest', -'ListSpecialistPoolsResponse', -'ListStudiesRequest', -'ListStudiesResponse', -'ListTensorboardExperimentsRequest', -'ListTensorboardExperimentsResponse', -'ListTensorboardRunsRequest', -'ListTensorboardRunsResponse', -'ListTensorboardTimeSeriesRequest', -'ListTensorboardTimeSeriesResponse', -'ListTensorboardsRequest', -'ListTensorboardsResponse', -'ListTrainingPipelinesRequest', -'ListTrainingPipelinesResponse', -'ListTrialsRequest', -'ListTrialsResponse', -'LookupStudyRequest', -'MachineSpec', -'ManualBatchTuningParameters', -'Measurement', -'MetadataSchema', -'MetadataServiceClient', -'MetadataStore', -'MigratableResource', -'MigrateResourceRequest', -'MigrateResourceResponse', -'MigrationServiceClient', -'Model', -'ModelContainerSpec', -'ModelDeploymentMonitoringBigQueryTable', -'ModelDeploymentMonitoringJob', -'ModelDeploymentMonitoringObjectiveConfig', -'ModelDeploymentMonitoringObjectiveType', -'ModelDeploymentMonitoringScheduleConfig', 
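A few hunks below, this patch also deletes the staged gapic_metadata.json, whose own comment field says it maps proto services/RPCs to the corresponding library clients and methods. A sketch of resolving one RPC through that mapping, under the assumption that the file is read from a local checkout at the path staged in this PR:

    import json

    # gapic_metadata.json as staged under google/cloud/aiplatform_v1beta1/.
    with open("gapic_metadata.json") as f:
        metadata = json.load(f)

    # Look up the sync gRPC client method that implements
    # DatasetService.CreateDataset.
    rpcs = metadata["services"]["DatasetService"]["clients"]["grpc"]["rpcs"]
    print(rpcs["CreateDataset"]["methods"])  # -> ["create_dataset"]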
-'ModelEvaluation', -'ModelEvaluationSlice', -'ModelExplanation', -'ModelMonitoringAlertConfig', -'ModelMonitoringObjectiveConfig', -'ModelMonitoringStatsAnomalies', -'ModelServiceClient', -'NearestNeighborSearchOperationMetadata', -'PauseModelDeploymentMonitoringJobRequest', -'PipelineJob', -'PipelineJobDetail', -'PipelineServiceClient', -'PipelineState', -'PipelineTaskDetail', -'PipelineTaskExecutorDetail', -'Port', -'PredefinedSplit', -'PredictRequest', -'PredictResponse', -'PredictSchemata', -'PredictionServiceClient', -'PythonPackageSpec', -'QueryArtifactLineageSubgraphRequest', -'QueryContextLineageSubgraphRequest', -'QueryExecutionInputsAndOutputsRequest', -'ReadFeatureValuesRequest', -'ReadFeatureValuesResponse', -'ReadTensorboardBlobDataRequest', -'ReadTensorboardBlobDataResponse', -'ReadTensorboardTimeSeriesDataRequest', -'ReadTensorboardTimeSeriesDataResponse', -'ResourcesConsumed', -'ResumeModelDeploymentMonitoringJobRequest', -'SampleConfig', -'SampledShapleyAttribution', -'SamplingStrategy', -'Scalar', -'Scheduling', -'SearchFeaturesRequest', -'SearchFeaturesResponse', -'SearchMigratableResourcesRequest', -'SearchMigratableResourcesResponse', -'SearchModelDeploymentMonitoringStatsAnomaliesRequest', -'SearchModelDeploymentMonitoringStatsAnomaliesResponse', -'SmoothGradConfig', -'SpecialistPool', -'SpecialistPoolServiceClient', -'StopTrialRequest', -'StreamingReadFeatureValuesRequest', -'StringArray', -'Study', -'StudySpec', -'SuggestTrialsMetadata', -'SuggestTrialsRequest', -'SuggestTrialsResponse', -'TFRecordDestination', -'Tensorboard', -'TensorboardBlob', -'TensorboardBlobSequence', -'TensorboardExperiment', -'TensorboardRun', -'TensorboardServiceClient', -'TensorboardTensor', -'TensorboardTimeSeries', -'ThresholdConfig', -'TimeSeriesData', -'TimeSeriesDataPoint', -'TimestampSplit', -'TrainingConfig', -'TrainingPipeline', -'Trial', -'UndeployIndexOperationMetadata', -'UndeployIndexRequest', -'UndeployIndexResponse', -'UndeployModelOperationMetadata', -'UndeployModelRequest', -'UndeployModelResponse', -'UpdateArtifactRequest', -'UpdateContextRequest', -'UpdateDatasetRequest', -'UpdateEndpointRequest', -'UpdateEntityTypeRequest', -'UpdateExecutionRequest', -'UpdateFeatureRequest', -'UpdateFeaturestoreOperationMetadata', -'UpdateFeaturestoreRequest', -'UpdateIndexEndpointRequest', -'UpdateIndexOperationMetadata', -'UpdateIndexRequest', -'UpdateModelDeploymentMonitoringJobOperationMetadata', -'UpdateModelDeploymentMonitoringJobRequest', -'UpdateModelRequest', -'UpdateSpecialistPoolOperationMetadata', -'UpdateSpecialistPoolRequest', -'UpdateTensorboardExperimentRequest', -'UpdateTensorboardOperationMetadata', -'UpdateTensorboardRequest', -'UpdateTensorboardRunRequest', -'UpdateTensorboardTimeSeriesRequest', -'UploadModelOperationMetadata', -'UploadModelRequest', -'UploadModelResponse', -'UserActionReference', -'Value', -'VizierServiceClient', -'WorkerPoolSpec', -'WriteTensorboardRunDataRequest', -'WriteTensorboardRunDataResponse', -'XraiAttribution', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/gapic_metadata.json deleted file mode 100644 index 605e95582d..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/gapic_metadata.json +++ /dev/null @@ -1,1949 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.aiplatform_v1beta1", - "protoPackage": 
"google.cloud.aiplatform.v1beta1", - "schema": "1.0", - "services": { - "DatasetService": { - "clients": { - "grpc": { - "libraryClient": "DatasetServiceClient", - "rpcs": { - "CreateDataset": { - "methods": [ - "create_dataset" - ] - }, - "DeleteDataset": { - "methods": [ - "delete_dataset" - ] - }, - "ExportData": { - "methods": [ - "export_data" - ] - }, - "GetAnnotationSpec": { - "methods": [ - "get_annotation_spec" - ] - }, - "GetDataset": { - "methods": [ - "get_dataset" - ] - }, - "ImportData": { - "methods": [ - "import_data" - ] - }, - "ListAnnotations": { - "methods": [ - "list_annotations" - ] - }, - "ListDataItems": { - "methods": [ - "list_data_items" - ] - }, - "ListDatasets": { - "methods": [ - "list_datasets" - ] - }, - "UpdateDataset": { - "methods": [ - "update_dataset" - ] - } - } - }, - "grpc-async": { - "libraryClient": "DatasetServiceAsyncClient", - "rpcs": { - "CreateDataset": { - "methods": [ - "create_dataset" - ] - }, - "DeleteDataset": { - "methods": [ - "delete_dataset" - ] - }, - "ExportData": { - "methods": [ - "export_data" - ] - }, - "GetAnnotationSpec": { - "methods": [ - "get_annotation_spec" - ] - }, - "GetDataset": { - "methods": [ - "get_dataset" - ] - }, - "ImportData": { - "methods": [ - "import_data" - ] - }, - "ListAnnotations": { - "methods": [ - "list_annotations" - ] - }, - "ListDataItems": { - "methods": [ - "list_data_items" - ] - }, - "ListDatasets": { - "methods": [ - "list_datasets" - ] - }, - "UpdateDataset": { - "methods": [ - "update_dataset" - ] - } - } - } - } - }, - "EndpointService": { - "clients": { - "grpc": { - "libraryClient": "EndpointServiceClient", - "rpcs": { - "CreateEndpoint": { - "methods": [ - "create_endpoint" - ] - }, - "DeleteEndpoint": { - "methods": [ - "delete_endpoint" - ] - }, - "DeployModel": { - "methods": [ - "deploy_model" - ] - }, - "GetEndpoint": { - "methods": [ - "get_endpoint" - ] - }, - "ListEndpoints": { - "methods": [ - "list_endpoints" - ] - }, - "UndeployModel": { - "methods": [ - "undeploy_model" - ] - }, - "UpdateEndpoint": { - "methods": [ - "update_endpoint" - ] - } - } - }, - "grpc-async": { - "libraryClient": "EndpointServiceAsyncClient", - "rpcs": { - "CreateEndpoint": { - "methods": [ - "create_endpoint" - ] - }, - "DeleteEndpoint": { - "methods": [ - "delete_endpoint" - ] - }, - "DeployModel": { - "methods": [ - "deploy_model" - ] - }, - "GetEndpoint": { - "methods": [ - "get_endpoint" - ] - }, - "ListEndpoints": { - "methods": [ - "list_endpoints" - ] - }, - "UndeployModel": { - "methods": [ - "undeploy_model" - ] - }, - "UpdateEndpoint": { - "methods": [ - "update_endpoint" - ] - } - } - } - } - }, - "FeaturestoreOnlineServingService": { - "clients": { - "grpc": { - "libraryClient": "FeaturestoreOnlineServingServiceClient", - "rpcs": { - "ReadFeatureValues": { - "methods": [ - "read_feature_values" - ] - }, - "StreamingReadFeatureValues": { - "methods": [ - "streaming_read_feature_values" - ] - } - } - }, - "grpc-async": { - "libraryClient": "FeaturestoreOnlineServingServiceAsyncClient", - "rpcs": { - "ReadFeatureValues": { - "methods": [ - "read_feature_values" - ] - }, - "StreamingReadFeatureValues": { - "methods": [ - "streaming_read_feature_values" - ] - } - } - } - } - }, - "FeaturestoreService": { - "clients": { - "grpc": { - "libraryClient": "FeaturestoreServiceClient", - "rpcs": { - "BatchCreateFeatures": { - "methods": [ - "batch_create_features" - ] - }, - "BatchReadFeatureValues": { - "methods": [ - "batch_read_feature_values" - ] - }, - "CreateEntityType": { - "methods": [ - 
"create_entity_type" - ] - }, - "CreateFeature": { - "methods": [ - "create_feature" - ] - }, - "CreateFeaturestore": { - "methods": [ - "create_featurestore" - ] - }, - "DeleteEntityType": { - "methods": [ - "delete_entity_type" - ] - }, - "DeleteFeature": { - "methods": [ - "delete_feature" - ] - }, - "DeleteFeaturestore": { - "methods": [ - "delete_featurestore" - ] - }, - "ExportFeatureValues": { - "methods": [ - "export_feature_values" - ] - }, - "GetEntityType": { - "methods": [ - "get_entity_type" - ] - }, - "GetFeature": { - "methods": [ - "get_feature" - ] - }, - "GetFeaturestore": { - "methods": [ - "get_featurestore" - ] - }, - "ImportFeatureValues": { - "methods": [ - "import_feature_values" - ] - }, - "ListEntityTypes": { - "methods": [ - "list_entity_types" - ] - }, - "ListFeatures": { - "methods": [ - "list_features" - ] - }, - "ListFeaturestores": { - "methods": [ - "list_featurestores" - ] - }, - "SearchFeatures": { - "methods": [ - "search_features" - ] - }, - "UpdateEntityType": { - "methods": [ - "update_entity_type" - ] - }, - "UpdateFeature": { - "methods": [ - "update_feature" - ] - }, - "UpdateFeaturestore": { - "methods": [ - "update_featurestore" - ] - } - } - }, - "grpc-async": { - "libraryClient": "FeaturestoreServiceAsyncClient", - "rpcs": { - "BatchCreateFeatures": { - "methods": [ - "batch_create_features" - ] - }, - "BatchReadFeatureValues": { - "methods": [ - "batch_read_feature_values" - ] - }, - "CreateEntityType": { - "methods": [ - "create_entity_type" - ] - }, - "CreateFeature": { - "methods": [ - "create_feature" - ] - }, - "CreateFeaturestore": { - "methods": [ - "create_featurestore" - ] - }, - "DeleteEntityType": { - "methods": [ - "delete_entity_type" - ] - }, - "DeleteFeature": { - "methods": [ - "delete_feature" - ] - }, - "DeleteFeaturestore": { - "methods": [ - "delete_featurestore" - ] - }, - "ExportFeatureValues": { - "methods": [ - "export_feature_values" - ] - }, - "GetEntityType": { - "methods": [ - "get_entity_type" - ] - }, - "GetFeature": { - "methods": [ - "get_feature" - ] - }, - "GetFeaturestore": { - "methods": [ - "get_featurestore" - ] - }, - "ImportFeatureValues": { - "methods": [ - "import_feature_values" - ] - }, - "ListEntityTypes": { - "methods": [ - "list_entity_types" - ] - }, - "ListFeatures": { - "methods": [ - "list_features" - ] - }, - "ListFeaturestores": { - "methods": [ - "list_featurestores" - ] - }, - "SearchFeatures": { - "methods": [ - "search_features" - ] - }, - "UpdateEntityType": { - "methods": [ - "update_entity_type" - ] - }, - "UpdateFeature": { - "methods": [ - "update_feature" - ] - }, - "UpdateFeaturestore": { - "methods": [ - "update_featurestore" - ] - } - } - } - } - }, - "IndexEndpointService": { - "clients": { - "grpc": { - "libraryClient": "IndexEndpointServiceClient", - "rpcs": { - "CreateIndexEndpoint": { - "methods": [ - "create_index_endpoint" - ] - }, - "DeleteIndexEndpoint": { - "methods": [ - "delete_index_endpoint" - ] - }, - "DeployIndex": { - "methods": [ - "deploy_index" - ] - }, - "GetIndexEndpoint": { - "methods": [ - "get_index_endpoint" - ] - }, - "ListIndexEndpoints": { - "methods": [ - "list_index_endpoints" - ] - }, - "UndeployIndex": { - "methods": [ - "undeploy_index" - ] - }, - "UpdateIndexEndpoint": { - "methods": [ - "update_index_endpoint" - ] - } - } - }, - "grpc-async": { - "libraryClient": "IndexEndpointServiceAsyncClient", - "rpcs": { - "CreateIndexEndpoint": { - "methods": [ - "create_index_endpoint" - ] - }, - "DeleteIndexEndpoint": { - "methods": [ - 
"delete_index_endpoint" - ] - }, - "DeployIndex": { - "methods": [ - "deploy_index" - ] - }, - "GetIndexEndpoint": { - "methods": [ - "get_index_endpoint" - ] - }, - "ListIndexEndpoints": { - "methods": [ - "list_index_endpoints" - ] - }, - "UndeployIndex": { - "methods": [ - "undeploy_index" - ] - }, - "UpdateIndexEndpoint": { - "methods": [ - "update_index_endpoint" - ] - } - } - } - } - }, - "IndexService": { - "clients": { - "grpc": { - "libraryClient": "IndexServiceClient", - "rpcs": { - "CreateIndex": { - "methods": [ - "create_index" - ] - }, - "DeleteIndex": { - "methods": [ - "delete_index" - ] - }, - "GetIndex": { - "methods": [ - "get_index" - ] - }, - "ListIndexes": { - "methods": [ - "list_indexes" - ] - }, - "UpdateIndex": { - "methods": [ - "update_index" - ] - } - } - }, - "grpc-async": { - "libraryClient": "IndexServiceAsyncClient", - "rpcs": { - "CreateIndex": { - "methods": [ - "create_index" - ] - }, - "DeleteIndex": { - "methods": [ - "delete_index" - ] - }, - "GetIndex": { - "methods": [ - "get_index" - ] - }, - "ListIndexes": { - "methods": [ - "list_indexes" - ] - }, - "UpdateIndex": { - "methods": [ - "update_index" - ] - } - } - } - } - }, - "JobService": { - "clients": { - "grpc": { - "libraryClient": "JobServiceClient", - "rpcs": { - "CancelBatchPredictionJob": { - "methods": [ - "cancel_batch_prediction_job" - ] - }, - "CancelCustomJob": { - "methods": [ - "cancel_custom_job" - ] - }, - "CancelDataLabelingJob": { - "methods": [ - "cancel_data_labeling_job" - ] - }, - "CancelHyperparameterTuningJob": { - "methods": [ - "cancel_hyperparameter_tuning_job" - ] - }, - "CreateBatchPredictionJob": { - "methods": [ - "create_batch_prediction_job" - ] - }, - "CreateCustomJob": { - "methods": [ - "create_custom_job" - ] - }, - "CreateDataLabelingJob": { - "methods": [ - "create_data_labeling_job" - ] - }, - "CreateHyperparameterTuningJob": { - "methods": [ - "create_hyperparameter_tuning_job" - ] - }, - "CreateModelDeploymentMonitoringJob": { - "methods": [ - "create_model_deployment_monitoring_job" - ] - }, - "DeleteBatchPredictionJob": { - "methods": [ - "delete_batch_prediction_job" - ] - }, - "DeleteCustomJob": { - "methods": [ - "delete_custom_job" - ] - }, - "DeleteDataLabelingJob": { - "methods": [ - "delete_data_labeling_job" - ] - }, - "DeleteHyperparameterTuningJob": { - "methods": [ - "delete_hyperparameter_tuning_job" - ] - }, - "DeleteModelDeploymentMonitoringJob": { - "methods": [ - "delete_model_deployment_monitoring_job" - ] - }, - "GetBatchPredictionJob": { - "methods": [ - "get_batch_prediction_job" - ] - }, - "GetCustomJob": { - "methods": [ - "get_custom_job" - ] - }, - "GetDataLabelingJob": { - "methods": [ - "get_data_labeling_job" - ] - }, - "GetHyperparameterTuningJob": { - "methods": [ - "get_hyperparameter_tuning_job" - ] - }, - "GetModelDeploymentMonitoringJob": { - "methods": [ - "get_model_deployment_monitoring_job" - ] - }, - "ListBatchPredictionJobs": { - "methods": [ - "list_batch_prediction_jobs" - ] - }, - "ListCustomJobs": { - "methods": [ - "list_custom_jobs" - ] - }, - "ListDataLabelingJobs": { - "methods": [ - "list_data_labeling_jobs" - ] - }, - "ListHyperparameterTuningJobs": { - "methods": [ - "list_hyperparameter_tuning_jobs" - ] - }, - "ListModelDeploymentMonitoringJobs": { - "methods": [ - "list_model_deployment_monitoring_jobs" - ] - }, - "PauseModelDeploymentMonitoringJob": { - "methods": [ - "pause_model_deployment_monitoring_job" - ] - }, - "ResumeModelDeploymentMonitoringJob": { - "methods": [ - 
"resume_model_deployment_monitoring_job" - ] - }, - "SearchModelDeploymentMonitoringStatsAnomalies": { - "methods": [ - "search_model_deployment_monitoring_stats_anomalies" - ] - }, - "UpdateModelDeploymentMonitoringJob": { - "methods": [ - "update_model_deployment_monitoring_job" - ] - } - } - }, - "grpc-async": { - "libraryClient": "JobServiceAsyncClient", - "rpcs": { - "CancelBatchPredictionJob": { - "methods": [ - "cancel_batch_prediction_job" - ] - }, - "CancelCustomJob": { - "methods": [ - "cancel_custom_job" - ] - }, - "CancelDataLabelingJob": { - "methods": [ - "cancel_data_labeling_job" - ] - }, - "CancelHyperparameterTuningJob": { - "methods": [ - "cancel_hyperparameter_tuning_job" - ] - }, - "CreateBatchPredictionJob": { - "methods": [ - "create_batch_prediction_job" - ] - }, - "CreateCustomJob": { - "methods": [ - "create_custom_job" - ] - }, - "CreateDataLabelingJob": { - "methods": [ - "create_data_labeling_job" - ] - }, - "CreateHyperparameterTuningJob": { - "methods": [ - "create_hyperparameter_tuning_job" - ] - }, - "CreateModelDeploymentMonitoringJob": { - "methods": [ - "create_model_deployment_monitoring_job" - ] - }, - "DeleteBatchPredictionJob": { - "methods": [ - "delete_batch_prediction_job" - ] - }, - "DeleteCustomJob": { - "methods": [ - "delete_custom_job" - ] - }, - "DeleteDataLabelingJob": { - "methods": [ - "delete_data_labeling_job" - ] - }, - "DeleteHyperparameterTuningJob": { - "methods": [ - "delete_hyperparameter_tuning_job" - ] - }, - "DeleteModelDeploymentMonitoringJob": { - "methods": [ - "delete_model_deployment_monitoring_job" - ] - }, - "GetBatchPredictionJob": { - "methods": [ - "get_batch_prediction_job" - ] - }, - "GetCustomJob": { - "methods": [ - "get_custom_job" - ] - }, - "GetDataLabelingJob": { - "methods": [ - "get_data_labeling_job" - ] - }, - "GetHyperparameterTuningJob": { - "methods": [ - "get_hyperparameter_tuning_job" - ] - }, - "GetModelDeploymentMonitoringJob": { - "methods": [ - "get_model_deployment_monitoring_job" - ] - }, - "ListBatchPredictionJobs": { - "methods": [ - "list_batch_prediction_jobs" - ] - }, - "ListCustomJobs": { - "methods": [ - "list_custom_jobs" - ] - }, - "ListDataLabelingJobs": { - "methods": [ - "list_data_labeling_jobs" - ] - }, - "ListHyperparameterTuningJobs": { - "methods": [ - "list_hyperparameter_tuning_jobs" - ] - }, - "ListModelDeploymentMonitoringJobs": { - "methods": [ - "list_model_deployment_monitoring_jobs" - ] - }, - "PauseModelDeploymentMonitoringJob": { - "methods": [ - "pause_model_deployment_monitoring_job" - ] - }, - "ResumeModelDeploymentMonitoringJob": { - "methods": [ - "resume_model_deployment_monitoring_job" - ] - }, - "SearchModelDeploymentMonitoringStatsAnomalies": { - "methods": [ - "search_model_deployment_monitoring_stats_anomalies" - ] - }, - "UpdateModelDeploymentMonitoringJob": { - "methods": [ - "update_model_deployment_monitoring_job" - ] - } - } - } - } - }, - "MetadataService": { - "clients": { - "grpc": { - "libraryClient": "MetadataServiceClient", - "rpcs": { - "AddContextArtifactsAndExecutions": { - "methods": [ - "add_context_artifacts_and_executions" - ] - }, - "AddContextChildren": { - "methods": [ - "add_context_children" - ] - }, - "AddExecutionEvents": { - "methods": [ - "add_execution_events" - ] - }, - "CreateArtifact": { - "methods": [ - "create_artifact" - ] - }, - "CreateContext": { - "methods": [ - "create_context" - ] - }, - "CreateExecution": { - "methods": [ - "create_execution" - ] - }, - "CreateMetadataSchema": { - "methods": [ - 
"create_metadata_schema" - ] - }, - "CreateMetadataStore": { - "methods": [ - "create_metadata_store" - ] - }, - "DeleteContext": { - "methods": [ - "delete_context" - ] - }, - "DeleteMetadataStore": { - "methods": [ - "delete_metadata_store" - ] - }, - "GetArtifact": { - "methods": [ - "get_artifact" - ] - }, - "GetContext": { - "methods": [ - "get_context" - ] - }, - "GetExecution": { - "methods": [ - "get_execution" - ] - }, - "GetMetadataSchema": { - "methods": [ - "get_metadata_schema" - ] - }, - "GetMetadataStore": { - "methods": [ - "get_metadata_store" - ] - }, - "ListArtifacts": { - "methods": [ - "list_artifacts" - ] - }, - "ListContexts": { - "methods": [ - "list_contexts" - ] - }, - "ListExecutions": { - "methods": [ - "list_executions" - ] - }, - "ListMetadataSchemas": { - "methods": [ - "list_metadata_schemas" - ] - }, - "ListMetadataStores": { - "methods": [ - "list_metadata_stores" - ] - }, - "QueryArtifactLineageSubgraph": { - "methods": [ - "query_artifact_lineage_subgraph" - ] - }, - "QueryContextLineageSubgraph": { - "methods": [ - "query_context_lineage_subgraph" - ] - }, - "QueryExecutionInputsAndOutputs": { - "methods": [ - "query_execution_inputs_and_outputs" - ] - }, - "UpdateArtifact": { - "methods": [ - "update_artifact" - ] - }, - "UpdateContext": { - "methods": [ - "update_context" - ] - }, - "UpdateExecution": { - "methods": [ - "update_execution" - ] - } - } - }, - "grpc-async": { - "libraryClient": "MetadataServiceAsyncClient", - "rpcs": { - "AddContextArtifactsAndExecutions": { - "methods": [ - "add_context_artifacts_and_executions" - ] - }, - "AddContextChildren": { - "methods": [ - "add_context_children" - ] - }, - "AddExecutionEvents": { - "methods": [ - "add_execution_events" - ] - }, - "CreateArtifact": { - "methods": [ - "create_artifact" - ] - }, - "CreateContext": { - "methods": [ - "create_context" - ] - }, - "CreateExecution": { - "methods": [ - "create_execution" - ] - }, - "CreateMetadataSchema": { - "methods": [ - "create_metadata_schema" - ] - }, - "CreateMetadataStore": { - "methods": [ - "create_metadata_store" - ] - }, - "DeleteContext": { - "methods": [ - "delete_context" - ] - }, - "DeleteMetadataStore": { - "methods": [ - "delete_metadata_store" - ] - }, - "GetArtifact": { - "methods": [ - "get_artifact" - ] - }, - "GetContext": { - "methods": [ - "get_context" - ] - }, - "GetExecution": { - "methods": [ - "get_execution" - ] - }, - "GetMetadataSchema": { - "methods": [ - "get_metadata_schema" - ] - }, - "GetMetadataStore": { - "methods": [ - "get_metadata_store" - ] - }, - "ListArtifacts": { - "methods": [ - "list_artifacts" - ] - }, - "ListContexts": { - "methods": [ - "list_contexts" - ] - }, - "ListExecutions": { - "methods": [ - "list_executions" - ] - }, - "ListMetadataSchemas": { - "methods": [ - "list_metadata_schemas" - ] - }, - "ListMetadataStores": { - "methods": [ - "list_metadata_stores" - ] - }, - "QueryArtifactLineageSubgraph": { - "methods": [ - "query_artifact_lineage_subgraph" - ] - }, - "QueryContextLineageSubgraph": { - "methods": [ - "query_context_lineage_subgraph" - ] - }, - "QueryExecutionInputsAndOutputs": { - "methods": [ - "query_execution_inputs_and_outputs" - ] - }, - "UpdateArtifact": { - "methods": [ - "update_artifact" - ] - }, - "UpdateContext": { - "methods": [ - "update_context" - ] - }, - "UpdateExecution": { - "methods": [ - "update_execution" - ] - } - } - } - } - }, - "MigrationService": { - "clients": { - "grpc": { - "libraryClient": "MigrationServiceClient", - "rpcs": { - "BatchMigrateResources": 
{ - "methods": [ - "batch_migrate_resources" - ] - }, - "SearchMigratableResources": { - "methods": [ - "search_migratable_resources" - ] - } - } - }, - "grpc-async": { - "libraryClient": "MigrationServiceAsyncClient", - "rpcs": { - "BatchMigrateResources": { - "methods": [ - "batch_migrate_resources" - ] - }, - "SearchMigratableResources": { - "methods": [ - "search_migratable_resources" - ] - } - } - } - } - }, - "ModelService": { - "clients": { - "grpc": { - "libraryClient": "ModelServiceClient", - "rpcs": { - "DeleteModel": { - "methods": [ - "delete_model" - ] - }, - "ExportModel": { - "methods": [ - "export_model" - ] - }, - "GetModel": { - "methods": [ - "get_model" - ] - }, - "GetModelEvaluation": { - "methods": [ - "get_model_evaluation" - ] - }, - "GetModelEvaluationSlice": { - "methods": [ - "get_model_evaluation_slice" - ] - }, - "ListModelEvaluationSlices": { - "methods": [ - "list_model_evaluation_slices" - ] - }, - "ListModelEvaluations": { - "methods": [ - "list_model_evaluations" - ] - }, - "ListModels": { - "methods": [ - "list_models" - ] - }, - "UpdateModel": { - "methods": [ - "update_model" - ] - }, - "UploadModel": { - "methods": [ - "upload_model" - ] - } - } - }, - "grpc-async": { - "libraryClient": "ModelServiceAsyncClient", - "rpcs": { - "DeleteModel": { - "methods": [ - "delete_model" - ] - }, - "ExportModel": { - "methods": [ - "export_model" - ] - }, - "GetModel": { - "methods": [ - "get_model" - ] - }, - "GetModelEvaluation": { - "methods": [ - "get_model_evaluation" - ] - }, - "GetModelEvaluationSlice": { - "methods": [ - "get_model_evaluation_slice" - ] - }, - "ListModelEvaluationSlices": { - "methods": [ - "list_model_evaluation_slices" - ] - }, - "ListModelEvaluations": { - "methods": [ - "list_model_evaluations" - ] - }, - "ListModels": { - "methods": [ - "list_models" - ] - }, - "UpdateModel": { - "methods": [ - "update_model" - ] - }, - "UploadModel": { - "methods": [ - "upload_model" - ] - } - } - } - } - }, - "PipelineService": { - "clients": { - "grpc": { - "libraryClient": "PipelineServiceClient", - "rpcs": { - "CancelPipelineJob": { - "methods": [ - "cancel_pipeline_job" - ] - }, - "CancelTrainingPipeline": { - "methods": [ - "cancel_training_pipeline" - ] - }, - "CreatePipelineJob": { - "methods": [ - "create_pipeline_job" - ] - }, - "CreateTrainingPipeline": { - "methods": [ - "create_training_pipeline" - ] - }, - "DeletePipelineJob": { - "methods": [ - "delete_pipeline_job" - ] - }, - "DeleteTrainingPipeline": { - "methods": [ - "delete_training_pipeline" - ] - }, - "GetPipelineJob": { - "methods": [ - "get_pipeline_job" - ] - }, - "GetTrainingPipeline": { - "methods": [ - "get_training_pipeline" - ] - }, - "ListPipelineJobs": { - "methods": [ - "list_pipeline_jobs" - ] - }, - "ListTrainingPipelines": { - "methods": [ - "list_training_pipelines" - ] - } - } - }, - "grpc-async": { - "libraryClient": "PipelineServiceAsyncClient", - "rpcs": { - "CancelPipelineJob": { - "methods": [ - "cancel_pipeline_job" - ] - }, - "CancelTrainingPipeline": { - "methods": [ - "cancel_training_pipeline" - ] - }, - "CreatePipelineJob": { - "methods": [ - "create_pipeline_job" - ] - }, - "CreateTrainingPipeline": { - "methods": [ - "create_training_pipeline" - ] - }, - "DeletePipelineJob": { - "methods": [ - "delete_pipeline_job" - ] - }, - "DeleteTrainingPipeline": { - "methods": [ - "delete_training_pipeline" - ] - }, - "GetPipelineJob": { - "methods": [ - "get_pipeline_job" - ] - }, - "GetTrainingPipeline": { - "methods": [ - "get_training_pipeline" - ] - }, - 
"ListPipelineJobs": { - "methods": [ - "list_pipeline_jobs" - ] - }, - "ListTrainingPipelines": { - "methods": [ - "list_training_pipelines" - ] - } - } - } - } - }, - "PredictionService": { - "clients": { - "grpc": { - "libraryClient": "PredictionServiceClient", - "rpcs": { - "Explain": { - "methods": [ - "explain" - ] - }, - "Predict": { - "methods": [ - "predict" - ] - } - } - }, - "grpc-async": { - "libraryClient": "PredictionServiceAsyncClient", - "rpcs": { - "Explain": { - "methods": [ - "explain" - ] - }, - "Predict": { - "methods": [ - "predict" - ] - } - } - } - } - }, - "SpecialistPoolService": { - "clients": { - "grpc": { - "libraryClient": "SpecialistPoolServiceClient", - "rpcs": { - "CreateSpecialistPool": { - "methods": [ - "create_specialist_pool" - ] - }, - "DeleteSpecialistPool": { - "methods": [ - "delete_specialist_pool" - ] - }, - "GetSpecialistPool": { - "methods": [ - "get_specialist_pool" - ] - }, - "ListSpecialistPools": { - "methods": [ - "list_specialist_pools" - ] - }, - "UpdateSpecialistPool": { - "methods": [ - "update_specialist_pool" - ] - } - } - }, - "grpc-async": { - "libraryClient": "SpecialistPoolServiceAsyncClient", - "rpcs": { - "CreateSpecialistPool": { - "methods": [ - "create_specialist_pool" - ] - }, - "DeleteSpecialistPool": { - "methods": [ - "delete_specialist_pool" - ] - }, - "GetSpecialistPool": { - "methods": [ - "get_specialist_pool" - ] - }, - "ListSpecialistPools": { - "methods": [ - "list_specialist_pools" - ] - }, - "UpdateSpecialistPool": { - "methods": [ - "update_specialist_pool" - ] - } - } - } - } - }, - "TensorboardService": { - "clients": { - "grpc": { - "libraryClient": "TensorboardServiceClient", - "rpcs": { - "CreateTensorboard": { - "methods": [ - "create_tensorboard" - ] - }, - "CreateTensorboardExperiment": { - "methods": [ - "create_tensorboard_experiment" - ] - }, - "CreateTensorboardRun": { - "methods": [ - "create_tensorboard_run" - ] - }, - "CreateTensorboardTimeSeries": { - "methods": [ - "create_tensorboard_time_series" - ] - }, - "DeleteTensorboard": { - "methods": [ - "delete_tensorboard" - ] - }, - "DeleteTensorboardExperiment": { - "methods": [ - "delete_tensorboard_experiment" - ] - }, - "DeleteTensorboardRun": { - "methods": [ - "delete_tensorboard_run" - ] - }, - "DeleteTensorboardTimeSeries": { - "methods": [ - "delete_tensorboard_time_series" - ] - }, - "ExportTensorboardTimeSeriesData": { - "methods": [ - "export_tensorboard_time_series_data" - ] - }, - "GetTensorboard": { - "methods": [ - "get_tensorboard" - ] - }, - "GetTensorboardExperiment": { - "methods": [ - "get_tensorboard_experiment" - ] - }, - "GetTensorboardRun": { - "methods": [ - "get_tensorboard_run" - ] - }, - "GetTensorboardTimeSeries": { - "methods": [ - "get_tensorboard_time_series" - ] - }, - "ListTensorboardExperiments": { - "methods": [ - "list_tensorboard_experiments" - ] - }, - "ListTensorboardRuns": { - "methods": [ - "list_tensorboard_runs" - ] - }, - "ListTensorboardTimeSeries": { - "methods": [ - "list_tensorboard_time_series" - ] - }, - "ListTensorboards": { - "methods": [ - "list_tensorboards" - ] - }, - "ReadTensorboardBlobData": { - "methods": [ - "read_tensorboard_blob_data" - ] - }, - "ReadTensorboardTimeSeriesData": { - "methods": [ - "read_tensorboard_time_series_data" - ] - }, - "UpdateTensorboard": { - "methods": [ - "update_tensorboard" - ] - }, - "UpdateTensorboardExperiment": { - "methods": [ - "update_tensorboard_experiment" - ] - }, - "UpdateTensorboardRun": { - "methods": [ - "update_tensorboard_run" - ] - }, - 
"UpdateTensorboardTimeSeries": { - "methods": [ - "update_tensorboard_time_series" - ] - }, - "WriteTensorboardRunData": { - "methods": [ - "write_tensorboard_run_data" - ] - } - } - }, - "grpc-async": { - "libraryClient": "TensorboardServiceAsyncClient", - "rpcs": { - "CreateTensorboard": { - "methods": [ - "create_tensorboard" - ] - }, - "CreateTensorboardExperiment": { - "methods": [ - "create_tensorboard_experiment" - ] - }, - "CreateTensorboardRun": { - "methods": [ - "create_tensorboard_run" - ] - }, - "CreateTensorboardTimeSeries": { - "methods": [ - "create_tensorboard_time_series" - ] - }, - "DeleteTensorboard": { - "methods": [ - "delete_tensorboard" - ] - }, - "DeleteTensorboardExperiment": { - "methods": [ - "delete_tensorboard_experiment" - ] - }, - "DeleteTensorboardRun": { - "methods": [ - "delete_tensorboard_run" - ] - }, - "DeleteTensorboardTimeSeries": { - "methods": [ - "delete_tensorboard_time_series" - ] - }, - "ExportTensorboardTimeSeriesData": { - "methods": [ - "export_tensorboard_time_series_data" - ] - }, - "GetTensorboard": { - "methods": [ - "get_tensorboard" - ] - }, - "GetTensorboardExperiment": { - "methods": [ - "get_tensorboard_experiment" - ] - }, - "GetTensorboardRun": { - "methods": [ - "get_tensorboard_run" - ] - }, - "GetTensorboardTimeSeries": { - "methods": [ - "get_tensorboard_time_series" - ] - }, - "ListTensorboardExperiments": { - "methods": [ - "list_tensorboard_experiments" - ] - }, - "ListTensorboardRuns": { - "methods": [ - "list_tensorboard_runs" - ] - }, - "ListTensorboardTimeSeries": { - "methods": [ - "list_tensorboard_time_series" - ] - }, - "ListTensorboards": { - "methods": [ - "list_tensorboards" - ] - }, - "ReadTensorboardBlobData": { - "methods": [ - "read_tensorboard_blob_data" - ] - }, - "ReadTensorboardTimeSeriesData": { - "methods": [ - "read_tensorboard_time_series_data" - ] - }, - "UpdateTensorboard": { - "methods": [ - "update_tensorboard" - ] - }, - "UpdateTensorboardExperiment": { - "methods": [ - "update_tensorboard_experiment" - ] - }, - "UpdateTensorboardRun": { - "methods": [ - "update_tensorboard_run" - ] - }, - "UpdateTensorboardTimeSeries": { - "methods": [ - "update_tensorboard_time_series" - ] - }, - "WriteTensorboardRunData": { - "methods": [ - "write_tensorboard_run_data" - ] - } - } - } - } - }, - "VizierService": { - "clients": { - "grpc": { - "libraryClient": "VizierServiceClient", - "rpcs": { - "AddTrialMeasurement": { - "methods": [ - "add_trial_measurement" - ] - }, - "CheckTrialEarlyStoppingState": { - "methods": [ - "check_trial_early_stopping_state" - ] - }, - "CompleteTrial": { - "methods": [ - "complete_trial" - ] - }, - "CreateStudy": { - "methods": [ - "create_study" - ] - }, - "CreateTrial": { - "methods": [ - "create_trial" - ] - }, - "DeleteStudy": { - "methods": [ - "delete_study" - ] - }, - "DeleteTrial": { - "methods": [ - "delete_trial" - ] - }, - "GetStudy": { - "methods": [ - "get_study" - ] - }, - "GetTrial": { - "methods": [ - "get_trial" - ] - }, - "ListOptimalTrials": { - "methods": [ - "list_optimal_trials" - ] - }, - "ListStudies": { - "methods": [ - "list_studies" - ] - }, - "ListTrials": { - "methods": [ - "list_trials" - ] - }, - "LookupStudy": { - "methods": [ - "lookup_study" - ] - }, - "StopTrial": { - "methods": [ - "stop_trial" - ] - }, - "SuggestTrials": { - "methods": [ - "suggest_trials" - ] - } - } - }, - "grpc-async": { - "libraryClient": "VizierServiceAsyncClient", - "rpcs": { - "AddTrialMeasurement": { - "methods": [ - "add_trial_measurement" - ] - }, - 
"CheckTrialEarlyStoppingState": { - "methods": [ - "check_trial_early_stopping_state" - ] - }, - "CompleteTrial": { - "methods": [ - "complete_trial" - ] - }, - "CreateStudy": { - "methods": [ - "create_study" - ] - }, - "CreateTrial": { - "methods": [ - "create_trial" - ] - }, - "DeleteStudy": { - "methods": [ - "delete_study" - ] - }, - "DeleteTrial": { - "methods": [ - "delete_trial" - ] - }, - "GetStudy": { - "methods": [ - "get_study" - ] - }, - "GetTrial": { - "methods": [ - "get_trial" - ] - }, - "ListOptimalTrials": { - "methods": [ - "list_optimal_trials" - ] - }, - "ListStudies": { - "methods": [ - "list_studies" - ] - }, - "ListTrials": { - "methods": [ - "list_trials" - ] - }, - "LookupStudy": { - "methods": [ - "lookup_study" - ] - }, - "StopTrial": { - "methods": [ - "stop_trial" - ] - }, - "SuggestTrials": { - "methods": [ - "suggest_trials" - ] - } - } - } - } - } - } -} diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/py.typed deleted file mode 100644 index 228f1c51c6..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/__init__.py deleted file mode 100644 index 4de65971c2..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/__init__.py deleted file mode 100644 index 44e8fb2115..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import DatasetServiceClient -from .async_client import DatasetServiceAsyncClient - -__all__ = ( - 'DatasetServiceClient', - 'DatasetServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py deleted file mode 100644 index 8255259240..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py +++ /dev/null @@ -1,1074 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.dataset_service import pagers -from google.cloud.aiplatform_v1beta1.types import annotation -from google.cloud.aiplatform_v1beta1.types import annotation_spec -from google.cloud.aiplatform_v1beta1.types import data_item -from google.cloud.aiplatform_v1beta1.types import dataset -from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset -from google.cloud.aiplatform_v1beta1.types import dataset_service -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import DatasetServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import DatasetServiceGrpcAsyncIOTransport -from .client import DatasetServiceClient - - -class DatasetServiceAsyncClient: - """""" - - _client: DatasetServiceClient - - DEFAULT_ENDPOINT = DatasetServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = DatasetServiceClient.DEFAULT_MTLS_ENDPOINT - - annotation_path = staticmethod(DatasetServiceClient.annotation_path) - parse_annotation_path = staticmethod(DatasetServiceClient.parse_annotation_path) - annotation_spec_path = staticmethod(DatasetServiceClient.annotation_spec_path) - parse_annotation_spec_path = staticmethod(DatasetServiceClient.parse_annotation_spec_path) - data_item_path = staticmethod(DatasetServiceClient.data_item_path) - parse_data_item_path = 
staticmethod(DatasetServiceClient.parse_data_item_path) - dataset_path = staticmethod(DatasetServiceClient.dataset_path) - parse_dataset_path = staticmethod(DatasetServiceClient.parse_dataset_path) - common_billing_account_path = staticmethod(DatasetServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(DatasetServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(DatasetServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(DatasetServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(DatasetServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(DatasetServiceClient.parse_common_organization_path) - common_project_path = staticmethod(DatasetServiceClient.common_project_path) - parse_common_project_path = staticmethod(DatasetServiceClient.parse_common_project_path) - common_location_path = staticmethod(DatasetServiceClient.common_location_path) - parse_common_location_path = staticmethod(DatasetServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - DatasetServiceAsyncClient: The constructed client. - """ - return DatasetServiceClient.from_service_account_info.__func__(DatasetServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - DatasetServiceAsyncClient: The constructed client. - """ - return DatasetServiceClient.from_service_account_file.__func__(DatasetServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> DatasetServiceTransport: - """Returns the transport used by the client instance. - - Returns: - DatasetServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(DatasetServiceClient).get_transport_class, type(DatasetServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, DatasetServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the dataset service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.DatasetServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. 
- (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = DatasetServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_dataset(self, - request: dataset_service.CreateDatasetRequest = None, - *, - parent: str = None, - dataset: gca_dataset.Dataset = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a Dataset. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.CreateDatasetRequest`): - The request object. Request message for - [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset]. - parent (:class:`str`): - Required. The resource name of the Location to create - the Dataset in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - dataset (:class:`google.cloud.aiplatform_v1beta1.types.Dataset`): - Required. The Dataset to create. - This corresponds to the ``dataset`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.Dataset` A - collection of DataItems and Annotations on them. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, dataset]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.CreateDatasetRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if dataset is not None: - request.dataset = dataset - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
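The has_flattened_params check above enforces the GAPIC calling convention: callers pass either a full request object or the flattened field arguments, never both. A brief sketch of the two valid call shapes, where the resource names are placeholders and `client` is assumed to be an existing DatasetServiceAsyncClient:

    from google.cloud.aiplatform_v1beta1.types import CreateDatasetRequest

    parent = "projects/my-project/locations/us-central1"

    # Either a complete request object...
    request = CreateDatasetRequest(parent=parent)
    # await client.create_dataset(request=request)

    # ...or the flattened fields individually:
    # await client.create_dataset(parent=parent, dataset=my_dataset)

    # Supplying both `request` and a flattened field raises ValueError,
    # per the sanity check above.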
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_dataset, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_dataset.Dataset, - metadata_type=dataset_service.CreateDatasetOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_dataset(self, - request: dataset_service.GetDatasetRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> dataset.Dataset: - r"""Gets a Dataset. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetDatasetRequest`): - The request object. Request message for - [DatasetService.GetDataset][google.cloud.aiplatform.v1beta1.DatasetService.GetDataset]. - name (:class:`str`): - Required. The name of the Dataset - resource. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Dataset: - A collection of DataItems and - Annotations on them. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.GetDatasetRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_dataset, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_dataset(self, - request: dataset_service.UpdateDatasetRequest = None, - *, - dataset: gca_dataset.Dataset = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_dataset.Dataset: - r"""Updates a Dataset. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateDatasetRequest`): - The request object. 
Request message for - [DatasetService.UpdateDataset][google.cloud.aiplatform.v1beta1.DatasetService.UpdateDataset]. - dataset (:class:`google.cloud.aiplatform_v1beta1.types.Dataset`): - Required. The Dataset which replaces - the resource on the server. - - This corresponds to the ``dataset`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The update mask applies to the resource. For - the ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - Updatable fields: - - - ``display_name`` - - ``description`` - - ``labels`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Dataset: - A collection of DataItems and - Annotations on them. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([dataset, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.UpdateDatasetRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if dataset is not None: - request.dataset = dataset - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_dataset, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("dataset.name", request.dataset.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_datasets(self, - request: dataset_service.ListDatasetsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListDatasetsAsyncPager: - r"""Lists Datasets in a Location. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListDatasetsRequest`): - The request object. Request message for - [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. - parent (:class:`str`): - Required. The name of the Dataset's parent resource. - Format: ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListDatasetsAsyncPager: - Response message for - [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.ListDatasetsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_datasets, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListDatasetsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_dataset(self, - request: dataset_service.DeleteDatasetRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a Dataset. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteDatasetRequest`): - The request object. Request message for - [DatasetService.DeleteDataset][google.cloud.aiplatform.v1beta1.DatasetService.DeleteDataset]. - name (:class:`str`): - Required. The resource name of the Dataset to delete. - Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.DeleteDatasetRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_dataset, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def import_data(self, - request: dataset_service.ImportDataRequest = None, - *, - name: str = None, - import_configs: Sequence[dataset.ImportDataConfig] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Imports data into a Dataset. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ImportDataRequest`): - The request object. Request message for - [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. - name (:class:`str`): - Required. The name of the Dataset resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - import_configs (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.ImportDataConfig]`): - Required. The desired input - locations. The contents of all input - locations will be imported in one batch. - - This corresponds to the ``import_configs`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.ImportDataResponse` - Response message for - [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name, import_configs]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.ImportDataRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if import_configs: - request.import_configs.extend(import_configs) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.import_data, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - dataset_service.ImportDataResponse, - metadata_type=dataset_service.ImportDataOperationMetadata, - ) - - # Done; return the response. - return response - - async def export_data(self, - request: dataset_service.ExportDataRequest = None, - *, - name: str = None, - export_config: dataset.ExportDataConfig = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Exports data from a Dataset. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ExportDataRequest`): - The request object. Request message for - [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. - name (:class:`str`): - Required. The name of the Dataset resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - export_config (:class:`google.cloud.aiplatform_v1beta1.types.ExportDataConfig`): - Required. The desired output - location. - - This corresponds to the ``export_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.ExportDataResponse` - Response message for - [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name, export_config]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.ExportDataRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if export_config is not None: - request.export_config = export_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.export_data, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - dataset_service.ExportDataResponse, - metadata_type=dataset_service.ExportDataOperationMetadata, - ) - - # Done; return the response. - return response - - async def list_data_items(self, - request: dataset_service.ListDataItemsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListDataItemsAsyncPager: - r"""Lists DataItems in a Dataset. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListDataItemsRequest`): - The request object. Request message for - [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. - parent (:class:`str`): - Required. The resource name of the Dataset to list - DataItems from. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListDataItemsAsyncPager: - Response message for - [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.ListDataItemsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_data_items, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListDataItemsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_annotation_spec(self, - request: dataset_service.GetAnnotationSpecRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> annotation_spec.AnnotationSpec: - r"""Gets an AnnotationSpec. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetAnnotationSpecRequest`): - The request object. Request message for - [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1beta1.DatasetService.GetAnnotationSpec]. - name (:class:`str`): - Required. The name of the AnnotationSpec resource. - Format: - ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.AnnotationSpec: - Identifies a concept with which - DataItems may be annotated with. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.GetAnnotationSpecRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_annotation_spec, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
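Every wrapped RPC above accepts per-call retry and timeout overrides in place of the generated defaults (default_timeout=5.0). A hedged sketch of overriding both on get_annotation_spec; the resource name is a placeholder and credentials are taken from the environment:

    import asyncio

    from google.api_core import retry as retries
    from google.cloud.aiplatform_v1beta1.services.dataset_service import (
        DatasetServiceAsyncClient,
    )

    async def fetch_spec() -> None:
        client = DatasetServiceAsyncClient()
        spec = await client.get_annotation_spec(
            name=(
                "projects/my-project/locations/us-central1/"
                "datasets/123/annotationSpecs/456"
            ),
            # Replaces the generated default_timeout=5.0 shown above.
            retry=retries.Retry(initial=0.1, maximum=10.0, deadline=60.0),
            timeout=30.0,
        )
        print(spec.display_name)

    asyncio.run(fetch_spec())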
- return response - - async def list_annotations(self, - request: dataset_service.ListAnnotationsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListAnnotationsAsyncPager: - r"""Lists Annotations belongs to a dataitem - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListAnnotationsRequest`): - The request object. Request message for - [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. - parent (:class:`str`): - Required. The resource name of the DataItem to list - Annotations from. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListAnnotationsAsyncPager: - Response message for - [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.ListAnnotationsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_annotations, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListAnnotationsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
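The paged list methods above return pager objects whose `__aiter__` fetches additional pages on demand, so callers never handle page tokens directly. A usage sketch with placeholder resource names:

    import asyncio

    from google.cloud.aiplatform_v1beta1.services.dataset_service import (
        DatasetServiceAsyncClient,
    )

    async def main() -> None:
        client = DatasetServiceAsyncClient()
        pager = await client.list_annotations(
            parent=(
                "projects/my-project/locations/us-central1/"
                "datasets/123/dataItems/456"
            ),
        )
        # Each iteration may transparently trigger another
        # ListAnnotations request for the next page.
        async for annotation in pager:
            print(annotation.name)

    asyncio.run(main())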
- return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "DatasetServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py deleted file mode 100644 index d7d6fab3c3..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py +++ /dev/null @@ -1,1288 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from distutils import util -import os -import re -from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.dataset_service import pagers -from google.cloud.aiplatform_v1beta1.types import annotation -from google.cloud.aiplatform_v1beta1.types import annotation_spec -from google.cloud.aiplatform_v1beta1.types import data_item -from google.cloud.aiplatform_v1beta1.types import dataset -from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset -from google.cloud.aiplatform_v1beta1.types import dataset_service -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import DatasetServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import DatasetServiceGrpcTransport -from .transports.grpc_asyncio import DatasetServiceGrpcAsyncIOTransport - - -class DatasetServiceClientMeta(type): - """Metaclass for the DatasetService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. 
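Because the transport registry lives on the metaclass, get_transport_class is callable on the client class itself: an explicit label selects that transport, while no label falls back to the first registered entry ("grpc"). For example:

    from google.cloud.aiplatform_v1beta1.services.dataset_service import (
        DatasetServiceClient,
    )

    print(DatasetServiceClient.get_transport_class("grpc_asyncio").__name__)
    # DatasetServiceGrpcAsyncIOTransport
    print(DatasetServiceClient.get_transport_class().__name__)
    # DatasetServiceGrpcTransport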
- """ - _transport_registry = OrderedDict() # type: Dict[str, Type[DatasetServiceTransport]] - _transport_registry["grpc"] = DatasetServiceGrpcTransport - _transport_registry["grpc_asyncio"] = DatasetServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[DatasetServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class DatasetServiceClient(metaclass=DatasetServiceClientMeta): - """""" - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - DatasetServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - DatasetServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> DatasetServiceTransport: - """Returns the transport used by the client instance. - - Returns: - DatasetServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def annotation_path(project: str,location: str,dataset: str,data_item: str,annotation: str,) -> str: - """Returns a fully-qualified annotation string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}/annotations/{annotation}".format(project=project, location=location, dataset=dataset, data_item=data_item, annotation=annotation, ) - - @staticmethod - def parse_annotation_path(path: str) -> Dict[str,str]: - """Parses a annotation path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/dataItems/(?P.+?)/annotations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def annotation_spec_path(project: str,location: str,dataset: str,annotation_spec: str,) -> str: - """Returns a fully-qualified annotation_spec string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(project=project, location=location, dataset=dataset, annotation_spec=annotation_spec, ) - - @staticmethod - def parse_annotation_spec_path(path: str) -> Dict[str,str]: - """Parses a annotation_spec path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/annotationSpecs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def data_item_path(project: str,location: str,dataset: str,data_item: str,) -> str: - """Returns a fully-qualified data_item string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}".format(project=project, location=location, dataset=dataset, data_item=data_item, ) - - @staticmethod - def parse_data_item_path(path: str) -> Dict[str,str]: - """Parses a data_item path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/dataItems/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def dataset_path(project: str,location: str,dataset: str,) -> str: - """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - - @staticmethod - def parse_dataset_path(path: str) -> Dict[str,str]: - """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - 
    @staticmethod
-    def parse_common_organization_path(path: str) -> Dict[str,str]:
-        """Parse an organization path into its component segments."""
-        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_project_path(project: str, ) -> str:
-        """Returns a fully-qualified project string."""
-        return "projects/{project}".format(project=project, )
-
-    @staticmethod
-    def parse_common_project_path(path: str) -> Dict[str,str]:
-        """Parse a project path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_location_path(project: str, location: str, ) -> str:
-        """Returns a fully-qualified location string."""
-        return "projects/{project}/locations/{location}".format(project=project, location=location, )
-
-    @staticmethod
-    def parse_common_location_path(path: str) -> Dict[str,str]:
-        """Parse a location path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    def __init__(self, *,
-            credentials: Optional[ga_credentials.Credentials] = None,
-            transport: Union[str, DatasetServiceTransport, None] = None,
-            client_options: Optional[client_options_lib.ClientOptions] = None,
-            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
-            ) -> None:
-        """Instantiates the dataset service client.
-
-        Args:
-            credentials (Optional[google.auth.credentials.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify the application to the service; if none
-                are specified, the client will attempt to ascertain the
-                credentials from the environment.
-            transport (Union[str, DatasetServiceTransport]): The
-                transport to use. If set to None, a transport is chosen
-                automatically.
-            client_options (google.api_core.client_options.ClientOptions): Custom options for the
-                client. It won't take effect if a ``transport`` instance is provided.
-                (1) The ``api_endpoint`` property can be used to override the
-                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
-                environment variable can also be used to override the endpoint:
-                "always" (always use the default mTLS endpoint), "never" (always
-                use the default regular endpoint) and "auto" (auto switch to the
-                default mTLS endpoint if client certificate is present, this is
-                the default value). However, the ``api_endpoint`` property takes
-                precedence if provided.
-                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
-                is "true", then the ``client_cert_source`` property can be used
-                to provide client certificate for mutual TLS transport. If
-                not provided, the default SSL client certificate will be used if
-                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
-                set, no client certificate will be used.
-            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
-                The client info used to send a user-agent string along with
-                API requests. If ``None``, then default info will be used.
-                Generally, you only need to set this if you're developing
-                your own client library.
-
-        Raises:
-            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
-                creation failed for any reason.
-        """
-        if isinstance(client_options, dict):
-            client_options = client_options_lib.from_dict(client_options)
-        if client_options is None:
-            client_options = client_options_lib.ClientOptions()
-
-        # Create SSL credentials for mutual TLS if needed.
- use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, DatasetServiceTransport): - # transport is a DatasetServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - ) - - def create_dataset(self, - request: dataset_service.CreateDatasetRequest = None, - *, - parent: str = None, - dataset: gca_dataset.Dataset = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates a Dataset. - - Args: - request (google.cloud.aiplatform_v1beta1.types.CreateDatasetRequest): - The request object. Request message for - [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset]. - parent (str): - Required. The resource name of the Location to create - the Dataset in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - dataset (google.cloud.aiplatform_v1beta1.types.Dataset): - Required. The Dataset to create. - This corresponds to the ``dataset`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
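Endpoint selection above is driven by GOOGLE_API_USE_MTLS_ENDPOINT and GOOGLE_API_USE_CLIENT_CERTIFICATE, with an explicit api_endpoint taking precedence over both. A minimal sketch of pinning a client to a regional endpoint (the hostname is illustrative):

    import os
    from google.api_core.client_options import ClientOptions
    from google.cloud.aiplatform_v1beta1 import DatasetServiceClient

    # "never" forces the regular endpoint even if a client certificate exists.
    os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "never"

    client = DatasetServiceClient(
        client_options=ClientOptions(api_endpoint="us-central1-aiplatform.googleapis.com"),
    )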
- - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.Dataset` A - collection of DataItems and Annotations on them. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, dataset]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.CreateDatasetRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.CreateDatasetRequest): - request = dataset_service.CreateDatasetRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if dataset is not None: - request.dataset = dataset - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_dataset] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_dataset.Dataset, - metadata_type=dataset_service.CreateDatasetOperationMetadata, - ) - - # Done; return the response. - return response - - def get_dataset(self, - request: dataset_service.GetDatasetRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> dataset.Dataset: - r"""Gets a Dataset. - - Args: - request (google.cloud.aiplatform_v1beta1.types.GetDatasetRequest): - The request object. Request message for - [DatasetService.GetDataset][google.cloud.aiplatform.v1beta1.DatasetService.GetDataset]. - name (str): - Required. The name of the Dataset - resource. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Dataset: - A collection of DataItems and - Annotations on them. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.GetDatasetRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
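The create_dataset method completed above returns an operation future rather than the Dataset itself. A blocking sketch, assuming Application Default Credentials; the schema URI shown is one published example, not the only option:

    from google.cloud.aiplatform_v1beta1 import DatasetServiceClient
    from google.cloud.aiplatform_v1beta1.types import Dataset

    client = DatasetServiceClient()
    operation = client.create_dataset(
        parent="projects/my-project/locations/us-central1",
        dataset=Dataset(
            display_name="flowers",
            metadata_schema_uri=(
                "gs://google-cloud-aiplatform/schema/dataset/metadata/image_1.0.0.yaml"
            ),
        ),
    )
    created = operation.result(timeout=300)  # block until the LRO finishes
    print(created.name)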
- if not isinstance(request, dataset_service.GetDatasetRequest): - request = dataset_service.GetDatasetRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_dataset] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_dataset(self, - request: dataset_service.UpdateDatasetRequest = None, - *, - dataset: gca_dataset.Dataset = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_dataset.Dataset: - r"""Updates a Dataset. - - Args: - request (google.cloud.aiplatform_v1beta1.types.UpdateDatasetRequest): - The request object. Request message for - [DatasetService.UpdateDataset][google.cloud.aiplatform.v1beta1.DatasetService.UpdateDataset]. - dataset (google.cloud.aiplatform_v1beta1.types.Dataset): - Required. The Dataset which replaces - the resource on the server. - - This corresponds to the ``dataset`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the resource. For - the ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - Updatable fields: - - - ``display_name`` - - ``description`` - - ``labels`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Dataset: - A collection of DataItems and - Annotations on them. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([dataset, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.UpdateDatasetRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.UpdateDatasetRequest): - request = dataset_service.UpdateDatasetRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if dataset is not None: - request.dataset = dataset - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
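The update_dataset method being assembled here pairs the replacement resource with a FieldMask naming exactly which fields to overwrite. A sketch restricted to the updatable fields listed in its docstring, reusing the hypothetical client from the previous sketch (the resource name is illustrative):

    from google.protobuf import field_mask_pb2
    from google.cloud.aiplatform_v1beta1.types import Dataset

    updated = client.update_dataset(
        dataset=Dataset(
            name="projects/my-project/locations/us-central1/datasets/123",
            display_name="flowers-v2",
        ),
        update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
    )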
- rpc = self._transport._wrapped_methods[self._transport.update_dataset] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("dataset.name", request.dataset.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_datasets(self, - request: dataset_service.ListDatasetsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListDatasetsPager: - r"""Lists Datasets in a Location. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListDatasetsRequest): - The request object. Request message for - [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. - parent (str): - Required. The name of the Dataset's parent resource. - Format: ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListDatasetsPager: - Response message for - [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.ListDatasetsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.ListDatasetsRequest): - request = dataset_service.ListDatasetsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_datasets] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListDatasetsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
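Because list_datasets wraps the response in a pager, callers normally never touch page tokens; iterating issues further ListDatasets RPCs as needed. A sketch with the same hypothetical client as above:

    for ds in client.list_datasets(parent="projects/my-project/locations/us-central1"):
        print(ds.name, ds.display_name)  # additional pages are fetched lazily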
- return response - - def delete_dataset(self, - request: dataset_service.DeleteDatasetRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a Dataset. - - Args: - request (google.cloud.aiplatform_v1beta1.types.DeleteDatasetRequest): - The request object. Request message for - [DatasetService.DeleteDataset][google.cloud.aiplatform.v1beta1.DatasetService.DeleteDataset]. - name (str): - Required. The resource name of the Dataset to delete. - Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.DeleteDatasetRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.DeleteDatasetRequest): - request = dataset_service.DeleteDatasetRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_dataset] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
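delete_dataset likewise returns an operation future; per the docstring above, a successful result resolves to google.protobuf.empty_pb2.Empty. A short sketch, continuing with the same hypothetical client:

    operation = client.delete_dataset(
        name="projects/my-project/locations/us-central1/datasets/123",
    )
    operation.result()  # Empty on success; raises on failure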
- return response - - def import_data(self, - request: dataset_service.ImportDataRequest = None, - *, - name: str = None, - import_configs: Sequence[dataset.ImportDataConfig] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Imports data into a Dataset. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ImportDataRequest): - The request object. Request message for - [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. - name (str): - Required. The name of the Dataset resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - import_configs (Sequence[google.cloud.aiplatform_v1beta1.types.ImportDataConfig]): - Required. The desired input - locations. The contents of all input - locations will be imported in one batch. - - This corresponds to the ``import_configs`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.ImportDataResponse` - Response message for - [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, import_configs]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.ImportDataRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.ImportDataRequest): - request = dataset_service.ImportDataRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if import_configs is not None: - request.import_configs = import_configs - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.import_data] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - dataset_service.ImportDataResponse, - metadata_type=dataset_service.ImportDataOperationMetadata, - ) - - # Done; return the response. 
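import_data accepts a list of ImportDataConfig messages that are imported as one batch. A sketch with a single Cloud Storage source; the bucket path and I/O-format schema URI are illustrative values:

    from google.cloud.aiplatform_v1beta1.types import GcsSource, ImportDataConfig

    operation = client.import_data(
        name="projects/my-project/locations/us-central1/datasets/123",
        import_configs=[
            ImportDataConfig(
                gcs_source=GcsSource(uris=["gs://my-bucket/annotations.jsonl"]),
                import_schema_uri=(
                    "gs://google-cloud-aiplatform/schema/dataset/ioformat/"
                    "image_classification_single_label_io_format_1.0.0.yaml"
                ),
            ),
        ],
    )
    operation.result()  # all configured locations land in one batch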
- return response - - def export_data(self, - request: dataset_service.ExportDataRequest = None, - *, - name: str = None, - export_config: dataset.ExportDataConfig = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Exports data from a Dataset. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ExportDataRequest): - The request object. Request message for - [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. - name (str): - Required. The name of the Dataset resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - export_config (google.cloud.aiplatform_v1beta1.types.ExportDataConfig): - Required. The desired output - location. - - This corresponds to the ``export_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.ExportDataResponse` - Response message for - [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, export_config]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.ExportDataRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.ExportDataRequest): - request = dataset_service.ExportDataRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if export_config is not None: - request.export_config = export_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.export_data] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - dataset_service.ExportDataResponse, - metadata_type=dataset_service.ExportDataOperationMetadata, - ) - - # Done; return the response. 
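export_data is the mirror image, taking a single ExportDataConfig. A sketch writing to a Cloud Storage prefix (illustrative):

    from google.cloud.aiplatform_v1beta1.types import ExportDataConfig, GcsDestination

    operation = client.export_data(
        name="projects/my-project/locations/us-central1/datasets/123",
        export_config=ExportDataConfig(
            gcs_destination=GcsDestination(output_uri_prefix="gs://my-bucket/exports/"),
        ),
    )
    response = operation.result()  # ExportDataResponse listing the exported files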
- return response - - def list_data_items(self, - request: dataset_service.ListDataItemsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListDataItemsPager: - r"""Lists DataItems in a Dataset. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListDataItemsRequest): - The request object. Request message for - [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. - parent (str): - Required. The resource name of the Dataset to list - DataItems from. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListDataItemsPager: - Response message for - [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.ListDataItemsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.ListDataItemsRequest): - request = dataset_service.ListDataItemsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_data_items] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListDataItemsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_annotation_spec(self, - request: dataset_service.GetAnnotationSpecRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> annotation_spec.AnnotationSpec: - r"""Gets an AnnotationSpec. - - Args: - request (google.cloud.aiplatform_v1beta1.types.GetAnnotationSpecRequest): - The request object. 
Request message for
-                [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1beta1.DatasetService.GetAnnotationSpec].
-            name (str):
-                Required. The name of the AnnotationSpec resource.
-                Format:
-                ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}``
-
-                This corresponds to the ``name`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.cloud.aiplatform_v1beta1.types.AnnotationSpec:
-                Identifies a concept with which
-                DataItems may be annotated.
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([name])
-        if request is not None and has_flattened_params:
-            raise ValueError('If the `request` argument is set, then none of '
-                             'the individual field arguments should be set.')
-
-        # Minor optimization to avoid making a copy if the user passes
-        # in a dataset_service.GetAnnotationSpecRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, dataset_service.GetAnnotationSpecRequest):
-            request = dataset_service.GetAnnotationSpecRequest(request)
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if name is not None:
-            request.name = name
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.get_annotation_spec]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("name", request.name),
-            )),
-        )
-
-        # Send the request.
-        response = rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Done; return the response.
-        return response
-
-    def list_annotations(self,
-            request: dataset_service.ListAnnotationsRequest = None,
-            *,
-            parent: str = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> pagers.ListAnnotationsPager:
-        r"""Lists Annotations belonging to a DataItem.
-
-        Args:
-            request (google.cloud.aiplatform_v1beta1.types.ListAnnotationsRequest):
-                The request object. Request message for
-                [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations].
-            parent (str):
-                Required. The resource name of the DataItem to list
-                Annotations from. Format:
-                ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}``
-
-                This corresponds to the ``parent`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
- - Returns: - google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListAnnotationsPager: - Response message for - [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.ListAnnotationsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.ListAnnotationsRequest): - request = dataset_service.ListAnnotationsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_annotations] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListAnnotationsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "DatasetServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/pagers.py deleted file mode 100644 index 35216020c7..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/pagers.py +++ /dev/null @@ -1,387 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.cloud.aiplatform_v1beta1.types import annotation -from google.cloud.aiplatform_v1beta1.types import data_item -from google.cloud.aiplatform_v1beta1.types import dataset -from google.cloud.aiplatform_v1beta1.types import dataset_service - - -class ListDatasetsPager: - """A pager for iterating through ``list_datasets`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListDatasetsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``datasets`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListDatasets`` requests and continue to iterate - through the ``datasets`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListDatasetsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., dataset_service.ListDatasetsResponse], - request: dataset_service.ListDatasetsRequest, - response: dataset_service.ListDatasetsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListDatasetsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListDatasetsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = dataset_service.ListDatasetsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[dataset_service.ListDatasetsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[dataset.Dataset]: - for page in self.pages: - yield from page.datasets - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListDatasetsAsyncPager: - """A pager for iterating through ``list_datasets`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListDatasetsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``datasets`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListDatasets`` requests and continue to iterate - through the ``datasets`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListDatasetsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[dataset_service.ListDatasetsResponse]], - request: dataset_service.ListDatasetsRequest, - response: dataset_service.ListDatasetsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. 
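Besides flat item iteration, the sync pager exposes a pages property for callers that want one ListDatasetsResponse per RPC; only the most recent response is retained, as the class docstring notes. A short sketch, reusing the hypothetical client from the earlier examples:

    pager = client.list_datasets(parent="projects/my-project/locations/us-central1")
    for page in pager.pages:  # one response object per underlying request
        print(len(page.datasets), page.next_page_token)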
- - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListDatasetsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListDatasetsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = dataset_service.ListDatasetsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[dataset_service.ListDatasetsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[dataset.Dataset]: - async def async_generator(): - async for page in self.pages: - for response in page.datasets: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListDataItemsPager: - """A pager for iterating through ``list_data_items`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListDataItemsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``data_items`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListDataItems`` requests and continue to iterate - through the ``data_items`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListDataItemsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., dataset_service.ListDataItemsResponse], - request: dataset_service.ListDataItemsRequest, - response: dataset_service.ListDataItemsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListDataItemsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListDataItemsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = dataset_service.ListDataItemsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[dataset_service.ListDataItemsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[data_item.DataItem]: - for page in self.pages: - yield from page.data_items - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListDataItemsAsyncPager: - """A pager for iterating through ``list_data_items`` requests. 
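The async pagers are consumed with async for, driven by the same pages generator. A sketch against the async client generated alongside this sync one:

    import asyncio
    from google.cloud.aiplatform_v1beta1 import DatasetServiceAsyncClient

    async def main() -> None:
        client = DatasetServiceAsyncClient()
        pager = await client.list_datasets(
            parent="projects/my-project/locations/us-central1",
        )
        async for ds in pager:  # resolves additional pages lazily
            print(ds.display_name)

    asyncio.run(main())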
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListDataItemsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``data_items`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListDataItems`` requests and continue to iterate - through the ``data_items`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListDataItemsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[dataset_service.ListDataItemsResponse]], - request: dataset_service.ListDataItemsRequest, - response: dataset_service.ListDataItemsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListDataItemsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListDataItemsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = dataset_service.ListDataItemsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[dataset_service.ListDataItemsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[data_item.DataItem]: - async def async_generator(): - async for page in self.pages: - for response in page.data_items: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListAnnotationsPager: - """A pager for iterating through ``list_annotations`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListAnnotationsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``annotations`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListAnnotations`` requests and continue to iterate - through the ``annotations`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListAnnotationsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., dataset_service.ListAnnotationsResponse], - request: dataset_service.ListAnnotationsRequest, - response: dataset_service.ListAnnotationsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListAnnotationsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListAnnotationsResponse): - The initial response object. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = dataset_service.ListAnnotationsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[dataset_service.ListAnnotationsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[annotation.Annotation]: - for page in self.pages: - yield from page.annotations - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListAnnotationsAsyncPager: - """A pager for iterating through ``list_annotations`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListAnnotationsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``annotations`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListAnnotations`` requests and continue to iterate - through the ``annotations`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListAnnotationsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[dataset_service.ListAnnotationsResponse]], - request: dataset_service.ListAnnotationsRequest, - response: dataset_service.ListAnnotationsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListAnnotationsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListAnnotationsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = dataset_service.ListAnnotationsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[dataset_service.ListAnnotationsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[annotation.Annotation]: - async def async_generator(): - async for page in self.pages: - for response in page.annotations: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/__init__.py deleted file mode 100644 index 561b0c5cfd..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import DatasetServiceTransport -from .grpc import DatasetServiceGrpcTransport -from .grpc_asyncio import DatasetServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[DatasetServiceTransport]] -_transport_registry['grpc'] = DatasetServiceGrpcTransport -_transport_registry['grpc_asyncio'] = DatasetServiceGrpcAsyncIOTransport - -__all__ = ( - 'DatasetServiceTransport', - 'DatasetServiceGrpcTransport', - 'DatasetServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py deleted file mode 100644 index c5e42b084d..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py +++ /dev/null @@ -1,304 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import annotation_spec -from google.cloud.aiplatform_v1beta1.types import dataset -from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset -from google.cloud.aiplatform_v1beta1.types import dataset_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class DatasetServiceTransport(abc.ABC): - """Abstract transport class for DatasetService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. 
- self._scopes = scopes
-
-        # If no credentials are provided, then determine the appropriate
-        # defaults.
-        if credentials and credentials_file:
-            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
-
-        if credentials_file is not None:
-            credentials, _ = google.auth.load_credentials_from_file(
-                                credentials_file,
-                                **scopes_kwargs,
-                                quota_project_id=quota_project_id
-                            )
-
-        elif credentials is None:
-            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
-
-        # If the credentials are service account credentials, then always try to use self signed JWT.
-        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
-            credentials = credentials.with_always_use_jwt_access(True)
-
-        # Save the credentials.
-        self._credentials = credentials
-
-    # TODO(busunkim): This method is in the base transport
-    # to avoid duplicating code across the transport classes. These functions
-    # should be deleted once the minimum required version of google-auth is increased.
-
-    # TODO: Remove this function once google-auth >= 1.25.0 is required
-    @classmethod
-    def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
-        """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
-
-        scopes_kwargs = {}
-
-        if _GOOGLE_AUTH_VERSION and (
-            packaging.version.parse(_GOOGLE_AUTH_VERSION)
-            >= packaging.version.parse("1.25.0")
-        ):
-            scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
-        else:
-            scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
-
-        return scopes_kwargs
-
-    def _prep_wrapped_messages(self, client_info):
-        # Precompute the wrapped methods.
- self._wrapped_methods = { - self.create_dataset: gapic_v1.method.wrap_method( - self.create_dataset, - default_timeout=5.0, - client_info=client_info, - ), - self.get_dataset: gapic_v1.method.wrap_method( - self.get_dataset, - default_timeout=5.0, - client_info=client_info, - ), - self.update_dataset: gapic_v1.method.wrap_method( - self.update_dataset, - default_timeout=5.0, - client_info=client_info, - ), - self.list_datasets: gapic_v1.method.wrap_method( - self.list_datasets, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_dataset: gapic_v1.method.wrap_method( - self.delete_dataset, - default_timeout=5.0, - client_info=client_info, - ), - self.import_data: gapic_v1.method.wrap_method( - self.import_data, - default_timeout=5.0, - client_info=client_info, - ), - self.export_data: gapic_v1.method.wrap_method( - self.export_data, - default_timeout=5.0, - client_info=client_info, - ), - self.list_data_items: gapic_v1.method.wrap_method( - self.list_data_items, - default_timeout=5.0, - client_info=client_info, - ), - self.get_annotation_spec: gapic_v1.method.wrap_method( - self.get_annotation_spec, - default_timeout=5.0, - client_info=client_info, - ), - self.list_annotations: gapic_v1.method.wrap_method( - self.list_annotations, - default_timeout=5.0, - client_info=client_info, - ), - } - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_dataset(self) -> Callable[ - [dataset_service.CreateDatasetRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_dataset(self) -> Callable[ - [dataset_service.GetDatasetRequest], - Union[ - dataset.Dataset, - Awaitable[dataset.Dataset] - ]]: - raise NotImplementedError() - - @property - def update_dataset(self) -> Callable[ - [dataset_service.UpdateDatasetRequest], - Union[ - gca_dataset.Dataset, - Awaitable[gca_dataset.Dataset] - ]]: - raise NotImplementedError() - - @property - def list_datasets(self) -> Callable[ - [dataset_service.ListDatasetsRequest], - Union[ - dataset_service.ListDatasetsResponse, - Awaitable[dataset_service.ListDatasetsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_dataset(self) -> Callable[ - [dataset_service.DeleteDatasetRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def import_data(self) -> Callable[ - [dataset_service.ImportDataRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def export_data(self) -> Callable[ - [dataset_service.ExportDataRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def list_data_items(self) -> Callable[ - [dataset_service.ListDataItemsRequest], - Union[ - dataset_service.ListDataItemsResponse, - Awaitable[dataset_service.ListDataItemsResponse] - ]]: - raise NotImplementedError() - - @property - def get_annotation_spec(self) -> Callable[ - [dataset_service.GetAnnotationSpecRequest], - Union[ - annotation_spec.AnnotationSpec, - Awaitable[annotation_spec.AnnotationSpec] - ]]: - raise NotImplementedError() - - @property - def list_annotations(self) -> Callable[ - [dataset_service.ListAnnotationsRequest], - Union[ - dataset_service.ListAnnotationsResponse, - 
Awaitable[dataset_service.ListAnnotationsResponse] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'DatasetServiceTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py deleted file mode 100644 index 9091949fe5..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py +++ /dev/null @@ -1,506 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.api_core import gapic_v1 # type: ignore -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1beta1.types import annotation_spec -from google.cloud.aiplatform_v1beta1.types import dataset -from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset -from google.cloud.aiplatform_v1beta1.types import dataset_service -from google.longrunning import operations_pb2 # type: ignore -from .base import DatasetServiceTransport, DEFAULT_CLIENT_INFO - - -class DatasetServiceGrpcTransport(DatasetServiceTransport): - """gRPC backend transport for DatasetService. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. 
- credentials_file (Optional[str]): A file with credentials that can
-                be loaded with :func:`google.auth.load_credentials_from_file`.
-                This argument is ignored if ``channel`` is provided.
-            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
-                ignored if ``channel`` is provided.
-            channel (Optional[grpc.Channel]): A ``Channel`` instance through
-                which to make calls.
-            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
-                If provided, it overrides the ``host`` argument and tries to create
-                a mutual TLS channel with client SSL credentials from
-                ``client_cert_source`` or application default SSL credentials.
-            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
-                Deprecated. A callback to provide client SSL certificate bytes and
-                private key bytes, both in PEM format. It is ignored if
-                ``api_mtls_endpoint`` is None.
-            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
-                for grpc channel. It is ignored if ``channel`` is provided.
-            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
-                A callback to provide client certificate bytes and private key bytes,
-                both in PEM format. It is used to configure mutual TLS channel. It is
-                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
-            quota_project_id (Optional[str]): An optional project to use for billing
-                and quota.
-            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
-                The client info used to send a user-agent string along with
-                API requests. If ``None``, then default info will be used.
-                Generally, you only need to set this if you're developing
-                your own client library.
-            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
-                be used for service account credentials.
-
-        Raises:
-          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
-              creation failed for any reason.
-          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
-              and ``credentials_file`` are passed.
-        """
-        self._grpc_channel = None
-        self._ssl_channel_credentials = ssl_channel_credentials
-        self._stubs: Dict[str, Callable] = {}
-        self._operations_client = None
-
-        if api_mtls_endpoint:
-            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
-        if client_cert_source:
-            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
-
-        if channel:
-            # Ignore credentials if a channel was passed.
-            credentials = False
-            # If a channel was explicitly provided, set it.
-            self._grpc_channel = channel
-            self._ssl_channel_credentials = None
-
-        else:
-            if api_mtls_endpoint:
-                host = api_mtls_endpoint
-
-                # Create SSL credentials with client_cert_source or application
-                # default SSL credentials.
- if client_cert_source:
-                    cert, key = client_cert_source()
-                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
-                        certificate_chain=cert, private_key=key
-                    )
-                else:
-                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
-
-            else:
-                if client_cert_source_for_mtls and not ssl_channel_credentials:
-                    cert, key = client_cert_source_for_mtls()
-                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
-                        certificate_chain=cert, private_key=key
-                    )
-
-        # The base transport sets the host, credentials and scopes
-        super().__init__(
-            host=host,
-            credentials=credentials,
-            credentials_file=credentials_file,
-            scopes=scopes,
-            quota_project_id=quota_project_id,
-            client_info=client_info,
-            always_use_jwt_access=always_use_jwt_access,
-        )
-
-        if not self._grpc_channel:
-            self._grpc_channel = type(self).create_channel(
-                self._host,
-                credentials=self._credentials,
-                credentials_file=credentials_file,
-                scopes=self._scopes,
-                ssl_credentials=self._ssl_channel_credentials,
-                quota_project_id=quota_project_id,
-                options=[
-                    ("grpc.max_send_message_length", -1),
-                    ("grpc.max_receive_message_length", -1),
-                ],
-            )
-
-        # Wrap messages. This must be done after self._grpc_channel exists
-        self._prep_wrapped_messages(client_info)
-
-    @classmethod
-    def create_channel(cls,
-                       host: str = 'aiplatform.googleapis.com',
-                       credentials: ga_credentials.Credentials = None,
-                       credentials_file: str = None,
-                       scopes: Optional[Sequence[str]] = None,
-                       quota_project_id: Optional[str] = None,
-                       **kwargs) -> grpc.Channel:
-        """Create and return a gRPC channel object.
-        Args:
-            host (Optional[str]): The host for the channel to use.
-            credentials (Optional[~.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify this application to the service. If
-                none are specified, the client will attempt to ascertain
-                the credentials from the environment.
-            credentials_file (Optional[str]): A file with credentials that can
-                be loaded with :func:`google.auth.load_credentials_from_file`.
-                This argument is mutually exclusive with credentials.
-            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
-                service. These are only used when credentials are not specified and
-                are passed to :func:`google.auth.default`.
-            quota_project_id (Optional[str]): An optional project to use for billing
-                and quota.
-            kwargs (Optional[dict]): Keyword arguments, which are passed to the
-                channel creation.
-        Returns:
-            grpc.Channel: A gRPC channel object.
-
-        Raises:
-            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
-                and ``credentials_file`` are passed.
-        """
-
-        return grpc_helpers.create_channel(
-            host,
-            credentials=credentials,
-            credentials_file=credentials_file,
-            quota_project_id=quota_project_id,
-            default_scopes=cls.AUTH_SCOPES,
-            scopes=scopes,
-            default_host=cls.DEFAULT_HOST,
-            **kwargs
-        )
-
-    @property
-    def grpc_channel(self) -> grpc.Channel:
-        """Return the channel designed to connect to this service.
-        """
-        return self._grpc_channel
-
-    @property
-    def operations_client(self) -> operations_v1.OperationsClient:
-        """Create the client designed to process long-running operations.
-
-        This property caches on the instance; repeated calls return the same
-        client.
-        """
-        # Sanity check: Only create a new client if we do not already have one.
-        if self._operations_client is None:
-            self._operations_client = operations_v1.OperationsClient(
-                self.grpc_channel
-            )
-
-        # Return the client from cache.
- return self._operations_client - - @property - def create_dataset(self) -> Callable[ - [dataset_service.CreateDatasetRequest], - operations_pb2.Operation]: - r"""Return a callable for the create dataset method over gRPC. - - Creates a Dataset. - - Returns: - Callable[[~.CreateDatasetRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_dataset' not in self._stubs: - self._stubs['create_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/CreateDataset', - request_serializer=dataset_service.CreateDatasetRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_dataset'] - - @property - def get_dataset(self) -> Callable[ - [dataset_service.GetDatasetRequest], - dataset.Dataset]: - r"""Return a callable for the get dataset method over gRPC. - - Gets a Dataset. - - Returns: - Callable[[~.GetDatasetRequest], - ~.Dataset]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_dataset' not in self._stubs: - self._stubs['get_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/GetDataset', - request_serializer=dataset_service.GetDatasetRequest.serialize, - response_deserializer=dataset.Dataset.deserialize, - ) - return self._stubs['get_dataset'] - - @property - def update_dataset(self) -> Callable[ - [dataset_service.UpdateDatasetRequest], - gca_dataset.Dataset]: - r"""Return a callable for the update dataset method over gRPC. - - Updates a Dataset. - - Returns: - Callable[[~.UpdateDatasetRequest], - ~.Dataset]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_dataset' not in self._stubs: - self._stubs['update_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/UpdateDataset', - request_serializer=dataset_service.UpdateDatasetRequest.serialize, - response_deserializer=gca_dataset.Dataset.deserialize, - ) - return self._stubs['update_dataset'] - - @property - def list_datasets(self) -> Callable[ - [dataset_service.ListDatasetsRequest], - dataset_service.ListDatasetsResponse]: - r"""Return a callable for the list datasets method over gRPC. - - Lists Datasets in a Location. - - Returns: - Callable[[~.ListDatasetsRequest], - ~.ListDatasetsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_datasets' not in self._stubs: - self._stubs['list_datasets'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/ListDatasets', - request_serializer=dataset_service.ListDatasetsRequest.serialize, - response_deserializer=dataset_service.ListDatasetsResponse.deserialize, - ) - return self._stubs['list_datasets'] - - @property - def delete_dataset(self) -> Callable[ - [dataset_service.DeleteDatasetRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete dataset method over gRPC. - - Deletes a Dataset. - - Returns: - Callable[[~.DeleteDatasetRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_dataset' not in self._stubs: - self._stubs['delete_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/DeleteDataset', - request_serializer=dataset_service.DeleteDatasetRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_dataset'] - - @property - def import_data(self) -> Callable[ - [dataset_service.ImportDataRequest], - operations_pb2.Operation]: - r"""Return a callable for the import data method over gRPC. - - Imports data into a Dataset. - - Returns: - Callable[[~.ImportDataRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'import_data' not in self._stubs: - self._stubs['import_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/ImportData', - request_serializer=dataset_service.ImportDataRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['import_data'] - - @property - def export_data(self) -> Callable[ - [dataset_service.ExportDataRequest], - operations_pb2.Operation]: - r"""Return a callable for the export data method over gRPC. - - Exports data from a Dataset. - - Returns: - Callable[[~.ExportDataRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'export_data' not in self._stubs: - self._stubs['export_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/ExportData', - request_serializer=dataset_service.ExportDataRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_data'] - - @property - def list_data_items(self) -> Callable[ - [dataset_service.ListDataItemsRequest], - dataset_service.ListDataItemsResponse]: - r"""Return a callable for the list data items method over gRPC. - - Lists DataItems in a Dataset. - - Returns: - Callable[[~.ListDataItemsRequest], - ~.ListDataItemsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need
-        # to pass in the functions for each.
-        if 'list_data_items' not in self._stubs:
-            self._stubs['list_data_items'] = self.grpc_channel.unary_unary(
-                '/google.cloud.aiplatform.v1beta1.DatasetService/ListDataItems',
-                request_serializer=dataset_service.ListDataItemsRequest.serialize,
-                response_deserializer=dataset_service.ListDataItemsResponse.deserialize,
-            )
-        return self._stubs['list_data_items']
-
-    @property
-    def get_annotation_spec(self) -> Callable[
-            [dataset_service.GetAnnotationSpecRequest],
-            annotation_spec.AnnotationSpec]:
-        r"""Return a callable for the get annotation spec method over gRPC.
-
-        Gets an AnnotationSpec.
-
-        Returns:
-            Callable[[~.GetAnnotationSpecRequest],
-                    ~.AnnotationSpec]:
-                A function that, when called, will call the underlying RPC
-                on the server.
-        """
-        # Generate a "stub function" on-the-fly which will actually make
-        # the request.
-        # gRPC handles serialization and deserialization, so we just need
-        # to pass in the functions for each.
-        if 'get_annotation_spec' not in self._stubs:
-            self._stubs['get_annotation_spec'] = self.grpc_channel.unary_unary(
-                '/google.cloud.aiplatform.v1beta1.DatasetService/GetAnnotationSpec',
-                request_serializer=dataset_service.GetAnnotationSpecRequest.serialize,
-                response_deserializer=annotation_spec.AnnotationSpec.deserialize,
-            )
-        return self._stubs['get_annotation_spec']
-
-    @property
-    def list_annotations(self) -> Callable[
-            [dataset_service.ListAnnotationsRequest],
-            dataset_service.ListAnnotationsResponse]:
-        r"""Return a callable for the list annotations method over gRPC.
-
-        Lists Annotations that belong to a DataItem.
-
-        Returns:
-            Callable[[~.ListAnnotationsRequest],
-                    ~.ListAnnotationsResponse]:
-                A function that, when called, will call the underlying RPC
-                on the server.
-        """
-        # Generate a "stub function" on-the-fly which will actually make
-        # the request.
-        # gRPC handles serialization and deserialization, so we just need
-        # to pass in the functions for each.
-        if 'list_annotations' not in self._stubs:
-            self._stubs['list_annotations'] = self.grpc_channel.unary_unary(
-                '/google.cloud.aiplatform.v1beta1.DatasetService/ListAnnotations',
-                request_serializer=dataset_service.ListAnnotationsRequest.serialize,
-                response_deserializer=dataset_service.ListAnnotationsResponse.deserialize,
-            )
-        return self._stubs['list_annotations']
-
-
-__all__ = (
-    'DatasetServiceGrpcTransport',
-)
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py
deleted file mode 100644
index d8c7f0a444..0000000000
--- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py
+++ /dev/null
@@ -1,510 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import warnings
-from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
-
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import grpc_helpers_async # type: ignore
-from google.api_core import operations_v1 # type: ignore
-from google.auth import credentials as ga_credentials # type: ignore
-from google.auth.transport.grpc import SslCredentials # type: ignore
-import packaging.version
-
-import grpc # type: ignore
-from grpc.experimental import aio # type: ignore
-
-from google.cloud.aiplatform_v1beta1.types import annotation_spec
-from google.cloud.aiplatform_v1beta1.types import dataset
-from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset
-from google.cloud.aiplatform_v1beta1.types import dataset_service
-from google.longrunning import operations_pb2 # type: ignore
-from .base import DatasetServiceTransport, DEFAULT_CLIENT_INFO
-from .grpc import DatasetServiceGrpcTransport
-
-
-class DatasetServiceGrpcAsyncIOTransport(DatasetServiceTransport):
-    """gRPC AsyncIO backend transport for DatasetService.
-
-    This class defines the same methods as the primary client, so the
-    primary client can load the underlying transport implementation
-    and call it.
-
-    It sends protocol buffers over the wire using gRPC (which is built on
-    top of HTTP/2); the ``grpcio`` package must be installed.
-    """
-
-    _grpc_channel: aio.Channel
-    _stubs: Dict[str, Callable] = {}
-
-    @classmethod
-    def create_channel(cls,
-                       host: str = 'aiplatform.googleapis.com',
-                       credentials: ga_credentials.Credentials = None,
-                       credentials_file: Optional[str] = None,
-                       scopes: Optional[Sequence[str]] = None,
-                       quota_project_id: Optional[str] = None,
-                       **kwargs) -> aio.Channel:
-        """Create and return a gRPC AsyncIO channel object.
-        Args:
-            host (Optional[str]): The host for the channel to use.
-            credentials (Optional[~.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify this application to the service. If
-                none are specified, the client will attempt to ascertain
-                the credentials from the environment.
-            credentials_file (Optional[str]): A file with credentials that can
-                be loaded with :func:`google.auth.load_credentials_from_file`.
-                This argument is mutually exclusive with credentials.
-            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
-                service. These are only used when credentials are not specified and
-                are passed to :func:`google.auth.default`.
-            quota_project_id (Optional[str]): An optional project to use for billing
-                and quota.
-            kwargs (Optional[dict]): Keyword arguments, which are passed to the
-                channel creation.
-        Returns:
-            aio.Channel: A gRPC AsyncIO channel object.
- """
-
-        return grpc_helpers_async.create_channel(
-            host,
-            credentials=credentials,
-            credentials_file=credentials_file,
-            quota_project_id=quota_project_id,
-            default_scopes=cls.AUTH_SCOPES,
-            scopes=scopes,
-            default_host=cls.DEFAULT_HOST,
-            **kwargs
-        )
-
-    def __init__(self, *,
-            host: str = 'aiplatform.googleapis.com',
-            credentials: ga_credentials.Credentials = None,
-            credentials_file: Optional[str] = None,
-            scopes: Optional[Sequence[str]] = None,
-            channel: aio.Channel = None,
-            api_mtls_endpoint: str = None,
-            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
-            ssl_channel_credentials: grpc.ChannelCredentials = None,
-            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
-            quota_project_id=None,
-            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
-            always_use_jwt_access: Optional[bool] = False,
-            ) -> None:
-        """Instantiate the transport.
-
-        Args:
-            host (Optional[str]):
-                 The hostname to connect to.
-            credentials (Optional[google.auth.credentials.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify the application to the service; if none
-                are specified, the client will attempt to ascertain the
-                credentials from the environment.
-                This argument is ignored if ``channel`` is provided.
-            credentials_file (Optional[str]): A file with credentials that can
-                be loaded with :func:`google.auth.load_credentials_from_file`.
-                This argument is ignored if ``channel`` is provided.
-            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
-                service. These are only used when credentials are not specified and
-                are passed to :func:`google.auth.default`.
-            channel (Optional[aio.Channel]): A ``Channel`` instance through
-                which to make calls.
-            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
-                If provided, it overrides the ``host`` argument and tries to create
-                a mutual TLS channel with client SSL credentials from
-                ``client_cert_source`` or application default SSL credentials.
-            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
-                Deprecated. A callback to provide client SSL certificate bytes and
-                private key bytes, both in PEM format. It is ignored if
-                ``api_mtls_endpoint`` is None.
-            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
-                for grpc channel. It is ignored if ``channel`` is provided.
-            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
-                A callback to provide client certificate bytes and private key bytes,
-                both in PEM format. It is used to configure mutual TLS channel. It is
-                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
-            quota_project_id (Optional[str]): An optional project to use for billing
-                and quota.
-            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
-                The client info used to send a user-agent string along with
-                API requests. If ``None``, then default info will be used.
-                Generally, you only need to set this if you're developing
-                your own client library.
-            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
-                be used for service account credentials.
-
-        Raises:
-            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
-                creation failed for any reason.
-            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
-                and ``credentials_file`` are passed.
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_dataset(self) -> Callable[ - [dataset_service.CreateDatasetRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create dataset method over gRPC. - - Creates a Dataset. - - Returns: - Callable[[~.CreateDatasetRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_dataset' not in self._stubs: - self._stubs['create_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/CreateDataset', - request_serializer=dataset_service.CreateDatasetRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_dataset'] - - @property - def get_dataset(self) -> Callable[ - [dataset_service.GetDatasetRequest], - Awaitable[dataset.Dataset]]: - r"""Return a callable for the get dataset method over gRPC. - - Gets a Dataset. - - Returns: - Callable[[~.GetDatasetRequest], - Awaitable[~.Dataset]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_dataset' not in self._stubs: - self._stubs['get_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/GetDataset', - request_serializer=dataset_service.GetDatasetRequest.serialize, - response_deserializer=dataset.Dataset.deserialize, - ) - return self._stubs['get_dataset'] - - @property - def update_dataset(self) -> Callable[ - [dataset_service.UpdateDatasetRequest], - Awaitable[gca_dataset.Dataset]]: - r"""Return a callable for the update dataset method over gRPC. - - Updates a Dataset. - - Returns: - Callable[[~.UpdateDatasetRequest], - Awaitable[~.Dataset]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_dataset' not in self._stubs: - self._stubs['update_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/UpdateDataset', - request_serializer=dataset_service.UpdateDatasetRequest.serialize, - response_deserializer=gca_dataset.Dataset.deserialize, - ) - return self._stubs['update_dataset'] - - @property - def list_datasets(self) -> Callable[ - [dataset_service.ListDatasetsRequest], - Awaitable[dataset_service.ListDatasetsResponse]]: - r"""Return a callable for the list datasets method over gRPC. - - Lists Datasets in a Location. - - Returns: - Callable[[~.ListDatasetsRequest], - Awaitable[~.ListDatasetsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_datasets' not in self._stubs: - self._stubs['list_datasets'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/ListDatasets', - request_serializer=dataset_service.ListDatasetsRequest.serialize, - response_deserializer=dataset_service.ListDatasetsResponse.deserialize, - ) - return self._stubs['list_datasets'] - - @property - def delete_dataset(self) -> Callable[ - [dataset_service.DeleteDatasetRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete dataset method over gRPC. - - Deletes a Dataset. - - Returns: - Callable[[~.DeleteDatasetRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_dataset' not in self._stubs: - self._stubs['delete_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/DeleteDataset', - request_serializer=dataset_service.DeleteDatasetRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_dataset'] - - @property - def import_data(self) -> Callable[ - [dataset_service.ImportDataRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the import data method over gRPC. - - Imports data into a Dataset. - - Returns: - Callable[[~.ImportDataRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'import_data' not in self._stubs: - self._stubs['import_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/ImportData', - request_serializer=dataset_service.ImportDataRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['import_data'] - - @property - def export_data(self) -> Callable[ - [dataset_service.ExportDataRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the export data method over gRPC. - - Exports data from a Dataset. - - Returns: - Callable[[~.ExportDataRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'export_data' not in self._stubs: - self._stubs['export_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/ExportData', - request_serializer=dataset_service.ExportDataRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_data'] - - @property - def list_data_items(self) -> Callable[ - [dataset_service.ListDataItemsRequest], - Awaitable[dataset_service.ListDataItemsResponse]]: - r"""Return a callable for the list data items method over gRPC. - - Lists DataItems in a Dataset. - - Returns: - Callable[[~.ListDataItemsRequest], - Awaitable[~.ListDataItemsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_data_items' not in self._stubs: - self._stubs['list_data_items'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/ListDataItems', - request_serializer=dataset_service.ListDataItemsRequest.serialize, - response_deserializer=dataset_service.ListDataItemsResponse.deserialize, - ) - return self._stubs['list_data_items'] - - @property - def get_annotation_spec(self) -> Callable[ - [dataset_service.GetAnnotationSpecRequest], - Awaitable[annotation_spec.AnnotationSpec]]: - r"""Return a callable for the get annotation spec method over gRPC. 
-
-        Gets an AnnotationSpec.
-
-        Returns:
-            Callable[[~.GetAnnotationSpecRequest],
-                    Awaitable[~.AnnotationSpec]]:
-                A function that, when called, will call the underlying RPC
-                on the server.
-        """
-        # Generate a "stub function" on-the-fly which will actually make
-        # the request.
-        # gRPC handles serialization and deserialization, so we just need
-        # to pass in the functions for each.
-        if 'get_annotation_spec' not in self._stubs:
-            self._stubs['get_annotation_spec'] = self.grpc_channel.unary_unary(
-                '/google.cloud.aiplatform.v1beta1.DatasetService/GetAnnotationSpec',
-                request_serializer=dataset_service.GetAnnotationSpecRequest.serialize,
-                response_deserializer=annotation_spec.AnnotationSpec.deserialize,
-            )
-        return self._stubs['get_annotation_spec']
-
-    @property
-    def list_annotations(self) -> Callable[
-            [dataset_service.ListAnnotationsRequest],
-            Awaitable[dataset_service.ListAnnotationsResponse]]:
-        r"""Return a callable for the list annotations method over gRPC.
-
-        Lists Annotations that belong to a DataItem.
-
-        Returns:
-            Callable[[~.ListAnnotationsRequest],
-                    Awaitable[~.ListAnnotationsResponse]]:
-                A function that, when called, will call the underlying RPC
-                on the server.
-        """
-        # Generate a "stub function" on-the-fly which will actually make
-        # the request.
-        # gRPC handles serialization and deserialization, so we just need
-        # to pass in the functions for each.
-        if 'list_annotations' not in self._stubs:
-            self._stubs['list_annotations'] = self.grpc_channel.unary_unary(
-                '/google.cloud.aiplatform.v1beta1.DatasetService/ListAnnotations',
-                request_serializer=dataset_service.ListAnnotationsRequest.serialize,
-                response_deserializer=dataset_service.ListAnnotationsResponse.deserialize,
-            )
-        return self._stubs['list_annotations']
-
-
-__all__ = (
-    'DatasetServiceGrpcAsyncIOTransport',
-)
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/__init__.py
deleted file mode 100644
index 7db43e768e..0000000000
--- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/__init__.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# -from .client import EndpointServiceClient -from .async_client import EndpointServiceAsyncClient - -__all__ = ( - 'EndpointServiceClient', - 'EndpointServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py deleted file mode 100644 index 0d22bf8ce6..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py +++ /dev/null @@ -1,860 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.endpoint_service import pagers -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import endpoint -from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint -from google.cloud.aiplatform_v1beta1.types import endpoint_service -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import EndpointServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import EndpointServiceGrpcAsyncIOTransport -from .client import EndpointServiceClient - - -class EndpointServiceAsyncClient: - """""" - - _client: EndpointServiceClient - - DEFAULT_ENDPOINT = EndpointServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = EndpointServiceClient.DEFAULT_MTLS_ENDPOINT - - endpoint_path = staticmethod(EndpointServiceClient.endpoint_path) - parse_endpoint_path = staticmethod(EndpointServiceClient.parse_endpoint_path) - model_path = staticmethod(EndpointServiceClient.model_path) - parse_model_path = staticmethod(EndpointServiceClient.parse_model_path) - common_billing_account_path = staticmethod(EndpointServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(EndpointServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(EndpointServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(EndpointServiceClient.parse_common_folder_path) - common_organization_path = 
staticmethod(EndpointServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(EndpointServiceClient.parse_common_organization_path) - common_project_path = staticmethod(EndpointServiceClient.common_project_path) - parse_common_project_path = staticmethod(EndpointServiceClient.parse_common_project_path) - common_location_path = staticmethod(EndpointServiceClient.common_location_path) - parse_common_location_path = staticmethod(EndpointServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - EndpointServiceAsyncClient: The constructed client. - """ - return EndpointServiceClient.from_service_account_info.__func__(EndpointServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - EndpointServiceAsyncClient: The constructed client. - """ - return EndpointServiceClient.from_service_account_file.__func__(EndpointServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> EndpointServiceTransport: - """Returns the transport used by the client instance. - - Returns: - EndpointServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(EndpointServiceClient).get_transport_class, type(EndpointServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, EndpointServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the endpoint service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.EndpointServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. 
- (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
-                is "true", then the ``client_cert_source`` property can be used
-                to provide client certificate for mutual TLS transport. If
-                not provided, the default SSL client certificate will be used if
-                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
-                set, no client certificate will be used.
-
-        Raises:
-            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
-                creation failed for any reason.
-        """
-        self._client = EndpointServiceClient(
-            credentials=credentials,
-            transport=transport,
-            client_options=client_options,
-            client_info=client_info,
-
-        )
-
-    async def create_endpoint(self,
-            request: endpoint_service.CreateEndpointRequest = None,
-            *,
-            parent: str = None,
-            endpoint: gca_endpoint.Endpoint = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> operation_async.AsyncOperation:
-        r"""Creates an Endpoint.
-
-        Args:
-            request (:class:`google.cloud.aiplatform_v1beta1.types.CreateEndpointRequest`):
-                The request object. Request message for
-                [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint].
-            parent (:class:`str`):
-                Required. The resource name of the Location to create
-                the Endpoint in. Format:
-                ``projects/{project}/locations/{location}``
-
-                This corresponds to the ``parent`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            endpoint (:class:`google.cloud.aiplatform_v1beta1.types.Endpoint`):
-                Required. The Endpoint to create.
-                This corresponds to the ``endpoint`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.api_core.operation_async.AsyncOperation:
-                An object representing a long-running operation.
-
-                The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Endpoint` Models are deployed into it, and afterwards Endpoint is called to obtain
-                predictions and explanations.
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([parent, endpoint])
-        if request is not None and has_flattened_params:
-            raise ValueError("If the `request` argument is set, then none of "
-                             "the individual field arguments should be set.")
-
-        request = endpoint_service.CreateEndpointRequest(request)
-
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if parent is not None:
-            request.parent = parent
-        if endpoint is not None:
-            request.endpoint = endpoint
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = gapic_v1.method_async.wrap_method(
-            self._client._transport.create_endpoint,
-            default_timeout=5.0,
-            client_info=DEFAULT_CLIENT_INFO,
-        )
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("parent", request.parent),
-            )),
-        )
-
-        # Send the request.
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_endpoint.Endpoint, - metadata_type=endpoint_service.CreateEndpointOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_endpoint(self, - request: endpoint_service.GetEndpointRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> endpoint.Endpoint: - r"""Gets an Endpoint. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetEndpointRequest`): - The request object. Request message for - [EndpointService.GetEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.GetEndpoint] - name (:class:`str`): - Required. The name of the Endpoint resource. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Endpoint: - Models are deployed into it, and - afterwards Endpoint is called to obtain - predictions and explanations. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = endpoint_service.GetEndpointRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_endpoint, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_endpoints(self, - request: endpoint_service.ListEndpointsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListEndpointsAsyncPager: - r"""Lists Endpoints in a Location. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListEndpointsRequest`): - The request object. Request message for - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. - parent (:class:`str`): - Required. The resource name of the Location from which - to list the Endpoints. 
Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.endpoint_service.pagers.ListEndpointsAsyncPager: - Response message for - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = endpoint_service.ListEndpointsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_endpoints, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListEndpointsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_endpoint(self, - request: endpoint_service.UpdateEndpointRequest = None, - *, - endpoint: gca_endpoint.Endpoint = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_endpoint.Endpoint: - r"""Updates an Endpoint. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateEndpointRequest`): - The request object. Request message for - [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. - endpoint (:class:`google.cloud.aiplatform_v1beta1.types.Endpoint`): - Required. The Endpoint which replaces - the resource on the server. - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The update mask applies to the resource. See - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Endpoint: - Models are deployed into it, and - afterwards Endpoint is called to obtain - predictions and explanations. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = endpoint_service.UpdateEndpointRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_endpoint, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint.name", request.endpoint.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_endpoint(self, - request: endpoint_service.DeleteEndpointRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes an Endpoint. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteEndpointRequest`): - The request object. Request message for - [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeleteEndpoint]. - name (:class:`str`): - Required. The name of the Endpoint resource to be - deleted. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
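``update_endpoint`` pairs the full replacement resource with a standard protobuf ``FieldMask`` selecting which fields to apply. A short sketch, reusing the hypothetical async client from the earlier example:

    from google.protobuf import field_mask_pb2

    async def rename(client, endpoint):
        endpoint.display_name = "renamed-endpoint"  # placeholder value
        return await client.update_endpoint(
            endpoint=endpoint,
            # Only fields named in the mask are overwritten on the server.
            update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
        )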
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = endpoint_service.DeleteEndpointRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_endpoint, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def deploy_model(self, - request: endpoint_service.DeployModelRequest = None, - *, - endpoint: str = None, - deployed_model: gca_endpoint.DeployedModel = None, - traffic_split: Sequence[endpoint_service.DeployModelRequest.TrafficSplitEntry] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deploys a Model into this Endpoint, creating a - DeployedModel within it. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.DeployModelRequest`): - The request object. Request message for - [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. - endpoint (:class:`str`): - Required. The name of the Endpoint resource into which - to deploy a Model. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_model (:class:`google.cloud.aiplatform_v1beta1.types.DeployedModel`): - Required. The DeployedModel to be created within the - Endpoint. Note that - [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] - must be updated for the DeployedModel to start receiving - traffic, either as part of this call, or via - [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. - - This corresponds to the ``deployed_model`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - traffic_split (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.DeployModelRequest.TrafficSplitEntry]`): - A map from a DeployedModel's ID to the percentage of - this Endpoint's traffic that should be forwarded to that - DeployedModel. - - If this field is non-empty, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] - will be overwritten with it. To refer to the ID of the - just being deployed Model, a "0" should be used, and the - actual ID of the new DeployedModel will be filled in its - place by this method. The traffic percentage values must - add up to 100. 
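The ``"0"`` placeholder described above resolves a chicken-and-egg problem: the new DeployedModel's ID does not exist until the call completes. For example (the existing ID shown is illustrative):

    # Send all traffic to the model being deployed by this very call:
    traffic_split = {"0": 100}

    # Or split 20/80 with an already-deployed model (placeholder ID);
    # the percentages must sum to 100:
    traffic_split = {"0": 20, "1234567890": 80}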
- - If this field is empty, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] - is not updated. - - This corresponds to the ``traffic_split`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.DeployModelResponse` - Response message for - [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, deployed_model, traffic_split]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = endpoint_service.DeployModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if deployed_model is not None: - request.deployed_model = deployed_model - - if traffic_split: - request.traffic_split.update(traffic_split) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.deploy_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - endpoint_service.DeployModelResponse, - metadata_type=endpoint_service.DeployModelOperationMetadata, - ) - - # Done; return the response. - return response - - async def undeploy_model(self, - request: endpoint_service.UndeployModelRequest = None, - *, - endpoint: str = None, - deployed_model_id: str = None, - traffic_split: Sequence[endpoint_service.UndeployModelRequest.TrafficSplitEntry] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Undeploys a Model from an Endpoint, removing a - DeployedModel from it, and freeing all resources it's - using. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.UndeployModelRequest`): - The request object. Request message for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. - endpoint (:class:`str`): - Required. The name of the Endpoint resource from which - to undeploy a Model. 
Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_model_id (:class:`str`): - Required. The ID of the DeployedModel - to be undeployed from the Endpoint. - - This corresponds to the ``deployed_model_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - traffic_split (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.UndeployModelRequest.TrafficSplitEntry]`): - If this field is provided, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] - will be overwritten with it. If last DeployedModel is - being undeployed from the Endpoint, the - [Endpoint.traffic_split] will always end up empty when - this call returns. A DeployedModel will be successfully - undeployed only if it doesn't have any traffic assigned - to it when this method executes, or if this field - unassigns any traffic to it. - - This corresponds to the ``traffic_split`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.UndeployModelResponse` - Response message for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, deployed_model_id, traffic_split]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = endpoint_service.UndeployModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if deployed_model_id is not None: - request.deployed_model_id = deployed_model_id - - if traffic_split: - request.traffic_split.update(traffic_split) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.undeploy_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - endpoint_service.UndeployModelResponse, - metadata_type=endpoint_service.UndeployModelOperationMetadata, - ) - - # Done; return the response. 
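Deploy and undeploy compose as two long-running operations. A hedged async sketch (resource names are placeholders, and a real ``DeployedModel`` would also need resource settings such as ``dedicated_resources``, omitted here):

    async def deploy_then_undeploy(client, endpoint_name, model_name):
        deploy_lro = await client.deploy_model(
            endpoint=endpoint_name,
            deployed_model=aiplatform_v1beta1.DeployedModel(
                model=model_name,  # "projects/.../models/..." (placeholder)
                display_name="demo-deployment",
            ),
            traffic_split={"0": 100},
        )
        deployed = (await deploy_lro.result()).deployed_model

        # Undeploy succeeds only once the DeployedModel serves no traffic
        # (or this call's traffic_split removes its traffic).
        undeploy_lro = await client.undeploy_model(
            endpoint=endpoint_name,
            deployed_model_id=deployed.id,
        )
        await undeploy_lro.result()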
- return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "EndpointServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py deleted file mode 100644 index 9ad6408d14..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py +++ /dev/null @@ -1,1054 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from distutils import util -import os -import re -from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.endpoint_service import pagers -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import endpoint -from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint -from google.cloud.aiplatform_v1beta1.types import endpoint_service -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import EndpointServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import EndpointServiceGrpcTransport -from .transports.grpc_asyncio import EndpointServiceGrpcAsyncIOTransport - - -class EndpointServiceClientMeta(type): - """Metaclass for the EndpointService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. 
- """ - _transport_registry = OrderedDict() # type: Dict[str, Type[EndpointServiceTransport]] - _transport_registry["grpc"] = EndpointServiceGrpcTransport - _transport_registry["grpc_asyncio"] = EndpointServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[EndpointServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class EndpointServiceClient(metaclass=EndpointServiceClientMeta): - """""" - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - EndpointServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - EndpointServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> EndpointServiceTransport: - """Returns the transport used by the client instance. - - Returns: - EndpointServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def endpoint_path(project: str,location: str,endpoint: str,) -> str: - """Returns a fully-qualified endpoint string.""" - return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - - @staticmethod - def parse_endpoint_path(path: str) -> Dict[str,str]: - """Parses a endpoint path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_path(project: str,location: str,model: str,) -> str: - """Returns a fully-qualified model string.""" - return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - - @staticmethod - def parse_model_path(path: str) -> Dict[str,str]: - """Parses a model path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, EndpointServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the endpoint service client. 
- - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, EndpointServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, EndpointServiceTransport): - # transport is a EndpointServiceTransport instance. 
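In short: an explicit ``api_endpoint`` always wins; otherwise GOOGLE_API_USE_MTLS_ENDPOINT selects "never", "always", or "auto", where "auto" picks the mTLS endpoint only if a client certificate is available. A sketch of the explicit override (the regional endpoint shown is an assumption, not taken from this patch):

    from google.api_core.client_options import ClientOptions

    options = ClientOptions(
        # Takes precedence over GOOGLE_API_USE_MTLS_ENDPOINT.
        api_endpoint="us-central1-aiplatform.googleapis.com",  # assumed value
    )
    client = EndpointServiceClient(client_options=options)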
- if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - ) - - def create_endpoint(self, - request: endpoint_service.CreateEndpointRequest = None, - *, - parent: str = None, - endpoint: gca_endpoint.Endpoint = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates an Endpoint. - - Args: - request (google.cloud.aiplatform_v1beta1.types.CreateEndpointRequest): - The request object. Request message for - [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint]. - parent (str): - Required. The resource name of the Location to create - the Endpoint in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - endpoint (google.cloud.aiplatform_v1beta1.types.Endpoint): - Required. The Endpoint to create. - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Endpoint` Models are deployed into it, and afterwards Endpoint is called to obtain - predictions and explanations. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, endpoint]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a endpoint_service.CreateEndpointRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, endpoint_service.CreateEndpointRequest): - request = endpoint_service.CreateEndpointRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if endpoint is not None: - request.endpoint = endpoint - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_endpoint] - - # Certain fields should be provided within the metadata header; - # add these here. 
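The synchronous client mirrors the async surface; its operation future blocks on ``result()`` rather than being awaited. A minimal sketch with placeholder names, using the module's own ``gca_endpoint`` alias:

    client = EndpointServiceClient()
    lro = client.create_endpoint(
        parent="projects/my-project/locations/us-central1",  # placeholder
        endpoint=gca_endpoint.Endpoint(display_name="demo-endpoint"),
    )
    endpoint = lro.result(timeout=300)  # blocks until the LRO finishes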
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_endpoint.Endpoint, - metadata_type=endpoint_service.CreateEndpointOperationMetadata, - ) - - # Done; return the response. - return response - - def get_endpoint(self, - request: endpoint_service.GetEndpointRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> endpoint.Endpoint: - r"""Gets an Endpoint. - - Args: - request (google.cloud.aiplatform_v1beta1.types.GetEndpointRequest): - The request object. Request message for - [EndpointService.GetEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.GetEndpoint] - name (str): - Required. The name of the Endpoint resource. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Endpoint: - Models are deployed into it, and - afterwards Endpoint is called to obtain - predictions and explanations. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a endpoint_service.GetEndpointRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, endpoint_service.GetEndpointRequest): - request = endpoint_service.GetEndpointRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_endpoint] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_endpoints(self, - request: endpoint_service.ListEndpointsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListEndpointsPager: - r"""Lists Endpoints in a Location. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListEndpointsRequest): - The request object. 
Request message for - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. - parent (str): - Required. The resource name of the Location from which - to list the Endpoints. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.endpoint_service.pagers.ListEndpointsPager: - Response message for - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a endpoint_service.ListEndpointsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, endpoint_service.ListEndpointsRequest): - request = endpoint_service.ListEndpointsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_endpoints] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListEndpointsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_endpoint(self, - request: endpoint_service.UpdateEndpointRequest = None, - *, - endpoint: gca_endpoint.Endpoint = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_endpoint.Endpoint: - r"""Updates an Endpoint. - - Args: - request (google.cloud.aiplatform_v1beta1.types.UpdateEndpointRequest): - The request object. Request message for - [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. - endpoint (google.cloud.aiplatform_v1beta1.types.Endpoint): - Required. The Endpoint which replaces - the resource on the server. - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. 
The update mask applies to the resource. See - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Endpoint: - Models are deployed into it, and - afterwards Endpoint is called to obtain - predictions and explanations. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a endpoint_service.UpdateEndpointRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, endpoint_service.UpdateEndpointRequest): - request = endpoint_service.UpdateEndpointRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_endpoint] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint.name", request.endpoint.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_endpoint(self, - request: endpoint_service.DeleteEndpointRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes an Endpoint. - - Args: - request (google.cloud.aiplatform_v1beta1.types.DeleteEndpointRequest): - The request object. Request message for - [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeleteEndpoint]. - name (str): - Required. The name of the Endpoint resource to be - deleted. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. 
A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a endpoint_service.DeleteEndpointRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, endpoint_service.DeleteEndpointRequest): - request = endpoint_service.DeleteEndpointRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_endpoint] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def deploy_model(self, - request: endpoint_service.DeployModelRequest = None, - *, - endpoint: str = None, - deployed_model: gca_endpoint.DeployedModel = None, - traffic_split: Sequence[endpoint_service.DeployModelRequest.TrafficSplitEntry] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deploys a Model into this Endpoint, creating a - DeployedModel within it. - - Args: - request (google.cloud.aiplatform_v1beta1.types.DeployModelRequest): - The request object. Request message for - [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. - endpoint (str): - Required. The name of the Endpoint resource into which - to deploy a Model. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_model (google.cloud.aiplatform_v1beta1.types.DeployedModel): - Required. The DeployedModel to be created within the - Endpoint. Note that - [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] - must be updated for the DeployedModel to start receiving - traffic, either as part of this call, or via - [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. - - This corresponds to the ``deployed_model`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
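Because the delete operation's result type is ``Empty``, calling ``result()`` is useful only to block until completion and surface any error:

    lro = client.delete_endpoint(name=endpoint.name)
    lro.result()  # returns an empty message; raises on failure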
- traffic_split (Sequence[google.cloud.aiplatform_v1beta1.types.DeployModelRequest.TrafficSplitEntry]): - A map from a DeployedModel's ID to the percentage of - this Endpoint's traffic that should be forwarded to that - DeployedModel. - - If this field is non-empty, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] - will be overwritten with it. To refer to the ID of the - just being deployed Model, a "0" should be used, and the - actual ID of the new DeployedModel will be filled in its - place by this method. The traffic percentage values must - add up to 100. - - If this field is empty, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] - is not updated. - - This corresponds to the ``traffic_split`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.DeployModelResponse` - Response message for - [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, deployed_model, traffic_split]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a endpoint_service.DeployModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, endpoint_service.DeployModelRequest): - request = endpoint_service.DeployModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if deployed_model is not None: - request.deployed_model = deployed_model - if traffic_split is not None: - request.traffic_split = traffic_split - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.deploy_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - endpoint_service.DeployModelResponse, - metadata_type=endpoint_service.DeployModelOperationMetadata, - ) - - # Done; return the response. 
- return response - - def undeploy_model(self, - request: endpoint_service.UndeployModelRequest = None, - *, - endpoint: str = None, - deployed_model_id: str = None, - traffic_split: Sequence[endpoint_service.UndeployModelRequest.TrafficSplitEntry] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Undeploys a Model from an Endpoint, removing a - DeployedModel from it, and freeing all resources it's - using. - - Args: - request (google.cloud.aiplatform_v1beta1.types.UndeployModelRequest): - The request object. Request message for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. - endpoint (str): - Required. The name of the Endpoint resource from which - to undeploy a Model. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_model_id (str): - Required. The ID of the DeployedModel - to be undeployed from the Endpoint. - - This corresponds to the ``deployed_model_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - traffic_split (Sequence[google.cloud.aiplatform_v1beta1.types.UndeployModelRequest.TrafficSplitEntry]): - If this field is provided, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] - will be overwritten with it. If last DeployedModel is - being undeployed from the Endpoint, the - [Endpoint.traffic_split] will always end up empty when - this call returns. A DeployedModel will be successfully - undeployed only if it doesn't have any traffic assigned - to it when this method executes, or if this field - unassigns any traffic to it. - - This corresponds to the ``traffic_split`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.UndeployModelResponse` - Response message for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, deployed_model_id, traffic_split]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a endpoint_service.UndeployModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, endpoint_service.UndeployModelRequest): - request = endpoint_service.UndeployModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if endpoint is not None: - request.endpoint = endpoint - if deployed_model_id is not None: - request.deployed_model_id = deployed_model_id - if traffic_split is not None: - request.traffic_split = traffic_split - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.undeploy_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - endpoint_service.UndeployModelResponse, - metadata_type=endpoint_service.UndeployModelOperationMetadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "EndpointServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/pagers.py deleted file mode 100644 index 80c94eacf9..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/pagers.py +++ /dev/null @@ -1,141 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.cloud.aiplatform_v1beta1.types import endpoint -from google.cloud.aiplatform_v1beta1.types import endpoint_service - - -class ListEndpointsPager: - """A pager for iterating through ``list_endpoints`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListEndpointsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``endpoints`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListEndpoints`` requests and continue to iterate - through the ``endpoints`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListEndpointsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., endpoint_service.ListEndpointsResponse], - request: endpoint_service.ListEndpointsRequest, - response: endpoint_service.ListEndpointsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. 
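As the class docstring notes, item iteration fetches further pages transparently, while only the most recent response backs attribute lookups. A sketch (the parent value is a placeholder):

    pager = client.list_endpoints(
        parent="projects/my-project/locations/us-central1")

    # Item-level iteration; additional pages are requested lazily.
    for endpoint in pager:
        print(endpoint.display_name)

    # Page-level iteration via `pages`; use a fresh pager, since the
    # loop above has already consumed this one.
    for page in client.list_endpoints(
            parent="projects/my-project/locations/us-central1").pages:
        print(len(page.endpoints))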
- - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListEndpointsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListEndpointsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = endpoint_service.ListEndpointsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[endpoint_service.ListEndpointsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[endpoint.Endpoint]: - for page in self.pages: - yield from page.endpoints - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListEndpointsAsyncPager: - """A pager for iterating through ``list_endpoints`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListEndpointsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``endpoints`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListEndpoints`` requests and continue to iterate - through the ``endpoints`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListEndpointsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[endpoint_service.ListEndpointsResponse]], - request: endpoint_service.ListEndpointsRequest, - response: endpoint_service.ListEndpointsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListEndpointsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListEndpointsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = endpoint_service.ListEndpointsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[endpoint_service.ListEndpointsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[endpoint.Endpoint]: - async def async_generator(): - async for page in self.pages: - for response in page.endpoints: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/__init__.py deleted file mode 100644 index a062fc074c..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import EndpointServiceTransport -from .grpc import EndpointServiceGrpcTransport -from .grpc_asyncio import EndpointServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[EndpointServiceTransport]] -_transport_registry['grpc'] = EndpointServiceGrpcTransport -_transport_registry['grpc_asyncio'] = EndpointServiceGrpcAsyncIOTransport - -__all__ = ( - 'EndpointServiceTransport', - 'EndpointServiceGrpcTransport', - 'EndpointServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py deleted file mode 100644 index f41b7dab50..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py +++ /dev/null @@ -1,261 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
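Before the base transport below, it may help to see how the pagers deleted above are typically driven. This is a minimal illustrative sketch, not part of the change: it assumes Application Default Credentials, and the project and location values are placeholders.

```python
# Illustrative only: consuming the generated ListEndpoints pagers.
# "my-project" and "us-central1" are placeholders.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.EndpointServiceClient()
parent = "projects/my-project/locations/us-central1"

# Item-level iteration: the pager transparently issues follow-up
# ListEndpoints requests whenever next_page_token is set on a response.
for endpoint in client.list_endpoints(parent=parent):
    print(endpoint.name, endpoint.display_name)

# Page-level iteration via the `pages` property defined above.
for page in client.list_endpoints(parent=parent).pages:
    print(len(page.endpoints), "endpoints on this page")
```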
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import endpoint -from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint -from google.cloud.aiplatform_v1beta1.types import endpoint_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class EndpointServiceTransport(abc.ABC): - """Abstract transport class for EndpointService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. 
- if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # TODO(busunkim): This method is in the base transport - # to avoid duplicating code across the transport classes. These functions - # should be deleted once the minimum required version of google-auth is increased. - - # TODO: Remove this function once google-auth >= 1.25.0 is required - @classmethod - def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: - """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" - - scopes_kwargs = {} - - if _GOOGLE_AUTH_VERSION and ( - packaging.version.parse(_GOOGLE_AUTH_VERSION) - >= packaging.version.parse("1.25.0") - ): - scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} - else: - scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} - - return scopes_kwargs - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.create_endpoint: gapic_v1.method.wrap_method( - self.create_endpoint, - default_timeout=5.0, - client_info=client_info, - ), - self.get_endpoint: gapic_v1.method.wrap_method( - self.get_endpoint, - default_timeout=5.0, - client_info=client_info, - ), - self.list_endpoints: gapic_v1.method.wrap_method( - self.list_endpoints, - default_timeout=5.0, - client_info=client_info, - ), - self.update_endpoint: gapic_v1.method.wrap_method( - self.update_endpoint, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_endpoint: gapic_v1.method.wrap_method( - self.delete_endpoint, - default_timeout=5.0, - client_info=client_info, - ), - self.deploy_model: gapic_v1.method.wrap_method( - self.deploy_model, - default_timeout=5.0, - client_info=client_info, - ), - self.undeploy_model: gapic_v1.method.wrap_method( - self.undeploy_model, - default_timeout=5.0, - client_info=client_info, - ), - } - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_endpoint(self) -> Callable[ - [endpoint_service.CreateEndpointRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_endpoint(self) -> Callable[ - [endpoint_service.GetEndpointRequest], - Union[ - endpoint.Endpoint, - Awaitable[endpoint.Endpoint] - ]]: - raise NotImplementedError() - - @property - def list_endpoints(self) -> Callable[ - [endpoint_service.ListEndpointsRequest], - Union[ - endpoint_service.ListEndpointsResponse, - Awaitable[endpoint_service.ListEndpointsResponse] - ]]: - raise NotImplementedError() - - @property - def update_endpoint(self) -> Callable[ -
[endpoint_service.UpdateEndpointRequest], - Union[ - gca_endpoint.Endpoint, - Awaitable[gca_endpoint.Endpoint] - ]]: - raise NotImplementedError() - - @property - def delete_endpoint(self) -> Callable[ - [endpoint_service.DeleteEndpointRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def deploy_model(self) -> Callable[ - [endpoint_service.DeployModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def undeploy_model(self) -> Callable[ - [endpoint_service.UndeployModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'EndpointServiceTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py deleted file mode 100644 index 03282d4be0..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py +++ /dev/null @@ -1,430 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.api_core import gapic_v1 # type: ignore -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1beta1.types import endpoint -from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint -from google.cloud.aiplatform_v1beta1.types import endpoint_service -from google.longrunning import operations_pb2 # type: ignore -from .base import EndpointServiceTransport, DEFAULT_CLIENT_INFO - - -class EndpointServiceGrpcTransport(EndpointServiceTransport): - """gRPC backend transport for EndpointService. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed.
- credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): An optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations.
- - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_endpoint(self) -> Callable[ - [endpoint_service.CreateEndpointRequest], - operations_pb2.Operation]: - r"""Return a callable for the create endpoint method over gRPC. - - Creates an Endpoint. - - Returns: - Callable[[~.CreateEndpointRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_endpoint' not in self._stubs: - self._stubs['create_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.EndpointService/CreateEndpoint', - request_serializer=endpoint_service.CreateEndpointRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_endpoint'] - - @property - def get_endpoint(self) -> Callable[ - [endpoint_service.GetEndpointRequest], - endpoint.Endpoint]: - r"""Return a callable for the get endpoint method over gRPC. - - Gets an Endpoint. - - Returns: - Callable[[~.GetEndpointRequest], - ~.Endpoint]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_endpoint' not in self._stubs: - self._stubs['get_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.EndpointService/GetEndpoint', - request_serializer=endpoint_service.GetEndpointRequest.serialize, - response_deserializer=endpoint.Endpoint.deserialize, - ) - return self._stubs['get_endpoint'] - - @property - def list_endpoints(self) -> Callable[ - [endpoint_service.ListEndpointsRequest], - endpoint_service.ListEndpointsResponse]: - r"""Return a callable for the list endpoints method over gRPC. - - Lists Endpoints in a Location. - - Returns: - Callable[[~.ListEndpointsRequest], - ~.ListEndpointsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_endpoints' not in self._stubs: - self._stubs['list_endpoints'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.EndpointService/ListEndpoints', - request_serializer=endpoint_service.ListEndpointsRequest.serialize, - response_deserializer=endpoint_service.ListEndpointsResponse.deserialize, - ) - return self._stubs['list_endpoints'] - - @property - def update_endpoint(self) -> Callable[ - [endpoint_service.UpdateEndpointRequest], - gca_endpoint.Endpoint]: - r"""Return a callable for the update endpoint method over gRPC. - - Updates an Endpoint. - - Returns: - Callable[[~.UpdateEndpointRequest], - ~.Endpoint]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_endpoint' not in self._stubs: - self._stubs['update_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.EndpointService/UpdateEndpoint', - request_serializer=endpoint_service.UpdateEndpointRequest.serialize, - response_deserializer=gca_endpoint.Endpoint.deserialize, - ) - return self._stubs['update_endpoint'] - - @property - def delete_endpoint(self) -> Callable[ - [endpoint_service.DeleteEndpointRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete endpoint method over gRPC. - - Deletes an Endpoint. - - Returns: - Callable[[~.DeleteEndpointRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_endpoint' not in self._stubs: - self._stubs['delete_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.EndpointService/DeleteEndpoint', - request_serializer=endpoint_service.DeleteEndpointRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_endpoint'] - - @property - def deploy_model(self) -> Callable[ - [endpoint_service.DeployModelRequest], - operations_pb2.Operation]: - r"""Return a callable for the deploy model method over gRPC. - - Deploys a Model into this Endpoint, creating a - DeployedModel within it. - - Returns: - Callable[[~.DeployModelRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'deploy_model' not in self._stubs: - self._stubs['deploy_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.EndpointService/DeployModel', - request_serializer=endpoint_service.DeployModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['deploy_model'] - - @property - def undeploy_model(self) -> Callable[ - [endpoint_service.UndeployModelRequest], - operations_pb2.Operation]: - r"""Return a callable for the undeploy model method over gRPC. - - Undeploys a Model from an Endpoint, removing a - DeployedModel from it, and freeing all resources it's - using. - - Returns: - Callable[[~.UndeployModelRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'undeploy_model' not in self._stubs: - self._stubs['undeploy_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.EndpointService/UndeployModel', - request_serializer=endpoint_service.UndeployModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['undeploy_model'] - - -__all__ = ( - 'EndpointServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py deleted file mode 100644 index f5ac9f5a71..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,434 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1beta1.types import endpoint -from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint -from google.cloud.aiplatform_v1beta1.types import endpoint_service -from google.longrunning import operations_pb2 # type: ignore -from .base import EndpointServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import EndpointServiceGrpcTransport - - -class EndpointServiceGrpcAsyncIOTransport(EndpointServiceTransport): - """gRPC AsyncIO backend transport for EndpointService. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): An optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): An optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests.
If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_endpoint(self) -> Callable[ - [endpoint_service.CreateEndpointRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create endpoint method over gRPC. - - Creates an Endpoint.
- - Returns: - Callable[[~.CreateEndpointRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_endpoint' not in self._stubs: - self._stubs['create_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.EndpointService/CreateEndpoint', - request_serializer=endpoint_service.CreateEndpointRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_endpoint'] - - @property - def get_endpoint(self) -> Callable[ - [endpoint_service.GetEndpointRequest], - Awaitable[endpoint.Endpoint]]: - r"""Return a callable for the get endpoint method over gRPC. - - Gets an Endpoint. - - Returns: - Callable[[~.GetEndpointRequest], - Awaitable[~.Endpoint]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_endpoint' not in self._stubs: - self._stubs['get_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.EndpointService/GetEndpoint', - request_serializer=endpoint_service.GetEndpointRequest.serialize, - response_deserializer=endpoint.Endpoint.deserialize, - ) - return self._stubs['get_endpoint'] - - @property - def list_endpoints(self) -> Callable[ - [endpoint_service.ListEndpointsRequest], - Awaitable[endpoint_service.ListEndpointsResponse]]: - r"""Return a callable for the list endpoints method over gRPC. - - Lists Endpoints in a Location. - - Returns: - Callable[[~.ListEndpointsRequest], - Awaitable[~.ListEndpointsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_endpoints' not in self._stubs: - self._stubs['list_endpoints'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.EndpointService/ListEndpoints', - request_serializer=endpoint_service.ListEndpointsRequest.serialize, - response_deserializer=endpoint_service.ListEndpointsResponse.deserialize, - ) - return self._stubs['list_endpoints'] - - @property - def update_endpoint(self) -> Callable[ - [endpoint_service.UpdateEndpointRequest], - Awaitable[gca_endpoint.Endpoint]]: - r"""Return a callable for the update endpoint method over gRPC. - - Updates an Endpoint. - - Returns: - Callable[[~.UpdateEndpointRequest], - Awaitable[~.Endpoint]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_endpoint' not in self._stubs: - self._stubs['update_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.EndpointService/UpdateEndpoint', - request_serializer=endpoint_service.UpdateEndpointRequest.serialize, - response_deserializer=gca_endpoint.Endpoint.deserialize, - ) - return self._stubs['update_endpoint'] - - @property - def delete_endpoint(self) -> Callable[ - [endpoint_service.DeleteEndpointRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete endpoint method over gRPC. - - Deletes an Endpoint. - - Returns: - Callable[[~.DeleteEndpointRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_endpoint' not in self._stubs: - self._stubs['delete_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.EndpointService/DeleteEndpoint', - request_serializer=endpoint_service.DeleteEndpointRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_endpoint'] - - @property - def deploy_model(self) -> Callable[ - [endpoint_service.DeployModelRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the deploy model method over gRPC. - - Deploys a Model into this Endpoint, creating a - DeployedModel within it. - - Returns: - Callable[[~.DeployModelRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'deploy_model' not in self._stubs: - self._stubs['deploy_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.EndpointService/DeployModel', - request_serializer=endpoint_service.DeployModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['deploy_model'] - - @property - def undeploy_model(self) -> Callable[ - [endpoint_service.UndeployModelRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the undeploy model method over gRPC. - - Undeploys a Model from an Endpoint, removing a - DeployedModel from it, and freeing all resources it's - using. - - Returns: - Callable[[~.UndeployModelRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'undeploy_model' not in self._stubs: - self._stubs['undeploy_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.EndpointService/UndeployModel', - request_serializer=endpoint_service.UndeployModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['undeploy_model'] - - -__all__ = ( - 'EndpointServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/__init__.py deleted file mode 100644 index e009ebaec2..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import FeaturestoreOnlineServingServiceClient -from .async_client import FeaturestoreOnlineServingServiceAsyncClient - -__all__ = ( - 'FeaturestoreOnlineServingServiceClient', - 'FeaturestoreOnlineServingServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py deleted file mode 100644 index 3f762760bc..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py +++ /dev/null @@ -1,325 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
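All of the endpoint service transports above ultimately surface undeploy_model as a long-running operation that the client wraps in an operation future. As a minimal illustrative sketch (not part of this change; the endpoint name and deployed model ID below are placeholders, and Application Default Credentials are assumed):

```python
# Illustrative only: driving the UndeployModel LRO through the generated
# sync client. Resource names below are placeholders.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.EndpointServiceClient()

operation = client.undeploy_model(
    endpoint="projects/my-project/locations/us-central1/endpoints/1234",
    deployed_model_id="5678",
)
# The client wraps the raw operations_pb2.Operation in an operation future,
# so result() polls the operations service until the undeploy finishes.
response = operation.result(timeout=300)  # UndeployModelResponse
```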
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, AsyncIterable, Awaitable, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import featurestore_online_service -from .transports.base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import FeaturestoreOnlineServingServiceGrpcAsyncIOTransport -from .client import FeaturestoreOnlineServingServiceClient - - -class FeaturestoreOnlineServingServiceAsyncClient: - """A service for serving online feature values.""" - - _client: FeaturestoreOnlineServingServiceClient - - DEFAULT_ENDPOINT = FeaturestoreOnlineServingServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = FeaturestoreOnlineServingServiceClient.DEFAULT_MTLS_ENDPOINT - - entity_type_path = staticmethod(FeaturestoreOnlineServingServiceClient.entity_type_path) - parse_entity_type_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_entity_type_path) - common_billing_account_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_organization_path) - common_project_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_project_path) - parse_common_project_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_project_path) - common_location_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_location_path) - parse_common_location_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - FeaturestoreOnlineServingServiceAsyncClient: The constructed client. - """ - return FeaturestoreOnlineServingServiceClient.from_service_account_info.__func__(FeaturestoreOnlineServingServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - FeaturestoreOnlineServingServiceAsyncClient: The constructed client. 
- """ - return FeaturestoreOnlineServingServiceClient.from_service_account_file.__func__(FeaturestoreOnlineServingServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> FeaturestoreOnlineServingServiceTransport: - """Returns the transport used by the client instance. - - Returns: - FeaturestoreOnlineServingServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(FeaturestoreOnlineServingServiceClient).get_transport_class, type(FeaturestoreOnlineServingServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, FeaturestoreOnlineServingServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the featurestore online serving service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.FeaturestoreOnlineServingServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = FeaturestoreOnlineServingServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def read_feature_values(self, - request: featurestore_online_service.ReadFeatureValuesRequest = None, - *, - entity_type: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> featurestore_online_service.ReadFeatureValuesResponse: - r"""Reads Feature values of a specific entity of an - EntityType. For reading feature values of multiple - entities of an EntityType, please use - StreamingReadFeatureValues. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesRequest`): - The request object. Request message for - [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues].
- entity_type (:class:`str`): - Required. The resource name of the EntityType for the - entity being read. Value format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. - For example, for a machine learning model predicting - user clicks on a website, an EntityType ID could be - "user". - - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse: - Response message for - [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_online_service.ReadFeatureValuesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if entity_type is not None: - request.entity_type = entity_type - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.read_feature_values, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("entity_type", request.entity_type), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def streaming_read_feature_values(self, - request: featurestore_online_service.StreamingReadFeatureValuesRequest = None, - *, - entity_type: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> Awaitable[AsyncIterable[featurestore_online_service.ReadFeatureValuesResponse]]: - r"""Reads Feature values for multiple entities. Depending - on their size, data for different entities may be broken - up across multiple responses. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.StreamingReadFeatureValuesRequest`): - The request object. Request message for - [FeaturestoreOnlineServingService.StreamingReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.StreamingReadFeatureValues]. - entity_type (:class:`str`): - Required. The resource name of the entities' type. Value - format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. - For example, for a machine learning model predicting - user clicks on a website, an EntityType ID could be - "user". - - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried.
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - AsyncIterable[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse]: - Response message for - [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_online_service.StreamingReadFeatureValuesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if entity_type is not None: - request.entity_type = entity_type - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.streaming_read_feature_values, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("entity_type", request.entity_type), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "FeaturestoreOnlineServingServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py deleted file mode 100644 index 51db2f2c04..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py +++ /dev/null @@ -1,512 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
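Taken together, the async client methods deleted above reduce to one unary RPC and one server-streaming RPC. A minimal usage sketch, assuming the featurestore resources named here already exist (all identifiers are illustrative, and required request fields such as the entity ID and feature selector are elided for brevity):

    import asyncio

    from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service import (
        FeaturestoreOnlineServingServiceAsyncClient,
    )

    async def main():
        client = FeaturestoreOnlineServingServiceAsyncClient()
        # Illustrative resource name; the featurestore and entity type must exist.
        entity_type = (
            "projects/my-project/locations/us-central1"
            "/featurestores/my-featurestore/entityTypes/user"
        )

        # Unary read: feature values for a single entity.
        response = await client.read_feature_values(entity_type=entity_type)
        print(response)

        # Server-streaming read: data may arrive across several responses.
        stream = await client.streaming_read_feature_values(entity_type=entity_type)
        async for chunk in stream:
            print(chunk)

    asyncio.run(main())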
-#
-from collections import OrderedDict
-from distutils import util
-import os
-import re
-from typing import Callable, Dict, Optional, Iterable, Sequence, Tuple, Type, Union
-import pkg_resources
-
-from google.api_core import client_options as client_options_lib  # type: ignore
-from google.api_core import exceptions as core_exceptions  # type: ignore
-from google.api_core import gapic_v1  # type: ignore
-from google.api_core import retry as retries  # type: ignore
-from google.auth import credentials as ga_credentials  # type: ignore
-from google.auth.transport import mtls  # type: ignore
-from google.auth.transport.grpc import SslCredentials  # type: ignore
-from google.auth.exceptions import MutualTLSChannelError  # type: ignore
-from google.oauth2 import service_account  # type: ignore
-
-from google.cloud.aiplatform_v1beta1.types import featurestore_online_service
-from .transports.base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO
-from .transports.grpc import FeaturestoreOnlineServingServiceGrpcTransport
-from .transports.grpc_asyncio import FeaturestoreOnlineServingServiceGrpcAsyncIOTransport
-
-
-class FeaturestoreOnlineServingServiceClientMeta(type):
-    """Metaclass for the FeaturestoreOnlineServingService client.
-
-    This provides class-level methods for building and retrieving
-    support objects (e.g. transport) without polluting the client instance
-    objects.
-    """
-    _transport_registry = OrderedDict()  # type: Dict[str, Type[FeaturestoreOnlineServingServiceTransport]]
-    _transport_registry["grpc"] = FeaturestoreOnlineServingServiceGrpcTransport
-    _transport_registry["grpc_asyncio"] = FeaturestoreOnlineServingServiceGrpcAsyncIOTransport
-
-    def get_transport_class(cls,
-            label: str = None,
-        ) -> Type[FeaturestoreOnlineServingServiceTransport]:
-        """Returns an appropriate transport class.
-
-        Args:
-            label: The name of the desired transport. If none is
-                provided, then the first transport in the registry is used.
-
-        Returns:
-            The transport class to use.
-        """
-        # If a specific transport is requested, return that one.
-        if label:
-            return cls._transport_registry[label]
-
-        # No transport is requested; return the default (that is, the first one
-        # in the dictionary).
-        return next(iter(cls._transport_registry.values()))
-
-
-class FeaturestoreOnlineServingServiceClient(metaclass=FeaturestoreOnlineServingServiceClientMeta):
-    """A service for serving online feature values."""
-
-    @staticmethod
-    def _get_default_mtls_endpoint(api_endpoint):
-        """Converts api endpoint to mTLS endpoint.
-
-        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
-        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
-        Args:
-            api_endpoint (Optional[str]): the api endpoint to convert.
-        Returns:
-            str: converted mTLS api endpoint.
-        """
-        if not api_endpoint:
-            return api_endpoint
-
-        mtls_endpoint_re = re.compile(
-            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
- ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - FeaturestoreOnlineServingServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - FeaturestoreOnlineServingServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> FeaturestoreOnlineServingServiceTransport: - """Returns the transport used by the client instance. - - Returns: - FeaturestoreOnlineServingServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def entity_type_path(project: str,location: str,featurestore: str,entity_type: str,) -> str: - """Returns a fully-qualified entity_type string.""" - return "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, ) - - @staticmethod - def parse_entity_type_path(path: str) -> Dict[str,str]: - """Parses a entity_type path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)/entityTypes/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, FeaturestoreOnlineServingServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the featurestore online serving service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. 
- transport (Union[str, FeaturestoreOnlineServingServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, FeaturestoreOnlineServingServiceTransport): - # transport is a FeaturestoreOnlineServingServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
-                )
-            self._transport = transport
-        else:
-            Transport = type(self).get_transport_class(transport)
-            self._transport = Transport(
-                credentials=credentials,
-                credentials_file=client_options.credentials_file,
-                host=api_endpoint,
-                scopes=client_options.scopes,
-                client_cert_source_for_mtls=client_cert_source_func,
-                quota_project_id=client_options.quota_project_id,
-                client_info=client_info,
-            )
-
-    def read_feature_values(self,
-            request: featurestore_online_service.ReadFeatureValuesRequest = None,
-            *,
-            entity_type: str = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> featurestore_online_service.ReadFeatureValuesResponse:
-        r"""Reads Feature values of a specific entity of an
-        EntityType. For reading feature values of multiple
-        entities of an EntityType, please use
-        StreamingReadFeatureValues.
-
-        Args:
-            request (google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesRequest):
-                The request object. Request message for
-                [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues].
-            entity_type (str):
-                Required. The resource name of the EntityType for the
-                entity being read. Value format:
-                ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``.
-                For example, for a machine learning model predicting
-                user clicks on a website, an EntityType ID could be
-                "user".
-
-                This corresponds to the ``entity_type`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse:
-                Response message for
-                [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues].
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([entity_type])
-        if request is not None and has_flattened_params:
-            raise ValueError('If the `request` argument is set, then none of '
-                             'the individual field arguments should be set.')
-
-        # Minor optimization to avoid making a copy if the user passes
-        # in a featurestore_online_service.ReadFeatureValuesRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, featurestore_online_service.ReadFeatureValuesRequest):
-            request = featurestore_online_service.ReadFeatureValuesRequest(request)
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if entity_type is not None:
-            request.entity_type = entity_type
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.read_feature_values]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("entity_type", request.entity_type),
-            )),
-        )
-
-        # Send the request.
-        response = rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Done; return the response.
-        return response
-
-    def streaming_read_feature_values(self,
-            request: featurestore_online_service.StreamingReadFeatureValuesRequest = None,
-            *,
-            entity_type: str = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> Iterable[featurestore_online_service.ReadFeatureValuesResponse]:
-        r"""Reads Feature values for multiple entities. Depending
-        on their size, data for different entities may be broken
-        up across multiple responses.
-
-        Args:
-            request (google.cloud.aiplatform_v1beta1.types.StreamingReadFeatureValuesRequest):
-                The request object. Request message for
-                [FeaturestoreOnlineServingService.StreamingReadFeatureValues][].
-            entity_type (str):
-                Required. The resource name of the entities' type. Value
-                format:
-                ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``.
-                For example, for a machine learning model predicting
-                user clicks on a website, an EntityType ID could be
-                "user".
-
-                This corresponds to the ``entity_type`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            Iterable[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse]:
-                Response message for
-                [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues].
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([entity_type])
-        if request is not None and has_flattened_params:
-            raise ValueError('If the `request` argument is set, then none of '
-                             'the individual field arguments should be set.')
-
-        # Minor optimization to avoid making a copy if the user passes
-        # in a featurestore_online_service.StreamingReadFeatureValuesRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, featurestore_online_service.StreamingReadFeatureValuesRequest):
-            request = featurestore_online_service.StreamingReadFeatureValuesRequest(request)
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if entity_type is not None:
-            request.entity_type = entity_type
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.streaming_read_feature_values]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("entity_type", request.entity_type),
-            )),
-        )
-
-        # Send the request.
-        response = rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Done; return the response.
- return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "FeaturestoreOnlineServingServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/__init__.py deleted file mode 100644 index d1abcd0c43..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import FeaturestoreOnlineServingServiceTransport -from .grpc import FeaturestoreOnlineServingServiceGrpcTransport -from .grpc_asyncio import FeaturestoreOnlineServingServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[FeaturestoreOnlineServingServiceTransport]] -_transport_registry['grpc'] = FeaturestoreOnlineServingServiceGrpcTransport -_transport_registry['grpc_asyncio'] = FeaturestoreOnlineServingServiceGrpcAsyncIOTransport - -__all__ = ( - 'FeaturestoreOnlineServingServiceTransport', - 'FeaturestoreOnlineServingServiceGrpcTransport', - 'FeaturestoreOnlineServingServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py deleted file mode 100644 index 47ba2e7efb..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py +++ /dev/null @@ -1,182 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
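The transports/__init__.py registry deleted above mirrors the metaclass registry in client.py: an OrderedDict from label to transport class whose first entry doubles as the default. A stripped-down sketch of that lookup pattern (stand-in classes, not the generated API):

    from collections import OrderedDict
    from typing import Optional

    class GrpcTransport: ...          # stand-in for the sync gRPC transport
    class GrpcAsyncIOTransport: ...   # stand-in for the asyncio transport

    _transport_registry = OrderedDict()
    _transport_registry["grpc"] = GrpcTransport
    _transport_registry["grpc_asyncio"] = GrpcAsyncIOTransport

    def get_transport_class(label: Optional[str] = None):
        # An explicit label selects that transport; otherwise the first
        # (insertion-ordered) entry is the default, here "grpc".
        if label:
            return _transport_registry[label]
        return next(iter(_transport_registry.values()))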
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import featurestore_online_service - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class FeaturestoreOnlineServingServiceTransport(abc.ABC): - """Abstract transport class for FeaturestoreOnlineServingService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. 
-        if credentials and credentials_file:
-            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
-
-        if credentials_file is not None:
-            credentials, _ = google.auth.load_credentials_from_file(
-                credentials_file,
-                **scopes_kwargs,
-                quota_project_id=quota_project_id
-            )
-
-        elif credentials is None:
-            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
-
-        # If the credentials are service account credentials, then always try to use self signed JWT.
-        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
-            credentials = credentials.with_always_use_jwt_access(True)
-
-        # Save the credentials.
-        self._credentials = credentials
-
-    # TODO(busunkim): This method is in the base transport
-    # to avoid duplicating code across the transport classes. These functions
-    # should be deleted once the minimum required version of google-auth is increased.
-
-    # TODO: Remove this function once google-auth >= 1.25.0 is required
-    @classmethod
-    def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
-        """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
-
-        scopes_kwargs = {}
-
-        if _GOOGLE_AUTH_VERSION and (
-            packaging.version.parse(_GOOGLE_AUTH_VERSION)
-            >= packaging.version.parse("1.25.0")
-        ):
-            scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
-        else:
-            scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
-
-        return scopes_kwargs
-
-    def _prep_wrapped_messages(self, client_info):
-        # Precompute the wrapped methods.
-        self._wrapped_methods = {
-            self.read_feature_values: gapic_v1.method.wrap_method(
-                self.read_feature_values,
-                default_timeout=5.0,
-                client_info=client_info,
-            ),
-            self.streaming_read_feature_values: gapic_v1.method.wrap_method(
-                self.streaming_read_feature_values,
-                default_timeout=5.0,
-                client_info=client_info,
-            ),
-        }
-
-    @property
-    def read_feature_values(self) -> Callable[
-            [featurestore_online_service.ReadFeatureValuesRequest],
-            Union[
-                featurestore_online_service.ReadFeatureValuesResponse,
-                Awaitable[featurestore_online_service.ReadFeatureValuesResponse]
-            ]]:
-        raise NotImplementedError()
-
-    @property
-    def streaming_read_feature_values(self) -> Callable[
-            [featurestore_online_service.StreamingReadFeatureValuesRequest],
-            Union[
-                featurestore_online_service.ReadFeatureValuesResponse,
-                Awaitable[featurestore_online_service.ReadFeatureValuesResponse]
-            ]]:
-        raise NotImplementedError()
-
-
-__all__ = (
-    'FeaturestoreOnlineServingServiceTransport',
-)
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py
deleted file mode 100644
index 83868dcafc..0000000000
--- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py
+++ /dev/null
@@ -1,283 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
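The _get_scopes_kwargs shim in base.py is a version-gated keyword-argument switch: newer google-auth accepts default_scopes, while older releases only accept scopes. A self-contained sketch of the same gating (the threshold and scope value are taken from the code above):

    import packaging.version

    AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)

    def scopes_kwargs(detected_version, scopes=None):
        # google-auth >= 1.25.0 understands default_scopes; older versions
        # only take scopes, so the default is folded in there instead.
        if detected_version and (
            packaging.version.parse(detected_version)
            >= packaging.version.parse("1.25.0")
        ):
            return {"scopes": scopes, "default_scopes": AUTH_SCOPES}
        return {"scopes": scopes or AUTH_SCOPES}

    assert scopes_kwargs("1.30.0") == {"scopes": None, "default_scopes": AUTH_SCOPES}
    assert scopes_kwargs("1.20.0") == {"scopes": AUTH_SCOPES}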
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import warnings
-from typing import Callable, Dict, Optional, Sequence, Tuple, Union
-
-from google.api_core import grpc_helpers  # type: ignore
-from google.api_core import gapic_v1  # type: ignore
-import google.auth  # type: ignore
-from google.auth import credentials as ga_credentials  # type: ignore
-from google.auth.transport.grpc import SslCredentials  # type: ignore
-
-import grpc  # type: ignore
-
-from google.cloud.aiplatform_v1beta1.types import featurestore_online_service
-from .base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO
-
-
-class FeaturestoreOnlineServingServiceGrpcTransport(FeaturestoreOnlineServingServiceTransport):
-    """gRPC backend transport for FeaturestoreOnlineServingService.
-
-    A service for serving online feature values.
-
-    This class defines the same methods as the primary client, so the
-    primary client can load the underlying transport implementation
-    and call it.
-
-    It sends protocol buffers over the wire using gRPC (which is built on
-    top of HTTP/2); the ``grpcio`` package must be installed.
-    """
-    _stubs: Dict[str, Callable]
-
-    def __init__(self, *,
-            host: str = 'aiplatform.googleapis.com',
-            credentials: ga_credentials.Credentials = None,
-            credentials_file: str = None,
-            scopes: Sequence[str] = None,
-            channel: grpc.Channel = None,
-            api_mtls_endpoint: str = None,
-            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
-            ssl_channel_credentials: grpc.ChannelCredentials = None,
-            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
-            quota_project_id: Optional[str] = None,
-            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
-            always_use_jwt_access: Optional[bool] = False,
-            ) -> None:
-        """Instantiate the transport.
-
-        Args:
-            host (Optional[str]):
-                The hostname to connect to.
-            credentials (Optional[google.auth.credentials.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify the application to the service; if none
-                are specified, the client will attempt to ascertain the
-                credentials from the environment.
-                This argument is ignored if ``channel`` is provided.
-            credentials_file (Optional[str]): A file with credentials that can
-                be loaded with :func:`google.auth.load_credentials_from_file`.
-                This argument is ignored if ``channel`` is provided.
-            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
-                ignored if ``channel`` is provided.
-            channel (Optional[grpc.Channel]): A ``Channel`` instance through
-                which to make calls.
-            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
-                If provided, it overrides the ``host`` argument and tries to create
-                a mutual TLS channel with client SSL credentials from
-                ``client_cert_source`` or application default SSL credentials.
-            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
-                Deprecated. A callback to provide client SSL certificate bytes and
-                private key bytes, both in PEM format. It is ignored if
-                ``api_mtls_endpoint`` is None.
-            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
-                for grpc channel.
It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. 
If
-                none are specified, the client will attempt to ascertain
-                the credentials from the environment.
-            credentials_file (Optional[str]): A file with credentials that can
-                be loaded with :func:`google.auth.load_credentials_from_file`.
-                This argument is mutually exclusive with credentials.
-            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
-                service. These are only used when credentials are not specified and
-                are passed to :func:`google.auth.default`.
-            quota_project_id (Optional[str]): An optional project to use for billing
-                and quota.
-            kwargs (Optional[dict]): Keyword arguments, which are passed to the
-                channel creation.
-        Returns:
-            grpc.Channel: A gRPC channel object.
-
-        Raises:
-            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
-                and ``credentials_file`` are passed.
-        """
-
-        return grpc_helpers.create_channel(
-            host,
-            credentials=credentials,
-            credentials_file=credentials_file,
-            quota_project_id=quota_project_id,
-            default_scopes=cls.AUTH_SCOPES,
-            scopes=scopes,
-            default_host=cls.DEFAULT_HOST,
-            **kwargs
-        )
-
-    @property
-    def grpc_channel(self) -> grpc.Channel:
-        """Return the channel designed to connect to this service.
-        """
-        return self._grpc_channel
-
-    @property
-    def read_feature_values(self) -> Callable[
-            [featurestore_online_service.ReadFeatureValuesRequest],
-            featurestore_online_service.ReadFeatureValuesResponse]:
-        r"""Return a callable for the read feature values method over gRPC.
-
-        Reads Feature values of a specific entity of an
-        EntityType. For reading feature values of multiple
-        entities of an EntityType, please use
-        StreamingReadFeatureValues.
-
-        Returns:
-            Callable[[~.ReadFeatureValuesRequest],
-                    ~.ReadFeatureValuesResponse]:
-                A function that, when called, will call the underlying RPC
-                on the server.
-        """
-        # Generate a "stub function" on-the-fly which will actually make
-        # the request.
-        # gRPC handles serialization and deserialization, so we just need
-        # to pass in the functions for each.
-        if 'read_feature_values' not in self._stubs:
-            self._stubs['read_feature_values'] = self.grpc_channel.unary_unary(
-                '/google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService/ReadFeatureValues',
-                request_serializer=featurestore_online_service.ReadFeatureValuesRequest.serialize,
-                response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize,
-            )
-        return self._stubs['read_feature_values']
-
-    @property
-    def streaming_read_feature_values(self) -> Callable[
-            [featurestore_online_service.StreamingReadFeatureValuesRequest],
-            featurestore_online_service.ReadFeatureValuesResponse]:
-        r"""Return a callable for the streaming read feature values method over gRPC.
-
-        Reads Feature values for multiple entities. Depending
-        on their size, data for different entities may be broken
-        up across multiple responses.
-
-        Returns:
-            Callable[[~.StreamingReadFeatureValuesRequest],
-                    ~.ReadFeatureValuesResponse]:
-                A function that, when called, will call the underlying RPC
-                on the server.
-        """
-        # Generate a "stub function" on-the-fly which will actually make
-        # the request.
-        # gRPC handles serialization and deserialization, so we just need
-        # to pass in the functions for each.
- if 'streaming_read_feature_values' not in self._stubs: - self._stubs['streaming_read_feature_values'] = self.grpc_channel.unary_stream( - '/google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService/StreamingReadFeatureValues', - request_serializer=featurestore_online_service.StreamingReadFeatureValuesRequest.serialize, - response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, - ) - return self._stubs['streaming_read_feature_values'] - - -__all__ = ( - 'FeaturestoreOnlineServingServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc_asyncio.py deleted file mode 100644 index e040cb7a76..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,287 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1beta1.types import featurestore_online_service -from .base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import FeaturestoreOnlineServingServiceGrpcTransport - - -class FeaturestoreOnlineServingServiceGrpcAsyncIOTransport(FeaturestoreOnlineServingServiceTransport): - """gRPC AsyncIO backend transport for FeaturestoreOnlineServingService. - - A service for serving online feature values. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. 
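Each stub property in the gRPC transports lazily builds its callable on first access and caches it in self._stubs, so repeated property reads reuse one stub per channel. The pattern, reduced to its essentials (the channel and RPC path are stand-ins, not the generated service):

    class LazyStubCache:
        def __init__(self, channel):
            self._channel = channel
            self._stubs = {}  # method name -> callable, built on first access

        @property
        def read_feature_values(self):
            # Construct the stub once; later reads return the cached callable.
            if "read_feature_values" not in self._stubs:
                self._stubs["read_feature_values"] = self._channel.unary_unary(
                    "/example.Service/ReadFeatureValues"  # illustrative RPC path
                )
            return self._stubs["read_feature_values"]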
If
-                none are specified, the client will attempt to ascertain
-                the credentials from the environment.
-            credentials_file (Optional[str]): A file with credentials that can
-                be loaded with :func:`google.auth.load_credentials_from_file`.
-                This argument is ignored if ``channel`` is provided.
-            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
-                service. These are only used when credentials are not specified and
-                are passed to :func:`google.auth.default`.
-            quota_project_id (Optional[str]): An optional project to use for billing
-                and quota.
-            kwargs (Optional[dict]): Keyword arguments, which are passed to the
-                channel creation.
-        Returns:
-            aio.Channel: A gRPC AsyncIO channel object.
-        """
-
-        return grpc_helpers_async.create_channel(
-            host,
-            credentials=credentials,
-            credentials_file=credentials_file,
-            quota_project_id=quota_project_id,
-            default_scopes=cls.AUTH_SCOPES,
-            scopes=scopes,
-            default_host=cls.DEFAULT_HOST,
-            **kwargs
-        )
-
-    def __init__(self, *,
-            host: str = 'aiplatform.googleapis.com',
-            credentials: ga_credentials.Credentials = None,
-            credentials_file: Optional[str] = None,
-            scopes: Optional[Sequence[str]] = None,
-            channel: aio.Channel = None,
-            api_mtls_endpoint: str = None,
-            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
-            ssl_channel_credentials: grpc.ChannelCredentials = None,
-            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
-            quota_project_id=None,
-            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
-            always_use_jwt_access: Optional[bool] = False,
-            ) -> None:
-        """Instantiate the transport.
-
-        Args:
-            host (Optional[str]):
-                The hostname to connect to.
-            credentials (Optional[google.auth.credentials.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify the application to the service; if none
-                are specified, the client will attempt to ascertain the
-                credentials from the environment.
-                This argument is ignored if ``channel`` is provided.
-            credentials_file (Optional[str]): A file with credentials that can
-                be loaded with :func:`google.auth.load_credentials_from_file`.
-                This argument is ignored if ``channel`` is provided.
-            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
-                service. These are only used when credentials are not specified and
-                are passed to :func:`google.auth.default`.
-            channel (Optional[aio.Channel]): A ``Channel`` instance through
-                which to make calls.
-            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
-                If provided, it overrides the ``host`` argument and tries to create
-                a mutual TLS channel with client SSL credentials from
-                ``client_cert_source`` or application default SSL credentials.
-            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
-                Deprecated. A callback to provide client SSL certificate bytes and
-                private key bytes, both in PEM format. It is ignored if
-                ``api_mtls_endpoint`` is None.
-            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
-                for grpc channel. It is ignored if ``channel`` is provided.
-            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
-                A callback to provide client certificate bytes and private key bytes,
-                both in PEM format. It is used to configure mutual TLS channel. It is
-                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
-            quota_project_id (Optional[str]): An optional project to use for billing
-                and quota.
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def read_feature_values(self) -> Callable[ - [featurestore_online_service.ReadFeatureValuesRequest], - Awaitable[featurestore_online_service.ReadFeatureValuesResponse]]: - r"""Return a callable for the read feature values method over gRPC. - - Reads Feature values of a specific entity of an - EntityType. For reading feature values of multiple - entities of an EntityType, please use - StreamingReadFeatureValues. - - Returns: - Callable[[~.ReadFeatureValuesRequest], - Awaitable[~.ReadFeatureValuesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'read_feature_values' not in self._stubs: - self._stubs['read_feature_values'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService/ReadFeatureValues', - request_serializer=featurestore_online_service.ReadFeatureValuesRequest.serialize, - response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, - ) - return self._stubs['read_feature_values'] - - @property - def streaming_read_feature_values(self) -> Callable[ - [featurestore_online_service.StreamingReadFeatureValuesRequest], - Awaitable[featurestore_online_service.ReadFeatureValuesResponse]]: - r"""Return a callable for the streaming read feature values method over gRPC. - - Reads Feature values for multiple entities. Depending - on their size, data for different entities may be broken - up across multiple responses. - - Returns: - Callable[[~.StreamingReadFeatureValuesRequest], - Awaitable[~.ReadFeatureValuesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'streaming_read_feature_values' not in self._stubs: - self._stubs['streaming_read_feature_values'] = self.grpc_channel.unary_stream( - '/google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService/StreamingReadFeatureValues', - request_serializer=featurestore_online_service.StreamingReadFeatureValuesRequest.serialize, - response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, - ) - return self._stubs['streaming_read_feature_values'] - - -__all__ = ( - 'FeaturestoreOnlineServingServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/__init__.py deleted file mode 100644 index 81716ce8fe..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import FeaturestoreServiceClient -from .async_client import FeaturestoreServiceAsyncClient - -__all__ = ( - 'FeaturestoreServiceClient', - 'FeaturestoreServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py deleted file mode 100644 index 52057b0876..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py +++ /dev/null @@ -1,2060 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.featurestore_service import pagers -from google.cloud.aiplatform_v1beta1.types import entity_type -from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type -from google.cloud.aiplatform_v1beta1.types import feature -from google.cloud.aiplatform_v1beta1.types import feature as gca_feature -from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats -from google.cloud.aiplatform_v1beta1.types import featurestore -from google.cloud.aiplatform_v1beta1.types import featurestore as gca_featurestore -from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring -from google.cloud.aiplatform_v1beta1.types import featurestore_service -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import FeaturestoreServiceGrpcAsyncIOTransport -from .client import FeaturestoreServiceClient - - -class FeaturestoreServiceAsyncClient: - """The service that handles CRUD and List for resources for - Featurestore. 
- """ - - _client: FeaturestoreServiceClient - - DEFAULT_ENDPOINT = FeaturestoreServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = FeaturestoreServiceClient.DEFAULT_MTLS_ENDPOINT - - entity_type_path = staticmethod(FeaturestoreServiceClient.entity_type_path) - parse_entity_type_path = staticmethod(FeaturestoreServiceClient.parse_entity_type_path) - feature_path = staticmethod(FeaturestoreServiceClient.feature_path) - parse_feature_path = staticmethod(FeaturestoreServiceClient.parse_feature_path) - featurestore_path = staticmethod(FeaturestoreServiceClient.featurestore_path) - parse_featurestore_path = staticmethod(FeaturestoreServiceClient.parse_featurestore_path) - common_billing_account_path = staticmethod(FeaturestoreServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(FeaturestoreServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(FeaturestoreServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(FeaturestoreServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(FeaturestoreServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(FeaturestoreServiceClient.parse_common_organization_path) - common_project_path = staticmethod(FeaturestoreServiceClient.common_project_path) - parse_common_project_path = staticmethod(FeaturestoreServiceClient.parse_common_project_path) - common_location_path = staticmethod(FeaturestoreServiceClient.common_location_path) - parse_common_location_path = staticmethod(FeaturestoreServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - FeaturestoreServiceAsyncClient: The constructed client. - """ - return FeaturestoreServiceClient.from_service_account_info.__func__(FeaturestoreServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - FeaturestoreServiceAsyncClient: The constructed client. - """ - return FeaturestoreServiceClient.from_service_account_file.__func__(FeaturestoreServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> FeaturestoreServiceTransport: - """Returns the transport used by the client instance. - - Returns: - FeaturestoreServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(FeaturestoreServiceClient).get_transport_class, type(FeaturestoreServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, FeaturestoreServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the featurestore service client. 
-
-        Args:
-            credentials (Optional[google.auth.credentials.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify the application to the service; if none
-                are specified, the client will attempt to ascertain the
-                credentials from the environment.
-            transport (Union[str, ~.FeaturestoreServiceTransport]): The
-                transport to use. If set to None, a transport is chosen
-                automatically.
-            client_options (ClientOptions): Custom options for the client. It
-                won't take effect if a ``transport`` instance is provided.
-                (1) The ``api_endpoint`` property can be used to override the
-                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
-                environment variable can also be used to override the endpoint:
-                "always" (always use the default mTLS endpoint), "never" (always
-                use the default regular endpoint) and "auto" (auto switch to the
-                default mTLS endpoint if client certificate is present, this is
-                the default value). However, the ``api_endpoint`` property takes
-                precedence if provided.
-                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
-                is "true", then the ``client_cert_source`` property can be used
-                to provide client certificate for mutual TLS transport. If
-                not provided, the default SSL client certificate will be used if
-                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
-                set, no client certificate will be used.
-
-        Raises:
-            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
-                creation failed for any reason.
-        """
-        self._client = FeaturestoreServiceClient(
-            credentials=credentials,
-            transport=transport,
-            client_options=client_options,
-            client_info=client_info,
-
-        )
-
-    async def create_featurestore(self,
-            request: featurestore_service.CreateFeaturestoreRequest = None,
-            *,
-            parent: str = None,
-            featurestore: gca_featurestore.Featurestore = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> operation_async.AsyncOperation:
-        r"""Creates a new Featurestore in a given project and
-        location.
-
-        Args:
-            request (:class:`google.cloud.aiplatform_v1beta1.types.CreateFeaturestoreRequest`):
-                The request object. Request message for
-                [FeaturestoreService.CreateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeaturestore].
-            parent (:class:`str`):
-                Required. The resource name of the Location to create
-                Featurestores. Format:
-                ``projects/{project}/locations/{location}``
-
-                This corresponds to the ``parent`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            featurestore (:class:`google.cloud.aiplatform_v1beta1.types.Featurestore`):
-                Required. The Featurestore to create.
-                This corresponds to the ``featurestore`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.api_core.operation_async.AsyncOperation:
-                An object representing a long-running operation.
-
-                The result type for the operation will be
-                :class:`google.cloud.aiplatform_v1beta1.types.Featurestore`
-                Featurestore configuration information on how the
-                Featurestore is configured.
-
-        """
-        # Create or coerce a protobuf request object.
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, featurestore]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.CreateFeaturestoreRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if featurestore is not None: - request.featurestore = featurestore - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_featurestore, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_featurestore.Featurestore, - metadata_type=featurestore_service.CreateFeaturestoreOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_featurestore(self, - request: featurestore_service.GetFeaturestoreRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> featurestore.Featurestore: - r"""Gets details of a single Featurestore. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetFeaturestoreRequest`): - The request object. Request message for - [FeaturestoreService.GetFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeaturestore]. - name (:class:`str`): - Required. The name of the - Featurestore resource. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Featurestore: - Featurestore configuration - information on how the Featurestore is - configured. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.GetFeaturestoreRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
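# --- Editor's note (not part of the patch): create_featurestore returns an
# AsyncOperation wrapping the CreateFeaturestore LRO. A hedged sketch of driving
# it to completion; ``featurestore_id`` is an assumed request field (the
# flattened surface above exposes only ``parent``/``featurestore``), the
# regional endpoint is optional, and all names are placeholders.
import asyncio

from google.api_core.client_options import ClientOptions
from google.cloud import aiplatform_v1beta1


async def create_featurestore_example() -> None:
    client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient(
        client_options=ClientOptions(
            api_endpoint="us-central1-aiplatform.googleapis.com",
        ),
    )
    operation = await client.create_featurestore(
        request=aiplatform_v1beta1.CreateFeaturestoreRequest(
            parent="projects/my-project/locations/us-central1",
            featurestore_id="my_store",  # assumed field; see note above
            featurestore=aiplatform_v1beta1.Featurestore(
                online_serving_config=aiplatform_v1beta1.Featurestore.OnlineServingConfig(
                    fixed_node_count=1,
                ),
            ),
        ),
    )
    featurestore = await operation.result()  # waits for the LRO to finish
    print(featurestore.name)


asyncio.run(create_featurestore_example())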
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_featurestore, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_featurestores(self, - request: featurestore_service.ListFeaturestoresRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListFeaturestoresAsyncPager: - r"""Lists Featurestores in a given project and location. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest`): - The request object. Request message for - [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]. - parent (:class:`str`): - Required. The resource name of the Location to list - Featurestores. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturestoresAsyncPager: - Response message for - [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.ListFeaturestoresRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_featurestores, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListFeaturestoresAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
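# --- Editor's note (not part of the patch): the ListFeaturestoresAsyncPager
# assembled above resolves further pages lazily during iteration. A hedged
# sketch with placeholder names; ``pager.pages`` is the usual page-level
# alternative to item iteration.
from google.cloud import aiplatform_v1beta1


async def list_featurestores_example() -> None:
    client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
    pager = await client.list_featurestores(
        parent="projects/my-project/locations/us-central1",
    )
    async for featurestore in pager:  # additional pages fetched automatically
        print(featurestore.name)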
- return response - - async def update_featurestore(self, - request: featurestore_service.UpdateFeaturestoreRequest = None, - *, - featurestore: gca_featurestore.Featurestore = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Updates the parameters of a single Featurestore. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateFeaturestoreRequest`): - The request object. Request message for - [FeaturestoreService.UpdateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeaturestore]. - featurestore (:class:`google.cloud.aiplatform_v1beta1.types.Featurestore`): - Required. The Featurestore's ``name`` field is used to - identify the Featurestore to be updated. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - - This corresponds to the ``featurestore`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Field mask is used to specify the fields to be - overwritten in the Featurestore resource by the update. - The fields specified in the update_mask are relative to - the resource, not the full request. A field will be - overwritten if it is in the mask. If the user does not - provide a mask then only the non-empty fields present in - the request will be overwritten. Set the update_mask to - ``*`` to override all fields. - - Updatable fields: - - - ``labels`` - - ``online_serving_config.fixed_node_count`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` - Featurestore configuration information on how the - Featurestore is configured. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([featurestore, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.UpdateFeaturestoreRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if featurestore is not None: - request.featurestore = featurestore - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_featurestore, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
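# --- Editor's note (not part of the patch): a hedged sketch of the
# update-with-field-mask pattern documented above; only masked paths are
# overwritten, and ``online_serving_config.fixed_node_count`` is one of the
# documented updatable fields. Placeholder names.
from google.cloud import aiplatform_v1beta1
from google.protobuf import field_mask_pb2


async def update_featurestore_example() -> None:
    client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
    operation = await client.update_featurestore(
        featurestore=aiplatform_v1beta1.Featurestore(
            name=("projects/my-project/locations/us-central1/"
                  "featurestores/my-store"),
            online_serving_config=aiplatform_v1beta1.Featurestore.OnlineServingConfig(
                fixed_node_count=2,
            ),
        ),
        update_mask=field_mask_pb2.FieldMask(
            paths=["online_serving_config.fixed_node_count"],
        ),
    )
    print(await operation.result())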
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("featurestore.name", request.featurestore.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_featurestore.Featurestore, - metadata_type=featurestore_service.UpdateFeaturestoreOperationMetadata, - ) - - # Done; return the response. - return response - - async def delete_featurestore(self, - request: featurestore_service.DeleteFeaturestoreRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a single Featurestore. The Featurestore must not contain - any EntityTypes or ``force`` must be set to true for the request - to succeed. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteFeaturestoreRequest`): - The request object. Request message for - [FeaturestoreService.DeleteFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeaturestore]. - name (:class:`str`): - Required. The name of the Featurestore to be deleted. - Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.DeleteFeaturestoreRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_featurestore, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
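# --- Editor's note (not part of the patch): per the docstring above,
# DeleteFeaturestore fails while EntityTypes remain unless ``force`` is set,
# and ``force`` is only reachable through the request object since the
# flattened surface exposes just ``name``. A hedged sketch; placeholder names.
from google.cloud import aiplatform_v1beta1


async def delete_featurestore_example() -> None:
    client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
    operation = await client.delete_featurestore(
        request=aiplatform_v1beta1.DeleteFeaturestoreRequest(
            name=("projects/my-project/locations/us-central1/"
                  "featurestores/my-store"),
            force=True,  # also removes any child EntityTypes
        ),
    )
    await operation.result()  # resolves to google.protobuf.Empty on success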
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def create_entity_type(self, - request: featurestore_service.CreateEntityTypeRequest = None, - *, - parent: str = None, - entity_type: gca_entity_type.EntityType = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a new EntityType in a given Featurestore. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.CreateEntityTypeRequest`): - The request object. Request message for - [FeaturestoreService.CreateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateEntityType]. - parent (:class:`str`): - Required. The resource name of the Featurestore to - create EntityTypes. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - entity_type (:class:`google.cloud.aiplatform_v1beta1.types.EntityType`): - The EntityType to create. - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.EntityType` An entity type is a type of object in a system that needs to be modeled and - have stored information about. For example, driver is - an entity type, and driver0 is an instance of an - entity type driver. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, entity_type]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.CreateEntityTypeRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if entity_type is not None: - request.entity_type = entity_type - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_entity_type, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
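# --- Editor's note (not part of the patch): a hedged sketch for the
# CreateEntityType call above; ``entity_type_id`` is an assumed request field
# not present in the flattened arguments, and the names echo the driver/driver0
# example used throughout the docstrings.
from google.cloud import aiplatform_v1beta1


async def create_entity_type_example() -> None:
    client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
    operation = await client.create_entity_type(
        request=aiplatform_v1beta1.CreateEntityTypeRequest(
            parent=("projects/my-project/locations/us-central1/"
                    "featurestores/my-store"),
            entity_type_id="driver",  # assumed field; see note above
            entity_type=aiplatform_v1beta1.EntityType(
                description="Drivers; driver0 is one instance.",
            ),
        ),
    )
    print(await operation.result())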
- response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_entity_type.EntityType, - metadata_type=featurestore_service.CreateEntityTypeOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_entity_type(self, - request: featurestore_service.GetEntityTypeRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> entity_type.EntityType: - r"""Gets details of a single EntityType. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetEntityTypeRequest`): - The request object. Request message for - [FeaturestoreService.GetEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetEntityType]. - name (:class:`str`): - Required. The name of the EntityType resource. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.EntityType: - An entity type is a type of object in - a system that needs to be modeled and - have stored information about. For - example, driver is an entity type, and - driver0 is an instance of an entity type - driver. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.GetEntityTypeRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_entity_type, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_entity_types(self, - request: featurestore_service.ListEntityTypesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListEntityTypesAsyncPager: - r"""Lists EntityTypes in a given Featurestore. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest`): - The request object. Request message for - [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. - parent (:class:`str`): - Required. The resource name of the Featurestore to list - EntityTypes. 
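# --- Editor's note (not part of the patch): GetEntityType above is a plain
# unary call with no LRO wrapping. A short hedged sketch; placeholder names.
from google.cloud import aiplatform_v1beta1


async def get_entity_type_example() -> None:
    client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
    entity_type = await client.get_entity_type(
        name=("projects/my-project/locations/us-central1/"
              "featurestores/my-store/entityTypes/driver"),
    )
    print(entity_type.name)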
Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListEntityTypesAsyncPager: - Response message for - [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.ListEntityTypesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_entity_types, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListEntityTypesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_entity_type(self, - request: featurestore_service.UpdateEntityTypeRequest = None, - *, - entity_type: gca_entity_type.EntityType = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_entity_type.EntityType: - r"""Updates the parameters of a single EntityType. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateEntityTypeRequest`): - The request object. Request message for - [FeaturestoreService.UpdateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateEntityType]. - entity_type (:class:`google.cloud.aiplatform_v1beta1.types.EntityType`): - Required. The EntityType's ``name`` field is used to - identify the EntityType to be updated. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Field mask is used to specify the fields to be - overwritten in the EntityType resource by the update. 
- The fields specified in the update_mask are relative to - the resource, not the full request. A field will be - overwritten if it is in the mask. If the user does not - provide a mask then only the non-empty fields present in - the request will be overwritten. Set the update_mask to - ``*`` to override all fields. - - Updatable fields: - - - ``description`` - - ``labels`` - - ``monitoring_config.snapshot_analysis.disabled`` - - ``monitoring_config.snapshot_analysis.monitoring_interval`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.EntityType: - An entity type is a type of object in - a system that needs to be modeled and - have stored information about. For - example, driver is an entity type, and - driver0 is an instance of an entity type - driver. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.UpdateEntityTypeRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if entity_type is not None: - request.entity_type = entity_type - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_entity_type, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("entity_type.name", request.entity_type.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_entity_type(self, - request: featurestore_service.DeleteEntityTypeRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a single EntityType. The EntityType must not have any - Features or ``force`` must be set to true for the request to - succeed. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteEntityTypeRequest`): - The request object. Request message for - [FeaturestoreService.DeleteEntityTypes][]. - name (:class:`str`): - Required. The name of the EntityType to be deleted. - Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
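# --- Editor's note (not part of the patch): unlike the Featurestore update,
# UpdateEntityType above returns the EntityType directly rather than an LRO.
# A hedged sketch using ``description``, one of the documented updatable
# fields; placeholder names.
from google.cloud import aiplatform_v1beta1
from google.protobuf import field_mask_pb2


async def update_entity_type_example() -> None:
    client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
    updated = await client.update_entity_type(
        entity_type=aiplatform_v1beta1.EntityType(
            name=("projects/my-project/locations/us-central1/"
                  "featurestores/my-store/entityTypes/driver"),
            description="Drivers keyed by driver ID.",
        ),
        update_mask=field_mask_pb2.FieldMask(paths=["description"]),
    )
    print(updated.description)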
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.DeleteEntityTypeRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_entity_type, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def create_feature(self, - request: featurestore_service.CreateFeatureRequest = None, - *, - parent: str = None, - feature: gca_feature.Feature = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a new Feature in a given EntityType. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest`): - The request object. Request message for - [FeaturestoreService.CreateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeature]. - parent (:class:`str`): - Required. The resource name of the EntityType to create - a Feature. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - feature (:class:`google.cloud.aiplatform_v1beta1.types.Feature`): - Required. The Feature to create. - This corresponds to the ``feature`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Feature` Feature Metadata information that describes an attribute of an entity type. - For example, apple is an entity type, and color is a - feature that describes apple. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, feature]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.CreateFeatureRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if feature is not None: - request.feature = feature - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_feature, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_feature.Feature, - metadata_type=featurestore_service.CreateFeatureOperationMetadata, - ) - - # Done; return the response. - return response - - async def batch_create_features(self, - request: featurestore_service.BatchCreateFeaturesRequest = None, - *, - parent: str = None, - requests: Sequence[featurestore_service.CreateFeatureRequest] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a batch of Features in a given EntityType. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesRequest`): - The request object. Request message for - [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures]. - parent (:class:`str`): - Required. The resource name of the EntityType to create - the batch of Features under. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - requests (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest]`): - Required. The request message specifying the Features to - create. All Features must be created under the same - parent EntityType. The ``parent`` field in each child - request message can be omitted. If ``parent`` is set in - a child request, then the value must match the - ``parent`` value in this request message. 
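# --- Editor's note (not part of the patch): a hedged sketch for the
# CreateFeature call completed above; ``feature_id`` is an assumed request
# field, and ``value_type`` comes from the Feature message. Placeholder names.
from google.cloud import aiplatform_v1beta1


async def create_feature_example() -> None:
    client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
    operation = await client.create_feature(
        request=aiplatform_v1beta1.CreateFeatureRequest(
            parent=("projects/my-project/locations/us-central1/"
                    "featurestores/my-store/entityTypes/driver"),
            feature_id="age",  # assumed field; see note above
            feature=aiplatform_v1beta1.Feature(
                value_type=aiplatform_v1beta1.Feature.ValueType.INT64,
            ),
        ),
    )
    print(await operation.result())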
- - This corresponds to the ``requests`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesResponse` - Response message for - [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, requests]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.BatchCreateFeaturesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if requests: - request.requests.extend(requests) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.batch_create_features, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - featurestore_service.BatchCreateFeaturesResponse, - metadata_type=featurestore_service.BatchCreateFeaturesOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_feature(self, - request: featurestore_service.GetFeatureRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> feature.Feature: - r"""Gets details of a single Feature. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetFeatureRequest`): - The request object. Request message for - [FeaturestoreService.GetFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeature]. - name (:class:`str`): - Required. The name of the Feature resource. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
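# --- Editor's note (not part of the patch): per the docstring above, child
# requests in a batch may omit ``parent`` when it is set on the batch request.
# A hedged sketch; ``feature_id`` is an assumed per-request field and the
# names are placeholders.
from google.cloud import aiplatform_v1beta1


async def batch_create_features_example() -> None:
    client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
    operation = await client.batch_create_features(
        parent=("projects/my-project/locations/us-central1/"
                "featurestores/my-store/entityTypes/driver"),
        requests=[
            aiplatform_v1beta1.CreateFeatureRequest(
                feature_id="age",
                feature=aiplatform_v1beta1.Feature(
                    value_type=aiplatform_v1beta1.Feature.ValueType.INT64,
                ),
            ),
            aiplatform_v1beta1.CreateFeatureRequest(
                feature_id="rating",
                feature=aiplatform_v1beta1.Feature(
                    value_type=aiplatform_v1beta1.Feature.ValueType.DOUBLE,
                ),
            ),
        ],
    )
    print(await operation.result())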
- - Returns: - google.cloud.aiplatform_v1beta1.types.Feature: - Feature Metadata information that - describes an attribute of an entity - type. For example, apple is an entity - type, and color is a feature that - describes apple. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.GetFeatureRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_feature, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_features(self, - request: featurestore_service.ListFeaturesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListFeaturesAsyncPager: - r"""Lists Features in a given EntityType. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest`): - The request object. Request message for - [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]. - parent (:class:`str`): - Required. The resource name of the Location to list - Features. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturesAsyncPager: - Response message for - [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.ListFeaturesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
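# --- Editor's note (not part of the patch): ListFeatures pages exactly like
# the earlier list calls; this hedged sketch additionally sets ``page_size``
# through the request object. Placeholder names.
from google.cloud import aiplatform_v1beta1


async def list_features_example() -> None:
    client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
    pager = await client.list_features(
        request=aiplatform_v1beta1.ListFeaturesRequest(
            parent=("projects/my-project/locations/us-central1/"
                    "featurestores/my-store/entityTypes/driver"),
            page_size=50,
        ),
    )
    async for feature in pager:
        print(feature.name)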
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_features, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListFeaturesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_feature(self, - request: featurestore_service.UpdateFeatureRequest = None, - *, - feature: gca_feature.Feature = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_feature.Feature: - r"""Updates the parameters of a single Feature. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateFeatureRequest`): - The request object. Request message for - [FeaturestoreService.UpdateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeature]. - feature (:class:`google.cloud.aiplatform_v1beta1.types.Feature`): - Required. The Feature's ``name`` field is used to - identify the Feature to be updated. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` - - This corresponds to the ``feature`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Field mask is used to specify the fields to be - overwritten in the Features resource by the update. The - fields specified in the update_mask are relative to the - resource, not the full request. A field will be - overwritten if it is in the mask. If the user does not - provide a mask then only the non-empty fields present in - the request will be overwritten. Set the update_mask to - ``*`` to override all fields. - - Updatable fields: - - - ``description`` - - ``labels`` - - ``monitoring_config.snapshot_analysis.disabled`` - - ``monitoring_config.snapshot_analysis.monitoring_interval`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Feature: - Feature Metadata information that - describes an attribute of an entity - type. For example, apple is an entity - type, and color is a feature that - describes apple. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([feature, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.UpdateFeatureRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if feature is not None: - request.feature = feature - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_feature, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("feature.name", request.feature.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_feature(self, - request: featurestore_service.DeleteFeatureRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a single Feature. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteFeatureRequest`): - The request object. Request message for - [FeaturestoreService.DeleteFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeature]. - name (:class:`str`): - Required. The name of the Features to be deleted. - Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.DeleteFeatureRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
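# --- Editor's note (not part of the patch): the same field-mask pattern as
# the updates above, applied to a Feature; ``labels`` is one of the documented
# updatable fields. Hedged sketch, placeholder names.
from google.cloud import aiplatform_v1beta1
from google.protobuf import field_mask_pb2


async def update_feature_example() -> None:
    client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
    updated = await client.update_feature(
        feature=aiplatform_v1beta1.Feature(
            name=("projects/my-project/locations/us-central1/featurestores/"
                  "my-store/entityTypes/driver/features/age"),
            labels={"team": "mobility"},
        ),
        update_mask=field_mask_pb2.FieldMask(paths=["labels"]),
    )
    print(dict(updated.labels))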
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_feature, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def import_feature_values(self, - request: featurestore_service.ImportFeatureValuesRequest = None, - *, - entity_type: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Imports Feature values into the Featurestore from a - source storage. - The progress of the import is tracked by the returned - operation. The imported features are guaranteed to be - visible to subsequent read operations after the - operation is marked as successfully done. - If an import operation fails, the Feature values - returned from reads and exports may be inconsistent. If - consistency is required, the caller must retry the same - import request again and wait till the new operation - returned is marked as successfully done. - There are also scenarios where the caller can cause - inconsistency. - - Source data for import contains multiple distinct - Feature values for the same entity ID and timestamp. - - Source is modified during an import. This includes - adding, updating, or removing source data and/or - metadata. Examples of updating metadata include but are - not limited to changing storage location, storage class, - or retention policy. - - Online serving cluster is under-provisioned. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesRequest`): - The request object. Request message for - [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. - entity_type (:class:`str`): - Required. The resource name of the EntityType grouping - the Features for which values are being imported. - Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`` - - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesResponse` - Response message for - [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
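# --- Editor's note (not part of the patch): a hedged sketch for the import
# call above. The source fields (``bigquery_source``, ``feature_specs``,
# ``entity_id_field``, ``feature_time_field``) are the editor's assumptions
# about ImportFeatureValuesRequest, so verify them against the actual message;
# resource names are placeholders.
from google.cloud import aiplatform_v1beta1


async def import_feature_values_example() -> None:
    client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
    operation = await client.import_feature_values(
        request=aiplatform_v1beta1.ImportFeatureValuesRequest(
            entity_type=("projects/my-project/locations/us-central1/"
                         "featurestores/my-store/entityTypes/driver"),
            bigquery_source=aiplatform_v1beta1.BigQuerySource(
                input_uri="bq://my-project.my_dataset.driver_features",
            ),
            feature_specs=[
                aiplatform_v1beta1.ImportFeatureValuesRequest.FeatureSpec(id="age"),
            ],
            entity_id_field="driver_id",
            feature_time_field="event_ts",
        ),
    )
    # Per the docstring above, if the operation fails, retry the *same*
    # request, since a partial import can leave reads/exports inconsistent.
    print(await operation.result())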
-        has_flattened_params = any([entity_type])
-        if request is not None and has_flattened_params:
-            raise ValueError("If the `request` argument is set, then none of "
-                             "the individual field arguments should be set.")
-
-        request = featurestore_service.ImportFeatureValuesRequest(request)
-
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if entity_type is not None:
-            request.entity_type = entity_type
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = gapic_v1.method_async.wrap_method(
-            self._client._transport.import_feature_values,
-            default_timeout=5.0,
-            client_info=DEFAULT_CLIENT_INFO,
-        )
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("entity_type", request.entity_type),
-            )),
-        )
-
-        # Send the request.
-        response = await rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Wrap the response in an operation future.
-        response = operation_async.from_gapic(
-            response,
-            self._client._transport.operations_client,
-            featurestore_service.ImportFeatureValuesResponse,
-            metadata_type=featurestore_service.ImportFeatureValuesOperationMetadata,
-        )
-
-        # Done; return the response.
-        return response
-
-    async def batch_read_feature_values(self,
-            request: featurestore_service.BatchReadFeatureValuesRequest = None,
-            *,
-            featurestore: str = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> operation_async.AsyncOperation:
-        r"""Batch reads Feature values from a Featurestore.
-        This API enables batch reading Feature values, where
-        each read instance in the batch may read Feature values
-        of entities from one or more EntityTypes. Point-in-time
-        correctness is guaranteed for Feature values of each
-        read instance as of each instance's read timestamp.
-
-        Args:
-            request (:class:`google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest`):
-                The request object. Request message for
-                [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues].
-            featurestore (:class:`str`):
-                Required. The resource name of the Featurestore from
-                which to query Feature values. Format:
-                ``projects/{project}/locations/{location}/featurestores/{featurestore}``
-
-                This corresponds to the ``featurestore`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.api_core.operation_async.AsyncOperation:
-                An object representing a long-running operation.
-
-                The result type for the operation will be
-                :class:`google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesResponse`
-                Response message for
-                [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues].
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
- has_flattened_params = any([featurestore]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.BatchReadFeatureValuesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if featurestore is not None: - request.featurestore = featurestore - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.batch_read_feature_values, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("featurestore", request.featurestore), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - featurestore_service.BatchReadFeatureValuesResponse, - metadata_type=featurestore_service.BatchReadFeatureValuesOperationMetadata, - ) - - # Done; return the response. - return response - - async def export_feature_values(self, - request: featurestore_service.ExportFeatureValuesRequest = None, - *, - entity_type: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Exports Feature values from all the entities of a - target EntityType. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest`): - The request object. Request message for - [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. - entity_type (:class:`str`): - Required. The resource name of the EntityType from which - to export Feature values. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesResponse` - Response message for - [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([entity_type]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.ExportFeatureValuesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if entity_type is not None: - request.entity_type = entity_type - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.export_feature_values, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("entity_type", request.entity_type), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - featurestore_service.ExportFeatureValuesResponse, - metadata_type=featurestore_service.ExportFeatureValuesOperationMetadata, - ) - - # Done; return the response. - return response - - async def search_features(self, - request: featurestore_service.SearchFeaturesRequest = None, - *, - location: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.SearchFeaturesAsyncPager: - r"""Searches Features matching a query in a given - project. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest`): - The request object. Request message for - [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. - location (:class:`str`): - Required. The resource name of the Location to search - Features. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``location`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.SearchFeaturesAsyncPager: - Response message for - [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([location]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.SearchFeaturesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if location is not None: - request.location = location - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
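
Because the awaited call resolves to a ``SearchFeaturesAsyncPager``, results can be consumed with ``async for``; the location below is a placeholder, and additional pages are fetched transparently as iteration proceeds::

    pager = await client.search_features(
        location="projects/my-project/locations/us-central1",
    )
    async for feature in pager:
        print(feature.name)
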
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.search_features, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("location", request.location), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.SearchFeaturesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "FeaturestoreServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py deleted file mode 100644 index 7fdfc4238d..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py +++ /dev/null @@ -1,2265 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -from distutils import util -import os -import re -from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.featurestore_service import pagers -from google.cloud.aiplatform_v1beta1.types import entity_type -from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type -from google.cloud.aiplatform_v1beta1.types import feature -from google.cloud.aiplatform_v1beta1.types import feature as gca_feature -from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats -from google.cloud.aiplatform_v1beta1.types import featurestore -from google.cloud.aiplatform_v1beta1.types import featurestore as gca_featurestore -from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring -from google.cloud.aiplatform_v1beta1.types import featurestore_service -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import FeaturestoreServiceGrpcTransport -from .transports.grpc_asyncio import FeaturestoreServiceGrpcAsyncIOTransport - - -class FeaturestoreServiceClientMeta(type): - """Metaclass for the FeaturestoreService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[FeaturestoreServiceTransport]] - _transport_registry["grpc"] = FeaturestoreServiceGrpcTransport - _transport_registry["grpc_asyncio"] = FeaturestoreServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[FeaturestoreServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class FeaturestoreServiceClient(metaclass=FeaturestoreServiceClientMeta): - """The service that handles CRUD and List for resources for - Featurestore. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. 
- - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - FeaturestoreServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - FeaturestoreServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> FeaturestoreServiceTransport: - """Returns the transport used by the client instance. - - Returns: - FeaturestoreServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def entity_type_path(project: str,location: str,featurestore: str,entity_type: str,) -> str: - """Returns a fully-qualified entity_type string.""" - return "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, ) - - @staticmethod - def parse_entity_type_path(path: str) -> Dict[str,str]: - """Parses a entity_type path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)/entityTypes/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def feature_path(project: str,location: str,featurestore: str,entity_type: str,feature: str,) -> str: - """Returns a fully-qualified feature string.""" - return "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, feature=feature, ) - - @staticmethod - def parse_feature_path(path: str) -> Dict[str,str]: - """Parses a feature path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)/entityTypes/(?P.+?)/features/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def featurestore_path(project: str,location: str,featurestore: str,) -> str: - """Returns a fully-qualified featurestore string.""" - return "projects/{project}/locations/{location}/featurestores/{featurestore}".format(project=project, location=location, featurestore=featurestore, ) - - @staticmethod - def parse_featurestore_path(path: str) -> Dict[str,str]: - """Parses a featurestore path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = 
re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, FeaturestoreServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the featurestore service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, FeaturestoreServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. 
-        if client_options.api_endpoint is not None:
-            api_endpoint = client_options.api_endpoint
-        else:
-            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
-            if use_mtls_env == "never":
-                api_endpoint = self.DEFAULT_ENDPOINT
-            elif use_mtls_env == "always":
-                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
-            elif use_mtls_env == "auto":
-                if is_mtls:
-                    api_endpoint = self.DEFAULT_MTLS_ENDPOINT
-                else:
-                    api_endpoint = self.DEFAULT_ENDPOINT
-            else:
-                raise MutualTLSChannelError(
-                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
-                    "values: never, auto, always"
-                )
-
-        # Save or instantiate the transport.
-        # Ordinarily, we provide the transport, but allowing a custom transport
-        # instance provides an extensibility point for unusual situations.
-        if isinstance(transport, FeaturestoreServiceTransport):
-            # transport is a FeaturestoreServiceTransport instance.
-            if credentials or client_options.credentials_file:
-                raise ValueError("When providing a transport instance, "
-                                 "provide its credentials directly.")
-            if client_options.scopes:
-                raise ValueError(
-                    "When providing a transport instance, provide its scopes "
-                    "directly."
-                )
-            self._transport = transport
-        else:
-            Transport = type(self).get_transport_class(transport)
-            self._transport = Transport(
-                credentials=credentials,
-                credentials_file=client_options.credentials_file,
-                host=api_endpoint,
-                scopes=client_options.scopes,
-                client_cert_source_for_mtls=client_cert_source_func,
-                quota_project_id=client_options.quota_project_id,
-                client_info=client_info,
-            )
-
-    def create_featurestore(self,
-            request: featurestore_service.CreateFeaturestoreRequest = None,
-            *,
-            parent: str = None,
-            featurestore: gca_featurestore.Featurestore = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> gac_operation.Operation:
-        r"""Creates a new Featurestore in a given project and
-        location.
-
-        Args:
-            request (google.cloud.aiplatform_v1beta1.types.CreateFeaturestoreRequest):
-                The request object. Request message for
-                [FeaturestoreService.CreateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeaturestore].
-            parent (str):
-                Required. The resource name of the Location to create
-                Featurestores. Format:
-                ``projects/{project}/locations/{location}``
-
-                This corresponds to the ``parent`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            featurestore (google.cloud.aiplatform_v1beta1.types.Featurestore):
-                Required. The Featurestore to create.
-                This corresponds to the ``featurestore`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.api_core.operation.Operation:
-                An object representing a long-running operation.
-
-                The result type for the operation will be
-                :class:`google.cloud.aiplatform_v1beta1.types.Featurestore`
-                Featurestore configuration information on how the
-                Featurestore is configured.
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
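
A hypothetical caller-side sketch for this long-running create; the parent is a placeholder, and the nested ``OnlineServingConfig`` message name is an assumption mirroring the ``online_serving_config.fixed_node_count`` field referenced elsewhere in this file::

    from google.cloud.aiplatform_v1beta1 import types

    operation = client.create_featurestore(
        parent="projects/my-project/locations/us-central1",
        featurestore=types.Featurestore(
            online_serving_config=types.Featurestore.OnlineServingConfig(
                fixed_node_count=1,
            ),
        ),
    )
    featurestore = operation.result()  # Blocks until the LRO completes.
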
- has_flattened_params = any([parent, featurestore]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.CreateFeaturestoreRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.CreateFeaturestoreRequest): - request = featurestore_service.CreateFeaturestoreRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if featurestore is not None: - request.featurestore = featurestore - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_featurestore] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_featurestore.Featurestore, - metadata_type=featurestore_service.CreateFeaturestoreOperationMetadata, - ) - - # Done; return the response. - return response - - def get_featurestore(self, - request: featurestore_service.GetFeaturestoreRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> featurestore.Featurestore: - r"""Gets details of a single Featurestore. - - Args: - request (google.cloud.aiplatform_v1beta1.types.GetFeaturestoreRequest): - The request object. Request message for - [FeaturestoreService.GetFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeaturestore]. - name (str): - Required. The name of the - Featurestore resource. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Featurestore: - Featurestore configuration - information on how the Featurestore is - configured. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.GetFeaturestoreRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, featurestore_service.GetFeaturestoreRequest): - request = featurestore_service.GetFeaturestoreRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_featurestore] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_featurestores(self, - request: featurestore_service.ListFeaturestoresRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListFeaturestoresPager: - r"""Lists Featurestores in a given project and location. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest): - The request object. Request message for - [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]. - parent (str): - Required. The resource name of the Location to list - Featurestores. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturestoresPager: - Response message for - [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.ListFeaturestoresRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.ListFeaturestoresRequest): - request = featurestore_service.ListFeaturestoresRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_featurestores] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListFeaturestoresPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_featurestore(self, - request: featurestore_service.UpdateFeaturestoreRequest = None, - *, - featurestore: gca_featurestore.Featurestore = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Updates the parameters of a single Featurestore. - - Args: - request (google.cloud.aiplatform_v1beta1.types.UpdateFeaturestoreRequest): - The request object. Request message for - [FeaturestoreService.UpdateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeaturestore]. - featurestore (google.cloud.aiplatform_v1beta1.types.Featurestore): - Required. The Featurestore's ``name`` field is used to - identify the Featurestore to be updated. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - - This corresponds to the ``featurestore`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Field mask is used to specify the fields to be - overwritten in the Featurestore resource by the update. - The fields specified in the update_mask are relative to - the resource, not the full request. A field will be - overwritten if it is in the mask. If the user does not - provide a mask then only the non-empty fields present in - the request will be overwritten. Set the update_mask to - ``*`` to override all fields. - - Updatable fields: - - - ``labels`` - - ``online_serving_config.fixed_node_count`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` - Featurestore configuration information on how the - Featurestore is configured. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([featurestore, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.UpdateFeaturestoreRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
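
A hypothetical masked-update sketch matching the semantics above: only the field named in the mask, here ``labels``, is overwritten; the ``featurestore`` message is assumed to have been fetched earlier::

    from google.protobuf import field_mask_pb2

    featurestore.labels["env"] = "dev"
    operation = client.update_featurestore(
        featurestore=featurestore,
        update_mask=field_mask_pb2.FieldMask(paths=["labels"]),
    )
    updated = operation.result()
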
- if not isinstance(request, featurestore_service.UpdateFeaturestoreRequest): - request = featurestore_service.UpdateFeaturestoreRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if featurestore is not None: - request.featurestore = featurestore - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_featurestore] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("featurestore.name", request.featurestore.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_featurestore.Featurestore, - metadata_type=featurestore_service.UpdateFeaturestoreOperationMetadata, - ) - - # Done; return the response. - return response - - def delete_featurestore(self, - request: featurestore_service.DeleteFeaturestoreRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a single Featurestore. The Featurestore must not contain - any EntityTypes or ``force`` must be set to true for the request - to succeed. - - Args: - request (google.cloud.aiplatform_v1beta1.types.DeleteFeaturestoreRequest): - The request object. Request message for - [FeaturestoreService.DeleteFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeaturestore]. - name (str): - Required. The name of the Featurestore to be deleted. - Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.DeleteFeaturestoreRequest. 
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, featurestore_service.DeleteFeaturestoreRequest):
-            request = featurestore_service.DeleteFeaturestoreRequest(request)
-            # If we have keyword arguments corresponding to fields on the
-            # request, apply these.
-            if name is not None:
-                request.name = name
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.delete_featurestore]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("name", request.name),
-            )),
-        )
-
-        # Send the request.
-        response = rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Wrap the response in an operation future.
-        response = gac_operation.from_gapic(
-            response,
-            self._transport.operations_client,
-            empty_pb2.Empty,
-            metadata_type=gca_operation.DeleteOperationMetadata,
-        )
-
-        # Done; return the response.
-        return response
-
-    def create_entity_type(self,
-            request: featurestore_service.CreateEntityTypeRequest = None,
-            *,
-            parent: str = None,
-            entity_type: gca_entity_type.EntityType = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> gac_operation.Operation:
-        r"""Creates a new EntityType in a given Featurestore.
-
-        Args:
-            request (google.cloud.aiplatform_v1beta1.types.CreateEntityTypeRequest):
-                The request object. Request message for
-                [FeaturestoreService.CreateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateEntityType].
-            parent (str):
-                Required. The resource name of the Featurestore to
-                create EntityTypes. Format:
-                ``projects/{project}/locations/{location}/featurestores/{featurestore}``
-
-                This corresponds to the ``parent`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            entity_type (google.cloud.aiplatform_v1beta1.types.EntityType):
-                The EntityType to create.
-                This corresponds to the ``entity_type`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.api_core.operation.Operation:
-                An object representing a long-running operation.
-
-                The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.EntityType` An entity type is a type of object in a system that needs to be modeled and
-                   about which information needs to be stored. For
-                   example, driver is an entity type, and driver0 is an
-                   instance of the entity type driver.
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([parent, entity_type])
-        if request is not None and has_flattened_params:
-            raise ValueError('If the `request` argument is set, then none of '
-                             'the individual field arguments should be set.')
-
-        # Minor optimization to avoid making a copy if the user passes
-        # in a featurestore_service.CreateEntityTypeRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, featurestore_service.CreateEntityTypeRequest):
-            request = featurestore_service.CreateEntityTypeRequest(request)
-            # If we have keyword arguments corresponding to fields on the
-            # request, apply these.
-            if parent is not None:
-                request.parent = parent
-            if entity_type is not None:
-                request.entity_type = entity_type
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.create_entity_type]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("parent", request.parent),
-            )),
-        )
-
-        # Send the request.
-        response = rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Wrap the response in an operation future.
-        response = gac_operation.from_gapic(
-            response,
-            self._transport.operations_client,
-            gca_entity_type.EntityType,
-            metadata_type=featurestore_service.CreateEntityTypeOperationMetadata,
-        )
-
-        # Done; return the response.
-        return response
-
-    def get_entity_type(self,
-            request: featurestore_service.GetEntityTypeRequest = None,
-            *,
-            name: str = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> entity_type.EntityType:
-        r"""Gets details of a single EntityType.
-
-        Args:
-            request (google.cloud.aiplatform_v1beta1.types.GetEntityTypeRequest):
-                The request object. Request message for
-                [FeaturestoreService.GetEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetEntityType].
-            name (str):
-                Required. The name of the EntityType resource. Format:
-                ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
-
-                This corresponds to the ``name`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.cloud.aiplatform_v1beta1.types.EntityType:
-                An entity type is a type of object in
-                a system that needs to be modeled and
-                about which information needs to be
-                stored. For example, driver is an
-                entity type, and driver0 is an instance
-                of the entity type driver.
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([name])
-        if request is not None and has_flattened_params:
-            raise ValueError('If the `request` argument is set, then none of '
-                             'the individual field arguments should be set.')
-
-        # Minor optimization to avoid making a copy if the user passes
-        # in a featurestore_service.GetEntityTypeRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, featurestore_service.GetEntityTypeRequest):
-            request = featurestore_service.GetEntityTypeRequest(request)
-            # If we have keyword arguments corresponding to fields on the
-            # request, apply these.
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_entity_type] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_entity_types(self, - request: featurestore_service.ListEntityTypesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListEntityTypesPager: - r"""Lists EntityTypes in a given Featurestore. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest): - The request object. Request message for - [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. - parent (str): - Required. The resource name of the Featurestore to list - EntityTypes. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListEntityTypesPager: - Response message for - [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.ListEntityTypesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.ListEntityTypesRequest): - request = featurestore_service.ListEntityTypesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_entity_types] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. 
-        response = pagers.ListEntityTypesPager(
-            method=rpc,
-            request=request,
-            response=response,
-            metadata=metadata,
-        )
-
-        # Done; return the response.
-        return response
-
-    def update_entity_type(self,
-            request: featurestore_service.UpdateEntityTypeRequest = None,
-            *,
-            entity_type: gca_entity_type.EntityType = None,
-            update_mask: field_mask_pb2.FieldMask = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> gca_entity_type.EntityType:
-        r"""Updates the parameters of a single EntityType.
-
-        Args:
-            request (google.cloud.aiplatform_v1beta1.types.UpdateEntityTypeRequest):
-                The request object. Request message for
-                [FeaturestoreService.UpdateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateEntityType].
-            entity_type (google.cloud.aiplatform_v1beta1.types.EntityType):
-                Required. The EntityType's ``name`` field is used to
-                identify the EntityType to be updated. Format:
-                ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
-
-                This corresponds to the ``entity_type`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            update_mask (google.protobuf.field_mask_pb2.FieldMask):
-                Field mask is used to specify the fields to be
-                overwritten in the EntityType resource by the update.
-                The fields specified in the update_mask are relative to
-                the resource, not the full request. A field will be
-                overwritten if it is in the mask. If the user does not
-                provide a mask then only the non-empty fields present in
-                the request will be overwritten. Set the update_mask to
-                ``*`` to override all fields.
-
-                Updatable fields:
-
-                -  ``description``
-                -  ``labels``
-                -  ``monitoring_config.snapshot_analysis.disabled``
-                -  ``monitoring_config.snapshot_analysis.monitoring_interval``
-
-                This corresponds to the ``update_mask`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.cloud.aiplatform_v1beta1.types.EntityType:
-                An entity type is a type of object in
-                a system that needs to be modeled and
-                about which information needs to be
-                stored. For example, driver is an
-                entity type, and driver0 is an instance
-                of the entity type driver.
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([entity_type, update_mask])
-        if request is not None and has_flattened_params:
-            raise ValueError('If the `request` argument is set, then none of '
-                             'the individual field arguments should be set.')
-
-        # Minor optimization to avoid making a copy if the user passes
-        # in a featurestore_service.UpdateEntityTypeRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, featurestore_service.UpdateEntityTypeRequest):
-            request = featurestore_service.UpdateEntityTypeRequest(request)
-            # If we have keyword arguments corresponding to fields on the
-            # request, apply these.
-            if entity_type is not None:
-                request.entity_type = entity_type
-            if update_mask is not None:
-                request.update_mask = update_mask
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.update_entity_type]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("entity_type.name", request.entity_type.name),
-            )),
-        )
-
-        # Send the request.
-        response = rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Done; return the response.
-        return response
-
-    def delete_entity_type(self,
-            request: featurestore_service.DeleteEntityTypeRequest = None,
-            *,
-            name: str = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> gac_operation.Operation:
-        r"""Deletes a single EntityType. The EntityType must not have any
-        Features or ``force`` must be set to true for the request to
-        succeed.
-
-        Args:
-            request (google.cloud.aiplatform_v1beta1.types.DeleteEntityTypeRequest):
-                The request object. Request message for
-                [FeaturestoreService.DeleteEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteEntityType].
-            name (str):
-                Required. The name of the EntityType to be deleted.
-                Format:
-                ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}``
-
-                This corresponds to the ``name`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.api_core.operation.Operation:
-                An object representing a long-running operation.
-
-                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
-                   empty messages in your APIs. A typical example is to
-                   use it as the request or the response type of an API
-                   method. For instance:
-
-                      service Foo {
-                         rpc Bar(google.protobuf.Empty) returns
-                         (google.protobuf.Empty);
-
-                      }
-
-                   The JSON representation for Empty is empty JSON
-                   object {}.
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([name])
-        if request is not None and has_flattened_params:
-            raise ValueError('If the `request` argument is set, then none of '
-                             'the individual field arguments should be set.')
-
-        # Minor optimization to avoid making a copy if the user passes
-        # in a featurestore_service.DeleteEntityTypeRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, featurestore_service.DeleteEntityTypeRequest):
-            request = featurestore_service.DeleteEntityTypeRequest(request)
-            # If we have keyword arguments corresponding to fields on the
-            # request, apply these.
-            if name is not None:
-                request.name = name
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
- rpc = self._transport._wrapped_methods[self._transport.delete_entity_type] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def create_feature(self, - request: featurestore_service.CreateFeatureRequest = None, - *, - parent: str = None, - feature: gca_feature.Feature = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates a new Feature in a given EntityType. - - Args: - request (google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest): - The request object. Request message for - [FeaturestoreService.CreateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeature]. - parent (str): - Required. The resource name of the EntityType to create - a Feature. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - feature (google.cloud.aiplatform_v1beta1.types.Feature): - Required. The Feature to create. - This corresponds to the ``feature`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Feature` Feature Metadata information that describes an attribute of an entity type. - For example, apple is an entity type, and color is a - feature that describes apple. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, feature]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.CreateFeatureRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.CreateFeatureRequest): - request = featurestore_service.CreateFeatureRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if feature is not None: - request.feature = feature - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
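The delete_entity_type wrapper completed above returns a long-running operation that resolves to google.protobuf.Empty. A minimal sketch, again with a placeholder resource name::

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.FeaturestoreServiceClient()

    operation = client.delete_entity_type(
        name="projects/my-project/locations/us-central1/featurestores/my-fs/entityTypes/driver",
    )
    operation.result()  # blocks until deletion finishes; raises on failure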
- rpc = self._transport._wrapped_methods[self._transport.create_feature] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_feature.Feature, - metadata_type=featurestore_service.CreateFeatureOperationMetadata, - ) - - # Done; return the response. - return response - - def batch_create_features(self, - request: featurestore_service.BatchCreateFeaturesRequest = None, - *, - parent: str = None, - requests: Sequence[featurestore_service.CreateFeatureRequest] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates a batch of Features in a given EntityType. - - Args: - request (google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesRequest): - The request object. Request message for - [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures]. - parent (str): - Required. The resource name of the EntityType to create - the batch of Features under. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - requests (Sequence[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest]): - Required. The request message specifying the Features to - create. All Features must be created under the same - parent EntityType. The ``parent`` field in each child - request message can be omitted. If ``parent`` is set in - a child request, then the value must match the - ``parent`` value in this request message. - - This corresponds to the ``requests`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesResponse` - Response message for - [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, requests]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.BatchCreateFeaturesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
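The create_feature call wrapped above also returns an operation, resolving to the created Feature. The flattened signature exposes only parent and feature, so the request's feature_id field (an assumption from the v1beta1 proto, not shown in this hunk) has to be supplied via a full request object::

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.FeaturestoreServiceClient()

    request = aiplatform_v1beta1.CreateFeatureRequest(
        parent="projects/my-project/locations/us-central1/featurestores/my-fs/entityTypes/driver",
        feature=aiplatform_v1beta1.Feature(description="Average driver rating"),
        feature_id="avg_rating",  # assumed field; not part of the flattened args
    )
    operation = client.create_feature(request=request)
    feature = operation.result()  # resolves to the created Feature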
- if not isinstance(request, featurestore_service.BatchCreateFeaturesRequest): - request = featurestore_service.BatchCreateFeaturesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if requests is not None: - request.requests = requests - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.batch_create_features] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - featurestore_service.BatchCreateFeaturesResponse, - metadata_type=featurestore_service.BatchCreateFeaturesOperationMetadata, - ) - - # Done; return the response. - return response - - def get_feature(self, - request: featurestore_service.GetFeatureRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> feature.Feature: - r"""Gets details of a single Feature. - - Args: - request (google.cloud.aiplatform_v1beta1.types.GetFeatureRequest): - The request object. Request message for - [FeaturestoreService.GetFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeature]. - name (str): - Required. The name of the Feature resource. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Feature: - Feature Metadata information that - describes an attribute of an entity - type. For example, apple is an entity - type, and color is a feature that - describes apple. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.GetFeatureRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.GetFeatureRequest): - request = featurestore_service.GetFeatureRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
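batch_create_features, completed above, takes child CreateFeatureRequest messages whose parent may be omitted. A sketch with placeholder IDs::

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.FeaturestoreServiceClient()

    parent = "projects/my-project/locations/us-central1/featurestores/my-fs/entityTypes/driver"
    # Child requests inherit `parent` from the batch request when unset.
    requests = [
        aiplatform_v1beta1.CreateFeatureRequest(
            feature=aiplatform_v1beta1.Feature(description="Trips completed"),
            feature_id="trip_count",  # assumed field, as in the single-create sketch
        ),
    ]
    operation = client.batch_create_features(parent=parent, requests=requests)
    response = operation.result()  # BatchCreateFeaturesResponse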
- rpc = self._transport._wrapped_methods[self._transport.get_feature] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_features(self, - request: featurestore_service.ListFeaturesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListFeaturesPager: - r"""Lists Features in a given EntityType. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest): - The request object. Request message for - [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]. - parent (str): - Required. The resource name of the Location to list - Features. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturesPager: - Response message for - [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.ListFeaturesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.ListFeaturesRequest): - request = featurestore_service.ListFeaturesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_features] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListFeaturesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
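list_features, shown above, wraps its response in a ListFeaturesPager so callers never touch page tokens. A minimal sketch::

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.FeaturestoreServiceClient()

    # The pager fetches follow-up pages transparently during iteration.
    for feature in client.list_features(
        parent="projects/my-project/locations/us-central1/featurestores/my-fs/entityTypes/driver",
    ):
        print(feature.name)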
- return response - - def update_feature(self, - request: featurestore_service.UpdateFeatureRequest = None, - *, - feature: gca_feature.Feature = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_feature.Feature: - r"""Updates the parameters of a single Feature. - - Args: - request (google.cloud.aiplatform_v1beta1.types.UpdateFeatureRequest): - The request object. Request message for - [FeaturestoreService.UpdateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeature]. - feature (google.cloud.aiplatform_v1beta1.types.Feature): - Required. The Feature's ``name`` field is used to - identify the Feature to be updated. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` - - This corresponds to the ``feature`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Field mask is used to specify the fields to be - overwritten in the Features resource by the update. The - fields specified in the update_mask are relative to the - resource, not the full request. A field will be - overwritten if it is in the mask. If the user does not - provide a mask then only the non-empty fields present in - the request will be overwritten. Set the update_mask to - ``*`` to override all fields. - - Updatable fields: - - - ``description`` - - ``labels`` - - ``monitoring_config.snapshot_analysis.disabled`` - - ``monitoring_config.snapshot_analysis.monitoring_interval`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Feature: - Feature Metadata information that - describes an attribute of an entity - type. For example, apple is an entity - type, and color is a feature that - describes apple. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([feature, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.UpdateFeatureRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.UpdateFeatureRequest): - request = featurestore_service.UpdateFeatureRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if feature is not None: - request.feature = feature - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_feature] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("feature.name", request.feature.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_feature(self, - request: featurestore_service.DeleteFeatureRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a single Feature. - - Args: - request (google.cloud.aiplatform_v1beta1.types.DeleteFeatureRequest): - The request object. Request message for - [FeaturestoreService.DeleteFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeature]. - name (str): - Required. The name of the Features to be deleted. - Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.DeleteFeatureRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.DeleteFeatureRequest): - request = featurestore_service.DeleteFeatureRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_feature] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
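update_feature mirrors update_entity_type: the Feature's name selects the resource and the mask limits which fields change. A sketch with placeholder names::

    from google.cloud import aiplatform_v1beta1
    from google.protobuf import field_mask_pb2

    client = aiplatform_v1beta1.FeaturestoreServiceClient()

    feature = aiplatform_v1beta1.Feature(
        name="projects/my-project/locations/us-central1/featurestores/my-fs/entityTypes/driver/features/avg_rating",
        labels={"team": "ml-platform"},
    )
    updated = client.update_feature(
        feature=feature,
        update_mask=field_mask_pb2.FieldMask(paths=["labels"]),
    )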
- response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def import_feature_values(self, - request: featurestore_service.ImportFeatureValuesRequest = None, - *, - entity_type: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Imports Feature values into the Featurestore from a - source storage. - The progress of the import is tracked by the returned - operation. The imported features are guaranteed to be - visible to subsequent read operations after the - operation is marked as successfully done. - If an import operation fails, the Feature values - returned from reads and exports may be inconsistent. If - consistency is required, the caller must retry the same - import request again and wait till the new operation - returned is marked as successfully done. - There are also scenarios where the caller can cause - inconsistency. - - Source data for import contains multiple distinct - Feature values for the same entity ID and timestamp. - - Source is modified during an import. This includes - adding, updating, or removing source data and/or - metadata. Examples of updating metadata include but are - not limited to changing storage location, storage class, - or retention policy. - - Online serving cluster is under-provisioned. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesRequest): - The request object. Request message for - [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. - entity_type (str): - Required. The resource name of the EntityType grouping - the Features for which values are being imported. - Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`` - - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesResponse` - Response message for - [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.ImportFeatureValuesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
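The delete_feature flow wrapped at the top of the hunk above follows the same Empty-result operation pattern as delete_entity_type::

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.FeaturestoreServiceClient()

    operation = client.delete_feature(
        name="projects/my-project/locations/us-central1/featurestores/my-fs/entityTypes/driver/features/avg_rating",
    )
    operation.result()  # Empty on success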
- if not isinstance(request, featurestore_service.ImportFeatureValuesRequest): - request = featurestore_service.ImportFeatureValuesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if entity_type is not None: - request.entity_type = entity_type - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.import_feature_values] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("entity_type", request.entity_type), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - featurestore_service.ImportFeatureValuesResponse, - metadata_type=featurestore_service.ImportFeatureValuesOperationMetadata, - ) - - # Done; return the response. - return response - - def batch_read_feature_values(self, - request: featurestore_service.BatchReadFeatureValuesRequest = None, - *, - featurestore: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Batch reads Feature values from a Featurestore. - This API enables batch reading Feature values, where - each read instance in the batch may read Feature values - of entities from one or more EntityTypes. Point-in-time - correctness is guaranteed for Feature values of each - read instance as of each instance's read timestamp. - - Args: - request (google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest): - The request object. Request message for - [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. - (- Next Id: 6 -) - featurestore (str): - Required. The resource name of the Featurestore from - which to query Feature values. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - - This corresponds to the ``featurestore`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesResponse` - Response message for - [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([featurestore]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.BatchReadFeatureValuesRequest. 
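import_feature_values, completed above, is asynchronous: imported values are only guaranteed visible to reads after the operation succeeds. The source configuration lives on the full request and is elided here, since those fields are not shown in this hunk::

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.FeaturestoreServiceClient()

    operation = client.import_feature_values(
        entity_type="projects/my-project/locations/us-central1/featurestores/my-fs/entityTypes/driver",
    )
    response = operation.result()  # ImportFeatureValuesResponse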
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.BatchReadFeatureValuesRequest): - request = featurestore_service.BatchReadFeatureValuesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if featurestore is not None: - request.featurestore = featurestore - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.batch_read_feature_values] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("featurestore", request.featurestore), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - featurestore_service.BatchReadFeatureValuesResponse, - metadata_type=featurestore_service.BatchReadFeatureValuesOperationMetadata, - ) - - # Done; return the response. - return response - - def export_feature_values(self, - request: featurestore_service.ExportFeatureValuesRequest = None, - *, - entity_type: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Exports Feature values from all the entities of a - target EntityType. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest): - The request object. Request message for - [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. - entity_type (str): - Required. The resource name of the EntityType from which - to export Feature values. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesResponse` - Response message for - [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.ExportFeatureValuesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
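batch_read_feature_values, completed above, performs a point-in-time correct read across EntityTypes. Destination and read-instance selection sit on the full request and are omitted from this sketch::

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.FeaturestoreServiceClient()

    operation = client.batch_read_feature_values(
        featurestore="projects/my-project/locations/us-central1/featurestores/my-fs",
    )
    response = operation.result()  # BatchReadFeatureValuesResponse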
- if not isinstance(request, featurestore_service.ExportFeatureValuesRequest): - request = featurestore_service.ExportFeatureValuesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if entity_type is not None: - request.entity_type = entity_type - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.export_feature_values] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("entity_type", request.entity_type), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - featurestore_service.ExportFeatureValuesResponse, - metadata_type=featurestore_service.ExportFeatureValuesOperationMetadata, - ) - - # Done; return the response. - return response - - def search_features(self, - request: featurestore_service.SearchFeaturesRequest = None, - *, - location: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.SearchFeaturesPager: - r"""Searches Features matching a query in a given - project. - - Args: - request (google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest): - The request object. Request message for - [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. - location (str): - Required. The resource name of the Location to search - Features. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``location`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.SearchFeaturesPager: - Response message for - [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([location]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.SearchFeaturesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.SearchFeaturesRequest): - request = featurestore_service.SearchFeaturesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
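export_feature_values, completed above, has the same shape, keyed on the EntityType::

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.FeaturestoreServiceClient()

    # Destination settings live on the full request and are elided here.
    operation = client.export_feature_values(
        entity_type="projects/my-project/locations/us-central1/featurestores/my-fs/entityTypes/driver",
    )
    response = operation.result()  # ExportFeatureValuesResponse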
- if location is not None: - request.location = location - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.search_features] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("location", request.location), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.SearchFeaturesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "FeaturestoreServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/pagers.py deleted file mode 100644 index 6a272c4602..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/pagers.py +++ /dev/null @@ -1,509 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.cloud.aiplatform_v1beta1.types import entity_type -from google.cloud.aiplatform_v1beta1.types import feature -from google.cloud.aiplatform_v1beta1.types import featurestore -from google.cloud.aiplatform_v1beta1.types import featurestore_service - - -class ListFeaturestoresPager: - """A pager for iterating through ``list_featurestores`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse` object, and - provides an ``__iter__`` method to iterate through its - ``featurestores`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListFeaturestores`` requests and continue to iterate - through the ``featurestores`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
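search_features, completed earlier in the hunk above, is flattened only on location and returns a pager::

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.FeaturestoreServiceClient()

    # Query filtering is carried on the request's `query` field (an
    # assumption from the v1beta1 proto; only `location` is flattened).
    for feature in client.search_features(
        location="projects/my-project/locations/us-central1",
    ):
        print(feature.name)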
- """ - def __init__(self, - method: Callable[..., featurestore_service.ListFeaturestoresResponse], - request: featurestore_service.ListFeaturestoresRequest, - response: featurestore_service.ListFeaturestoresResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = featurestore_service.ListFeaturestoresRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[featurestore_service.ListFeaturestoresResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[featurestore.Featurestore]: - for page in self.pages: - yield from page.featurestores - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListFeaturestoresAsyncPager: - """A pager for iterating through ``list_featurestores`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``featurestores`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListFeaturestores`` requests and continue to iterate - through the ``featurestores`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[featurestore_service.ListFeaturestoresResponse]], - request: featurestore_service.ListFeaturestoresRequest, - response: featurestore_service.ListFeaturestoresResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = featurestore_service.ListFeaturestoresRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[featurestore_service.ListFeaturestoresResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[featurestore.Featurestore]: - async def async_generator(): - async for page in self.pages: - for response in page.featurestores: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListEntityTypesPager: - """A pager for iterating through ``list_entity_types`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``entity_types`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListEntityTypes`` requests and continue to iterate - through the ``entity_types`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., featurestore_service.ListEntityTypesResponse], - request: featurestore_service.ListEntityTypesRequest, - response: featurestore_service.ListEntityTypesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = featurestore_service.ListEntityTypesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[featurestore_service.ListEntityTypesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[entity_type.EntityType]: - for page in self.pages: - yield from page.entity_types - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListEntityTypesAsyncPager: - """A pager for iterating through ``list_entity_types`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``entity_types`` field. 
- - If there are more pages, the ``__aiter__`` method will make additional - ``ListEntityTypes`` requests and continue to iterate - through the ``entity_types`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[featurestore_service.ListEntityTypesResponse]], - request: featurestore_service.ListEntityTypesRequest, - response: featurestore_service.ListEntityTypesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = featurestore_service.ListEntityTypesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[featurestore_service.ListEntityTypesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[entity_type.EntityType]: - async def async_generator(): - async for page in self.pages: - for response in page.entity_types: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListFeaturesPager: - """A pager for iterating through ``list_features`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``features`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListFeatures`` requests and continue to iterate - through the ``features`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., featurestore_service.ListFeaturesResponse], - request: featurestore_service.ListFeaturesRequest, - response: featurestore_service.ListFeaturesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
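The async pagers mirror this with __aiter__. The ListEntityTypesAsyncPager completed above can be consumed as follows, assuming the FeaturestoreServiceAsyncClient deleted elsewhere in this PR::

    import asyncio

    from google.cloud import aiplatform_v1beta1


    async def main():
        client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
        pager = await client.list_entity_types(
            parent="projects/my-project/locations/us-central1/featurestores/my-fs",
        )
        # Each page fetch is awaited lazily as iteration proceeds.
        async for entity_type in pager:
            print(entity_type.name)


    asyncio.run(main())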
- """ - self._method = method - self._request = featurestore_service.ListFeaturesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[featurestore_service.ListFeaturesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[feature.Feature]: - for page in self.pages: - yield from page.features - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListFeaturesAsyncPager: - """A pager for iterating through ``list_features`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``features`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListFeatures`` requests and continue to iterate - through the ``features`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[featurestore_service.ListFeaturesResponse]], - request: featurestore_service.ListFeaturesRequest, - response: featurestore_service.ListFeaturesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = featurestore_service.ListFeaturesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[featurestore_service.ListFeaturesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[feature.Feature]: - async def async_generator(): - async for page in self.pages: - for response in page.features: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class SearchFeaturesPager: - """A pager for iterating through ``search_features`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.SearchFeaturesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``features`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``SearchFeatures`` requests and continue to iterate - through the ``features`` field on the - corresponding responses. 
- - All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchFeaturesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., featurestore_service.SearchFeaturesResponse], - request: featurestore_service.SearchFeaturesRequest, - response: featurestore_service.SearchFeaturesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.SearchFeaturesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = featurestore_service.SearchFeaturesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[featurestore_service.SearchFeaturesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[feature.Feature]: - for page in self.pages: - yield from page.features - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class SearchFeaturesAsyncPager: - """A pager for iterating through ``search_features`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.SearchFeaturesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``features`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``SearchFeatures`` requests and continue to iterate - through the ``features`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchFeaturesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[featurestore_service.SearchFeaturesResponse]], - request: featurestore_service.SearchFeaturesRequest, - response: featurestore_service.SearchFeaturesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.SearchFeaturesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = featurestore_service.SearchFeaturesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[featurestore_service.SearchFeaturesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[feature.Feature]: - async def async_generator(): - async for page in self.pages: - for response in page.features: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/__init__.py deleted file mode 100644 index e8a1ff1b03..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import FeaturestoreServiceTransport -from .grpc import FeaturestoreServiceGrpcTransport -from .grpc_asyncio import FeaturestoreServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[FeaturestoreServiceTransport]] -_transport_registry['grpc'] = FeaturestoreServiceGrpcTransport -_transport_registry['grpc_asyncio'] = FeaturestoreServiceGrpcAsyncIOTransport - -__all__ = ( - 'FeaturestoreServiceTransport', - 'FeaturestoreServiceGrpcTransport', - 'FeaturestoreServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py deleted file mode 100644 index 9dca966c09..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py +++ /dev/null @@ -1,446 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import entity_type -from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type -from google.cloud.aiplatform_v1beta1.types import feature -from google.cloud.aiplatform_v1beta1.types import feature as gca_feature -from google.cloud.aiplatform_v1beta1.types import featurestore -from google.cloud.aiplatform_v1beta1.types import featurestore_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class FeaturestoreServiceTransport(abc.ABC): - """Abstract transport class for FeaturestoreService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. 
Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials is service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # TODO(busunkim): This method is in the base transport - # to avoid duplicating code across the transport classes. These functions - # should be deleted once the minimum required versions of google-auth is increased. - - # TODO: Remove this function once google-auth >= 1.25.0 is required - @classmethod - def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: - """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" - - scopes_kwargs = {} - - if _GOOGLE_AUTH_VERSION and ( - packaging.version.parse(_GOOGLE_AUTH_VERSION) - >= packaging.version.parse("1.25.0") - ): - scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} - else: - scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} - - return scopes_kwargs - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
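# An illustrative sketch, not generated code: what a single entry of the
# `_wrapped_methods` dictionary built below amounts to. `wrap_method` layers
# the default timeout and the user-agent metadata from `client_info` onto the
# raw transport callable, while still allowing per-call overrides.
from google.api_core import gapic_v1

def wrap_with_defaults(raw_callable, client_info):
    # Mirrors one dictionary entry such as the one for `self.create_featurestore`.
    return gapic_v1.method.wrap_method(
        raw_callable,
        default_timeout=5.0,
        client_info=client_info,
    )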
- self._wrapped_methods = { - self.create_featurestore: gapic_v1.method.wrap_method( - self.create_featurestore, - default_timeout=5.0, - client_info=client_info, - ), - self.get_featurestore: gapic_v1.method.wrap_method( - self.get_featurestore, - default_timeout=5.0, - client_info=client_info, - ), - self.list_featurestores: gapic_v1.method.wrap_method( - self.list_featurestores, - default_timeout=5.0, - client_info=client_info, - ), - self.update_featurestore: gapic_v1.method.wrap_method( - self.update_featurestore, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_featurestore: gapic_v1.method.wrap_method( - self.delete_featurestore, - default_timeout=5.0, - client_info=client_info, - ), - self.create_entity_type: gapic_v1.method.wrap_method( - self.create_entity_type, - default_timeout=5.0, - client_info=client_info, - ), - self.get_entity_type: gapic_v1.method.wrap_method( - self.get_entity_type, - default_timeout=5.0, - client_info=client_info, - ), - self.list_entity_types: gapic_v1.method.wrap_method( - self.list_entity_types, - default_timeout=5.0, - client_info=client_info, - ), - self.update_entity_type: gapic_v1.method.wrap_method( - self.update_entity_type, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_entity_type: gapic_v1.method.wrap_method( - self.delete_entity_type, - default_timeout=5.0, - client_info=client_info, - ), - self.create_feature: gapic_v1.method.wrap_method( - self.create_feature, - default_timeout=5.0, - client_info=client_info, - ), - self.batch_create_features: gapic_v1.method.wrap_method( - self.batch_create_features, - default_timeout=5.0, - client_info=client_info, - ), - self.get_feature: gapic_v1.method.wrap_method( - self.get_feature, - default_timeout=5.0, - client_info=client_info, - ), - self.list_features: gapic_v1.method.wrap_method( - self.list_features, - default_timeout=5.0, - client_info=client_info, - ), - self.update_feature: gapic_v1.method.wrap_method( - self.update_feature, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_feature: gapic_v1.method.wrap_method( - self.delete_feature, - default_timeout=5.0, - client_info=client_info, - ), - self.import_feature_values: gapic_v1.method.wrap_method( - self.import_feature_values, - default_timeout=5.0, - client_info=client_info, - ), - self.batch_read_feature_values: gapic_v1.method.wrap_method( - self.batch_read_feature_values, - default_timeout=5.0, - client_info=client_info, - ), - self.export_feature_values: gapic_v1.method.wrap_method( - self.export_feature_values, - default_timeout=None, - client_info=client_info, - ), - self.search_features: gapic_v1.method.wrap_method( - self.search_features, - default_timeout=5.0, - client_info=client_info, - ), - } - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_featurestore(self) -> Callable[ - [featurestore_service.CreateFeaturestoreRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_featurestore(self) -> Callable[ - [featurestore_service.GetFeaturestoreRequest], - Union[ - featurestore.Featurestore, - Awaitable[featurestore.Featurestore] - ]]: - raise NotImplementedError() - - @property - def list_featurestores(self) -> Callable[ - [featurestore_service.ListFeaturestoresRequest], - Union[ - featurestore_service.ListFeaturestoresResponse, - 
Awaitable[featurestore_service.ListFeaturestoresResponse] - ]]: - raise NotImplementedError() - - @property - def update_featurestore(self) -> Callable[ - [featurestore_service.UpdateFeaturestoreRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def delete_featurestore(self) -> Callable[ - [featurestore_service.DeleteFeaturestoreRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def create_entity_type(self) -> Callable[ - [featurestore_service.CreateEntityTypeRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_entity_type(self) -> Callable[ - [featurestore_service.GetEntityTypeRequest], - Union[ - entity_type.EntityType, - Awaitable[entity_type.EntityType] - ]]: - raise NotImplementedError() - - @property - def list_entity_types(self) -> Callable[ - [featurestore_service.ListEntityTypesRequest], - Union[ - featurestore_service.ListEntityTypesResponse, - Awaitable[featurestore_service.ListEntityTypesResponse] - ]]: - raise NotImplementedError() - - @property - def update_entity_type(self) -> Callable[ - [featurestore_service.UpdateEntityTypeRequest], - Union[ - gca_entity_type.EntityType, - Awaitable[gca_entity_type.EntityType] - ]]: - raise NotImplementedError() - - @property - def delete_entity_type(self) -> Callable[ - [featurestore_service.DeleteEntityTypeRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def create_feature(self) -> Callable[ - [featurestore_service.CreateFeatureRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def batch_create_features(self) -> Callable[ - [featurestore_service.BatchCreateFeaturesRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_feature(self) -> Callable[ - [featurestore_service.GetFeatureRequest], - Union[ - feature.Feature, - Awaitable[feature.Feature] - ]]: - raise NotImplementedError() - - @property - def list_features(self) -> Callable[ - [featurestore_service.ListFeaturesRequest], - Union[ - featurestore_service.ListFeaturesResponse, - Awaitable[featurestore_service.ListFeaturesResponse] - ]]: - raise NotImplementedError() - - @property - def update_feature(self) -> Callable[ - [featurestore_service.UpdateFeatureRequest], - Union[ - gca_feature.Feature, - Awaitable[gca_feature.Feature] - ]]: - raise NotImplementedError() - - @property - def delete_feature(self) -> Callable[ - [featurestore_service.DeleteFeatureRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def import_feature_values(self) -> Callable[ - [featurestore_service.ImportFeatureValuesRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def batch_read_feature_values(self) -> Callable[ - [featurestore_service.BatchReadFeatureValuesRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def export_feature_values(self) -> Callable[ - [featurestore_service.ExportFeatureValuesRequest], - Union[ - 
operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def search_features(self) -> Callable[ - [featurestore_service.SearchFeaturesRequest], - Union[ - featurestore_service.SearchFeaturesResponse, - Awaitable[featurestore_service.SearchFeaturesResponse] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'FeaturestoreServiceTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py deleted file mode 100644 index 771cbf2737..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py +++ /dev/null @@ -1,803 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.api_core import gapic_v1 # type: ignore -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1beta1.types import entity_type -from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type -from google.cloud.aiplatform_v1beta1.types import feature -from google.cloud.aiplatform_v1beta1.types import feature as gca_feature -from google.cloud.aiplatform_v1beta1.types import featurestore -from google.cloud.aiplatform_v1beta1.types import featurestore_service -from google.longrunning import operations_pb2 # type: ignore -from .base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO - - -class FeaturestoreServiceGrpcTransport(FeaturestoreServiceTransport): - """gRPC backend transport for FeaturestoreService. - - The service that handles CRUD and List for resources for - Featurestore. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. 
- credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. 
- - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_featurestore(self) -> Callable[ - [featurestore_service.CreateFeaturestoreRequest], - operations_pb2.Operation]: - r"""Return a callable for the create featurestore method over gRPC. - - Creates a new Featurestore in a given project and - location. - - Returns: - Callable[[~.CreateFeaturestoreRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_featurestore' not in self._stubs: - self._stubs['create_featurestore'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateFeaturestore', - request_serializer=featurestore_service.CreateFeaturestoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_featurestore'] - - @property - def get_featurestore(self) -> Callable[ - [featurestore_service.GetFeaturestoreRequest], - featurestore.Featurestore]: - r"""Return a callable for the get featurestore method over gRPC. - - Gets details of a single Featurestore. - - Returns: - Callable[[~.GetFeaturestoreRequest], - ~.Featurestore]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_featurestore' not in self._stubs: - self._stubs['get_featurestore'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetFeaturestore', - request_serializer=featurestore_service.GetFeaturestoreRequest.serialize, - response_deserializer=featurestore.Featurestore.deserialize, - ) - return self._stubs['get_featurestore'] - - @property - def list_featurestores(self) -> Callable[ - [featurestore_service.ListFeaturestoresRequest], - featurestore_service.ListFeaturestoresResponse]: - r"""Return a callable for the list featurestores method over gRPC. - - Lists Featurestores in a given project and location. - - Returns: - Callable[[~.ListFeaturestoresRequest], - ~.ListFeaturestoresResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
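# An illustrative sketch of the stub-caching pattern that every property in
# this transport repeats: the unary-unary callable is created once per
# channel and memoized in `_stubs`, so repeated attribute access costs only
# a dictionary lookup.
def cached_stub(transport, name, rpc_path, request_serializer, response_deserializer):
    # Hypothetical helper; the generated properties inline this logic.
    if name not in transport._stubs:
        transport._stubs[name] = transport.grpc_channel.unary_unary(
            rpc_path,
            request_serializer=request_serializer,
            response_deserializer=response_deserializer,
        )
    return transport._stubs[name]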
- if 'list_featurestores' not in self._stubs: - self._stubs['list_featurestores'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListFeaturestores', - request_serializer=featurestore_service.ListFeaturestoresRequest.serialize, - response_deserializer=featurestore_service.ListFeaturestoresResponse.deserialize, - ) - return self._stubs['list_featurestores'] - - @property - def update_featurestore(self) -> Callable[ - [featurestore_service.UpdateFeaturestoreRequest], - operations_pb2.Operation]: - r"""Return a callable for the update featurestore method over gRPC. - - Updates the parameters of a single Featurestore. - - Returns: - Callable[[~.UpdateFeaturestoreRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_featurestore' not in self._stubs: - self._stubs['update_featurestore'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateFeaturestore', - request_serializer=featurestore_service.UpdateFeaturestoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_featurestore'] - - @property - def delete_featurestore(self) -> Callable[ - [featurestore_service.DeleteFeaturestoreRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete featurestore method over gRPC. - - Deletes a single Featurestore. The Featurestore must not contain - any EntityTypes or ``force`` must be set to true for the request - to succeed. - - Returns: - Callable[[~.DeleteFeaturestoreRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_featurestore' not in self._stubs: - self._stubs['delete_featurestore'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteFeaturestore', - request_serializer=featurestore_service.DeleteFeaturestoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_featurestore'] - - @property - def create_entity_type(self) -> Callable[ - [featurestore_service.CreateEntityTypeRequest], - operations_pb2.Operation]: - r"""Return a callable for the create entity type method over gRPC. - - Creates a new EntityType in a given Featurestore. - - Returns: - Callable[[~.CreateEntityTypeRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
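# An illustrative sketch of how the Operation-returning RPCs above are used
# from the public client (a `FeaturestoreServiceClient` instance and the
# flattened `name`/`force` arguments are assumptions): the returned
# long-running operation is polled until the server finishes.
#
#   operation = client.delete_featurestore(name=featurestore_name, force=True)
#   operation.result(timeout=300)  # blocks until the deletion completes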
- if 'create_entity_type' not in self._stubs: - self._stubs['create_entity_type'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateEntityType', - request_serializer=featurestore_service.CreateEntityTypeRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_entity_type'] - - @property - def get_entity_type(self) -> Callable[ - [featurestore_service.GetEntityTypeRequest], - entity_type.EntityType]: - r"""Return a callable for the get entity type method over gRPC. - - Gets details of a single EntityType. - - Returns: - Callable[[~.GetEntityTypeRequest], - ~.EntityType]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_entity_type' not in self._stubs: - self._stubs['get_entity_type'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetEntityType', - request_serializer=featurestore_service.GetEntityTypeRequest.serialize, - response_deserializer=entity_type.EntityType.deserialize, - ) - return self._stubs['get_entity_type'] - - @property - def list_entity_types(self) -> Callable[ - [featurestore_service.ListEntityTypesRequest], - featurestore_service.ListEntityTypesResponse]: - r"""Return a callable for the list entity types method over gRPC. - - Lists EntityTypes in a given Featurestore. - - Returns: - Callable[[~.ListEntityTypesRequest], - ~.ListEntityTypesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_entity_types' not in self._stubs: - self._stubs['list_entity_types'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListEntityTypes', - request_serializer=featurestore_service.ListEntityTypesRequest.serialize, - response_deserializer=featurestore_service.ListEntityTypesResponse.deserialize, - ) - return self._stubs['list_entity_types'] - - @property - def update_entity_type(self) -> Callable[ - [featurestore_service.UpdateEntityTypeRequest], - gca_entity_type.EntityType]: - r"""Return a callable for the update entity type method over gRPC. - - Updates the parameters of a single EntityType. - - Returns: - Callable[[~.UpdateEntityTypeRequest], - ~.EntityType]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_entity_type' not in self._stubs: - self._stubs['update_entity_type'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateEntityType', - request_serializer=featurestore_service.UpdateEntityTypeRequest.serialize, - response_deserializer=gca_entity_type.EntityType.deserialize, - ) - return self._stubs['update_entity_type'] - - @property - def delete_entity_type(self) -> Callable[ - [featurestore_service.DeleteEntityTypeRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete entity type method over gRPC. - - Deletes a single EntityType. 
The EntityType must not have any - Features or ``force`` must be set to true for the request to - succeed. - - Returns: - Callable[[~.DeleteEntityTypeRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_entity_type' not in self._stubs: - self._stubs['delete_entity_type'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteEntityType', - request_serializer=featurestore_service.DeleteEntityTypeRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_entity_type'] - - @property - def create_feature(self) -> Callable[ - [featurestore_service.CreateFeatureRequest], - operations_pb2.Operation]: - r"""Return a callable for the create feature method over gRPC. - - Creates a new Feature in a given EntityType. - - Returns: - Callable[[~.CreateFeatureRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_feature' not in self._stubs: - self._stubs['create_feature'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateFeature', - request_serializer=featurestore_service.CreateFeatureRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_feature'] - - @property - def batch_create_features(self) -> Callable[ - [featurestore_service.BatchCreateFeaturesRequest], - operations_pb2.Operation]: - r"""Return a callable for the batch create features method over gRPC. - - Creates a batch of Features in a given EntityType. - - Returns: - Callable[[~.BatchCreateFeaturesRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'batch_create_features' not in self._stubs: - self._stubs['batch_create_features'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/BatchCreateFeatures', - request_serializer=featurestore_service.BatchCreateFeaturesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['batch_create_features'] - - @property - def get_feature(self) -> Callable[ - [featurestore_service.GetFeatureRequest], - feature.Feature]: - r"""Return a callable for the get feature method over gRPC. - - Gets details of a single Feature. - - Returns: - Callable[[~.GetFeatureRequest], - ~.Feature]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_feature' not in self._stubs: - self._stubs['get_feature'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetFeature', - request_serializer=featurestore_service.GetFeatureRequest.serialize, - response_deserializer=feature.Feature.deserialize, - ) - return self._stubs['get_feature'] - - @property - def list_features(self) -> Callable[ - [featurestore_service.ListFeaturesRequest], - featurestore_service.ListFeaturesResponse]: - r"""Return a callable for the list features method over gRPC. - - Lists Features in a given EntityType. - - Returns: - Callable[[~.ListFeaturesRequest], - ~.ListFeaturesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_features' not in self._stubs: - self._stubs['list_features'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListFeatures', - request_serializer=featurestore_service.ListFeaturesRequest.serialize, - response_deserializer=featurestore_service.ListFeaturesResponse.deserialize, - ) - return self._stubs['list_features'] - - @property - def update_feature(self) -> Callable[ - [featurestore_service.UpdateFeatureRequest], - gca_feature.Feature]: - r"""Return a callable for the update feature method over gRPC. - - Updates the parameters of a single Feature. - - Returns: - Callable[[~.UpdateFeatureRequest], - ~.Feature]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_feature' not in self._stubs: - self._stubs['update_feature'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateFeature', - request_serializer=featurestore_service.UpdateFeatureRequest.serialize, - response_deserializer=gca_feature.Feature.deserialize, - ) - return self._stubs['update_feature'] - - @property - def delete_feature(self) -> Callable[ - [featurestore_service.DeleteFeatureRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete feature method over gRPC. - - Deletes a single Feature. - - Returns: - Callable[[~.DeleteFeatureRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_feature' not in self._stubs: - self._stubs['delete_feature'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteFeature', - request_serializer=featurestore_service.DeleteFeatureRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_feature'] - - @property - def import_feature_values(self) -> Callable[ - [featurestore_service.ImportFeatureValuesRequest], - operations_pb2.Operation]: - r"""Return a callable for the import feature values method over gRPC. - - Imports Feature values into the Featurestore from a - source storage. - The progress of the import is tracked by the returned - operation. 
The imported features are guaranteed to be - visible to subsequent read operations after the - operation is marked as successfully done. - If an import operation fails, the Feature values - returned from reads and exports may be inconsistent. If - consistency is required, the caller must retry the same - import request again and wait till the new operation - returned is marked as successfully done. - There are also scenarios where the caller can cause - inconsistency. - - Source data for import contains multiple distinct - Feature values for the same entity ID and timestamp. - - Source is modified during an import. This includes - adding, updating, or removing source data and/or - metadata. Examples of updating metadata include but are - not limited to changing storage location, storage class, - or retention policy. - - Online serving cluster is under-provisioned. - - Returns: - Callable[[~.ImportFeatureValuesRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'import_feature_values' not in self._stubs: - self._stubs['import_feature_values'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ImportFeatureValues', - request_serializer=featurestore_service.ImportFeatureValuesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['import_feature_values'] - - @property - def batch_read_feature_values(self) -> Callable[ - [featurestore_service.BatchReadFeatureValuesRequest], - operations_pb2.Operation]: - r"""Return a callable for the batch read feature values method over gRPC. - - Batch reads Feature values from a Featurestore. - This API enables batch reading Feature values, where - each read instance in the batch may read Feature values - of entities from one or more EntityTypes. Point-in-time - correctness is guaranteed for Feature values of each - read instance as of each instance's read timestamp. - - Returns: - Callable[[~.BatchReadFeatureValuesRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'batch_read_feature_values' not in self._stubs: - self._stubs['batch_read_feature_values'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/BatchReadFeatureValues', - request_serializer=featurestore_service.BatchReadFeatureValuesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['batch_read_feature_values'] - - @property - def export_feature_values(self) -> Callable[ - [featurestore_service.ExportFeatureValuesRequest], - operations_pb2.Operation]: - r"""Return a callable for the export feature values method over gRPC. - - Exports Feature values from all the entities of a - target EntityType. - - Returns: - Callable[[~.ExportFeatureValuesRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'export_feature_values' not in self._stubs: - self._stubs['export_feature_values'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ExportFeatureValues', - request_serializer=featurestore_service.ExportFeatureValuesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_feature_values'] - - @property - def search_features(self) -> Callable[ - [featurestore_service.SearchFeaturesRequest], - featurestore_service.SearchFeaturesResponse]: - r"""Return a callable for the search features method over gRPC. - - Searches Features matching a query in a given - project. - - Returns: - Callable[[~.SearchFeaturesRequest], - ~.SearchFeaturesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'search_features' not in self._stubs: - self._stubs['search_features'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/SearchFeatures', - request_serializer=featurestore_service.SearchFeaturesRequest.serialize, - response_deserializer=featurestore_service.SearchFeaturesResponse.deserialize, - ) - return self._stubs['search_features'] - - -__all__ = ( - 'FeaturestoreServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py deleted file mode 100644 index a73c769f4d..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,807 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1beta1.types import entity_type -from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type -from google.cloud.aiplatform_v1beta1.types import feature -from google.cloud.aiplatform_v1beta1.types import feature as gca_feature -from google.cloud.aiplatform_v1beta1.types import featurestore -from google.cloud.aiplatform_v1beta1.types import featurestore_service -from google.longrunning import operations_pb2 # type: ignore -from .base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import FeaturestoreServiceGrpcTransport - - -class FeaturestoreServiceGrpcAsyncIOTransport(FeaturestoreServiceTransport): - """gRPC AsyncIO backend transport for FeaturestoreService. - - The service that handles CRUD and List for resources for - Featurestore. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. 
- """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_featurestore(self) -> Callable[ - [featurestore_service.CreateFeaturestoreRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create featurestore method over gRPC. - - Creates a new Featurestore in a given project and - location. - - Returns: - Callable[[~.CreateFeaturestoreRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_featurestore' not in self._stubs: - self._stubs['create_featurestore'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateFeaturestore', - request_serializer=featurestore_service.CreateFeaturestoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_featurestore'] - - @property - def get_featurestore(self) -> Callable[ - [featurestore_service.GetFeaturestoreRequest], - Awaitable[featurestore.Featurestore]]: - r"""Return a callable for the get featurestore method over gRPC. - - Gets details of a single Featurestore. - - Returns: - Callable[[~.GetFeaturestoreRequest], - Awaitable[~.Featurestore]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_featurestore' not in self._stubs: - self._stubs['get_featurestore'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetFeaturestore', - request_serializer=featurestore_service.GetFeaturestoreRequest.serialize, - response_deserializer=featurestore.Featurestore.deserialize, - ) - return self._stubs['get_featurestore'] - - @property - def list_featurestores(self) -> Callable[ - [featurestore_service.ListFeaturestoresRequest], - Awaitable[featurestore_service.ListFeaturestoresResponse]]: - r"""Return a callable for the list featurestores method over gRPC. - - Lists Featurestores in a given project and location. - - Returns: - Callable[[~.ListFeaturestoresRequest], - Awaitable[~.ListFeaturestoresResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_featurestores' not in self._stubs: - self._stubs['list_featurestores'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListFeaturestores', - request_serializer=featurestore_service.ListFeaturestoresRequest.serialize, - response_deserializer=featurestore_service.ListFeaturestoresResponse.deserialize, - ) - return self._stubs['list_featurestores'] - - @property - def update_featurestore(self) -> Callable[ - [featurestore_service.UpdateFeaturestoreRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the update featurestore method over gRPC. - - Updates the parameters of a single Featurestore. - - Returns: - Callable[[~.UpdateFeaturestoreRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_featurestore' not in self._stubs: - self._stubs['update_featurestore'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateFeaturestore', - request_serializer=featurestore_service.UpdateFeaturestoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_featurestore'] - - @property - def delete_featurestore(self) -> Callable[ - [featurestore_service.DeleteFeaturestoreRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete featurestore method over gRPC. - - Deletes a single Featurestore. The Featurestore must not contain - any EntityTypes or ``force`` must be set to true for the request - to succeed. - - Returns: - Callable[[~.DeleteFeaturestoreRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_featurestore' not in self._stubs: - self._stubs['delete_featurestore'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteFeaturestore', - request_serializer=featurestore_service.DeleteFeaturestoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_featurestore'] - - @property - def create_entity_type(self) -> Callable[ - [featurestore_service.CreateEntityTypeRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create entity type method over gRPC. - - Creates a new EntityType in a given Featurestore. - - Returns: - Callable[[~.CreateEntityTypeRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_entity_type' not in self._stubs: - self._stubs['create_entity_type'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateEntityType', - request_serializer=featurestore_service.CreateEntityTypeRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_entity_type'] - - @property - def get_entity_type(self) -> Callable[ - [featurestore_service.GetEntityTypeRequest], - Awaitable[entity_type.EntityType]]: - r"""Return a callable for the get entity type method over gRPC. - - Gets details of a single EntityType. - - Returns: - Callable[[~.GetEntityTypeRequest], - Awaitable[~.EntityType]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_entity_type' not in self._stubs: - self._stubs['get_entity_type'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetEntityType', - request_serializer=featurestore_service.GetEntityTypeRequest.serialize, - response_deserializer=entity_type.EntityType.deserialize, - ) - return self._stubs['get_entity_type'] - - @property - def list_entity_types(self) -> Callable[ - [featurestore_service.ListEntityTypesRequest], - Awaitable[featurestore_service.ListEntityTypesResponse]]: - r"""Return a callable for the list entity types method over gRPC. - - Lists EntityTypes in a given Featurestore. - - Returns: - Callable[[~.ListEntityTypesRequest], - Awaitable[~.ListEntityTypesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_entity_types' not in self._stubs: - self._stubs['list_entity_types'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListEntityTypes', - request_serializer=featurestore_service.ListEntityTypesRequest.serialize, - response_deserializer=featurestore_service.ListEntityTypesResponse.deserialize, - ) - return self._stubs['list_entity_types'] - - @property - def update_entity_type(self) -> Callable[ - [featurestore_service.UpdateEntityTypeRequest], - Awaitable[gca_entity_type.EntityType]]: - r"""Return a callable for the update entity type method over gRPC. - - Updates the parameters of a single EntityType. - - Returns: - Callable[[~.UpdateEntityTypeRequest], - Awaitable[~.EntityType]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_entity_type' not in self._stubs: - self._stubs['update_entity_type'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateEntityType', - request_serializer=featurestore_service.UpdateEntityTypeRequest.serialize, - response_deserializer=gca_entity_type.EntityType.deserialize, - ) - return self._stubs['update_entity_type'] - - @property - def delete_entity_type(self) -> Callable[ - [featurestore_service.DeleteEntityTypeRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete entity type method over gRPC. - - Deletes a single EntityType. The EntityType must not have any - Features or ``force`` must be set to true for the request to - succeed. - - Returns: - Callable[[~.DeleteEntityTypeRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_entity_type' not in self._stubs: - self._stubs['delete_entity_type'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteEntityType', - request_serializer=featurestore_service.DeleteEntityTypeRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_entity_type'] - - @property - def create_feature(self) -> Callable[ - [featurestore_service.CreateFeatureRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create feature method over gRPC. - - Creates a new Feature in a given EntityType. - - Returns: - Callable[[~.CreateFeatureRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_feature' not in self._stubs: - self._stubs['create_feature'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateFeature', - request_serializer=featurestore_service.CreateFeatureRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_feature'] - - @property - def batch_create_features(self) -> Callable[ - [featurestore_service.BatchCreateFeaturesRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the batch create features method over gRPC. - - Creates a batch of Features in a given EntityType. - - Returns: - Callable[[~.BatchCreateFeaturesRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'batch_create_features' not in self._stubs: - self._stubs['batch_create_features'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/BatchCreateFeatures', - request_serializer=featurestore_service.BatchCreateFeaturesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['batch_create_features'] - - @property - def get_feature(self) -> Callable[ - [featurestore_service.GetFeatureRequest], - Awaitable[feature.Feature]]: - r"""Return a callable for the get feature method over gRPC. - - Gets details of a single Feature. - - Returns: - Callable[[~.GetFeatureRequest], - Awaitable[~.Feature]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_feature' not in self._stubs: - self._stubs['get_feature'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetFeature', - request_serializer=featurestore_service.GetFeatureRequest.serialize, - response_deserializer=feature.Feature.deserialize, - ) - return self._stubs['get_feature'] - - @property - def list_features(self) -> Callable[ - [featurestore_service.ListFeaturesRequest], - Awaitable[featurestore_service.ListFeaturesResponse]]: - r"""Return a callable for the list features method over gRPC. - - Lists Features in a given EntityType. 
- - Returns: - Callable[[~.ListFeaturesRequest], - Awaitable[~.ListFeaturesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_features' not in self._stubs: - self._stubs['list_features'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListFeatures', - request_serializer=featurestore_service.ListFeaturesRequest.serialize, - response_deserializer=featurestore_service.ListFeaturesResponse.deserialize, - ) - return self._stubs['list_features'] - - @property - def update_feature(self) -> Callable[ - [featurestore_service.UpdateFeatureRequest], - Awaitable[gca_feature.Feature]]: - r"""Return a callable for the update feature method over gRPC. - - Updates the parameters of a single Feature. - - Returns: - Callable[[~.UpdateFeatureRequest], - Awaitable[~.Feature]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_feature' not in self._stubs: - self._stubs['update_feature'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateFeature', - request_serializer=featurestore_service.UpdateFeatureRequest.serialize, - response_deserializer=gca_feature.Feature.deserialize, - ) - return self._stubs['update_feature'] - - @property - def delete_feature(self) -> Callable[ - [featurestore_service.DeleteFeatureRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete feature method over gRPC. - - Deletes a single Feature. - - Returns: - Callable[[~.DeleteFeatureRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_feature' not in self._stubs: - self._stubs['delete_feature'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteFeature', - request_serializer=featurestore_service.DeleteFeatureRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_feature'] - - @property - def import_feature_values(self) -> Callable[ - [featurestore_service.ImportFeatureValuesRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the import feature values method over gRPC. - - Imports Feature values into the Featurestore from a - source storage. - The progress of the import is tracked by the returned - operation. The imported features are guaranteed to be - visible to subsequent read operations after the - operation is marked as successfully done. - If an import operation fails, the Feature values - returned from reads and exports may be inconsistent. If - consistency is required, the caller must retry the same - import request again and wait till the new operation - returned is marked as successfully done. - There are also scenarios where the caller can cause - inconsistency. 
- - Source data for import contains multiple distinct - Feature values for the same entity ID and timestamp. - - Source is modified during an import. This includes - adding, updating, or removing source data and/or - metadata. Examples of updating metadata include but are - not limited to changing storage location, storage class, - or retention policy. - - Online serving cluster is under-provisioned. - - Returns: - Callable[[~.ImportFeatureValuesRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'import_feature_values' not in self._stubs: - self._stubs['import_feature_values'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ImportFeatureValues', - request_serializer=featurestore_service.ImportFeatureValuesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['import_feature_values'] - - @property - def batch_read_feature_values(self) -> Callable[ - [featurestore_service.BatchReadFeatureValuesRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the batch read feature values method over gRPC. - - Batch reads Feature values from a Featurestore. - This API enables batch reading Feature values, where - each read instance in the batch may read Feature values - of entities from one or more EntityTypes. Point-in-time - correctness is guaranteed for Feature values of each - read instance as of each instance's read timestamp. - - Returns: - Callable[[~.BatchReadFeatureValuesRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'batch_read_feature_values' not in self._stubs: - self._stubs['batch_read_feature_values'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/BatchReadFeatureValues', - request_serializer=featurestore_service.BatchReadFeatureValuesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['batch_read_feature_values'] - - @property - def export_feature_values(self) -> Callable[ - [featurestore_service.ExportFeatureValuesRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the export feature values method over gRPC. - - Exports Feature values from all the entities of a - target EntityType. - - Returns: - Callable[[~.ExportFeatureValuesRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
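The `import_feature_values` documentation above notes that after a failed import, reads can stay inconsistent until the *same* request is retried to successful completion. A hedged sketch of that retry loop, assuming a `FeaturestoreServiceAsyncClient` exists alongside this transport (the client itself is not part of this diff):

    # Sketch only: retry the identical request until the LRO succeeds,
    # as the method documentation above recommends.
    async def import_until_done(client, request, attempts: int = 3):
        last_exc = None
        for _ in range(attempts):
            try:
                operation = await client.import_feature_values(request=request)
                return await operation.result()  # waits for the LRO to finish
            except Exception as exc:  # broad for illustration; narrow in practice
                last_exc = exc
        raise last_exc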
- if 'export_feature_values' not in self._stubs: - self._stubs['export_feature_values'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ExportFeatureValues', - request_serializer=featurestore_service.ExportFeatureValuesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_feature_values'] - - @property - def search_features(self) -> Callable[ - [featurestore_service.SearchFeaturesRequest], - Awaitable[featurestore_service.SearchFeaturesResponse]]: - r"""Return a callable for the search features method over gRPC. - - Searches Features matching a query in a given - project. - - Returns: - Callable[[~.SearchFeaturesRequest], - Awaitable[~.SearchFeaturesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'search_features' not in self._stubs: - self._stubs['search_features'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/SearchFeatures', - request_serializer=featurestore_service.SearchFeaturesRequest.serialize, - response_deserializer=featurestore_service.SearchFeaturesResponse.deserialize, - ) - return self._stubs['search_features'] - - -__all__ = ( - 'FeaturestoreServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/__init__.py deleted file mode 100644 index fb5d596b18..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import IndexEndpointServiceClient -from .async_client import IndexEndpointServiceAsyncClient - -__all__ = ( - 'IndexEndpointServiceClient', - 'IndexEndpointServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py deleted file mode 100644 index c7f921df76..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py +++ /dev/null @@ -1,817 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
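Because each property above returns the raw gRPC callable, the transport can be driven directly with request messages; `search_features` is a convenient example. A hedged sketch (channel and credentials setup omitted; the `location` value is illustrative):

    from google.cloud.aiplatform_v1beta1.types import featurestore_service

    async def search(transport):
        request = featurestore_service.SearchFeaturesRequest(
            location="projects/my-project/locations/us-central1",
        )
        response = await transport.search_features(request)
        for feature in response.features:
            print(feature.name)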
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import pagers -from google.cloud.aiplatform_v1beta1.types import index_endpoint -from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint -from google.cloud.aiplatform_v1beta1.types import index_endpoint_service -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import IndexEndpointServiceGrpcAsyncIOTransport -from .client import IndexEndpointServiceClient - - -class IndexEndpointServiceAsyncClient: - """A service for managing Vertex AI's IndexEndpoints.""" - - _client: IndexEndpointServiceClient - - DEFAULT_ENDPOINT = IndexEndpointServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = IndexEndpointServiceClient.DEFAULT_MTLS_ENDPOINT - - index_path = staticmethod(IndexEndpointServiceClient.index_path) - parse_index_path = staticmethod(IndexEndpointServiceClient.parse_index_path) - index_endpoint_path = staticmethod(IndexEndpointServiceClient.index_endpoint_path) - parse_index_endpoint_path = staticmethod(IndexEndpointServiceClient.parse_index_endpoint_path) - common_billing_account_path = staticmethod(IndexEndpointServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(IndexEndpointServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(IndexEndpointServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(IndexEndpointServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(IndexEndpointServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(IndexEndpointServiceClient.parse_common_organization_path) - common_project_path = staticmethod(IndexEndpointServiceClient.common_project_path) - parse_common_project_path = staticmethod(IndexEndpointServiceClient.parse_common_project_path) - common_location_path = staticmethod(IndexEndpointServiceClient.common_location_path) - parse_common_location_path = staticmethod(IndexEndpointServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client 
using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - IndexEndpointServiceAsyncClient: The constructed client. - """ - return IndexEndpointServiceClient.from_service_account_info.__func__(IndexEndpointServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - IndexEndpointServiceAsyncClient: The constructed client. - """ - return IndexEndpointServiceClient.from_service_account_file.__func__(IndexEndpointServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> IndexEndpointServiceTransport: - """Returns the transport used by the client instance. - - Returns: - IndexEndpointServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(IndexEndpointServiceClient).get_transport_class, type(IndexEndpointServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, IndexEndpointServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the index endpoint service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.IndexEndpointServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. 
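The constructor documentation above describes how `client_options.api_endpoint` interacts with the GOOGLE_API_USE_MTLS_ENDPOINT and GOOGLE_API_USE_CLIENT_CERTIFICATE environment variables. A hedged configuration sketch (the endpoint string is the service default defined later in this diff):

    import os
    from google.api_core.client_options import ClientOptions
    from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import (
        IndexEndpointServiceAsyncClient,
    )

    # Pin the regular endpoint even if a client certificate is present.
    os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "never"

    client = IndexEndpointServiceAsyncClient(
        client_options=ClientOptions(api_endpoint="aiplatform.googleapis.com"),
    )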
- """ - self._client = IndexEndpointServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_index_endpoint(self, - request: index_endpoint_service.CreateIndexEndpointRequest = None, - *, - parent: str = None, - index_endpoint: gca_index_endpoint.IndexEndpoint = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates an IndexEndpoint. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.CreateIndexEndpointRequest`): - The request object. Request message for - [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint]. - parent (:class:`str`): - Required. The resource name of the Location to create - the IndexEndpoint in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - index_endpoint (:class:`google.cloud.aiplatform_v1beta1.types.IndexEndpoint`): - Required. The IndexEndpoint to - create. - - This corresponds to the ``index_endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.IndexEndpoint` Indexes are deployed into it. An IndexEndpoint can have multiple - DeployedIndexes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, index_endpoint]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_endpoint_service.CreateIndexEndpointRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if index_endpoint is not None: - request.index_endpoint = index_endpoint - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_index_endpoint, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_index_endpoint.IndexEndpoint, - metadata_type=index_endpoint_service.CreateIndexEndpointOperationMetadata, - ) - - # Done; return the response. 
- return response - - async def get_index_endpoint(self, - request: index_endpoint_service.GetIndexEndpointRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> index_endpoint.IndexEndpoint: - r"""Gets an IndexEndpoint. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetIndexEndpointRequest`): - The request object. Request message for - [IndexEndpointService.GetIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.GetIndexEndpoint] - name (:class:`str`): - Required. The name of the IndexEndpoint resource. - Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.IndexEndpoint: - Indexes are deployed into it. An - IndexEndpoint can have multiple - DeployedIndexes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_endpoint_service.GetIndexEndpointRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_index_endpoint, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_index_endpoints(self, - request: index_endpoint_service.ListIndexEndpointsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListIndexEndpointsAsyncPager: - r"""Lists IndexEndpoints in a Location. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest`): - The request object. Request message for - [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]. - parent (:class:`str`): - Required. The resource name of the Location from which - to list the IndexEndpoints. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
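`create_index_endpoint` resolves to an operation future whose result is the created IndexEndpoint, while `get_index_endpoint` returns the resource directly. A hedged end-to-end sketch (project, region, and display name are illustrative):

    from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint

    async def create_and_fetch(client):
        parent = "projects/my-project/locations/us-central1"
        endpoint = gca_index_endpoint.IndexEndpoint(display_name="my-endpoint")

        operation = await client.create_index_endpoint(
            parent=parent, index_endpoint=endpoint)
        created = await operation.result()  # waits until the LRO completes

        return await client.get_index_endpoint(name=created.name)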
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.index_endpoint_service.pagers.ListIndexEndpointsAsyncPager: - Response message for - [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_endpoint_service.ListIndexEndpointsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_index_endpoints, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListIndexEndpointsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_index_endpoint(self, - request: index_endpoint_service.UpdateIndexEndpointRequest = None, - *, - index_endpoint: gca_index_endpoint.IndexEndpoint = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_index_endpoint.IndexEndpoint: - r"""Updates an IndexEndpoint. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateIndexEndpointRequest`): - The request object. Request message for - [IndexEndpointService.UpdateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.UpdateIndexEndpoint]. - index_endpoint (:class:`google.cloud.aiplatform_v1beta1.types.IndexEndpoint`): - Required. The IndexEndpoint which - replaces the resource on the server. - - This corresponds to the ``index_endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The update mask applies to the resource. See - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
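As the pager wrapping above shows, `list_index_endpoints` returns an object that resolves additional pages during iteration rather than a plain list. A hedged usage sketch:

    async def print_endpoint_names(client, parent: str):
        pager = await client.list_index_endpoints(parent=parent)
        async for index_endpoint in pager:  # pages are fetched transparently
            print(index_endpoint.name)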
- - Returns: - google.cloud.aiplatform_v1beta1.types.IndexEndpoint: - Indexes are deployed into it. An - IndexEndpoint can have multiple - DeployedIndexes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([index_endpoint, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_endpoint_service.UpdateIndexEndpointRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if index_endpoint is not None: - request.index_endpoint = index_endpoint - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_index_endpoint, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("index_endpoint.name", request.index_endpoint.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_index_endpoint(self, - request: index_endpoint_service.DeleteIndexEndpointRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes an IndexEndpoint. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteIndexEndpointRequest`): - The request object. Request message for - [IndexEndpointService.DeleteIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeleteIndexEndpoint]. - name (:class:`str`): - Required. The name of the IndexEndpoint resource to be - deleted. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
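`update_index_endpoint` applies only the fields named in the required FieldMask, so partial updates leave every other field untouched. A hedged sketch (assumes `display_name` is a mutable field):

    from google.protobuf import field_mask_pb2

    async def rename_endpoint(client, endpoint, new_display_name: str):
        endpoint.display_name = new_display_name
        mask = field_mask_pb2.FieldMask(paths=["display_name"])
        return await client.update_index_endpoint(
            index_endpoint=endpoint, update_mask=mask)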
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_endpoint_service.DeleteIndexEndpointRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_index_endpoint, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def deploy_index(self, - request: index_endpoint_service.DeployIndexRequest = None, - *, - index_endpoint: str = None, - deployed_index: gca_index_endpoint.DeployedIndex = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deploys an Index into this IndexEndpoint, creating a - DeployedIndex within it. - Only non-empty Indexes can be deployed. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.DeployIndexRequest`): - The request object. Request message for - [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. - index_endpoint (:class:`str`): - Required. The name of the IndexEndpoint resource into - which to deploy an Index. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - - This corresponds to the ``index_endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_index (:class:`google.cloud.aiplatform_v1beta1.types.DeployedIndex`): - Required. The DeployedIndex to be - created within the IndexEndpoint. - - This corresponds to the ``deployed_index`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.DeployIndexResponse` - Response message for - [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
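As the docstring above explains, `delete_index_endpoint` is long-running but resolves to `google.protobuf.Empty`, so the operation is awaited purely for its side effect. A hedged sketch:

    async def delete_endpoint(client, name: str):
        operation = await client.delete_index_endpoint(name=name)
        await operation.result()  # result is Empty; completion confirms deletion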
- has_flattened_params = any([index_endpoint, deployed_index]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_endpoint_service.DeployIndexRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if index_endpoint is not None: - request.index_endpoint = index_endpoint - if deployed_index is not None: - request.deployed_index = deployed_index - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.deploy_index, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("index_endpoint", request.index_endpoint), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - index_endpoint_service.DeployIndexResponse, - metadata_type=index_endpoint_service.DeployIndexOperationMetadata, - ) - - # Done; return the response. - return response - - async def undeploy_index(self, - request: index_endpoint_service.UndeployIndexRequest = None, - *, - index_endpoint: str = None, - deployed_index_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Undeploys an Index from an IndexEndpoint, removing a - DeployedIndex from it, and freeing all resources it's - using. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.UndeployIndexRequest`): - The request object. Request message for - [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. - index_endpoint (:class:`str`): - Required. The name of the IndexEndpoint resource from - which to undeploy an Index. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - - This corresponds to the ``index_endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_index_id (:class:`str`): - Required. The ID of the DeployedIndex - to be undeployed from the IndexEndpoint. - - This corresponds to the ``deployed_index_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.UndeployIndexResponse` - Response message for - [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([index_endpoint, deployed_index_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_endpoint_service.UndeployIndexRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if index_endpoint is not None: - request.index_endpoint = index_endpoint - if deployed_index_id is not None: - request.deployed_index_id = deployed_index_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.undeploy_index, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("index_endpoint", request.index_endpoint), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - index_endpoint_service.UndeployIndexResponse, - metadata_type=index_endpoint_service.UndeployIndexOperationMetadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "IndexEndpointServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py deleted file mode 100644 index c3c79243f2..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py +++ /dev/null @@ -1,1013 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
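`deploy_index` and `undeploy_index` round out the lifecycle: both are long-running, resolving to DeployIndexResponse and UndeployIndexResponse respectively. A hedged sketch pairing them (resource names and the deployed-index ID are illustrative):

    from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint

    async def deploy_then_undeploy(client, endpoint_name: str, index_name: str):
        deployed = gca_index_endpoint.DeployedIndex(
            id="my_deployed_index",
            index=index_name,
        )
        op = await client.deploy_index(
            index_endpoint=endpoint_name, deployed_index=deployed)
        await op.result()  # DeployIndexResponse

        op = await client.undeploy_index(
            index_endpoint=endpoint_name, deployed_index_id=deployed.id)
        await op.result()  # UndeployIndexResponse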
-#
-from collections import OrderedDict
-from distutils import util
-import os
-import re
-from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
-import pkg_resources
-
-from google.api_core import client_options as client_options_lib # type: ignore
-from google.api_core import exceptions as core_exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
-from google.auth import credentials as ga_credentials # type: ignore
-from google.auth.transport import mtls # type: ignore
-from google.auth.transport.grpc import SslCredentials # type: ignore
-from google.auth.exceptions import MutualTLSChannelError # type: ignore
-from google.oauth2 import service_account # type: ignore
-
-from google.api_core import operation as gac_operation # type: ignore
-from google.api_core import operation_async # type: ignore
-from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import pagers
-from google.cloud.aiplatform_v1beta1.types import index_endpoint
-from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint
-from google.cloud.aiplatform_v1beta1.types import index_endpoint_service
-from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
-from google.protobuf import empty_pb2 # type: ignore
-from google.protobuf import field_mask_pb2 # type: ignore
-from google.protobuf import timestamp_pb2 # type: ignore
-from .transports.base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO
-from .transports.grpc import IndexEndpointServiceGrpcTransport
-from .transports.grpc_asyncio import IndexEndpointServiceGrpcAsyncIOTransport
-
-
-class IndexEndpointServiceClientMeta(type):
-    """Metaclass for the IndexEndpointService client.
-
-    This provides class-level methods for building and retrieving
-    support objects (e.g. transport) without polluting the client instance
-    objects.
-    """
-    _transport_registry = OrderedDict()  # type: Dict[str, Type[IndexEndpointServiceTransport]]
-    _transport_registry["grpc"] = IndexEndpointServiceGrpcTransport
-    _transport_registry["grpc_asyncio"] = IndexEndpointServiceGrpcAsyncIOTransport
-
-    def get_transport_class(cls,
-            label: str = None,
-        ) -> Type[IndexEndpointServiceTransport]:
-        """Returns an appropriate transport class.
-
-        Args:
-            label: The name of the desired transport. If none is
-                provided, then the first transport in the registry is used.
-
-        Returns:
-            The transport class to use.
-        """
-        # If a specific transport is requested, return that one.
-        if label:
-            return cls._transport_registry[label]
-
-        # No transport is requested; return the default (that is, the first one
-        # in the dictionary).
-        return next(iter(cls._transport_registry.values()))
-
-
-class IndexEndpointServiceClient(metaclass=IndexEndpointServiceClientMeta):
-    """A service for managing Vertex AI's IndexEndpoints."""
-
-    @staticmethod
-    def _get_default_mtls_endpoint(api_endpoint):
-        """Converts api endpoint to mTLS endpoint.
-
-        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
-        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
-        Args:
-            api_endpoint (Optional[str]): the api endpoint to convert.
-        Returns:
-            str: converted mTLS api endpoint.
-        """
-        if not api_endpoint:
-            return api_endpoint
-
-        mtls_endpoint_re = re.compile(
-            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
- ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - IndexEndpointServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - IndexEndpointServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> IndexEndpointServiceTransport: - """Returns the transport used by the client instance. - - Returns: - IndexEndpointServiceTransport: The transport used by the client - instance. 
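Given the DEFAULT_ENDPOINT above, the `_get_default_mtls_endpoint` conversion inserts an `.mtls` label into the host, which is how DEFAULT_MTLS_ENDPOINT is derived. Illustration only (it exercises a private helper):

    assert IndexEndpointServiceClient._get_default_mtls_endpoint(
        "aiplatform.googleapis.com") == "aiplatform.mtls.googleapis.com"
    # Sandbox hosts gain the label ahead of "sandbox":
    #   "aiplatform.sandbox.googleapis.com" -> "aiplatform.mtls.sandbox.googleapis.com"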
- """ - return self._transport - - @staticmethod - def index_path(project: str,location: str,index: str,) -> str: - """Returns a fully-qualified index string.""" - return "projects/{project}/locations/{location}/indexes/{index}".format(project=project, location=location, index=index, ) - - @staticmethod - def parse_index_path(path: str) -> Dict[str,str]: - """Parses a index path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/indexes/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def index_endpoint_path(project: str,location: str,index_endpoint: str,) -> str: - """Returns a fully-qualified index_endpoint string.""" - return "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format(project=project, location=location, index_endpoint=index_endpoint, ) - - @staticmethod - def parse_index_endpoint_path(path: str) -> Dict[str,str]: - """Parses a index_endpoint path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/indexEndpoints/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, IndexEndpointServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, 
- ) -> None: - """Instantiates the index endpoint service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, IndexEndpointServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, IndexEndpointServiceTransport): - # transport is a IndexEndpointServiceTransport instance. 
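
To make the endpoint-resolution rules above concrete, here is a sketch of constructing the client with an explicit override (it assumes Application Default Credentials are available; the regional endpoint value is illustrative, not prescriptive):

    import os
    from google.api_core.client_options import ClientOptions
    from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import (
        IndexEndpointServiceClient,
    )

    # With no override, GOOGLE_API_USE_MTLS_ENDPOINT ("never"/"always"/"auto")
    # decides between DEFAULT_ENDPOINT and DEFAULT_MTLS_ENDPOINT.
    os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "never"

    # An explicit api_endpoint takes precedence over the environment variable.
    client = IndexEndpointServiceClient(
        client_options=ClientOptions(api_endpoint="us-central1-aiplatform.googleapis.com"),
    )
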
- if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - ) - - def create_index_endpoint(self, - request: index_endpoint_service.CreateIndexEndpointRequest = None, - *, - parent: str = None, - index_endpoint: gca_index_endpoint.IndexEndpoint = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates an IndexEndpoint. - - Args: - request (google.cloud.aiplatform_v1beta1.types.CreateIndexEndpointRequest): - The request object. Request message for - [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint]. - parent (str): - Required. The resource name of the Location to create - the IndexEndpoint in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - index_endpoint (google.cloud.aiplatform_v1beta1.types.IndexEndpoint): - Required. The IndexEndpoint to - create. - - This corresponds to the ``index_endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.IndexEndpoint` Indexes are deployed into it. An IndexEndpoint can have multiple - DeployedIndexes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, index_endpoint]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_endpoint_service.CreateIndexEndpointRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_endpoint_service.CreateIndexEndpointRequest): - request = index_endpoint_service.CreateIndexEndpointRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if index_endpoint is not None: - request.index_endpoint = index_endpoint - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
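
The mutual-exclusion check above gives callers two equivalent styles; a sketch using the client from the previous example (resource names are made up):

    from google.cloud.aiplatform_v1beta1 import types

    parent = "projects/my-project/locations/us-central1"
    endpoint = types.IndexEndpoint(display_name="my-endpoint")

    # Style 1: flattened keyword arguments.
    operation = client.create_index_endpoint(parent=parent, index_endpoint=endpoint)

    # Style 2: an explicit request object; mixing the two styles raises ValueError.
    request = types.CreateIndexEndpointRequest(parent=parent, index_endpoint=endpoint)
    operation = client.create_index_endpoint(request=request)
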
- rpc = self._transport._wrapped_methods[self._transport.create_index_endpoint] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_index_endpoint.IndexEndpoint, - metadata_type=index_endpoint_service.CreateIndexEndpointOperationMetadata, - ) - - # Done; return the response. - return response - - def get_index_endpoint(self, - request: index_endpoint_service.GetIndexEndpointRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> index_endpoint.IndexEndpoint: - r"""Gets an IndexEndpoint. - - Args: - request (google.cloud.aiplatform_v1beta1.types.GetIndexEndpointRequest): - The request object. Request message for - [IndexEndpointService.GetIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.GetIndexEndpoint] - name (str): - Required. The name of the IndexEndpoint resource. - Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.IndexEndpoint: - Indexes are deployed into it. An - IndexEndpoint can have multiple - DeployedIndexes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_endpoint_service.GetIndexEndpointRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_endpoint_service.GetIndexEndpointRequest): - request = index_endpoint_service.GetIndexEndpointRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_index_endpoint] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
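
Because create_index_endpoint returns an operation future (wrapped via gac_operation.from_gapic above), a typical caller blocks on result(); a sketch (the timeout value is arbitrary):

    operation = client.create_index_endpoint(parent=parent, index_endpoint=endpoint)
    created = operation.result(timeout=300)  # resolves to an IndexEndpoint message
    print(created.name)
    # While running, operation.metadata carries CreateIndexEndpointOperationMetadata.
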
- return response - - def list_index_endpoints(self, - request: index_endpoint_service.ListIndexEndpointsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListIndexEndpointsPager: - r"""Lists IndexEndpoints in a Location. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest): - The request object. Request message for - [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]. - parent (str): - Required. The resource name of the Location from which - to list the IndexEndpoints. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.index_endpoint_service.pagers.ListIndexEndpointsPager: - Response message for - [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_endpoint_service.ListIndexEndpointsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_endpoint_service.ListIndexEndpointsRequest): - request = index_endpoint_service.ListIndexEndpointsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_index_endpoints] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListIndexEndpointsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
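
The pager returned above can be consumed per item or per page; a short sketch:

    # Per item: additional ListIndexEndpoints pages are fetched transparently.
    for ep in client.list_index_endpoints(parent=parent):
        print(ep.name)

    # Per page: the pager's `pages` property yields the raw responses.
    for page in client.list_index_endpoints(parent=parent).pages:
        print(len(page.index_endpoints))
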
- return response - - def update_index_endpoint(self, - request: index_endpoint_service.UpdateIndexEndpointRequest = None, - *, - index_endpoint: gca_index_endpoint.IndexEndpoint = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_index_endpoint.IndexEndpoint: - r"""Updates an IndexEndpoint. - - Args: - request (google.cloud.aiplatform_v1beta1.types.UpdateIndexEndpointRequest): - The request object. Request message for - [IndexEndpointService.UpdateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.UpdateIndexEndpoint]. - index_endpoint (google.cloud.aiplatform_v1beta1.types.IndexEndpoint): - Required. The IndexEndpoint which - replaces the resource on the server. - - This corresponds to the ``index_endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the resource. See - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.IndexEndpoint: - Indexes are deployed into it. An - IndexEndpoint can have multiple - DeployedIndexes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([index_endpoint, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_endpoint_service.UpdateIndexEndpointRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_endpoint_service.UpdateIndexEndpointRequest): - request = index_endpoint_service.UpdateIndexEndpointRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if index_endpoint is not None: - request.index_endpoint = index_endpoint - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_index_endpoint] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("index_endpoint.name", request.index_endpoint.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
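
The update_mask above limits which fields the server touches; a sketch reusing `created` from the earlier create example (the `description` field is assumed here purely for illustration):

    from google.protobuf import field_mask_pb2

    created.description = "Serving endpoint for nearest-neighbor lookups"
    updated = client.update_index_endpoint(
        index_endpoint=created,
        update_mask=field_mask_pb2.FieldMask(paths=["description"]),  # only this field changes
    )
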
- return response - - def delete_index_endpoint(self, - request: index_endpoint_service.DeleteIndexEndpointRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes an IndexEndpoint. - - Args: - request (google.cloud.aiplatform_v1beta1.types.DeleteIndexEndpointRequest): - The request object. Request message for - [IndexEndpointService.DeleteIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeleteIndexEndpoint]. - name (str): - Required. The name of the IndexEndpoint resource to be - deleted. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_endpoint_service.DeleteIndexEndpointRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_endpoint_service.DeleteIndexEndpointRequest): - request = index_endpoint_service.DeleteIndexEndpointRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_index_endpoint] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
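
Deletion above is also a long-running operation, but its terminal result is Empty; a sketch:

    operation = client.delete_index_endpoint(name=created.name)
    operation.result()  # resolves to google.protobuf.empty_pb2.Empty on success
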
- return response - - def deploy_index(self, - request: index_endpoint_service.DeployIndexRequest = None, - *, - index_endpoint: str = None, - deployed_index: gca_index_endpoint.DeployedIndex = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deploys an Index into this IndexEndpoint, creating a - DeployedIndex within it. - Only non-empty Indexes can be deployed. - - Args: - request (google.cloud.aiplatform_v1beta1.types.DeployIndexRequest): - The request object. Request message for - [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. - index_endpoint (str): - Required. The name of the IndexEndpoint resource into - which to deploy an Index. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - - This corresponds to the ``index_endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex): - Required. The DeployedIndex to be - created within the IndexEndpoint. - - This corresponds to the ``deployed_index`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.DeployIndexResponse` - Response message for - [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([index_endpoint, deployed_index]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_endpoint_service.DeployIndexRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_endpoint_service.DeployIndexRequest): - request = index_endpoint_service.DeployIndexRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if index_endpoint is not None: - request.index_endpoint = index_endpoint - if deployed_index is not None: - request.deployed_index = deployed_index - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.deploy_index] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("index_endpoint", request.index_endpoint), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
- response = gac_operation.from_gapic( - response, - self._transport.operations_client, - index_endpoint_service.DeployIndexResponse, - metadata_type=index_endpoint_service.DeployIndexOperationMetadata, - ) - - # Done; return the response. - return response - - def undeploy_index(self, - request: index_endpoint_service.UndeployIndexRequest = None, - *, - index_endpoint: str = None, - deployed_index_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Undeploys an Index from an IndexEndpoint, removing a - DeployedIndex from it, and freeing all resources it's - using. - - Args: - request (google.cloud.aiplatform_v1beta1.types.UndeployIndexRequest): - The request object. Request message for - [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. - index_endpoint (str): - Required. The name of the IndexEndpoint resource from - which to undeploy an Index. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - - This corresponds to the ``index_endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_index_id (str): - Required. The ID of the DeployedIndex - to be undeployed from the IndexEndpoint. - - This corresponds to the ``deployed_index_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.UndeployIndexResponse` - Response message for - [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([index_endpoint, deployed_index_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_endpoint_service.UndeployIndexRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_endpoint_service.UndeployIndexRequest): - request = index_endpoint_service.UndeployIndexRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if index_endpoint is not None: - request.index_endpoint = index_endpoint - if deployed_index_id is not None: - request.deployed_index_id = deployed_index_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.undeploy_index] - - # Certain fields should be provided within the metadata header; - # add these here. 
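
Deploy and undeploy pair naturally; a sketch with hypothetical resource names (per the docstring above, the target Index must be non-empty):

    deployed = types.DeployedIndex(
        id="deployed_index_1",
        index="projects/my-project/locations/us-central1/indexes/my-index",
    )
    op = client.deploy_index(index_endpoint=created.name, deployed_index=deployed)
    op.result()  # DeployIndexResponse

    op = client.undeploy_index(index_endpoint=created.name, deployed_index_id="deployed_index_1")
    op.result()  # UndeployIndexResponse
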
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("index_endpoint", request.index_endpoint), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - index_endpoint_service.UndeployIndexResponse, - metadata_type=index_endpoint_service.UndeployIndexOperationMetadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "IndexEndpointServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/pagers.py deleted file mode 100644 index 203d843765..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/pagers.py +++ /dev/null @@ -1,141 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.cloud.aiplatform_v1beta1.types import index_endpoint -from google.cloud.aiplatform_v1beta1.types import index_endpoint_service - - -class ListIndexEndpointsPager: - """A pager for iterating through ``list_index_endpoints`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``index_endpoints`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListIndexEndpoints`` requests and continue to iterate - through the ``index_endpoints`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., index_endpoint_service.ListIndexEndpointsResponse], - request: index_endpoint_service.ListIndexEndpointsRequest, - response: index_endpoint_service.ListIndexEndpointsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse): - The initial response object. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = index_endpoint_service.ListIndexEndpointsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[index_endpoint_service.ListIndexEndpointsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[index_endpoint.IndexEndpoint]: - for page in self.pages: - yield from page.index_endpoints - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListIndexEndpointsAsyncPager: - """A pager for iterating through ``list_index_endpoints`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``index_endpoints`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListIndexEndpoints`` requests and continue to iterate - through the ``index_endpoints`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[index_endpoint_service.ListIndexEndpointsResponse]], - request: index_endpoint_service.ListIndexEndpointsRequest, - response: index_endpoint_service.ListIndexEndpointsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = index_endpoint_service.ListIndexEndpointsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[index_endpoint_service.ListIndexEndpointsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[index_endpoint.IndexEndpoint]: - async def async_generator(): - async for page in self.pages: - for response in page.index_endpoints: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/__init__.py deleted file mode 100644 index 42d3519efd..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import IndexEndpointServiceTransport -from .grpc import IndexEndpointServiceGrpcTransport -from .grpc_asyncio import IndexEndpointServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[IndexEndpointServiceTransport]] -_transport_registry['grpc'] = IndexEndpointServiceGrpcTransport -_transport_registry['grpc_asyncio'] = IndexEndpointServiceGrpcAsyncIOTransport - -__all__ = ( - 'IndexEndpointServiceTransport', - 'IndexEndpointServiceGrpcTransport', - 'IndexEndpointServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py deleted file mode 100644 index 53f8a3e391..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py +++ /dev/null @@ -1,261 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import index_endpoint -from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint -from google.cloud.aiplatform_v1beta1.types import index_endpoint_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class IndexEndpointServiceTransport(abc.ABC): - """Abstract transport class for IndexEndpointService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. 
- self._scopes = scopes
-
- # If no credentials are provided, then determine the appropriate
- # defaults.
- if credentials and credentials_file:
- raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
-
- if credentials_file is not None:
- credentials, _ = google.auth.load_credentials_from_file(
- credentials_file,
- **scopes_kwargs,
- quota_project_id=quota_project_id
- )
-
- elif credentials is None:
- credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
-
- # If the credentials are service account credentials, then always try to use self signed JWT.
- if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
- credentials = credentials.with_always_use_jwt_access(True)
-
- # Save the credentials.
- self._credentials = credentials
-
- # TODO(busunkim): This method is in the base transport
- # to avoid duplicating code across the transport classes. It
- # should be deleted once the minimum required version of google-auth is increased.
-
- # TODO: Remove this function once google-auth >= 1.25.0 is required
- @classmethod
- def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
- """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
-
- scopes_kwargs = {}
-
- if _GOOGLE_AUTH_VERSION and (
- packaging.version.parse(_GOOGLE_AUTH_VERSION)
- >= packaging.version.parse("1.25.0")
- ):
- scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
- else:
- scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
-
- return scopes_kwargs
-
- def _prep_wrapped_messages(self, client_info):
- # Precompute the wrapped methods.
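
The version shim above has exactly two outcomes; a standalone sketch that mirrors its logic with the version passed explicitly (this helper is illustrative, not part of the generated surface):

    import packaging.version

    def scopes_kwargs_for(auth_version: str, scopes, default_scopes):
        # google-auth >= 1.25.0 understands default_scopes, so user scopes and
        # library defaults travel separately; older versions get one merged value.
        if packaging.version.parse(auth_version) >= packaging.version.parse("1.25.0"):
            return {"scopes": scopes, "default_scopes": default_scopes}
        return {"scopes": scopes or default_scopes}

    assert scopes_kwargs_for("1.30.0", None, ("scope-a",)) == {
        "scopes": None, "default_scopes": ("scope-a",)}
    assert scopes_kwargs_for("1.20.0", None, ("scope-a",)) == {"scopes": ("scope-a",)}
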
- self._wrapped_methods = { - self.create_index_endpoint: gapic_v1.method.wrap_method( - self.create_index_endpoint, - default_timeout=5.0, - client_info=client_info, - ), - self.get_index_endpoint: gapic_v1.method.wrap_method( - self.get_index_endpoint, - default_timeout=5.0, - client_info=client_info, - ), - self.list_index_endpoints: gapic_v1.method.wrap_method( - self.list_index_endpoints, - default_timeout=5.0, - client_info=client_info, - ), - self.update_index_endpoint: gapic_v1.method.wrap_method( - self.update_index_endpoint, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_index_endpoint: gapic_v1.method.wrap_method( - self.delete_index_endpoint, - default_timeout=5.0, - client_info=client_info, - ), - self.deploy_index: gapic_v1.method.wrap_method( - self.deploy_index, - default_timeout=5.0, - client_info=client_info, - ), - self.undeploy_index: gapic_v1.method.wrap_method( - self.undeploy_index, - default_timeout=5.0, - client_info=client_info, - ), - } - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_index_endpoint(self) -> Callable[ - [index_endpoint_service.CreateIndexEndpointRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_index_endpoint(self) -> Callable[ - [index_endpoint_service.GetIndexEndpointRequest], - Union[ - index_endpoint.IndexEndpoint, - Awaitable[index_endpoint.IndexEndpoint] - ]]: - raise NotImplementedError() - - @property - def list_index_endpoints(self) -> Callable[ - [index_endpoint_service.ListIndexEndpointsRequest], - Union[ - index_endpoint_service.ListIndexEndpointsResponse, - Awaitable[index_endpoint_service.ListIndexEndpointsResponse] - ]]: - raise NotImplementedError() - - @property - def update_index_endpoint(self) -> Callable[ - [index_endpoint_service.UpdateIndexEndpointRequest], - Union[ - gca_index_endpoint.IndexEndpoint, - Awaitable[gca_index_endpoint.IndexEndpoint] - ]]: - raise NotImplementedError() - - @property - def delete_index_endpoint(self) -> Callable[ - [index_endpoint_service.DeleteIndexEndpointRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def deploy_index(self) -> Callable[ - [index_endpoint_service.DeployIndexRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def undeploy_index(self) -> Callable[ - [index_endpoint_service.UndeployIndexRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'IndexEndpointServiceTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py deleted file mode 100644 index 714fe8073c..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py +++ /dev/null @@ -1,433 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
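
Every RPC above is registered with a 5.0-second default timeout via gapic_v1.method.wrap_method; an explicit per-call value still wins. A usage sketch (reusing names from the earlier examples):

    # Uses the wrapped default (timeout=5.0) from _prep_wrapped_messages.
    client.get_index_endpoint(name=created.name)

    # An explicit per-call timeout overrides the wrapped default.
    client.get_index_endpoint(name=created.name, timeout=30.0)
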
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import warnings
-from typing import Callable, Dict, Optional, Sequence, Tuple, Union
-
-from google.api_core import grpc_helpers # type: ignore
-from google.api_core import operations_v1 # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-import google.auth # type: ignore
-from google.auth import credentials as ga_credentials # type: ignore
-from google.auth.transport.grpc import SslCredentials # type: ignore
-
-import grpc # type: ignore
-
-from google.cloud.aiplatform_v1beta1.types import index_endpoint
-from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint
-from google.cloud.aiplatform_v1beta1.types import index_endpoint_service
-from google.longrunning import operations_pb2 # type: ignore
-from .base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO
-
-
-class IndexEndpointServiceGrpcTransport(IndexEndpointServiceTransport):
- """gRPC backend transport for IndexEndpointService.
-
- A service for managing Vertex AI's IndexEndpoints.
-
- This class defines the same methods as the primary client, so the
- primary client can load the underlying transport implementation
- and call it.
-
- It sends protocol buffers over the wire using gRPC (which is built on
- top of HTTP/2); the ``grpcio`` package must be installed.
- """
- _stubs: Dict[str, Callable]
-
- def __init__(self, *,
- host: str = 'aiplatform.googleapis.com',
- credentials: ga_credentials.Credentials = None,
- credentials_file: str = None,
- scopes: Sequence[str] = None,
- channel: grpc.Channel = None,
- api_mtls_endpoint: str = None,
- client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
- ssl_channel_credentials: grpc.ChannelCredentials = None,
- client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
- quota_project_id: Optional[str] = None,
- client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
- always_use_jwt_access: Optional[bool] = False,
- ) -> None:
- """Instantiate the transport.
-
- Args:
- host (Optional[str]):
- The hostname to connect to.
- credentials (Optional[google.auth.credentials.Credentials]): The
- authorization credentials to attach to requests. These
- credentials identify the application to the service; if none
- are specified, the client will attempt to ascertain the
- credentials from the environment.
- This argument is ignored if ``channel`` is provided.
- credentials_file (Optional[str]): A file with credentials that can
- be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is ignored if ``channel`` is provided.
- scopes (Optional[Sequence[str]]): A list of scopes. This argument is
- ignored if ``channel`` is provided.
- channel (Optional[grpc.Channel]): A ``Channel`` instance through
- which to make calls.
- api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
- If provided, it overrides the ``host`` argument and tries to create
- a mutual TLS channel with client SSL credentials from
- ``client_cert_source`` or application default SSL credentials.
- client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
- Deprecated.
A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. 
This must be done after self._grpc_channel exists
- self._prep_wrapped_messages(client_info)
-
- @classmethod
- def create_channel(cls,
- host: str = 'aiplatform.googleapis.com',
- credentials: ga_credentials.Credentials = None,
- credentials_file: str = None,
- scopes: Optional[Sequence[str]] = None,
- quota_project_id: Optional[str] = None,
- **kwargs) -> grpc.Channel:
- """Create and return a gRPC channel object.
- Args:
- host (Optional[str]): The host for the channel to use.
- credentials (Optional[~.Credentials]): The
- authorization credentials to attach to requests. These
- credentials identify this application to the service. If
- none are specified, the client will attempt to ascertain
- the credentials from the environment.
- credentials_file (Optional[str]): A file with credentials that can
- be loaded with :func:`google.auth.load_credentials_from_file`.
- This argument is mutually exclusive with credentials.
- scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
- service. These are only used when credentials are not specified and
- are passed to :func:`google.auth.default`.
- quota_project_id (Optional[str]): An optional project to use for billing
- and quota.
- kwargs (Optional[dict]): Keyword arguments, which are passed to the
- channel creation.
- Returns:
- grpc.Channel: A gRPC channel object.
-
- Raises:
- google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
- and ``credentials_file`` are passed.
- """
-
- return grpc_helpers.create_channel(
- host,
- credentials=credentials,
- credentials_file=credentials_file,
- quota_project_id=quota_project_id,
- default_scopes=cls.AUTH_SCOPES,
- scopes=scopes,
- default_host=cls.DEFAULT_HOST,
- **kwargs
- )
-
- @property
- def grpc_channel(self) -> grpc.Channel:
- """Return the channel designed to connect to this service.
- """
- return self._grpc_channel
-
- @property
- def operations_client(self) -> operations_v1.OperationsClient:
- """Create the client designed to process long-running operations.
-
- This property caches on the instance; repeated calls return the same
- client.
- """
- # Sanity check: Only create a new client if we do not already have one.
- if self._operations_client is None:
- self._operations_client = operations_v1.OperationsClient(
- self.grpc_channel
- )
-
- # Return the client from cache.
- return self._operations_client
-
- @property
- def create_index_endpoint(self) -> Callable[
- [index_endpoint_service.CreateIndexEndpointRequest],
- operations_pb2.Operation]:
- r"""Return a callable for the create index endpoint method over gRPC.
-
- Creates an IndexEndpoint.
-
- Returns:
- Callable[[~.CreateIndexEndpointRequest],
- ~.Operation]:
- A function that, when called, will call the underlying RPC
- on the server.
- """
- # Generate a "stub function" on-the-fly which will actually make
- # the request.
- # gRPC handles serialization and deserialization, so we just need
- # to pass in the functions for each.
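
The pieces above compose: a channel from create_channel, a transport around it, and a client on top. A sketch (Application Default Credentials assumed for channel creation; note that an explicitly passed channel makes the transport ignore its credentials arguments, per the constructor above):

    channel = IndexEndpointServiceGrpcTransport.create_channel(
        "aiplatform.googleapis.com",
        scopes=None,  # AUTH_SCOPES is supplied via default_scopes
    )
    transport = IndexEndpointServiceGrpcTransport(channel=channel)
    client = IndexEndpointServiceClient(transport=transport)
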
- if 'create_index_endpoint' not in self._stubs: - self._stubs['create_index_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/CreateIndexEndpoint', - request_serializer=index_endpoint_service.CreateIndexEndpointRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_index_endpoint'] - - @property - def get_index_endpoint(self) -> Callable[ - [index_endpoint_service.GetIndexEndpointRequest], - index_endpoint.IndexEndpoint]: - r"""Return a callable for the get index endpoint method over gRPC. - - Gets an IndexEndpoint. - - Returns: - Callable[[~.GetIndexEndpointRequest], - ~.IndexEndpoint]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_index_endpoint' not in self._stubs: - self._stubs['get_index_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/GetIndexEndpoint', - request_serializer=index_endpoint_service.GetIndexEndpointRequest.serialize, - response_deserializer=index_endpoint.IndexEndpoint.deserialize, - ) - return self._stubs['get_index_endpoint'] - - @property - def list_index_endpoints(self) -> Callable[ - [index_endpoint_service.ListIndexEndpointsRequest], - index_endpoint_service.ListIndexEndpointsResponse]: - r"""Return a callable for the list index endpoints method over gRPC. - - Lists IndexEndpoints in a Location. - - Returns: - Callable[[~.ListIndexEndpointsRequest], - ~.ListIndexEndpointsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_index_endpoints' not in self._stubs: - self._stubs['list_index_endpoints'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/ListIndexEndpoints', - request_serializer=index_endpoint_service.ListIndexEndpointsRequest.serialize, - response_deserializer=index_endpoint_service.ListIndexEndpointsResponse.deserialize, - ) - return self._stubs['list_index_endpoints'] - - @property - def update_index_endpoint(self) -> Callable[ - [index_endpoint_service.UpdateIndexEndpointRequest], - gca_index_endpoint.IndexEndpoint]: - r"""Return a callable for the update index endpoint method over gRPC. - - Updates an IndexEndpoint. - - Returns: - Callable[[~.UpdateIndexEndpointRequest], - ~.IndexEndpoint]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_index_endpoint' not in self._stubs: - self._stubs['update_index_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/UpdateIndexEndpoint', - request_serializer=index_endpoint_service.UpdateIndexEndpointRequest.serialize, - response_deserializer=gca_index_endpoint.IndexEndpoint.deserialize, - ) - return self._stubs['update_index_endpoint'] - - @property - def delete_index_endpoint(self) -> Callable[ - [index_endpoint_service.DeleteIndexEndpointRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete index endpoint method over gRPC. - - Deletes an IndexEndpoint. - - Returns: - Callable[[~.DeleteIndexEndpointRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_index_endpoint' not in self._stubs: - self._stubs['delete_index_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/DeleteIndexEndpoint', - request_serializer=index_endpoint_service.DeleteIndexEndpointRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_index_endpoint'] - - @property - def deploy_index(self) -> Callable[ - [index_endpoint_service.DeployIndexRequest], - operations_pb2.Operation]: - r"""Return a callable for the deploy index method over gRPC. - - Deploys an Index into this IndexEndpoint, creating a - DeployedIndex within it. - Only non-empty Indexes can be deployed. - - Returns: - Callable[[~.DeployIndexRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'deploy_index' not in self._stubs: - self._stubs['deploy_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/DeployIndex', - request_serializer=index_endpoint_service.DeployIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['deploy_index'] - - @property - def undeploy_index(self) -> Callable[ - [index_endpoint_service.UndeployIndexRequest], - operations_pb2.Operation]: - r"""Return a callable for the undeploy index method over gRPC. - - Undeploys an Index from an IndexEndpoint, removing a - DeployedIndex from it, and freeing all resources it's - using. - - Returns: - Callable[[~.UndeployIndexRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
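# A minimal sketch of the long-running pattern behind deploy_index and
# undeploy_index: the callable returns a raw longrunning Operation proto,
# which the operations_client defined earlier can poll. Continues the
# sketches above; required request fields (e.g. deployed_index) are elided
# and the resource name is hypothetical.
deploy_request = index_endpoint_service.DeployIndexRequest(
    index_endpoint="projects/my-project/locations/us-central1/indexEndpoints/123",
)
operation = transport.deploy_index(deploy_request)
status = transport.operations_client.get_operation(operation.name)  # poll progress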
- if 'undeploy_index' not in self._stubs: - self._stubs['undeploy_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/UndeployIndex', - request_serializer=index_endpoint_service.UndeployIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['undeploy_index'] - - -__all__ = ( - 'IndexEndpointServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py deleted file mode 100644 index ba6a67a554..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,437 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1beta1.types import index_endpoint -from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint -from google.cloud.aiplatform_v1beta1.types import index_endpoint_service -from google.longrunning import operations_pb2 # type: ignore -from .base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import IndexEndpointServiceGrpcTransport - - -class IndexEndpointServiceGrpcAsyncIOTransport(IndexEndpointServiceTransport): - """gRPC AsyncIO backend transport for IndexEndpointService. - - A service for managing Vertex AI's IndexEndpoints. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. 
If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): An optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): An optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota.
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. 
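# A minimal sketch of the lazy-caching idiom used by this property and by
# grpc_channel above: build the expensive helper on first access, then hand
# back the stored instance on every later access. Purely illustrative.
class _CachedHelperExample:
    def __init__(self) -> None:
        self._helper = None

    @property
    def helper(self) -> object:
        if self._helper is None:
            self._helper = object()  # stand-in for an expensive client
        return self._helper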
- return self._operations_client - - @property - def create_index_endpoint(self) -> Callable[ - [index_endpoint_service.CreateIndexEndpointRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create index endpoint method over gRPC. - - Creates an IndexEndpoint. - - Returns: - Callable[[~.CreateIndexEndpointRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_index_endpoint' not in self._stubs: - self._stubs['create_index_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/CreateIndexEndpoint', - request_serializer=index_endpoint_service.CreateIndexEndpointRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_index_endpoint'] - - @property - def get_index_endpoint(self) -> Callable[ - [index_endpoint_service.GetIndexEndpointRequest], - Awaitable[index_endpoint.IndexEndpoint]]: - r"""Return a callable for the get index endpoint method over gRPC. - - Gets an IndexEndpoint. - - Returns: - Callable[[~.GetIndexEndpointRequest], - Awaitable[~.IndexEndpoint]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_index_endpoint' not in self._stubs: - self._stubs['get_index_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/GetIndexEndpoint', - request_serializer=index_endpoint_service.GetIndexEndpointRequest.serialize, - response_deserializer=index_endpoint.IndexEndpoint.deserialize, - ) - return self._stubs['get_index_endpoint'] - - @property - def list_index_endpoints(self) -> Callable[ - [index_endpoint_service.ListIndexEndpointsRequest], - Awaitable[index_endpoint_service.ListIndexEndpointsResponse]]: - r"""Return a callable for the list index endpoints method over gRPC. - - Lists IndexEndpoints in a Location. - - Returns: - Callable[[~.ListIndexEndpointsRequest], - Awaitable[~.ListIndexEndpointsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_index_endpoints' not in self._stubs: - self._stubs['list_index_endpoints'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/ListIndexEndpoints', - request_serializer=index_endpoint_service.ListIndexEndpointsRequest.serialize, - response_deserializer=index_endpoint_service.ListIndexEndpointsResponse.deserialize, - ) - return self._stubs['list_index_endpoints'] - - @property - def update_index_endpoint(self) -> Callable[ - [index_endpoint_service.UpdateIndexEndpointRequest], - Awaitable[gca_index_endpoint.IndexEndpoint]]: - r"""Return a callable for the update index endpoint method over gRPC. - - Updates an IndexEndpoint. - - Returns: - Callable[[~.UpdateIndexEndpointRequest], - Awaitable[~.IndexEndpoint]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_index_endpoint' not in self._stubs: - self._stubs['update_index_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/UpdateIndexEndpoint', - request_serializer=index_endpoint_service.UpdateIndexEndpointRequest.serialize, - response_deserializer=gca_index_endpoint.IndexEndpoint.deserialize, - ) - return self._stubs['update_index_endpoint'] - - @property - def delete_index_endpoint(self) -> Callable[ - [index_endpoint_service.DeleteIndexEndpointRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete index endpoint method over gRPC. - - Deletes an IndexEndpoint. - - Returns: - Callable[[~.DeleteIndexEndpointRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_index_endpoint' not in self._stubs: - self._stubs['delete_index_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/DeleteIndexEndpoint', - request_serializer=index_endpoint_service.DeleteIndexEndpointRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_index_endpoint'] - - @property - def deploy_index(self) -> Callable[ - [index_endpoint_service.DeployIndexRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the deploy index method over gRPC. - - Deploys an Index into this IndexEndpoint, creating a - DeployedIndex within it. - Only non-empty Indexes can be deployed. - - Returns: - Callable[[~.DeployIndexRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'deploy_index' not in self._stubs: - self._stubs['deploy_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/DeployIndex', - request_serializer=index_endpoint_service.DeployIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['deploy_index'] - - @property - def undeploy_index(self) -> Callable[ - [index_endpoint_service.UndeployIndexRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the undeploy index method over gRPC. - - Undeploys an Index from an IndexEndpoint, removing a - DeployedIndex from it, and freeing all resources it's - using. - - Returns: - Callable[[~.UndeployIndexRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
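# A minimal sketch of calling the async variants above: each property
# returns a coroutine-producing callable, so RPCs are awaited inside an
# event loop. The `transport` and `undeploy_request` objects are assumed to
# exist (see the earlier sketches).
import asyncio

async def _undeploy(transport, undeploy_request):
    # Awaitable stub call; resolves to a longrunning Operation proto.
    return await transport.undeploy_index(undeploy_request)

# asyncio.run(_undeploy(transport, undeploy_request))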
- if 'undeploy_index' not in self._stubs: - self._stubs['undeploy_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/UndeployIndex', - request_serializer=index_endpoint_service.UndeployIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['undeploy_index'] - - -__all__ = ( - 'IndexEndpointServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/__init__.py deleted file mode 100644 index d2a09db9f1..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import IndexServiceClient -from .async_client import IndexServiceAsyncClient - -__all__ = ( - 'IndexServiceClient', - 'IndexServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py deleted file mode 100644 index eabfafcc6e..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py +++ /dev/null @@ -1,633 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
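# A minimal sketch of what the deleted __init__.py above provided: both the
# sync and async clients were importable straight from the service package.
from google.cloud.aiplatform_v1beta1.services.index_service import (
    IndexServiceClient,
    IndexServiceAsyncClient,
)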
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.index_service import pagers -from google.cloud.aiplatform_v1beta1.types import deployed_index_ref -from google.cloud.aiplatform_v1beta1.types import index -from google.cloud.aiplatform_v1beta1.types import index as gca_index -from google.cloud.aiplatform_v1beta1.types import index_service -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import IndexServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import IndexServiceGrpcAsyncIOTransport -from .client import IndexServiceClient - - -class IndexServiceAsyncClient: - """A service for creating and managing Vertex AI's Index - resources. - """ - - _client: IndexServiceClient - - DEFAULT_ENDPOINT = IndexServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = IndexServiceClient.DEFAULT_MTLS_ENDPOINT - - index_path = staticmethod(IndexServiceClient.index_path) - parse_index_path = staticmethod(IndexServiceClient.parse_index_path) - index_endpoint_path = staticmethod(IndexServiceClient.index_endpoint_path) - parse_index_endpoint_path = staticmethod(IndexServiceClient.parse_index_endpoint_path) - common_billing_account_path = staticmethod(IndexServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(IndexServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(IndexServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(IndexServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(IndexServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(IndexServiceClient.parse_common_organization_path) - common_project_path = staticmethod(IndexServiceClient.common_project_path) - parse_common_project_path = staticmethod(IndexServiceClient.parse_common_project_path) - common_location_path = staticmethod(IndexServiceClient.common_location_path) - parse_common_location_path = staticmethod(IndexServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - IndexServiceAsyncClient: The constructed client. 
- """ - return IndexServiceClient.from_service_account_info.__func__(IndexServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - IndexServiceAsyncClient: The constructed client. - """ - return IndexServiceClient.from_service_account_file.__func__(IndexServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> IndexServiceTransport: - """Returns the transport used by the client instance. - - Returns: - IndexServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(IndexServiceClient).get_transport_class, type(IndexServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, IndexServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the index service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.IndexServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = IndexServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_index(self, - request: index_service.CreateIndexRequest = None, - *, - parent: str = None, - index: gca_index.Index = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates an Index. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.CreateIndexRequest`): - The request object. 
Request message for - [IndexService.CreateIndex][google.cloud.aiplatform.v1beta1.IndexService.CreateIndex]. - parent (:class:`str`): - Required. The resource name of the Location to create - the Index in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - index (:class:`google.cloud.aiplatform_v1beta1.types.Index`): - Required. The Index to create. - This corresponds to the ``index`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Index` A representation of a collection of database items organized in a way that - allows for approximate nearest neighbor (a.k.a ANN) - algorithms search. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, index]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_service.CreateIndexRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if index is not None: - request.index = index - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_index, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_index.Index, - metadata_type=index_service.CreateIndexOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_index(self, - request: index_service.GetIndexRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> index.Index: - r"""Gets an Index. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetIndexRequest`): - The request object. Request message for - [IndexService.GetIndex][google.cloud.aiplatform.v1beta1.IndexService.GetIndex] - name (:class:`str`): - Required. The name of the Index resource. Format: - ``projects/{project}/locations/{location}/indexes/{index}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Index: - A representation of a collection of - database items organized in a way that - allows for approximate nearest neighbor - (a.k.a ANN) algorithms search. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_service.GetIndexRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_index, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_indexes(self, - request: index_service.ListIndexesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListIndexesAsyncPager: - r"""Lists Indexes in a Location. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListIndexesRequest`): - The request object. Request message for - [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]. - parent (:class:`str`): - Required. The resource name of the Location from which - to list the Indexes. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.index_service.pagers.ListIndexesAsyncPager: - Response message for - [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
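# A minimal sketch of consuming the AsyncPager described above: iterating
# with `async for` fetches further pages on demand. The client instance and
# parent value are assumed.
async def _print_index_names(client, parent: str) -> None:
    pager = await client.list_indexes(parent=parent)
    async for result in pager:  # resolves additional pages automatically
        print(result.name)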
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_service.ListIndexesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_indexes, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListIndexesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_index(self, - request: index_service.UpdateIndexRequest = None, - *, - index: gca_index.Index = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Updates an Index. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateIndexRequest`): - The request object. Request message for - [IndexService.UpdateIndex][google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex]. - index (:class:`google.cloud.aiplatform_v1beta1.types.Index`): - Required. The Index which updates the - resource on the server. - - This corresponds to the ``index`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - The update mask applies to the resource. For the - ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Index` A representation of a collection of database items organized in a way that - allows for approximate nearest neighbor (a.k.a ANN) - algorithms search. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
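# A minimal sketch of the update_mask semantics described above: only the
# fields named in the FieldMask are overwritten on the server. The field
# path shown is illustrative, and `client`/`my_index` are assumed.
from google.protobuf import field_mask_pb2

mask = field_mask_pb2.FieldMask(paths=["display_name"])
# operation = await client.update_index(index=my_index, update_mask=mask)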
- has_flattened_params = any([index, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_service.UpdateIndexRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if index is not None: - request.index = index - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_index, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("index.name", request.index.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_index.Index, - metadata_type=index_service.UpdateIndexOperationMetadata, - ) - - # Done; return the response. - return response - - async def delete_index(self, - request: index_service.DeleteIndexRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes an Index. An Index can only be deleted when all its - [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes] - had been undeployed. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteIndexRequest`): - The request object. Request message for - [IndexService.DeleteIndex][google.cloud.aiplatform.v1beta1.IndexService.DeleteIndex]. - name (:class:`str`): - Required. The name of the Index resource to be deleted. - Format: - ``projects/{project}/locations/{location}/indexes/{index}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
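# A minimal sketch of waiting on the delete LRO described above: the result
# type is Empty, and awaiting result() raises if the operation failed. The
# client instance is assumed.
async def _delete_index(client, name: str) -> None:
    operation = await client.delete_index(name=name)
    await operation.result()  # blocks until the server-side deletion finishes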
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_service.DeleteIndexRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_index, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "IndexServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/client.py deleted file mode 100644 index 9e2e612c81..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/client.py +++ /dev/null @@ -1,829 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
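# A minimal sketch of the version-detection idiom in the DEFAULT_CLIENT_INFO
# block above: read the installed distribution's version for the user-agent
# string, and fall back to a blank ClientInfo when the metadata is missing.
import pkg_resources
from google.api_core import gapic_v1

try:
    _version = pkg_resources.get_distribution("google-cloud-aiplatform").version
except pkg_resources.DistributionNotFound:
    _version = None  # not installed as a distribution; send no gapic version
client_info = gapic_v1.client_info.ClientInfo(gapic_version=_version)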
-# -from collections import OrderedDict -from distutils import util -import os -import re -from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.index_service import pagers -from google.cloud.aiplatform_v1beta1.types import deployed_index_ref -from google.cloud.aiplatform_v1beta1.types import index -from google.cloud.aiplatform_v1beta1.types import index as gca_index -from google.cloud.aiplatform_v1beta1.types import index_service -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import IndexServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import IndexServiceGrpcTransport -from .transports.grpc_asyncio import IndexServiceGrpcAsyncIOTransport - - -class IndexServiceClientMeta(type): - """Metaclass for the IndexService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[IndexServiceTransport]] - _transport_registry["grpc"] = IndexServiceGrpcTransport - _transport_registry["grpc_asyncio"] = IndexServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[IndexServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class IndexServiceClient(metaclass=IndexServiceClientMeta): - """A service for creating and managing Vertex AI's Index - resources. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - IndexServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - IndexServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> IndexServiceTransport: - """Returns the transport used by the client instance. - - Returns: - IndexServiceTransport: The transport used by the client - instance.
""" - return self._transport - - @staticmethod - def index_path(project: str,location: str,index: str,) -> str: - """Returns a fully-qualified index string.""" - return "projects/{project}/locations/{location}/indexes/{index}".format(project=project, location=location, index=index, ) - - @staticmethod - def parse_index_path(path: str) -> Dict[str,str]: - """Parses an index path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/indexes/(?P<index>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def index_endpoint_path(project: str,location: str,index_endpoint: str,) -> str: - """Returns a fully-qualified index_endpoint string.""" - return "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format(project=project, location=location, index_endpoint=index_endpoint, ) - - @staticmethod - def parse_index_endpoint_path(path: str) -> Dict[str,str]: - """Parses an index_endpoint path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/indexEndpoints/(?P<index_endpoint>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P<folder>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse an organization path into its component segments.""" - m = re.match(r"^organizations/(?P<organization>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, IndexServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> 
None: - """Instantiates the index service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, IndexServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, IndexServiceTransport): - # transport is a IndexServiceTransport instance. 
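# A minimal sketch of steering the endpoint logic documented above from the
# environment; per that logic, any GOOGLE_API_USE_MTLS_ENDPOINT value other
# than "never"/"auto"/"always" makes the constructor raise
# MutualTLSChannelError. Set before constructing the client.
import os

os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "never"       # force the regular endpoint
os.environ["GOOGLE_API_USE_CLIENT_CERTIFICATE"] = "false"  # do not send a client cert
# client = IndexServiceClient()  # would now target aiplatform.googleapis.com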
- if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - ) - - def create_index(self, - request: index_service.CreateIndexRequest = None, - *, - parent: str = None, - index: gca_index.Index = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates an Index. - - Args: - request (google.cloud.aiplatform_v1beta1.types.CreateIndexRequest): - The request object. Request message for - [IndexService.CreateIndex][google.cloud.aiplatform.v1beta1.IndexService.CreateIndex]. - parent (str): - Required. The resource name of the Location to create - the Index in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - index (google.cloud.aiplatform_v1beta1.types.Index): - Required. The Index to create. - This corresponds to the ``index`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Index` A representation of a collection of database items organized in a way that - allows for approximate nearest neighbor (a.k.a. ANN) - search algorithms. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, index]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in an index_service.CreateIndexRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_service.CreateIndexRequest): - request = index_service.CreateIndexRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if index is not None: - request.index = index - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_index] - - # Certain fields should be provided within the metadata header; - # add these here. 
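# (End-to-end sketch of this method's calling convention, with placeholder
# resource names — callers pass either the flattened fields or a request
# object, never both:
#
#     client = IndexServiceClient()
#     operation = client.create_index(
#         parent="projects/my-project/locations/us-central1",
#         index=gca_index.Index(display_name="my-index"),
#     )
#     created_index = operation.result()  # blocks on the long-running operation
#
# Passing ``request=...`` together with ``parent``/``index`` raises the
# ValueError shown above.)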
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_index.Index, - metadata_type=index_service.CreateIndexOperationMetadata, - ) - - # Done; return the response. - return response - - def get_index(self, - request: index_service.GetIndexRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> index.Index: - r"""Gets an Index. - - Args: - request (google.cloud.aiplatform_v1beta1.types.GetIndexRequest): - The request object. Request message for - [IndexService.GetIndex][google.cloud.aiplatform.v1beta1.IndexService.GetIndex] - name (str): - Required. The name of the Index resource. Format: - ``projects/{project}/locations/{location}/indexes/{index}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Index: - A representation of a collection of - database items organized in a way that - allows for approximate nearest neighbor - (a.k.a. ANN) search algorithms. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in an index_service.GetIndexRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_service.GetIndexRequest): - request = index_service.GetIndexRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_index] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_indexes(self, - request: index_service.ListIndexesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListIndexesPager: - r"""Lists Indexes in a Location. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListIndexesRequest): - The request object. 
Request message for - [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]. - parent (str): - Required. The resource name of the Location from which - to list the Indexes. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.index_service.pagers.ListIndexesPager: - Response message for - [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in an index_service.ListIndexesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_service.ListIndexesRequest): - request = index_service.ListIndexesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_indexes] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListIndexesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_index(self, - request: index_service.UpdateIndexRequest = None, - *, - index: gca_index.Index = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Updates an Index. - - Args: - request (google.cloud.aiplatform_v1beta1.types.UpdateIndexRequest): - The request object. Request message for - [IndexService.UpdateIndex][google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex]. - index (google.cloud.aiplatform_v1beta1.types.Index): - Required. The Index which updates the - resource on the server. - - This corresponds to the ``index`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - The update mask applies to the resource. 
For the - ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Index` A representation of a collection of database items organized in a way that - allows for approximate nearest neighbor (a.k.a. ANN) - search algorithms. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([index, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in an index_service.UpdateIndexRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_service.UpdateIndexRequest): - request = index_service.UpdateIndexRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if index is not None: - request.index = index - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_index] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("index.name", request.index.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_index.Index, - metadata_type=index_service.UpdateIndexOperationMetadata, - ) - - # Done; return the response. - return response - - def delete_index(self, - request: index_service.DeleteIndexRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes an Index. An Index can only be deleted when all its - [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes] - have been undeployed. - - Args: - request (google.cloud.aiplatform_v1beta1.types.DeleteIndexRequest): - The request object. Request message for - [IndexService.DeleteIndex][google.cloud.aiplatform.v1beta1.IndexService.DeleteIndex]. - name (str): - Required. The name of the Index resource to be deleted. - Format: - ``projects/{project}/locations/{location}/indexes/{index}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is an empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in an index_service.DeleteIndexRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_service.DeleteIndexRequest): - request = index_service.DeleteIndexRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_index] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "IndexServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/pagers.py deleted file mode 100644 index e4e1d47ada..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/pagers.py +++ /dev/null @@ -1,141 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.cloud.aiplatform_v1beta1.types import index -from google.cloud.aiplatform_v1beta1.types import index_service - - -class ListIndexesPager: - """A pager for iterating through ``list_indexes`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListIndexesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``indexes`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListIndexes`` requests and continue to iterate - through the ``indexes`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListIndexesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., index_service.ListIndexesResponse], - request: index_service.ListIndexesRequest, - response: index_service.ListIndexesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListIndexesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListIndexesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = index_service.ListIndexesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[index_service.ListIndexesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[index.Index]: - for page in self.pages: - yield from page.indexes - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListIndexesAsyncPager: - """A pager for iterating through ``list_indexes`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListIndexesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``indexes`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListIndexes`` requests and continue to iterate - through the ``indexes`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListIndexesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[index_service.ListIndexesResponse]], - request: index_service.ListIndexesRequest, - response: index_service.ListIndexesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. 
- request (google.cloud.aiplatform_v1beta1.types.ListIndexesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListIndexesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = index_service.ListIndexesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[index_service.ListIndexesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[index.Index]: - async def async_generator(): - async for page in self.pages: - for response in page.indexes: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/__init__.py deleted file mode 100644 index 2f263f2fb8..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import IndexServiceTransport -from .grpc import IndexServiceGrpcTransport -from .grpc_asyncio import IndexServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[IndexServiceTransport]] -_transport_registry['grpc'] = IndexServiceGrpcTransport -_transport_registry['grpc_asyncio'] = IndexServiceGrpcAsyncIOTransport - -__all__ = ( - 'IndexServiceTransport', - 'IndexServiceGrpcTransport', - 'IndexServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py deleted file mode 100644 index 3d126acabe..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py +++ /dev/null @@ -1,232 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import index -from google.cloud.aiplatform_v1beta1.types import index_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class IndexServiceTransport(abc.ABC): - """Abstract transport class for IndexService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. 
Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # TODO(busunkim): This method is in the base transport - # to avoid duplicating code across the transport classes. These functions - # should be deleted once the minimum required version of google-auth is increased. - - # TODO: Remove this function once google-auth >= 1.25.0 is required - @classmethod - def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: - """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" - - scopes_kwargs = {} - - if _GOOGLE_AUTH_VERSION and ( - packaging.version.parse(_GOOGLE_AUTH_VERSION) - >= packaging.version.parse("1.25.0") - ): - scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} - else: - scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} - - return scopes_kwargs - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
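# (Sketch of what ``gapic_v1.method.wrap_method`` contributes to each entry
# below: it returns a callable that applies the default timeout/retry policy
# and attaches the ``client_info`` user-agent metadata, e.g.
#
#     wrapped = gapic_v1.method.wrap_method(
#         self.get_index, default_timeout=5.0, client_info=client_info,
#     )
#     wrapped(request)  # uses the 5.0 s default timeout unless the caller overrides it
#
# which is why the client methods dispatch through
# ``self._transport._wrapped_methods[...]`` rather than calling stubs directly.)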
- self._wrapped_methods = { - self.create_index: gapic_v1.method.wrap_method( - self.create_index, - default_timeout=5.0, - client_info=client_info, - ), - self.get_index: gapic_v1.method.wrap_method( - self.get_index, - default_timeout=5.0, - client_info=client_info, - ), - self.list_indexes: gapic_v1.method.wrap_method( - self.list_indexes, - default_timeout=5.0, - client_info=client_info, - ), - self.update_index: gapic_v1.method.wrap_method( - self.update_index, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_index: gapic_v1.method.wrap_method( - self.delete_index, - default_timeout=5.0, - client_info=client_info, - ), - } - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_index(self) -> Callable[ - [index_service.CreateIndexRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_index(self) -> Callable[ - [index_service.GetIndexRequest], - Union[ - index.Index, - Awaitable[index.Index] - ]]: - raise NotImplementedError() - - @property - def list_indexes(self) -> Callable[ - [index_service.ListIndexesRequest], - Union[ - index_service.ListIndexesResponse, - Awaitable[index_service.ListIndexesResponse] - ]]: - raise NotImplementedError() - - @property - def update_index(self) -> Callable[ - [index_service.UpdateIndexRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def delete_index(self) -> Callable[ - [index_service.DeleteIndexRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'IndexServiceTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py deleted file mode 100644 index adfe4346db..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py +++ /dev/null @@ -1,379 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.api_core import gapic_v1 # type: ignore -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1beta1.types import index -from google.cloud.aiplatform_v1beta1.types import index_service -from google.longrunning import operations_pb2 # type: ignore -from .base import IndexServiceTransport, DEFAULT_CLIENT_INFO - - -class IndexServiceGrpcTransport(IndexServiceTransport): - """gRPC backend transport for IndexService. - - A service for creating and managing Vertex AI's Index - resources. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. 
It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): An optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_index(self) -> Callable[ - [index_service.CreateIndexRequest], - operations_pb2.Operation]: - r"""Return a callable for the create index method over gRPC. - - Creates an Index. - - Returns: - Callable[[~.CreateIndexRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_index' not in self._stubs: - self._stubs['create_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexService/CreateIndex', - request_serializer=index_service.CreateIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_index'] - - @property - def get_index(self) -> Callable[ - [index_service.GetIndexRequest], - index.Index]: - r"""Return a callable for the get index method over gRPC. - - Gets an Index. - - Returns: - Callable[[~.GetIndexRequest], - ~.Index]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_index' not in self._stubs: - self._stubs['get_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexService/GetIndex', - request_serializer=index_service.GetIndexRequest.serialize, - response_deserializer=index.Index.deserialize, - ) - return self._stubs['get_index'] - - @property - def list_indexes(self) -> Callable[ - [index_service.ListIndexesRequest], - index_service.ListIndexesResponse]: - r"""Return a callable for the list indexes method over gRPC. 
- - Lists Indexes in a Location. - - Returns: - Callable[[~.ListIndexesRequest], - ~.ListIndexesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_indexes' not in self._stubs: - self._stubs['list_indexes'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexService/ListIndexes', - request_serializer=index_service.ListIndexesRequest.serialize, - response_deserializer=index_service.ListIndexesResponse.deserialize, - ) - return self._stubs['list_indexes'] - - @property - def update_index(self) -> Callable[ - [index_service.UpdateIndexRequest], - operations_pb2.Operation]: - r"""Return a callable for the update index method over gRPC. - - Updates an Index. - - Returns: - Callable[[~.UpdateIndexRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_index' not in self._stubs: - self._stubs['update_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexService/UpdateIndex', - request_serializer=index_service.UpdateIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_index'] - - @property - def delete_index(self) -> Callable[ - [index_service.DeleteIndexRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete index method over gRPC. - - Deletes an Index. An Index can only be deleted when all its - [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes] - have been undeployed. - - Returns: - Callable[[~.DeleteIndexRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_index' not in self._stubs: - self._stubs['delete_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexService/DeleteIndex', - request_serializer=index_service.DeleteIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_index'] - - -__all__ = ( - 'IndexServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py deleted file mode 100644 index 1ead81529f..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,383 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1beta1.types import index -from google.cloud.aiplatform_v1beta1.types import index_service -from google.longrunning import operations_pb2 # type: ignore -from .base import IndexServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import IndexServiceGrpcTransport - - -class IndexServiceGrpcAsyncIOTransport(IndexServiceTransport): - """gRPC AsyncIO backend transport for IndexService. - - A service for creating and managing Vertex AI's Index - resources. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): An optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. 
- """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_index(self) -> Callable[ - [index_service.CreateIndexRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create index method over gRPC. - - Creates an Index. - - Returns: - Callable[[~.CreateIndexRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_index' not in self._stubs: - self._stubs['create_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexService/CreateIndex', - request_serializer=index_service.CreateIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_index'] - - @property - def get_index(self) -> Callable[ - [index_service.GetIndexRequest], - Awaitable[index.Index]]: - r"""Return a callable for the get index method over gRPC. - - Gets an Index. - - Returns: - Callable[[~.GetIndexRequest], - Awaitable[~.Index]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_index' not in self._stubs: - self._stubs['get_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexService/GetIndex', - request_serializer=index_service.GetIndexRequest.serialize, - response_deserializer=index.Index.deserialize, - ) - return self._stubs['get_index'] - - @property - def list_indexes(self) -> Callable[ - [index_service.ListIndexesRequest], - Awaitable[index_service.ListIndexesResponse]]: - r"""Return a callable for the list indexes method over gRPC. - - Lists Indexes in a Location. - - Returns: - Callable[[~.ListIndexesRequest], - Awaitable[~.ListIndexesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_indexes' not in self._stubs: - self._stubs['list_indexes'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexService/ListIndexes', - request_serializer=index_service.ListIndexesRequest.serialize, - response_deserializer=index_service.ListIndexesResponse.deserialize, - ) - return self._stubs['list_indexes'] - - @property - def update_index(self) -> Callable[ - [index_service.UpdateIndexRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the update index method over gRPC. - - Updates an Index. - - Returns: - Callable[[~.UpdateIndexRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_index' not in self._stubs: - self._stubs['update_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexService/UpdateIndex', - request_serializer=index_service.UpdateIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_index'] - - @property - def delete_index(self) -> Callable[ - [index_service.DeleteIndexRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete index method over gRPC. - - Deletes an Index. An Index can only be deleted when all its - [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes] - have been undeployed. - - Returns: - Callable[[~.DeleteIndexRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_index' not in self._stubs: - self._stubs['delete_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexService/DeleteIndex', - request_serializer=index_service.DeleteIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_index'] - - -__all__ = ( - 'IndexServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/__init__.py deleted file mode 100644 index 817e1b49e2..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import JobServiceClient -from .async_client import JobServiceAsyncClient - -__all__ = ( - 'JobServiceClient', - 'JobServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py deleted file mode 100644 index eb9907a8ca..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py +++ /dev/null @@ -1,2615 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.job_service import pagers -from google.cloud.aiplatform_v1beta1.types import batch_prediction_job -from google.cloud.aiplatform_v1beta1.types import batch_prediction_job as gca_batch_prediction_job -from google.cloud.aiplatform_v1beta1.types import completion_stats -from google.cloud.aiplatform_v1beta1.types import custom_job -from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job -from google.cloud.aiplatform_v1beta1.types import data_labeling_job -from google.cloud.aiplatform_v1beta1.types import data_labeling_job as gca_data_labeling_job -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import explanation -from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job -from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job -from google.cloud.aiplatform_v1beta1.types import io -from google.cloud.aiplatform_v1beta1.types import job_service -from google.cloud.aiplatform_v1beta1.types import job_state -from google.cloud.aiplatform_v1beta1.types import machine_resources -from google.cloud.aiplatform_v1beta1.types import manual_batch_tuning_parameters -from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job -from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job -from google.cloud.aiplatform_v1beta1.types import model_monitoring -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.cloud.aiplatform_v1beta1.types import study -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from google.type import money_pb2 # type: ignore -from .transports.base import JobServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport -from .client import JobServiceClient - - -class JobServiceAsyncClient: - """A service for creating and managing Vertex AI's jobs.""" - - _client: JobServiceClient - - DEFAULT_ENDPOINT = JobServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = JobServiceClient.DEFAULT_MTLS_ENDPOINT - - batch_prediction_job_path = staticmethod(JobServiceClient.batch_prediction_job_path) - parse_batch_prediction_job_path = staticmethod(JobServiceClient.parse_batch_prediction_job_path) - custom_job_path = staticmethod(JobServiceClient.custom_job_path) - parse_custom_job_path = staticmethod(JobServiceClient.parse_custom_job_path) - data_labeling_job_path = 
staticmethod(JobServiceClient.data_labeling_job_path) - parse_data_labeling_job_path = staticmethod(JobServiceClient.parse_data_labeling_job_path) - dataset_path = staticmethod(JobServiceClient.dataset_path) - parse_dataset_path = staticmethod(JobServiceClient.parse_dataset_path) - endpoint_path = staticmethod(JobServiceClient.endpoint_path) - parse_endpoint_path = staticmethod(JobServiceClient.parse_endpoint_path) - hyperparameter_tuning_job_path = staticmethod(JobServiceClient.hyperparameter_tuning_job_path) - parse_hyperparameter_tuning_job_path = staticmethod(JobServiceClient.parse_hyperparameter_tuning_job_path) - model_path = staticmethod(JobServiceClient.model_path) - parse_model_path = staticmethod(JobServiceClient.parse_model_path) - model_deployment_monitoring_job_path = staticmethod(JobServiceClient.model_deployment_monitoring_job_path) - parse_model_deployment_monitoring_job_path = staticmethod(JobServiceClient.parse_model_deployment_monitoring_job_path) - network_path = staticmethod(JobServiceClient.network_path) - parse_network_path = staticmethod(JobServiceClient.parse_network_path) - tensorboard_path = staticmethod(JobServiceClient.tensorboard_path) - parse_tensorboard_path = staticmethod(JobServiceClient.parse_tensorboard_path) - trial_path = staticmethod(JobServiceClient.trial_path) - parse_trial_path = staticmethod(JobServiceClient.parse_trial_path) - common_billing_account_path = staticmethod(JobServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(JobServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(JobServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(JobServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(JobServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(JobServiceClient.parse_common_organization_path) - common_project_path = staticmethod(JobServiceClient.common_project_path) - parse_common_project_path = staticmethod(JobServiceClient.parse_common_project_path) - common_location_path = staticmethod(JobServiceClient.common_location_path) - parse_common_location_path = staticmethod(JobServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - JobServiceAsyncClient: The constructed client. - """ - return JobServiceClient.from_service_account_info.__func__(JobServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - JobServiceAsyncClient: The constructed client. - """ - return JobServiceClient.from_service_account_file.__func__(JobServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> JobServiceTransport: - """Returns the transport used by the client instance. 
-
-        Returns:
-            JobServiceTransport: The transport used by the client instance.
-        """
-        return self._client.transport
-
-    get_transport_class = functools.partial(type(JobServiceClient).get_transport_class, type(JobServiceClient))
-
-    def __init__(self, *,
-            credentials: ga_credentials.Credentials = None,
-            transport: Union[str, JobServiceTransport] = "grpc_asyncio",
-            client_options: ClientOptions = None,
-            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
-            ) -> None:
-        """Instantiates the job service client.
-
-        Args:
-            credentials (Optional[google.auth.credentials.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify the application to the service; if none
-                are specified, the client will attempt to ascertain the
-                credentials from the environment.
-            transport (Union[str, ~.JobServiceTransport]): The
-                transport to use. If set to None, a transport is chosen
-                automatically.
-            client_options (ClientOptions): Custom options for the client. These
-                options won't take effect if a ``transport`` instance is provided.
-                (1) The ``api_endpoint`` property can be used to override the
-                default endpoint provided by the client. The
-                GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be
-                used to override the endpoint: "always" (always use the default
-                mTLS endpoint), "never" (always use the default regular
-                endpoint) and "auto" (auto-switch to the default mTLS endpoint
-                if a client certificate is present; this is the default value).
-                However, the ``api_endpoint`` property takes precedence if
-                provided.
-                (2) If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment
-                variable is "true", then the ``client_cert_source`` property
-                can be used to provide a client certificate for mutual TLS
-                transport. If not provided, the default SSL client certificate
-                will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE
-                is "false" or not set, no client certificate will be used.
-
-        Raises:
-            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
-                creation failed for any reason.
-        """
-        self._client = JobServiceClient(
-            credentials=credentials,
-            transport=transport,
-            client_options=client_options,
-            client_info=client_info,
-        )
-
-    async def create_custom_job(self,
-            request: job_service.CreateCustomJobRequest = None,
-            *,
-            parent: str = None,
-            custom_job: gca_custom_job.CustomJob = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> gca_custom_job.CustomJob:
-        r"""Creates a CustomJob. A newly created CustomJob
-        will immediately attempt to run.
-
-        Args:
-            request (:class:`google.cloud.aiplatform_v1beta1.types.CreateCustomJobRequest`):
-                The request object. Request message for
-                [JobService.CreateCustomJob][google.cloud.aiplatform.v1beta1.JobService.CreateCustomJob].
-            parent (:class:`str`):
-                Required. The resource name of the Location to create
-                the CustomJob in. Format:
-                ``projects/{project}/locations/{location}``
-
-                This corresponds to the ``parent`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            custom_job (:class:`google.cloud.aiplatform_v1beta1.types.CustomJob`):
-                Required. The CustomJob to create.
-                This corresponds to the ``custom_job`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.CustomJob: - Represents a job that runs custom - workloads such as a Docker container or - a Python package. A CustomJob can have - multiple worker pools and each worker - pool can have its own machine and input - spec. A CustomJob will be cleaned up - once the job enters terminal state - (failed or succeeded). - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, custom_job]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CreateCustomJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if custom_job is not None: - request.custom_job = custom_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_custom_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_custom_job(self, - request: job_service.GetCustomJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> custom_job.CustomJob: - r"""Gets a CustomJob. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetCustomJobRequest`): - The request object. Request message for - [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob]. - name (:class:`str`): - Required. The name of the CustomJob resource. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.CustomJob: - Represents a job that runs custom - workloads such as a Docker container or - a Python package. A CustomJob can have - multiple worker pools and each worker - pool can have its own machine and input - spec. A CustomJob will be cleaned up - once the job enters terminal state - (failed or succeeded). - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
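-        # In practice this guard enforces two mutually exclusive calling
-        # styles (a hypothetical sketch; the resource name is illustrative):
-        #
-        #     job = await client.get_custom_job(
-        #         name="projects/my-project/locations/us-central1/customJobs/123")
-        #
-        #     job = await client.get_custom_job(
-        #         request=job_service.GetCustomJobRequest(
-        #             name="projects/my-project/locations/us-central1/customJobs/123"))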
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.GetCustomJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_custom_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_custom_jobs(self, - request: job_service.ListCustomJobsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListCustomJobsAsyncPager: - r"""Lists CustomJobs in a Location. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListCustomJobsRequest`): - The request object. Request message for - [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs]. - parent (:class:`str`): - Required. The resource name of the Location to list the - CustomJobs from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListCustomJobsAsyncPager: - Response message for - [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.ListCustomJobsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_custom_jobs, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
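-        # The awaited response below is wrapped in a ListCustomJobsAsyncPager,
-        # so callers never page manually; a typical consumption sketch
-        # (parent value illustrative):
-        #
-        #     pager = await client.list_custom_jobs(
-        #         parent="projects/my-project/locations/us-central1")
-        #     async for custom_job in pager:
-        #         print(custom_job.name)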
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListCustomJobsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_custom_job(self, - request: job_service.DeleteCustomJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a CustomJob. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteCustomJobRequest`): - The request object. Request message for - [JobService.DeleteCustomJob][google.cloud.aiplatform.v1beta1.JobService.DeleteCustomJob]. - name (:class:`str`): - Required. The name of the CustomJob resource to be - deleted. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.DeleteCustomJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_custom_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
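-        # The AsyncOperation returned here behaves like a future; a caller
-        # typically awaits completion (a hypothetical sketch):
-        #
-        #     op = await client.delete_custom_job(
-        #         name="projects/my-project/locations/us-central1/customJobs/123")
-        #     await op.result()   # resolves to empty_pb2.Empty once deletion finishes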
- return response - - async def cancel_custom_job(self, - request: job_service.CancelCustomJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a CustomJob. Starts asynchronous cancellation on the - CustomJob. The server makes a best effort to cancel the job, but - success is not guaranteed. Clients can use - [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On successful - cancellation, the CustomJob is not deleted; instead it becomes a - job with a - [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] - is set to ``CANCELLED``. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.CancelCustomJobRequest`): - The request object. Request message for - [JobService.CancelCustomJob][google.cloud.aiplatform.v1beta1.JobService.CancelCustomJob]. - name (:class:`str`): - Required. The name of the CustomJob to cancel. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CancelCustomJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.cancel_custom_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def create_data_labeling_job(self, - request: job_service.CreateDataLabelingJobRequest = None, - *, - parent: str = None, - data_labeling_job: gca_data_labeling_job.DataLabelingJob = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_data_labeling_job.DataLabelingJob: - r"""Creates a DataLabelingJob. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.CreateDataLabelingJobRequest`): - The request object. 
Request message for - [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CreateDataLabelingJob]. - parent (:class:`str`): - Required. The parent of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - data_labeling_job (:class:`google.cloud.aiplatform_v1beta1.types.DataLabelingJob`): - Required. The DataLabelingJob to - create. - - This corresponds to the ``data_labeling_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.DataLabelingJob: - DataLabelingJob is used to trigger a - human labeling job on unlabeled data - from the following Dataset: - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, data_labeling_job]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CreateDataLabelingJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if data_labeling_job is not None: - request.data_labeling_job = data_labeling_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_data_labeling_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_data_labeling_job(self, - request: job_service.GetDataLabelingJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> data_labeling_job.DataLabelingJob: - r"""Gets a DataLabelingJob. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetDataLabelingJobRequest`): - The request object. Request message for - [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.GetDataLabelingJob]. - name (:class:`str`): - Required. The name of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.types.DataLabelingJob: - DataLabelingJob is used to trigger a - human labeling job on unlabeled data - from the following Dataset: - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.GetDataLabelingJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_data_labeling_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_data_labeling_jobs(self, - request: job_service.ListDataLabelingJobsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListDataLabelingJobsAsyncPager: - r"""Lists DataLabelingJobs in a Location. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsRequest`): - The request object. Request message for - [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. - parent (:class:`str`): - Required. The parent of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListDataLabelingJobsAsyncPager: - Response message for - [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.ListDataLabelingJobsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
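-        # The default_timeout baked in below comes from the generated service
-        # config; both it and the retry policy can be overridden per call
-        # (a hypothetical override using the ``retries`` module imported above):
-        #
-        #     await client.list_data_labeling_jobs(
-        #         parent="projects/my-project/locations/us-central1",
-        #         retry=retries.Retry(initial=0.1, maximum=60.0, deadline=300.0),
-        #         timeout=120.0,
-        #     )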
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_data_labeling_jobs, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListDataLabelingJobsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_data_labeling_job(self, - request: job_service.DeleteDataLabelingJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a DataLabelingJob. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteDataLabelingJobRequest`): - The request object. Request message for - [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.DeleteDataLabelingJob]. - name (:class:`str`): - Required. The name of the DataLabelingJob to be deleted. - Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.DeleteDataLabelingJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_data_labeling_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
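-        # Note that to_grpc_metadata() above folded the ``name`` field into the
-        # request-routing header, so the metadata now carries an entry of the
-        # form ("x-goog-request-params", "name=projects/.../dataLabelingJobs/...")
-        # that the backend uses to route the call.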
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def cancel_data_labeling_job(self, - request: job_service.CancelDataLabelingJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a DataLabelingJob. Success of cancellation is - not guaranteed. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.CancelDataLabelingJobRequest`): - The request object. Request message for - [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CancelDataLabelingJob]. - name (:class:`str`): - Required. The name of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CancelDataLabelingJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.cancel_data_labeling_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def create_hyperparameter_tuning_job(self, - request: job_service.CreateHyperparameterTuningJobRequest = None, - *, - parent: str = None, - hyperparameter_tuning_job: gca_hyperparameter_tuning_job.HyperparameterTuningJob = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_hyperparameter_tuning_job.HyperparameterTuningJob: - r"""Creates a HyperparameterTuningJob - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.CreateHyperparameterTuningJobRequest`): - The request object. Request message for - [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CreateHyperparameterTuningJob]. - parent (:class:`str`): - Required. The resource name of the Location to create - the HyperparameterTuningJob in. 
Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - hyperparameter_tuning_job (:class:`google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob`): - Required. The HyperparameterTuningJob - to create. - - This corresponds to the ``hyperparameter_tuning_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob: - Represents a HyperparameterTuningJob. - A HyperparameterTuningJob has a Study - specification and multiple CustomJobs - with identical CustomJob specification. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, hyperparameter_tuning_job]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CreateHyperparameterTuningJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if hyperparameter_tuning_job is not None: - request.hyperparameter_tuning_job = hyperparameter_tuning_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_hyperparameter_tuning_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_hyperparameter_tuning_job(self, - request: job_service.GetHyperparameterTuningJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> hyperparameter_tuning_job.HyperparameterTuningJob: - r"""Gets a HyperparameterTuningJob - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetHyperparameterTuningJobRequest`): - The request object. Request message for - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob]. - name (:class:`str`): - Required. The name of the HyperparameterTuningJob - resource. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob: - Represents a HyperparameterTuningJob. - A HyperparameterTuningJob has a Study - specification and multiple CustomJobs - with identical CustomJob specification. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.GetHyperparameterTuningJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_hyperparameter_tuning_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_hyperparameter_tuning_jobs(self, - request: job_service.ListHyperparameterTuningJobsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListHyperparameterTuningJobsAsyncPager: - r"""Lists HyperparameterTuningJobs in a Location. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsRequest`): - The request object. Request message for - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs]. - parent (:class:`str`): - Required. The resource name of the Location to list the - HyperparameterTuningJobs from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListHyperparameterTuningJobsAsyncPager: - Response message for - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
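-        # Paging behaviour can be tuned through the request object; a
-        # hypothetical sketch using the ``page_size`` field of
-        # ListHyperparameterTuningJobsRequest:
-        #
-        #     pager = await client.list_hyperparameter_tuning_jobs(
-        #         request=job_service.ListHyperparameterTuningJobsRequest(
-        #             parent="projects/my-project/locations/us-central1",
-        #             page_size=50,
-        #         ))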
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.ListHyperparameterTuningJobsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_hyperparameter_tuning_jobs, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListHyperparameterTuningJobsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_hyperparameter_tuning_job(self, - request: job_service.DeleteHyperparameterTuningJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a HyperparameterTuningJob. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteHyperparameterTuningJobRequest`): - The request object. Request message for - [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.DeleteHyperparameterTuningJob]. - name (:class:`str`): - Required. The name of the HyperparameterTuningJob - resource to be deleted. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
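-        # As with the other delete methods, the response is wrapped further
-        # below in an AsyncOperation whose metadata is typed as
-        # DeleteOperationMetadata; while the delete runs, progress can be
-        # inspected through the operation's ``metadata`` property.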
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.DeleteHyperparameterTuningJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_hyperparameter_tuning_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def cancel_hyperparameter_tuning_job(self, - request: job_service.CancelHyperparameterTuningJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a HyperparameterTuningJob. Starts asynchronous - cancellation on the HyperparameterTuningJob. The server makes a - best effort to cancel the job, but success is not guaranteed. - Clients can use - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On successful - cancellation, the HyperparameterTuningJob is not deleted; - instead it becomes a job with a - [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state] - is set to ``CANCELLED``. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.CancelHyperparameterTuningJobRequest`): - The request object. Request message for - [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CancelHyperparameterTuningJob]. - name (:class:`str`): - Required. The name of the HyperparameterTuningJob to - cancel. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
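-        # Cancellation is best-effort and returns no body; a caller verifies
-        # the outcome by re-reading the job (a hypothetical sketch):
-        #
-        #     await client.cancel_hyperparameter_tuning_job(
-        #         name="projects/my-project/locations/us-central1/hyperparameterTuningJobs/456")
-        #     job = await client.get_hyperparameter_tuning_job(
-        #         name="projects/my-project/locations/us-central1/hyperparameterTuningJobs/456")
-        #     # job.state reads JOB_STATE_CANCELLED once cancellation lands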
-        has_flattened_params = any([name])
-        if request is not None and has_flattened_params:
-            raise ValueError("If the `request` argument is set, then none of "
-                             "the individual field arguments should be set.")
-
-        request = job_service.CancelHyperparameterTuningJobRequest(request)
-
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if name is not None:
-            request.name = name
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = gapic_v1.method_async.wrap_method(
-            self._client._transport.cancel_hyperparameter_tuning_job,
-            default_timeout=5.0,
-            client_info=DEFAULT_CLIENT_INFO,
-        )
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("name", request.name),
-            )),
-        )
-
-        # Send the request.
-        await rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-    async def create_batch_prediction_job(self,
-            request: job_service.CreateBatchPredictionJobRequest = None,
-            *,
-            parent: str = None,
-            batch_prediction_job: gca_batch_prediction_job.BatchPredictionJob = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> gca_batch_prediction_job.BatchPredictionJob:
-        r"""Creates a BatchPredictionJob. A newly created
-        BatchPredictionJob will immediately attempt to start.
-
-        Args:
-            request (:class:`google.cloud.aiplatform_v1beta1.types.CreateBatchPredictionJobRequest`):
-                The request object. Request message for
-                [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CreateBatchPredictionJob].
-            parent (:class:`str`):
-                Required. The resource name of the Location to create
-                the BatchPredictionJob in. Format:
-                ``projects/{project}/locations/{location}``
-
-                This corresponds to the ``parent`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            batch_prediction_job (:class:`google.cloud.aiplatform_v1beta1.types.BatchPredictionJob`):
-                Required. The BatchPredictionJob to
-                create.
-
-                This corresponds to the ``batch_prediction_job`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.cloud.aiplatform_v1beta1.types.BatchPredictionJob:
-                A job that uses a [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to produce predictions
-                on multiple [input
-                instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config].
-                If predictions for a significant portion of the
-                instances fail, the job may finish without attempting
-                predictions for all remaining instances.
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
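-        # A hypothetical flattened call to this method (field values are
-        # illustrative, not a complete BatchPredictionJob spec):
-        #
-        #     job = await client.create_batch_prediction_job(
-        #         parent="projects/my-project/locations/us-central1",
-        #         batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(
-        #             display_name="nightly-scoring",
-        #             model="projects/my-project/locations/us-central1/models/789",
-        #         ),
-        #     )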
- has_flattened_params = any([parent, batch_prediction_job]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CreateBatchPredictionJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if batch_prediction_job is not None: - request.batch_prediction_job = batch_prediction_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_batch_prediction_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_batch_prediction_job(self, - request: job_service.GetBatchPredictionJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> batch_prediction_job.BatchPredictionJob: - r"""Gets a BatchPredictionJob - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetBatchPredictionJobRequest`): - The request object. Request message for - [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob]. - name (:class:`str`): - Required. The name of the BatchPredictionJob resource. - Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.BatchPredictionJob: - A job that uses a [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to produce predictions - on multiple [input - instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. - If predictions for significant portion of the - instances fail, the job may finish without attempting - predictions for all remaining instances. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.GetBatchPredictionJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
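The wrapping applied below supplies a default timeout and client-info headers; callers can still override retry and timeout per call. A sketch (hypothetical deadline and resource ID):

    from google.api_core import retry as retries

    job = await client.get_batch_prediction_job(
        name="projects/my-project/locations/us-central1/batchPredictionJobs/456",
        retry=retries.Retry(deadline=30.0),  # override the wrapped defaults
        timeout=10.0,
    )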
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_batch_prediction_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_batch_prediction_jobs(self, - request: job_service.ListBatchPredictionJobsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListBatchPredictionJobsAsyncPager: - r"""Lists BatchPredictionJobs in a Location. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsRequest`): - The request object. Request message for - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs]. - parent (:class:`str`): - Required. The resource name of the Location to list the - BatchPredictionJobs from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListBatchPredictionJobsAsyncPager: - Response message for - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.ListBatchPredictionJobsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_batch_prediction_jobs, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListBatchPredictionJobsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
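The AsyncPager constructed above can be consumed with ``async for``, which fetches further pages transparently; a sketch (hypothetical parent):

    pager = await client.list_batch_prediction_jobs(
        parent="projects/my-project/locations/us-central1",
    )
    async for job in pager:  # additional pages are resolved automatically
        print(job.name)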
- return response - - async def delete_batch_prediction_job(self, - request: job_service.DeleteBatchPredictionJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a BatchPredictionJob. Can only be called on - jobs that already finished. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteBatchPredictionJobRequest`): - The request object. Request message for - [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.DeleteBatchPredictionJob]. - name (:class:`str`): - Required. The name of the BatchPredictionJob resource to - be deleted. Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.DeleteBatchPredictionJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_batch_prediction_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def cancel_batch_prediction_job(self, - request: job_service.CancelBatchPredictionJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a BatchPredictionJob. 
-
-        Starts asynchronous cancellation on the BatchPredictionJob. The
-        server makes the best effort to cancel the job, but success is
-        not guaranteed. Clients can use
-        [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob]
-        or other methods to check whether the cancellation succeeded or
-        whether the job completed despite cancellation. On a successful
-        cancellation, the BatchPredictionJob is not deleted; instead its
-        [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state]
-        is set to ``CANCELLED``. Any files already outputted by the job
-        are not deleted.
-
-        Args:
-            request (:class:`google.cloud.aiplatform_v1beta1.types.CancelBatchPredictionJobRequest`):
-                The request object. Request message for
-                [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CancelBatchPredictionJob].
-            name (:class:`str`):
-                Required. The name of the BatchPredictionJob to cancel.
-                Format:
-                ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}``
-
-                This corresponds to the ``name`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([name])
-        if request is not None and has_flattened_params:
-            raise ValueError("If the `request` argument is set, then none of "
-                             "the individual field arguments should be set.")
-
-        request = job_service.CancelBatchPredictionJobRequest(request)
-
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if name is not None:
-            request.name = name
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = gapic_v1.method_async.wrap_method(
-            self._client._transport.cancel_batch_prediction_job,
-            default_timeout=5.0,
-            client_info=DEFAULT_CLIENT_INFO,
-        )
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("name", request.name),
-            )),
-        )
-
-        # Send the request.
-        await rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-    async def create_model_deployment_monitoring_job(self,
-            request: job_service.CreateModelDeploymentMonitoringJobRequest = None,
-            *,
-            parent: str = None,
-            model_deployment_monitoring_job: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob:
-        r"""Creates a ModelDeploymentMonitoringJob. It will run
-        periodically on a configured interval.
-
-        Args:
-            request (:class:`google.cloud.aiplatform_v1beta1.types.CreateModelDeploymentMonitoringJobRequest`):
-                The request object. Request message for
-                [JobService.CreateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.CreateModelDeploymentMonitoringJob].
-            parent (:class:`str`):
-                Required. The parent of the
-                ModelDeploymentMonitoringJob. Format:
-                ``projects/{project}/locations/{location}``
-
-                This corresponds to the ``parent`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            model_deployment_monitoring_job (:class:`google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob`):
-                Required. The
-                ModelDeploymentMonitoringJob to create.
-
-                This corresponds to the ``model_deployment_monitoring_job`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob:
-                Represents a job that runs
-                periodically to monitor the deployed
-                models in an endpoint. It will analyze
-                the logged training & prediction data to
-                detect any abnormal behaviors.
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([parent, model_deployment_monitoring_job])
-        if request is not None and has_flattened_params:
-            raise ValueError("If the `request` argument is set, then none of "
-                             "the individual field arguments should be set.")
-
-        request = job_service.CreateModelDeploymentMonitoringJobRequest(request)
-
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if parent is not None:
-            request.parent = parent
-        if model_deployment_monitoring_job is not None:
-            request.model_deployment_monitoring_job = model_deployment_monitoring_job
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = gapic_v1.method_async.wrap_method(
-            self._client._transport.create_model_deployment_monitoring_job,
-            default_timeout=60.0,
-            client_info=DEFAULT_CLIENT_INFO,
-        )
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("parent", request.parent),
-            )),
-        )
-
-        # Send the request.
-        response = await rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Done; return the response.
-        return response
-
-    async def search_model_deployment_monitoring_stats_anomalies(self,
-            request: job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest = None,
-            *,
-            model_deployment_monitoring_job: str = None,
-            deployed_model_id: str = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager:
-        r"""Searches Model Monitoring Statistics generated within
-        a given time window.
-
-        Args:
-            request (:class:`google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest`):
-                The request object. Request message for
-                [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies].
-            model_deployment_monitoring_job (:class:`str`):
-                Required. ModelDeploymentMonitoringJob resource name.
-                Format:
-                ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}``
-
-                This corresponds to the ``model_deployment_monitoring_job`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            deployed_model_id (:class:`str`):
-                Required. The DeployedModel ID of the
-                [google.cloud.aiplatform.master.ModelDeploymentMonitoringObjectiveConfig.deployed_model_id].
-
-                This corresponds to the ``deployed_model_id`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.cloud.aiplatform_v1beta1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager:
-                Response message for
-                [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies].
-
-                Iterating over this object will yield results and
-                resolve additional pages automatically.
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([model_deployment_monitoring_job, deployed_model_id])
-        if request is not None and has_flattened_params:
-            raise ValueError("If the `request` argument is set, then none of "
-                             "the individual field arguments should be set.")
-
-        request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(request)
-
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if model_deployment_monitoring_job is not None:
-            request.model_deployment_monitoring_job = model_deployment_monitoring_job
-        if deployed_model_id is not None:
-            request.deployed_model_id = deployed_model_id
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = gapic_v1.method_async.wrap_method(
-            self._client._transport.search_model_deployment_monitoring_stats_anomalies,
-            default_timeout=5.0,
-            client_info=DEFAULT_CLIENT_INFO,
-        )
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("model_deployment_monitoring_job", request.model_deployment_monitoring_job),
-            )),
-        )
-
-        # Send the request.
-        response = await rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # This method is paged; wrap the response in a pager, which provides
-        # an `__aiter__` convenience method.
-        response = pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager(
-            method=rpc,
-            request=request,
-            response=response,
-            metadata=metadata,
-        )
-
-        # Done; return the response.
-        return response
-
-    async def get_model_deployment_monitoring_job(self,
-            request: job_service.GetModelDeploymentMonitoringJobRequest = None,
-            *,
-            name: str = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> model_deployment_monitoring_job.ModelDeploymentMonitoringJob:
-        r"""Gets a ModelDeploymentMonitoringJob.
-
-        Args:
-            request (:class:`google.cloud.aiplatform_v1beta1.types.GetModelDeploymentMonitoringJobRequest`):
-                The request object.
Request message for - [JobService.GetModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.GetModelDeploymentMonitoringJob]. - name (:class:`str`): - Required. The resource name of the - ModelDeploymentMonitoringJob. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob: - Represents a job that runs - periodically to monitor the deployed - models in an endpoint. It will analyze - the logged training & prediction data to - detect any abnormal behaviors. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.GetModelDeploymentMonitoringJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_model_deployment_monitoring_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_model_deployment_monitoring_jobs(self, - request: job_service.ListModelDeploymentMonitoringJobsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelDeploymentMonitoringJobsAsyncPager: - r"""Lists ModelDeploymentMonitoringJobs in a Location. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest`): - The request object. Request message for - [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. - parent (:class:`str`): - Required. The parent of the - ModelDeploymentMonitoringJob. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListModelDeploymentMonitoringJobsAsyncPager: - Response message for - [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.ListModelDeploymentMonitoringJobsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_model_deployment_monitoring_jobs, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListModelDeploymentMonitoringJobsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_model_deployment_monitoring_job(self, - request: job_service.UpdateModelDeploymentMonitoringJobRequest = None, - *, - model_deployment_monitoring_job: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Updates a ModelDeploymentMonitoringJob. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateModelDeploymentMonitoringJobRequest`): - The request object. Request message for - [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob]. - model_deployment_monitoring_job (:class:`google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob`): - Required. The model monitoring - configuration which replaces the - resource on the server. - - This corresponds to the ``model_deployment_monitoring_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The update mask applies to - the resource. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob` Represents a job that runs periodically to monitor the deployed models in an - endpoint. It will analyze the logged training & - prediction data to detect any abnormal behaviors. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([model_deployment_monitoring_job, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.UpdateModelDeploymentMonitoringJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if model_deployment_monitoring_job is not None: - request.model_deployment_monitoring_job = model_deployment_monitoring_job - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_model_deployment_monitoring_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("model_deployment_monitoring_job.name", request.model_deployment_monitoring_job.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, - metadata_type=job_service.UpdateModelDeploymentMonitoringJobOperationMetadata, - ) - - # Done; return the response. - return response - - async def delete_model_deployment_monitoring_job(self, - request: job_service.DeleteModelDeploymentMonitoringJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a ModelDeploymentMonitoringJob. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteModelDeploymentMonitoringJobRequest`): - The request object. Request message for - [JobService.DeleteModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.DeleteModelDeploymentMonitoringJob]. - name (:class:`str`): - Required. The resource name of the model monitoring job - to delete. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.DeleteModelDeploymentMonitoringJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_model_deployment_monitoring_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def pause_model_deployment_monitoring_job(self, - request: job_service.PauseModelDeploymentMonitoringJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Pauses a ModelDeploymentMonitoringJob. If the job is running, - the server makes a best effort to cancel the job. Will mark - [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state] - to 'PAUSED'. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.PauseModelDeploymentMonitoringJobRequest`): - The request object. Request message for - [JobService.PauseModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.PauseModelDeploymentMonitoringJob]. - name (:class:`str`): - Required. The resource name of the - ModelDeploymentMonitoringJob to pause. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. 
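A sketch of consuming the AsyncOperation returned by the delete call above (hypothetical resource name); ``result()`` resolves to ``Empty`` once the server-side deletion finishes:

    operation = await client.delete_model_deployment_monitoring_job(
        name="projects/my-project/locations/us-central1/modelDeploymentMonitoringJobs/789",
    )
    # Block until the long-running operation completes.
    await operation.result()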
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([name])
-        if request is not None and has_flattened_params:
-            raise ValueError("If the `request` argument is set, then none of "
-                             "the individual field arguments should be set.")
-
-        request = job_service.PauseModelDeploymentMonitoringJobRequest(request)
-
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if name is not None:
-            request.name = name
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = gapic_v1.method_async.wrap_method(
-            self._client._transport.pause_model_deployment_monitoring_job,
-            default_timeout=5.0,
-            client_info=DEFAULT_CLIENT_INFO,
-        )
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("name", request.name),
-            )),
-        )
-
-        # Send the request.
-        await rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-    async def resume_model_deployment_monitoring_job(self,
-            request: job_service.ResumeModelDeploymentMonitoringJobRequest = None,
-            *,
-            name: str = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> None:
-        r"""Resumes a paused ModelDeploymentMonitoringJob. It
-        will start to run from the next scheduled time. A deleted
-        ModelDeploymentMonitoringJob can't be resumed.
-
-        Args:
-            request (:class:`google.cloud.aiplatform_v1beta1.types.ResumeModelDeploymentMonitoringJobRequest`):
-                The request object. Request message for
-                [JobService.ResumeModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.ResumeModelDeploymentMonitoringJob].
-            name (:class:`str`):
-                Required. The resource name of the
-                ModelDeploymentMonitoringJob to resume. Format:
-                ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}``
-
-                This corresponds to the ``name`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([name])
-        if request is not None and has_flattened_params:
-            raise ValueError("If the `request` argument is set, then none of "
-                             "the individual field arguments should be set.")
-
-        request = job_service.ResumeModelDeploymentMonitoringJobRequest(request)
-
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if name is not None:
-            request.name = name
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = gapic_v1.method_async.wrap_method(
-            self._client._transport.resume_model_deployment_monitoring_job,
-            default_timeout=5.0,
-            client_info=DEFAULT_CLIENT_INFO,
-        )
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
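The metadata tuple assembled below carries the routing header: ``to_grpc_metadata`` folds the ``("name", request.name)`` pair into a single ``x-goog-request-params`` entry, roughly as sketched here (illustrative; the value is URL-encoded on the wire):

    from google.api_core import gapic_v1

    header = gapic_v1.routing_header.to_grpc_metadata((("name", request.name),))
    # header is a single pair, approximately:
    # ("x-goog-request-params", "name=projects%2F...%2FmodelDeploymentMonitoringJobs%2F789")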
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "JobServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/client.py deleted file mode 100644 index afa2eb0e0a..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/client.py +++ /dev/null @@ -1,2892 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from distutils import util -import os -import re -from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.job_service import pagers -from google.cloud.aiplatform_v1beta1.types import batch_prediction_job -from google.cloud.aiplatform_v1beta1.types import batch_prediction_job as gca_batch_prediction_job -from google.cloud.aiplatform_v1beta1.types import completion_stats -from google.cloud.aiplatform_v1beta1.types import custom_job -from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job -from google.cloud.aiplatform_v1beta1.types import data_labeling_job -from google.cloud.aiplatform_v1beta1.types import data_labeling_job as gca_data_labeling_job -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import explanation -from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job -from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job -from google.cloud.aiplatform_v1beta1.types import io -from google.cloud.aiplatform_v1beta1.types import job_service -from google.cloud.aiplatform_v1beta1.types import job_state -from 
google.cloud.aiplatform_v1beta1.types import machine_resources
-from google.cloud.aiplatform_v1beta1.types import manual_batch_tuning_parameters
-from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job
-from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job
-from google.cloud.aiplatform_v1beta1.types import model_monitoring
-from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
-from google.cloud.aiplatform_v1beta1.types import study
-from google.protobuf import duration_pb2  # type: ignore
-from google.protobuf import empty_pb2  # type: ignore
-from google.protobuf import field_mask_pb2  # type: ignore
-from google.protobuf import struct_pb2  # type: ignore
-from google.protobuf import timestamp_pb2  # type: ignore
-from google.rpc import status_pb2  # type: ignore
-from google.type import money_pb2  # type: ignore
-from .transports.base import JobServiceTransport, DEFAULT_CLIENT_INFO
-from .transports.grpc import JobServiceGrpcTransport
-from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport
-
-
-class JobServiceClientMeta(type):
-    """Metaclass for the JobService client.
-
-    This provides class-level methods for building and retrieving
-    support objects (e.g. transport) without polluting the client instance
-    objects.
-    """
-    _transport_registry = OrderedDict()  # type: Dict[str, Type[JobServiceTransport]]
-    _transport_registry["grpc"] = JobServiceGrpcTransport
-    _transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport
-
-    def get_transport_class(cls,
-            label: str = None,
-            ) -> Type[JobServiceTransport]:
-        """Returns an appropriate transport class.
-
-        Args:
-            label: The name of the desired transport. If none is
-                provided, then the first transport in the registry is used.
-
-        Returns:
-            The transport class to use.
-        """
-        # If a specific transport is requested, return that one.
-        if label:
-            return cls._transport_registry[label]
-
-        # No transport is requested; return the default (that is, the first one
-        # in the dictionary).
-        return next(iter(cls._transport_registry.values()))
-
-
-class JobServiceClient(metaclass=JobServiceClientMeta):
-    """A service for creating and managing Vertex AI's jobs."""
-
-    @staticmethod
-    def _get_default_mtls_endpoint(api_endpoint):
-        """Converts api endpoint to mTLS endpoint.
-
-        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
-        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
-        Args:
-            api_endpoint (Optional[str]): the api endpoint to convert.
-        Returns:
-            str: converted mTLS api endpoint.
-        """
-        if not api_endpoint:
-            return api_endpoint
-
-        mtls_endpoint_re = re.compile(
-            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
-        )
-
-        m = mtls_endpoint_re.match(api_endpoint)
-        name, mtls, sandbox, googledomain = m.groups()
-        if mtls or not googledomain:
-            return api_endpoint
-
-        if sandbox:
-            return api_endpoint.replace(
-                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
-            )
-
-        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
-
-    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
-    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
-        DEFAULT_ENDPOINT
-    )
-
-    @classmethod
-    def from_service_account_info(cls, info: dict, *args, **kwargs):
-        """Creates an instance of this client using the provided credentials
-        info.
-
-        Args:
-            info (dict): The service account private key info.
-            args: Additional arguments to pass to the constructor.
-            kwargs: Additional arguments to pass to the constructor.
-
-        Returns:
-            JobServiceClient: The constructed client.
-        """
-        credentials = service_account.Credentials.from_service_account_info(info)
-        kwargs["credentials"] = credentials
-        return cls(*args, **kwargs)
-
-    @classmethod
-    def from_service_account_file(cls, filename: str, *args, **kwargs):
-        """Creates an instance of this client using the provided credentials
-        file.
-
-        Args:
-            filename (str): The path to the service account private key json
-                file.
-            args: Additional arguments to pass to the constructor.
-            kwargs: Additional arguments to pass to the constructor.
-
-        Returns:
-            JobServiceClient: The constructed client.
-        """
-        credentials = service_account.Credentials.from_service_account_file(
-            filename)
-        kwargs["credentials"] = credentials
-        return cls(*args, **kwargs)
-
-    from_service_account_json = from_service_account_file
-
-    @property
-    def transport(self) -> JobServiceTransport:
-        """Returns the transport used by the client instance.
-
-        Returns:
-            JobServiceTransport: The transport used by the client
-                instance.
-        """
-        return self._transport
-
-    @staticmethod
-    def batch_prediction_job_path(project: str,location: str,batch_prediction_job: str,) -> str:
-        """Returns a fully-qualified batch_prediction_job string."""
-        return "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format(project=project, location=location, batch_prediction_job=batch_prediction_job, )
-
-    @staticmethod
-    def parse_batch_prediction_job_path(path: str) -> Dict[str,str]:
-        """Parses a batch_prediction_job path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/batchPredictionJobs/(?P<batch_prediction_job>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def custom_job_path(project: str,location: str,custom_job: str,) -> str:
-        """Returns a fully-qualified custom_job string."""
-        return "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, )
-
-    @staticmethod
-    def parse_custom_job_path(path: str) -> Dict[str,str]:
-        """Parses a custom_job path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/customJobs/(?P<custom_job>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def data_labeling_job_path(project: str,location: str,data_labeling_job: str,) -> str:
-        """Returns a fully-qualified data_labeling_job string."""
-        return "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}".format(project=project, location=location, data_labeling_job=data_labeling_job, )
-
-    @staticmethod
-    def parse_data_labeling_job_path(path: str) -> Dict[str,str]:
-        """Parses a data_labeling_job path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/dataLabelingJobs/(?P<data_labeling_job>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def dataset_path(project: str,location: str,dataset: str,) -> str:
-        """Returns a fully-qualified dataset string."""
-        return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, )
-
-    @staticmethod
-    def parse_dataset_path(path: str) -> Dict[str,str]:
-        """Parses a dataset path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def endpoint_path(project: str,location: str,endpoint: str,) -> str:
-        """Returns a fully-qualified endpoint string."""
-        return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, )
-
-    @staticmethod
-    def parse_endpoint_path(path: str) -> Dict[str,str]:
-        """Parses an endpoint path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/endpoints/(?P<endpoint>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def hyperparameter_tuning_job_path(project: str,location: str,hyperparameter_tuning_job: str,) -> str:
-        """Returns a fully-qualified hyperparameter_tuning_job string."""
-        return "projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}".format(project=project, location=location, hyperparameter_tuning_job=hyperparameter_tuning_job, )
-
-    @staticmethod
-    def parse_hyperparameter_tuning_job_path(path: str) -> Dict[str,str]:
-        """Parses a hyperparameter_tuning_job path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/hyperparameterTuningJobs/(?P<hyperparameter_tuning_job>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def model_path(project: str,location: str,model: str,) -> str:
-        """Returns a fully-qualified model string."""
-        return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, )
-
-    @staticmethod
-    def parse_model_path(path: str) -> Dict[str,str]:
-        """Parses a model path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def model_deployment_monitoring_job_path(project: str,location: str,model_deployment_monitoring_job: str,) -> str:
-        """Returns a fully-qualified model_deployment_monitoring_job string."""
-        return "projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format(project=project, location=location, model_deployment_monitoring_job=model_deployment_monitoring_job, )
-
-    @staticmethod
-    def parse_model_deployment_monitoring_job_path(path: str) -> Dict[str,str]:
-        """Parses a model_deployment_monitoring_job path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/modelDeploymentMonitoringJobs/(?P<model_deployment_monitoring_job>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def network_path(project: str,network: str,) -> str:
-        """Returns a fully-qualified network string."""
-        return "projects/{project}/global/networks/{network}".format(project=project, network=network, )
-
-    @staticmethod
-    def parse_network_path(path: str) -> Dict[str,str]:
-        """Parses a network path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/global/networks/(?P<network>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def tensorboard_path(project: str,location: str,tensorboard: str,) -> str:
-        """Returns a fully-qualified tensorboard string."""
-        return "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(project=project, location=location, tensorboard=tensorboard, )
-
-    @staticmethod
-    def parse_tensorboard_path(path: str) -> Dict[str,str]:
-        """Parses a tensorboard path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/tensorboards/(?P<tensorboard>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def trial_path(project: str,location: str,study: str,trial: str,) -> str:
-        """Returns a fully-qualified trial string."""
-        return "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, )
-
-    @staticmethod
-    def parse_trial_path(path: str) -> Dict[str,str]:
-        """Parses a trial path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/studies/(?P<study>.+?)/trials/(?P<trial>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_billing_account_path(billing_account: str, ) -> str:
-        """Returns a fully-qualified billing_account string."""
-        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
-
-    @staticmethod
-    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
-        """Parse a billing_account path into its component segments."""
-        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_folder_path(folder: str, ) -> str:
-        """Returns a fully-qualified folder string."""
-        return "folders/{folder}".format(folder=folder, )
-
-    @staticmethod
-    def parse_common_folder_path(path: str) -> Dict[str,str]:
-        """Parse a folder path into its component segments."""
-        m = re.match(r"^folders/(?P<folder>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_organization_path(organization: str, ) -> str:
-        """Returns a fully-qualified organization string."""
-        return "organizations/{organization}".format(organization=organization, )
-
-    @staticmethod
-    def parse_common_organization_path(path: str) -> Dict[str,str]:
-        """Parse an organization path into its component segments."""
-        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_project_path(project: str, ) -> str:
-        """Returns a fully-qualified project string."""
-        return "projects/{project}".format(project=project, )
-
-    @staticmethod
-    def parse_common_project_path(path: str) -> Dict[str,str]:
-        """Parse a project path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_location_path(project: str, location: str, ) -> str:
-        """Returns a fully-qualified location string."""
-        return "projects/{project}/locations/{location}".format(project=project, location=location, )
-
-    @staticmethod
-    def parse_common_location_path(path: str) -> Dict[str,str]:
-        """Parse a location path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    def __init__(self, *,
-            credentials: Optional[ga_credentials.Credentials] = None,
-            transport: Union[str, JobServiceTransport, None] = None,
-            client_options: Optional[client_options_lib.ClientOptions] = None,
-            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
-            ) -> None:
-        """Instantiates the job service client.
-
-        Args:
-            credentials (Optional[google.auth.credentials.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify the application to the service; if none
-                are specified, the client will attempt to ascertain the
-                credentials from the environment.
-            transport (Union[str, JobServiceTransport]): The
-                transport to use. If set to None, a transport is chosen
-                automatically.
-            client_options (google.api_core.client_options.ClientOptions): Custom options for the
-                client. It won't take effect if a ``transport`` instance is provided.
- (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, JobServiceTransport): - # transport is a JobServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - ) - - def create_custom_job(self, - request: job_service.CreateCustomJobRequest = None, - *, - parent: str = None, - custom_job: gca_custom_job.CustomJob = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_custom_job.CustomJob: - r"""Creates a CustomJob. A created CustomJob right away - will be attempted to be run. - - Args: - request (google.cloud.aiplatform_v1beta1.types.CreateCustomJobRequest): - The request object. Request message for - [JobService.CreateCustomJob][google.cloud.aiplatform.v1beta1.JobService.CreateCustomJob]. - parent (str): - Required. The resource name of the Location to create - the CustomJob in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - custom_job (google.cloud.aiplatform_v1beta1.types.CustomJob): - Required. The CustomJob to create. - This corresponds to the ``custom_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.CustomJob: - Represents a job that runs custom - workloads such as a Docker container or - a Python package. A CustomJob can have - multiple worker pools and each worker - pool can have its own machine and input - spec. A CustomJob will be cleaned up - once the job enters terminal state - (failed or succeeded). - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, custom_job]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CreateCustomJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.CreateCustomJobRequest): - request = job_service.CreateCustomJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if custom_job is not None: - request.custom_job = custom_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_custom_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
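# A hedged sketch of instantiating the client through the __init__ above with
# a custom endpoint; the endpoint value is illustrative, not prescribed by
# this patch, and credentials are resolved from the environment when omitted.
from google.api_core.client_options import ClientOptions
client = JobServiceClient(
    client_options=ClientOptions(api_endpoint="us-central1-aiplatform.googleapis.com"),
)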
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_custom_job(self, - request: job_service.GetCustomJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> custom_job.CustomJob: - r"""Gets a CustomJob. - - Args: - request (google.cloud.aiplatform_v1beta1.types.GetCustomJobRequest): - The request object. Request message for - [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob]. - name (str): - Required. The name of the CustomJob resource. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.CustomJob: - Represents a job that runs custom - workloads such as a Docker container or - a Python package. A CustomJob can have - multiple worker pools and each worker - pool can have its own machine and input - spec. A CustomJob will be cleaned up - once the job enters terminal state - (failed or succeeded). - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.GetCustomJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.GetCustomJobRequest): - request = job_service.GetCustomJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_custom_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_custom_jobs(self, - request: job_service.ListCustomJobsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListCustomJobsPager: - r"""Lists CustomJobs in a Location. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListCustomJobsRequest): - The request object. Request message for - [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs]. - parent (str): - Required. The resource name of the Location to list the - CustomJobs from. 
Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListCustomJobsPager: - Response message for - [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.ListCustomJobsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.ListCustomJobsRequest): - request = job_service.ListCustomJobsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_custom_jobs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListCustomJobsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_custom_job(self, - request: job_service.DeleteCustomJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a CustomJob. - - Args: - request (google.cloud.aiplatform_v1beta1.types.DeleteCustomJobRequest): - The request object. Request message for - [JobService.DeleteCustomJob][google.cloud.aiplatform.v1beta1.JobService.DeleteCustomJob]. - name (str): - Required. The name of the CustomJob resource to be - deleted. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
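# A small sketch of consuming the ListCustomJobsPager returned above; the
# parent value is a placeholder. Iterating the pager yields CustomJob
# messages and fetches further pages through the wrapped RPC as needed.
for job in client.list_custom_jobs(parent="projects/my-project/locations/us-central1"):
    print(job.name)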
- - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.DeleteCustomJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.DeleteCustomJobRequest): - request = job_service.DeleteCustomJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_custom_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def cancel_custom_job(self, - request: job_service.CancelCustomJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a CustomJob. Starts asynchronous cancellation on the - CustomJob. The server makes a best effort to cancel the job, but - success is not guaranteed. Clients can use - [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On successful - cancellation, the CustomJob is not deleted; instead it becomes a - job with a - [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] - is set to ``CANCELLED``. - - Args: - request (google.cloud.aiplatform_v1beta1.types.CancelCustomJobRequest): - The request object. Request message for - [JobService.CancelCustomJob][google.cloud.aiplatform.v1beta1.JobService.CancelCustomJob]. - name (str): - Required. The name of the CustomJob to cancel. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CancelCustomJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.CancelCustomJobRequest): - request = job_service.CancelCustomJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.cancel_custom_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def create_data_labeling_job(self, - request: job_service.CreateDataLabelingJobRequest = None, - *, - parent: str = None, - data_labeling_job: gca_data_labeling_job.DataLabelingJob = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_data_labeling_job.DataLabelingJob: - r"""Creates a DataLabelingJob. - - Args: - request (google.cloud.aiplatform_v1beta1.types.CreateDataLabelingJobRequest): - The request object. Request message for - [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CreateDataLabelingJob]. - parent (str): - Required. The parent of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - data_labeling_job (google.cloud.aiplatform_v1beta1.types.DataLabelingJob): - Required. The DataLabelingJob to - create. - - This corresponds to the ``data_labeling_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.DataLabelingJob: - DataLabelingJob is used to trigger a - human labeling job on unlabeled data - from the following Dataset: - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
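# The methods in this client accept either a request object or flattened
# fields, but never both, as the sanity check above enforces. A sketch of
# the two equivalent call styles (parent is a placeholder and
# my_data_labeling_job is an assumed, pre-built DataLabelingJob message):
job = client.create_data_labeling_job(
    parent="projects/my-project/locations/us-central1",
    data_labeling_job=my_data_labeling_job,
)
request = job_service.CreateDataLabelingJobRequest(
    parent="projects/my-project/locations/us-central1",
    data_labeling_job=my_data_labeling_job,
)
job = client.create_data_labeling_job(request=request)  # equivalent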
- has_flattened_params = any([parent, data_labeling_job]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CreateDataLabelingJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.CreateDataLabelingJobRequest): - request = job_service.CreateDataLabelingJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if data_labeling_job is not None: - request.data_labeling_job = data_labeling_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_data_labeling_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_data_labeling_job(self, - request: job_service.GetDataLabelingJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> data_labeling_job.DataLabelingJob: - r"""Gets a DataLabelingJob. - - Args: - request (google.cloud.aiplatform_v1beta1.types.GetDataLabelingJobRequest): - The request object. Request message for - [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.GetDataLabelingJob]. - name (str): - Required. The name of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.DataLabelingJob: - DataLabelingJob is used to trigger a - human labeling job on unlabeled data - from the following Dataset: - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.GetDataLabelingJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.GetDataLabelingJobRequest): - request = job_service.GetDataLabelingJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_data_labeling_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_data_labeling_jobs(self, - request: job_service.ListDataLabelingJobsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListDataLabelingJobsPager: - r"""Lists DataLabelingJobs in a Location. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsRequest): - The request object. Request message for - [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. - parent (str): - Required. The parent of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListDataLabelingJobsPager: - Response message for - [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.ListDataLabelingJobsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.ListDataLabelingJobsRequest): - request = job_service.ListDataLabelingJobsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_data_labeling_jobs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. 
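# The routing header built below is appended after any caller-supplied
# metadata; a sketch of passing an extra entry (the header key and value
# are purely illustrative, not defined anywhere in this patch):
pager = client.list_data_labeling_jobs(
    parent="projects/my-project/locations/us-central1",
    metadata=(("x-example-header", "example-value"),),
)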
- response = pagers.ListDataLabelingJobsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_data_labeling_job(self, - request: job_service.DeleteDataLabelingJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a DataLabelingJob. - - Args: - request (google.cloud.aiplatform_v1beta1.types.DeleteDataLabelingJobRequest): - The request object. Request message for - [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.DeleteDataLabelingJob]. - name (str): - Required. The name of the DataLabelingJob to be deleted. - Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.DeleteDataLabelingJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.DeleteDataLabelingJobRequest): - request = job_service.DeleteDataLabelingJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_data_labeling_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
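# Deletion returns a long-running operation wrapped by from_gapic above; a
# minimal sketch of blocking on it (the resource name is a placeholder, and
# per the wiring above the result type is google.protobuf.empty_pb2.Empty):
operation = client.delete_data_labeling_job(
    name="projects/my-project/locations/us-central1/dataLabelingJobs/789",
)
operation.result()  # blocks until the server-side deletion completes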
- return response - - def cancel_data_labeling_job(self, - request: job_service.CancelDataLabelingJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a DataLabelingJob. Success of cancellation is - not guaranteed. - - Args: - request (google.cloud.aiplatform_v1beta1.types.CancelDataLabelingJobRequest): - The request object. Request message for - [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CancelDataLabelingJob]. - name (str): - Required. The name of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CancelDataLabelingJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.CancelDataLabelingJobRequest): - request = job_service.CancelDataLabelingJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.cancel_data_labeling_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def create_hyperparameter_tuning_job(self, - request: job_service.CreateHyperparameterTuningJobRequest = None, - *, - parent: str = None, - hyperparameter_tuning_job: gca_hyperparameter_tuning_job.HyperparameterTuningJob = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_hyperparameter_tuning_job.HyperparameterTuningJob: - r"""Creates a HyperparameterTuningJob - - Args: - request (google.cloud.aiplatform_v1beta1.types.CreateHyperparameterTuningJobRequest): - The request object. Request message for - [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CreateHyperparameterTuningJob]. - parent (str): - Required. The resource name of the Location to create - the HyperparameterTuningJob in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- hyperparameter_tuning_job (google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob): - Required. The HyperparameterTuningJob - to create. - - This corresponds to the ``hyperparameter_tuning_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob: - Represents a HyperparameterTuningJob. - A HyperparameterTuningJob has a Study - specification and multiple CustomJobs - with identical CustomJob specification. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, hyperparameter_tuning_job]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CreateHyperparameterTuningJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.CreateHyperparameterTuningJobRequest): - request = job_service.CreateHyperparameterTuningJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if hyperparameter_tuning_job is not None: - request.hyperparameter_tuning_job = hyperparameter_tuning_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_hyperparameter_tuning_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_hyperparameter_tuning_job(self, - request: job_service.GetHyperparameterTuningJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> hyperparameter_tuning_job.HyperparameterTuningJob: - r"""Gets a HyperparameterTuningJob - - Args: - request (google.cloud.aiplatform_v1beta1.types.GetHyperparameterTuningJobRequest): - The request object. Request message for - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob]. - name (str): - Required. The name of the HyperparameterTuningJob - resource. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob: - Represents a HyperparameterTuningJob. - A HyperparameterTuningJob has a Study - specification and multiple CustomJobs - with identical CustomJob specification. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.GetHyperparameterTuningJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.GetHyperparameterTuningJobRequest): - request = job_service.GetHyperparameterTuningJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_hyperparameter_tuning_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_hyperparameter_tuning_jobs(self, - request: job_service.ListHyperparameterTuningJobsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListHyperparameterTuningJobsPager: - r"""Lists HyperparameterTuningJobs in a Location. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsRequest): - The request object. Request message for - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs]. - parent (str): - Required. The resource name of the Location to list the - HyperparameterTuningJobs from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListHyperparameterTuningJobsPager: - Response message for - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
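# retry and timeout default to gapic_v1.method.DEFAULT; a sketch of
# overriding both for one call, reusing the `retries` alias this module
# imports from google.api_core (predicate and timeout are illustrative):
from google.api_core import exceptions
pager = client.list_hyperparameter_tuning_jobs(
    parent="projects/my-project/locations/us-central1",
    retry=retries.Retry(predicate=retries.if_exception_type(exceptions.ServiceUnavailable)),
    timeout=30.0,
)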
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.ListHyperparameterTuningJobsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.ListHyperparameterTuningJobsRequest): - request = job_service.ListHyperparameterTuningJobsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_hyperparameter_tuning_jobs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListHyperparameterTuningJobsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_hyperparameter_tuning_job(self, - request: job_service.DeleteHyperparameterTuningJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a HyperparameterTuningJob. - - Args: - request (google.cloud.aiplatform_v1beta1.types.DeleteHyperparameterTuningJobRequest): - The request object. Request message for - [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.DeleteHyperparameterTuningJob]. - name (str): - Required. The name of the HyperparameterTuningJob - resource to be deleted. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.DeleteHyperparameterTuningJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.DeleteHyperparameterTuningJobRequest): - request = job_service.DeleteHyperparameterTuningJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_hyperparameter_tuning_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def cancel_hyperparameter_tuning_job(self, - request: job_service.CancelHyperparameterTuningJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a HyperparameterTuningJob. Starts asynchronous - cancellation on the HyperparameterTuningJob. The server makes a - best effort to cancel the job, but success is not guaranteed. - Clients can use - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On successful - cancellation, the HyperparameterTuningJob is not deleted; - instead it becomes a job with a - [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state] - is set to ``CANCELLED``. - - Args: - request (google.cloud.aiplatform_v1beta1.types.CancelHyperparameterTuningJobRequest): - The request object. Request message for - [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CancelHyperparameterTuningJob]. - name (str): - Required. The name of the HyperparameterTuningJob to - cancel. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. 
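# Cancellation is asynchronous and best-effort, as the docstring above
# notes; a sketch of requesting it and re-checking state afterwards (the
# resource name is a placeholder, and JOB_STATE_CANCELLED is assumed from
# this package's JobState enum):
name = "projects/my-project/locations/us-central1/hyperparameterTuningJobs/42"
client.cancel_hyperparameter_tuning_job(name=name)
job = client.get_hyperparameter_tuning_job(name=name)
# job.state becomes JOB_STATE_CANCELLED once cancellation takes effect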
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CancelHyperparameterTuningJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.CancelHyperparameterTuningJobRequest): - request = job_service.CancelHyperparameterTuningJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.cancel_hyperparameter_tuning_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def create_batch_prediction_job(self, - request: job_service.CreateBatchPredictionJobRequest = None, - *, - parent: str = None, - batch_prediction_job: gca_batch_prediction_job.BatchPredictionJob = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_batch_prediction_job.BatchPredictionJob: - r"""Creates a BatchPredictionJob. A BatchPredictionJob - once created will right away be attempted to start. - - Args: - request (google.cloud.aiplatform_v1beta1.types.CreateBatchPredictionJobRequest): - The request object. Request message for - [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CreateBatchPredictionJob]. - parent (str): - Required. The resource name of the Location to create - the BatchPredictionJob in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - batch_prediction_job (google.cloud.aiplatform_v1beta1.types.BatchPredictionJob): - Required. The BatchPredictionJob to - create. - - This corresponds to the ``batch_prediction_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.BatchPredictionJob: - A job that uses a [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to produce predictions - on multiple [input - instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. - If predictions for significant portion of the - instances fail, the job may finish without attempting - predictions for all remaining instances. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, batch_prediction_job]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CreateBatchPredictionJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.CreateBatchPredictionJobRequest): - request = job_service.CreateBatchPredictionJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if batch_prediction_job is not None: - request.batch_prediction_job = batch_prediction_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_batch_prediction_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_batch_prediction_job(self, - request: job_service.GetBatchPredictionJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> batch_prediction_job.BatchPredictionJob: - r"""Gets a BatchPredictionJob - - Args: - request (google.cloud.aiplatform_v1beta1.types.GetBatchPredictionJobRequest): - The request object. Request message for - [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob]. - name (str): - Required. The name of the BatchPredictionJob resource. - Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.BatchPredictionJob: - A job that uses a [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to produce predictions - on multiple [input - instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. - If predictions for significant portion of the - instances fail, the job may finish without attempting - predictions for all remaining instances. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.GetBatchPredictionJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
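# A hedged end-to-end sketch for the BatchPredictionJob methods above:
# create the job, then re-fetch it by its server-assigned resource name
# (my_batch_prediction_job is an assumed, pre-built message):
created = client.create_batch_prediction_job(
    parent="projects/my-project/locations/us-central1",
    batch_prediction_job=my_batch_prediction_job,
)
fetched = client.get_batch_prediction_job(name=created.name)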
- if not isinstance(request, job_service.GetBatchPredictionJobRequest): - request = job_service.GetBatchPredictionJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_batch_prediction_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_batch_prediction_jobs(self, - request: job_service.ListBatchPredictionJobsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListBatchPredictionJobsPager: - r"""Lists BatchPredictionJobs in a Location. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsRequest): - The request object. Request message for - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs]. - parent (str): - Required. The resource name of the Location to list the - BatchPredictionJobs from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListBatchPredictionJobsPager: - Response message for - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.ListBatchPredictionJobsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.ListBatchPredictionJobsRequest): - request = job_service.ListBatchPredictionJobsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_batch_prediction_jobs] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListBatchPredictionJobsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_batch_prediction_job(self, - request: job_service.DeleteBatchPredictionJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a BatchPredictionJob. Can only be called on - jobs that already finished. - - Args: - request (google.cloud.aiplatform_v1beta1.types.DeleteBatchPredictionJobRequest): - The request object. Request message for - [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.DeleteBatchPredictionJob]. - name (str): - Required. The name of the BatchPredictionJob resource to - be deleted. Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.DeleteBatchPredictionJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.DeleteBatchPredictionJobRequest): - request = job_service.DeleteBatchPredictionJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_batch_prediction_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
-        response = rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Wrap the response in an operation future.
-        response = gac_operation.from_gapic(
-            response,
-            self._transport.operations_client,
-            empty_pb2.Empty,
-            metadata_type=gca_operation.DeleteOperationMetadata,
-        )
-
-        # Done; return the response.
-        return response
-
-    def cancel_batch_prediction_job(self,
-            request: job_service.CancelBatchPredictionJobRequest = None,
-            *,
-            name: str = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> None:
-        r"""Cancels a BatchPredictionJob.
-
-        Starts asynchronous cancellation on the BatchPredictionJob. The
-        server makes the best effort to cancel the job, but success is
-        not guaranteed. Clients can use
-        [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob]
-        or other methods to check whether the cancellation succeeded or
-        whether the job completed despite cancellation. On a successful
-        cancellation, the BatchPredictionJob is not deleted; instead its
-        [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state]
-        is set to ``CANCELLED``. Any files already outputted by the job
-        are not deleted.
-
-        Args:
-            request (google.cloud.aiplatform_v1beta1.types.CancelBatchPredictionJobRequest):
-                The request object. Request message for
-                [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CancelBatchPredictionJob].
-            name (str):
-                Required. The name of the BatchPredictionJob to cancel.
-                Format:
-                ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}``
-
-                This corresponds to the ``name`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([name])
-        if request is not None and has_flattened_params:
-            raise ValueError('If the `request` argument is set, then none of '
-                             'the individual field arguments should be set.')
-
-        # Minor optimization to avoid making a copy if the user passes
-        # in a job_service.CancelBatchPredictionJobRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, job_service.CancelBatchPredictionJobRequest):
-            request = job_service.CancelBatchPredictionJobRequest(request)
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if name is not None:
-            request.name = name
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.cancel_batch_prediction_job]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("name", request.name),
-            )),
-        )
-
-        # Send the request.
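
A companion sketch for ``cancel_batch_prediction_job``: cancellation is best-effort and the call returns ``None``, so callers poll the job to observe the outcome (IDs hypothetical)::

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.JobServiceClient()
    job_name = (
        "projects/my-project/locations/us-central1"
        "/batchPredictionJobs/1234567890"
    )

    # Starts asynchronous, best-effort cancellation; no return value.
    client.cancel_batch_prediction_job(name=job_name)

    # Re-fetch the job to see whether it reached JOB_STATE_CANCELLED or
    # completed despite the cancellation attempt.
    job = client.get_batch_prediction_job(name=job_name)
    print(job.state)
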
- rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def create_model_deployment_monitoring_job(self, - request: job_service.CreateModelDeploymentMonitoringJobRequest = None, - *, - parent: str = None, - model_deployment_monitoring_job: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob: - r"""Creates a ModelDeploymentMonitoringJob. It will run - periodically on a configured interval. - - Args: - request (google.cloud.aiplatform_v1beta1.types.CreateModelDeploymentMonitoringJobRequest): - The request object. Request message for - [JobService.CreateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.CreateModelDeploymentMonitoringJob]. - parent (str): - Required. The parent of the - ModelDeploymentMonitoringJob. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - model_deployment_monitoring_job (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob): - Required. The - ModelDeploymentMonitoringJob to create - - This corresponds to the ``model_deployment_monitoring_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob: - Represents a job that runs - periodically to monitor the deployed - models in an endpoint. It will analyze - the logged training & prediction data to - detect any abnormal behaviors. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, model_deployment_monitoring_job]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CreateModelDeploymentMonitoringJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.CreateModelDeploymentMonitoringJobRequest): - request = job_service.CreateModelDeploymentMonitoringJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if model_deployment_monitoring_job is not None: - request.model_deployment_monitoring_job = model_deployment_monitoring_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_model_deployment_monitoring_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
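
A sketch of ``create_model_deployment_monitoring_job``; only two fields of the job message are shown, while a production job would also need objective, schedule, and sampling configuration (all names hypothetical)::

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.JobServiceClient()

    # Deliberately under-specified: a real job also requires monitoring
    # objective configs, a schedule config, and a logging sampling strategy.
    job = aiplatform_v1beta1.ModelDeploymentMonitoringJob(
        display_name="my-monitoring-job",
        endpoint="projects/my-project/locations/us-central1/endpoints/987",
    )

    created = client.create_model_deployment_monitoring_job(
        parent="projects/my-project/locations/us-central1",
        model_deployment_monitoring_job=job,
    )
    print(created.name)  # Server-assigned resource name.
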
-        response = rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Done; return the response.
-        return response
-
-    def search_model_deployment_monitoring_stats_anomalies(self,
-            request: job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest = None,
-            *,
-            model_deployment_monitoring_job: str = None,
-            deployed_model_id: str = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager:
-        r"""Searches Model Monitoring Statistics generated within
-        a given time window.
-
-        Args:
-            request (google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest):
-                The request object. Request message for
-                [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies].
-            model_deployment_monitoring_job (str):
-                Required. ModelDeploymentMonitoring Job resource name.
-                Format:
-                ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}``
-
-                This corresponds to the ``model_deployment_monitoring_job`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            deployed_model_id (str):
-                Required. The DeployedModel ID of the
-                [google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringObjectiveConfig.deployed_model_id].
-
-                This corresponds to the ``deployed_model_id`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.cloud.aiplatform_v1beta1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager:
-                Response message for
-                [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies].
-
-                Iterating over this object will yield results and
-                resolve additional pages automatically.
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([model_deployment_monitoring_job, deployed_model_id])
-        if request is not None and has_flattened_params:
-            raise ValueError('If the `request` argument is set, then none of '
-                             'the individual field arguments should be set.')
-
-        # Minor optimization to avoid making a copy if the user passes
-        # in a job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest):
-            request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(request)
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if model_deployment_monitoring_job is not None:
-            request.model_deployment_monitoring_job = model_deployment_monitoring_job
-        if deployed_model_id is not None:
-            request.deployed_model_id = deployed_model_id
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
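
A sketch of the paged ``search_model_deployment_monitoring_stats_anomalies`` call; note the server may also require request fields (such as the monitoring objectives) that the flattened arguments below do not cover (IDs hypothetical)::

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.JobServiceClient()

    pager = client.search_model_deployment_monitoring_stats_anomalies(
        model_deployment_monitoring_job=(
            "projects/my-project/locations/us-central1"
            "/modelDeploymentMonitoringJobs/555"
        ),
        deployed_model_id="42",
    )

    # Iteration fetches additional pages transparently.
    for monitoring_stats in pager:
        print(monitoring_stats)
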
- rpc = self._transport._wrapped_methods[self._transport.search_model_deployment_monitoring_stats_anomalies] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("model_deployment_monitoring_job", request.model_deployment_monitoring_job), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_model_deployment_monitoring_job(self, - request: job_service.GetModelDeploymentMonitoringJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model_deployment_monitoring_job.ModelDeploymentMonitoringJob: - r"""Gets a ModelDeploymentMonitoringJob. - - Args: - request (google.cloud.aiplatform_v1beta1.types.GetModelDeploymentMonitoringJobRequest): - The request object. Request message for - [JobService.GetModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.GetModelDeploymentMonitoringJob]. - name (str): - Required. The resource name of the - ModelDeploymentMonitoringJob. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob: - Represents a job that runs - periodically to monitor the deployed - models in an endpoint. It will analyze - the logged training & prediction data to - detect any abnormal behaviors. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.GetModelDeploymentMonitoringJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.GetModelDeploymentMonitoringJobRequest): - request = job_service.GetModelDeploymentMonitoringJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_model_deployment_monitoring_job] - - # Certain fields should be provided within the metadata header; - # add these here. 
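
The simplest of the group, ``get_model_deployment_monitoring_job``, sketched with a hypothetical resource name::

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.JobServiceClient()

    job = client.get_model_deployment_monitoring_job(
        name=(
            "projects/my-project/locations/us-central1"
            "/modelDeploymentMonitoringJobs/555"
        ),
    )
    print(job.display_name, job.state)
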
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_model_deployment_monitoring_jobs(self, - request: job_service.ListModelDeploymentMonitoringJobsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelDeploymentMonitoringJobsPager: - r"""Lists ModelDeploymentMonitoringJobs in a Location. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest): - The request object. Request message for - [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. - parent (str): - Required. The parent of the - ModelDeploymentMonitoringJob. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListModelDeploymentMonitoringJobsPager: - Response message for - [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.ListModelDeploymentMonitoringJobsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.ListModelDeploymentMonitoringJobsRequest): - request = job_service.ListModelDeploymentMonitoringJobsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_model_deployment_monitoring_jobs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListModelDeploymentMonitoringJobsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
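
A sketch of ``list_model_deployment_monitoring_jobs``, showing both item-level iteration and explicit page-level iteration over a fresh pager (parent hypothetical)::

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.JobServiceClient()
    parent = "projects/my-project/locations/us-central1"

    # Item-level: the pager resolves additional pages automatically.
    for job in client.list_model_deployment_monitoring_jobs(parent=parent):
        print(job.name)

    # Page-level: each page is a raw ListModelDeploymentMonitoringJobsResponse.
    pager = client.list_model_deployment_monitoring_jobs(parent=parent)
    for page in pager.pages:
        print(len(page.model_deployment_monitoring_jobs))
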
- return response - - def update_model_deployment_monitoring_job(self, - request: job_service.UpdateModelDeploymentMonitoringJobRequest = None, - *, - model_deployment_monitoring_job: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Updates a ModelDeploymentMonitoringJob. - - Args: - request (google.cloud.aiplatform_v1beta1.types.UpdateModelDeploymentMonitoringJobRequest): - The request object. Request message for - [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob]. - model_deployment_monitoring_job (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob): - Required. The model monitoring - configuration which replaces the - resource on the server. - - This corresponds to the ``model_deployment_monitoring_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to - the resource. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob` Represents a job that runs periodically to monitor the deployed models in an - endpoint. It will analyze the logged training & - prediction data to detect any abnormal behaviors. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([model_deployment_monitoring_job, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.UpdateModelDeploymentMonitoringJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.UpdateModelDeploymentMonitoringJobRequest): - request = job_service.UpdateModelDeploymentMonitoringJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if model_deployment_monitoring_job is not None: - request.model_deployment_monitoring_job = model_deployment_monitoring_job - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_model_deployment_monitoring_job] - - # Certain fields should be provided within the metadata header; - # add these here. 
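
A sketch of ``update_model_deployment_monitoring_job``; the ``FieldMask`` restricts which fields the server applies, and the call returns a long-running operation (names hypothetical)::

    from google.cloud import aiplatform_v1beta1
    from google.protobuf import field_mask_pb2

    client = aiplatform_v1beta1.JobServiceClient()

    job = aiplatform_v1beta1.ModelDeploymentMonitoringJob(
        name=(
            "projects/my-project/locations/us-central1"
            "/modelDeploymentMonitoringJobs/555"
        ),
        display_name="renamed-monitoring-job",
    )

    # Only the masked fields are updated; everything else is left untouched.
    operation = client.update_model_deployment_monitoring_job(
        model_deployment_monitoring_job=job,
        update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
    )
    updated = operation.result()  # Resolves to the updated job message.
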
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("model_deployment_monitoring_job.name", request.model_deployment_monitoring_job.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, - metadata_type=job_service.UpdateModelDeploymentMonitoringJobOperationMetadata, - ) - - # Done; return the response. - return response - - def delete_model_deployment_monitoring_job(self, - request: job_service.DeleteModelDeploymentMonitoringJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a ModelDeploymentMonitoringJob. - - Args: - request (google.cloud.aiplatform_v1beta1.types.DeleteModelDeploymentMonitoringJobRequest): - The request object. Request message for - [JobService.DeleteModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.DeleteModelDeploymentMonitoringJob]. - name (str): - Required. The resource name of the model monitoring job - to delete. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.DeleteModelDeploymentMonitoringJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.DeleteModelDeploymentMonitoringJobRequest): - request = job_service.DeleteModelDeploymentMonitoringJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
-        rpc = self._transport._wrapped_methods[self._transport.delete_model_deployment_monitoring_job]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("name", request.name),
-            )),
-        )
-
-        # Send the request.
-        response = rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Wrap the response in an operation future.
-        response = gac_operation.from_gapic(
-            response,
-            self._transport.operations_client,
-            empty_pb2.Empty,
-            metadata_type=gca_operation.DeleteOperationMetadata,
-        )
-
-        # Done; return the response.
-        return response
-
-    def pause_model_deployment_monitoring_job(self,
-            request: job_service.PauseModelDeploymentMonitoringJobRequest = None,
-            *,
-            name: str = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> None:
-        r"""Pauses a ModelDeploymentMonitoringJob. If the job is running,
-        the server makes a best effort to cancel it and sets
-        [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state]
-        to ``PAUSED``.
-
-        Args:
-            request (google.cloud.aiplatform_v1beta1.types.PauseModelDeploymentMonitoringJobRequest):
-                The request object. Request message for
-                [JobService.PauseModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.PauseModelDeploymentMonitoringJob].
-            name (str):
-                Required. The resource name of the
-                ModelDeploymentMonitoringJob to pause. Format:
-                ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}``
-
-                This corresponds to the ``name`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([name])
-        if request is not None and has_flattened_params:
-            raise ValueError('If the `request` argument is set, then none of '
-                             'the individual field arguments should be set.')
-
-        # Minor optimization to avoid making a copy if the user passes
-        # in a job_service.PauseModelDeploymentMonitoringJobRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, job_service.PauseModelDeploymentMonitoringJobRequest):
-            request = job_service.PauseModelDeploymentMonitoringJobRequest(request)
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if name is not None:
-            request.name = name
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.pause_model_deployment_monitoring_job]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("name", request.name),
-            )),
-        )
-
-        # Send the request.
-        rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-    def resume_model_deployment_monitoring_job(self,
-            request: job_service.ResumeModelDeploymentMonitoringJobRequest = None,
-            *,
-            name: str = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> None:
-        r"""Resumes a paused ModelDeploymentMonitoringJob. It
-        will resume running at the next scheduled time. A deleted
-        ModelDeploymentMonitoringJob can't be resumed.
-
-        Args:
-            request (google.cloud.aiplatform_v1beta1.types.ResumeModelDeploymentMonitoringJobRequest):
-                The request object. Request message for
-                [JobService.ResumeModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.ResumeModelDeploymentMonitoringJob].
-            name (str):
-                Required. The resource name of the
-                ModelDeploymentMonitoringJob to resume. Format:
-                ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}``
-
-                This corresponds to the ``name`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([name])
-        if request is not None and has_flattened_params:
-            raise ValueError('If the `request` argument is set, then none of '
-                             'the individual field arguments should be set.')
-
-        # Minor optimization to avoid making a copy if the user passes
-        # in a job_service.ResumeModelDeploymentMonitoringJobRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, job_service.ResumeModelDeploymentMonitoringJobRequest):
-            request = job_service.ResumeModelDeploymentMonitoringJobRequest(request)
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if name is not None:
-            request.name = name
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.resume_model_deployment_monitoring_job]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("name", request.name),
-            )),
-        )
-
-        # Send the request.
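
``pause_model_deployment_monitoring_job`` and ``resume_model_deployment_monitoring_job`` are symmetric and both return ``None``; a combined sketch (job name hypothetical)::

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.JobServiceClient()
    name = (
        "projects/my-project/locations/us-central1"
        "/modelDeploymentMonitoringJobs/555"
    )

    client.pause_model_deployment_monitoring_job(name=name)
    # ... later: the job picks up again at its next scheduled run.
    client.resume_model_deployment_monitoring_job(name=name)

    # State changes are observable only via a follow-up read.
    job = client.get_model_deployment_monitoring_job(name=name)
    print(job.state)
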
- rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "JobServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py deleted file mode 100644 index 2b97cbd84a..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py +++ /dev/null @@ -1,756 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.cloud.aiplatform_v1beta1.types import batch_prediction_job -from google.cloud.aiplatform_v1beta1.types import custom_job -from google.cloud.aiplatform_v1beta1.types import data_labeling_job -from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job -from google.cloud.aiplatform_v1beta1.types import job_service -from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job -from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job - - -class ListCustomJobsPager: - """A pager for iterating through ``list_custom_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListCustomJobsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``custom_jobs`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListCustomJobs`` requests and continue to iterate - through the ``custom_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListCustomJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., job_service.ListCustomJobsResponse], - request: job_service.ListCustomJobsRequest, - response: job_service.ListCustomJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListCustomJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListCustomJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = job_service.ListCustomJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[job_service.ListCustomJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[custom_job.CustomJob]: - for page in self.pages: - yield from page.custom_jobs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListCustomJobsAsyncPager: - """A pager for iterating through ``list_custom_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListCustomJobsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``custom_jobs`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListCustomJobs`` requests and continue to iterate - through the ``custom_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListCustomJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[job_service.ListCustomJobsResponse]], - request: job_service.ListCustomJobsRequest, - response: job_service.ListCustomJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListCustomJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListCustomJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = job_service.ListCustomJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[job_service.ListCustomJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[custom_job.CustomJob]: - async def async_generator(): - async for page in self.pages: - for response in page.custom_jobs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListDataLabelingJobsPager: - """A pager for iterating through ``list_data_labeling_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``data_labeling_jobs`` field. 
- - If there are more pages, the ``__iter__`` method will make additional - ``ListDataLabelingJobs`` requests and continue to iterate - through the ``data_labeling_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., job_service.ListDataLabelingJobsResponse], - request: job_service.ListDataLabelingJobsRequest, - response: job_service.ListDataLabelingJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = job_service.ListDataLabelingJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[job_service.ListDataLabelingJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[data_labeling_job.DataLabelingJob]: - for page in self.pages: - yield from page.data_labeling_jobs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListDataLabelingJobsAsyncPager: - """A pager for iterating through ``list_data_labeling_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``data_labeling_jobs`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListDataLabelingJobs`` requests and continue to iterate - through the ``data_labeling_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[job_service.ListDataLabelingJobsResponse]], - request: job_service.ListDataLabelingJobsRequest, - response: job_service.ListDataLabelingJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = job_service.ListDataLabelingJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[job_service.ListDataLabelingJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[data_labeling_job.DataLabelingJob]: - async def async_generator(): - async for page in self.pages: - for response in page.data_labeling_jobs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListHyperparameterTuningJobsPager: - """A pager for iterating through ``list_hyperparameter_tuning_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``hyperparameter_tuning_jobs`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListHyperparameterTuningJobs`` requests and continue to iterate - through the ``hyperparameter_tuning_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., job_service.ListHyperparameterTuningJobsResponse], - request: job_service.ListHyperparameterTuningJobsRequest, - response: job_service.ListHyperparameterTuningJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = job_service.ListHyperparameterTuningJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[job_service.ListHyperparameterTuningJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[hyperparameter_tuning_job.HyperparameterTuningJob]: - for page in self.pages: - yield from page.hyperparameter_tuning_jobs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListHyperparameterTuningJobsAsyncPager: - """A pager for iterating through ``list_hyperparameter_tuning_jobs`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``hyperparameter_tuning_jobs`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListHyperparameterTuningJobs`` requests and continue to iterate - through the ``hyperparameter_tuning_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[job_service.ListHyperparameterTuningJobsResponse]], - request: job_service.ListHyperparameterTuningJobsRequest, - response: job_service.ListHyperparameterTuningJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = job_service.ListHyperparameterTuningJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[job_service.ListHyperparameterTuningJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[hyperparameter_tuning_job.HyperparameterTuningJob]: - async def async_generator(): - async for page in self.pages: - for response in page.hyperparameter_tuning_jobs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListBatchPredictionJobsPager: - """A pager for iterating through ``list_batch_prediction_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``batch_prediction_jobs`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListBatchPredictionJobs`` requests and continue to iterate - through the ``batch_prediction_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., job_service.ListBatchPredictionJobsResponse], - request: job_service.ListBatchPredictionJobsRequest, - response: job_service.ListBatchPredictionJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. 
- - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = job_service.ListBatchPredictionJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[job_service.ListBatchPredictionJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[batch_prediction_job.BatchPredictionJob]: - for page in self.pages: - yield from page.batch_prediction_jobs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListBatchPredictionJobsAsyncPager: - """A pager for iterating through ``list_batch_prediction_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``batch_prediction_jobs`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListBatchPredictionJobs`` requests and continue to iterate - through the ``batch_prediction_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[job_service.ListBatchPredictionJobsResponse]], - request: job_service.ListBatchPredictionJobsRequest, - response: job_service.ListBatchPredictionJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = job_service.ListBatchPredictionJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[job_service.ListBatchPredictionJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[batch_prediction_job.BatchPredictionJob]: - async def async_generator(): - async for page in self.pages: - for response in page.batch_prediction_jobs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class SearchModelDeploymentMonitoringStatsAnomaliesPager: - """A pager for iterating through ``search_model_deployment_monitoring_stats_anomalies`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``monitoring_stats`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``SearchModelDeploymentMonitoringStatsAnomalies`` requests and continue to iterate - through the ``monitoring_stats`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse], - request: job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest, - response: job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies]: - for page in self.pages: - yield from page.monitoring_stats - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager: - """A pager for iterating through ``search_model_deployment_monitoring_stats_anomalies`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``monitoring_stats`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``SearchModelDeploymentMonitoringStatsAnomalies`` requests and continue to iterate - through the ``monitoring_stats`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]], - request: job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest, - response: job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies]: - async def async_generator(): - async for page in self.pages: - for response in page.monitoring_stats: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelDeploymentMonitoringJobsPager: - """A pager for iterating through ``list_model_deployment_monitoring_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``model_deployment_monitoring_jobs`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListModelDeploymentMonitoringJobs`` requests and continue to iterate - through the ``model_deployment_monitoring_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., job_service.ListModelDeploymentMonitoringJobsResponse], - request: job_service.ListModelDeploymentMonitoringJobsRequest, - response: job_service.ListModelDeploymentMonitoringJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = job_service.ListModelDeploymentMonitoringJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[job_service.ListModelDeploymentMonitoringJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: - for page in self.pages: - yield from page.model_deployment_monitoring_jobs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelDeploymentMonitoringJobsAsyncPager: - """A pager for iterating through ``list_model_deployment_monitoring_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``model_deployment_monitoring_jobs`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListModelDeploymentMonitoringJobs`` requests and continue to iterate - through the ``model_deployment_monitoring_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[job_service.ListModelDeploymentMonitoringJobsResponse]], - request: job_service.ListModelDeploymentMonitoringJobsRequest, - response: job_service.ListModelDeploymentMonitoringJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = job_service.ListModelDeploymentMonitoringJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[job_service.ListModelDeploymentMonitoringJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: - async def async_generator(): - async for page in self.pages: - for response in page.model_deployment_monitoring_jobs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/__init__.py deleted file mode 100644 index 13c5f7ade5..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import JobServiceTransport -from .grpc import JobServiceGrpcTransport -from .grpc_asyncio import JobServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]] -_transport_registry['grpc'] = JobServiceGrpcTransport -_transport_registry['grpc_asyncio'] = JobServiceGrpcAsyncIOTransport - -__all__ = ( - 'JobServiceTransport', - 'JobServiceGrpcTransport', - 'JobServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py deleted file mode 100644 index b611bb10d8..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py +++ /dev/null @@ -1,564 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import batch_prediction_job -from google.cloud.aiplatform_v1beta1.types import batch_prediction_job as gca_batch_prediction_job -from google.cloud.aiplatform_v1beta1.types import custom_job -from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job -from google.cloud.aiplatform_v1beta1.types import data_labeling_job -from google.cloud.aiplatform_v1beta1.types import data_labeling_job as gca_data_labeling_job -from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job -from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job -from google.cloud.aiplatform_v1beta1.types import job_service -from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job -from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class JobServiceTransport(abc.ABC): - """Abstract transport class for JobService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials is service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # TODO(busunkim): This method is in the base transport - # to avoid duplicating code across the transport classes. These functions - # should be deleted once the minimum required versions of google-auth is increased. - - # TODO: Remove this function once google-auth >= 1.25.0 is required - @classmethod - def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: - """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" - - scopes_kwargs = {} - - if _GOOGLE_AUTH_VERSION and ( - packaging.version.parse(_GOOGLE_AUTH_VERSION) - >= packaging.version.parse("1.25.0") - ): - scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} - else: - scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} - - return scopes_kwargs - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
- self._wrapped_methods = { - self.create_custom_job: gapic_v1.method.wrap_method( - self.create_custom_job, - default_timeout=5.0, - client_info=client_info, - ), - self.get_custom_job: gapic_v1.method.wrap_method( - self.get_custom_job, - default_timeout=5.0, - client_info=client_info, - ), - self.list_custom_jobs: gapic_v1.method.wrap_method( - self.list_custom_jobs, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_custom_job: gapic_v1.method.wrap_method( - self.delete_custom_job, - default_timeout=5.0, - client_info=client_info, - ), - self.cancel_custom_job: gapic_v1.method.wrap_method( - self.cancel_custom_job, - default_timeout=5.0, - client_info=client_info, - ), - self.create_data_labeling_job: gapic_v1.method.wrap_method( - self.create_data_labeling_job, - default_timeout=5.0, - client_info=client_info, - ), - self.get_data_labeling_job: gapic_v1.method.wrap_method( - self.get_data_labeling_job, - default_timeout=5.0, - client_info=client_info, - ), - self.list_data_labeling_jobs: gapic_v1.method.wrap_method( - self.list_data_labeling_jobs, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_data_labeling_job: gapic_v1.method.wrap_method( - self.delete_data_labeling_job, - default_timeout=5.0, - client_info=client_info, - ), - self.cancel_data_labeling_job: gapic_v1.method.wrap_method( - self.cancel_data_labeling_job, - default_timeout=5.0, - client_info=client_info, - ), - self.create_hyperparameter_tuning_job: gapic_v1.method.wrap_method( - self.create_hyperparameter_tuning_job, - default_timeout=5.0, - client_info=client_info, - ), - self.get_hyperparameter_tuning_job: gapic_v1.method.wrap_method( - self.get_hyperparameter_tuning_job, - default_timeout=5.0, - client_info=client_info, - ), - self.list_hyperparameter_tuning_jobs: gapic_v1.method.wrap_method( - self.list_hyperparameter_tuning_jobs, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_hyperparameter_tuning_job: gapic_v1.method.wrap_method( - self.delete_hyperparameter_tuning_job, - default_timeout=5.0, - client_info=client_info, - ), - self.cancel_hyperparameter_tuning_job: gapic_v1.method.wrap_method( - self.cancel_hyperparameter_tuning_job, - default_timeout=5.0, - client_info=client_info, - ), - self.create_batch_prediction_job: gapic_v1.method.wrap_method( - self.create_batch_prediction_job, - default_timeout=5.0, - client_info=client_info, - ), - self.get_batch_prediction_job: gapic_v1.method.wrap_method( - self.get_batch_prediction_job, - default_timeout=5.0, - client_info=client_info, - ), - self.list_batch_prediction_jobs: gapic_v1.method.wrap_method( - self.list_batch_prediction_jobs, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_batch_prediction_job: gapic_v1.method.wrap_method( - self.delete_batch_prediction_job, - default_timeout=5.0, - client_info=client_info, - ), - self.cancel_batch_prediction_job: gapic_v1.method.wrap_method( - self.cancel_batch_prediction_job, - default_timeout=5.0, - client_info=client_info, - ), - self.create_model_deployment_monitoring_job: gapic_v1.method.wrap_method( - self.create_model_deployment_monitoring_job, - default_timeout=60.0, - client_info=client_info, - ), - self.search_model_deployment_monitoring_stats_anomalies: gapic_v1.method.wrap_method( - self.search_model_deployment_monitoring_stats_anomalies, - default_timeout=5.0, - client_info=client_info, - ), - self.get_model_deployment_monitoring_job: gapic_v1.method.wrap_method( - self.get_model_deployment_monitoring_job, - 
default_timeout=5.0, - client_info=client_info, - ), - self.list_model_deployment_monitoring_jobs: gapic_v1.method.wrap_method( - self.list_model_deployment_monitoring_jobs, - default_timeout=5.0, - client_info=client_info, - ), - self.update_model_deployment_monitoring_job: gapic_v1.method.wrap_method( - self.update_model_deployment_monitoring_job, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_model_deployment_monitoring_job: gapic_v1.method.wrap_method( - self.delete_model_deployment_monitoring_job, - default_timeout=5.0, - client_info=client_info, - ), - self.pause_model_deployment_monitoring_job: gapic_v1.method.wrap_method( - self.pause_model_deployment_monitoring_job, - default_timeout=5.0, - client_info=client_info, - ), - self.resume_model_deployment_monitoring_job: gapic_v1.method.wrap_method( - self.resume_model_deployment_monitoring_job, - default_timeout=5.0, - client_info=client_info, - ), - } - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_custom_job(self) -> Callable[ - [job_service.CreateCustomJobRequest], - Union[ - gca_custom_job.CustomJob, - Awaitable[gca_custom_job.CustomJob] - ]]: - raise NotImplementedError() - - @property - def get_custom_job(self) -> Callable[ - [job_service.GetCustomJobRequest], - Union[ - custom_job.CustomJob, - Awaitable[custom_job.CustomJob] - ]]: - raise NotImplementedError() - - @property - def list_custom_jobs(self) -> Callable[ - [job_service.ListCustomJobsRequest], - Union[ - job_service.ListCustomJobsResponse, - Awaitable[job_service.ListCustomJobsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_custom_job(self) -> Callable[ - [job_service.DeleteCustomJobRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def cancel_custom_job(self) -> Callable[ - [job_service.CancelCustomJobRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def create_data_labeling_job(self) -> Callable[ - [job_service.CreateDataLabelingJobRequest], - Union[ - gca_data_labeling_job.DataLabelingJob, - Awaitable[gca_data_labeling_job.DataLabelingJob] - ]]: - raise NotImplementedError() - - @property - def get_data_labeling_job(self) -> Callable[ - [job_service.GetDataLabelingJobRequest], - Union[ - data_labeling_job.DataLabelingJob, - Awaitable[data_labeling_job.DataLabelingJob] - ]]: - raise NotImplementedError() - - @property - def list_data_labeling_jobs(self) -> Callable[ - [job_service.ListDataLabelingJobsRequest], - Union[ - job_service.ListDataLabelingJobsResponse, - Awaitable[job_service.ListDataLabelingJobsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_data_labeling_job(self) -> Callable[ - [job_service.DeleteDataLabelingJobRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def cancel_data_labeling_job(self) -> Callable[ - [job_service.CancelDataLabelingJobRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def create_hyperparameter_tuning_job(self) -> Callable[ - [job_service.CreateHyperparameterTuningJobRequest], - Union[ - gca_hyperparameter_tuning_job.HyperparameterTuningJob, - 
Awaitable[gca_hyperparameter_tuning_job.HyperparameterTuningJob] - ]]: - raise NotImplementedError() - - @property - def get_hyperparameter_tuning_job(self) -> Callable[ - [job_service.GetHyperparameterTuningJobRequest], - Union[ - hyperparameter_tuning_job.HyperparameterTuningJob, - Awaitable[hyperparameter_tuning_job.HyperparameterTuningJob] - ]]: - raise NotImplementedError() - - @property - def list_hyperparameter_tuning_jobs(self) -> Callable[ - [job_service.ListHyperparameterTuningJobsRequest], - Union[ - job_service.ListHyperparameterTuningJobsResponse, - Awaitable[job_service.ListHyperparameterTuningJobsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_hyperparameter_tuning_job(self) -> Callable[ - [job_service.DeleteHyperparameterTuningJobRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def cancel_hyperparameter_tuning_job(self) -> Callable[ - [job_service.CancelHyperparameterTuningJobRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def create_batch_prediction_job(self) -> Callable[ - [job_service.CreateBatchPredictionJobRequest], - Union[ - gca_batch_prediction_job.BatchPredictionJob, - Awaitable[gca_batch_prediction_job.BatchPredictionJob] - ]]: - raise NotImplementedError() - - @property - def get_batch_prediction_job(self) -> Callable[ - [job_service.GetBatchPredictionJobRequest], - Union[ - batch_prediction_job.BatchPredictionJob, - Awaitable[batch_prediction_job.BatchPredictionJob] - ]]: - raise NotImplementedError() - - @property - def list_batch_prediction_jobs(self) -> Callable[ - [job_service.ListBatchPredictionJobsRequest], - Union[ - job_service.ListBatchPredictionJobsResponse, - Awaitable[job_service.ListBatchPredictionJobsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_batch_prediction_job(self) -> Callable[ - [job_service.DeleteBatchPredictionJobRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def cancel_batch_prediction_job(self) -> Callable[ - [job_service.CancelBatchPredictionJobRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def create_model_deployment_monitoring_job(self) -> Callable[ - [job_service.CreateModelDeploymentMonitoringJobRequest], - Union[ - gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, - Awaitable[gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob] - ]]: - raise NotImplementedError() - - @property - def search_model_deployment_monitoring_stats_anomalies(self) -> Callable[ - [job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest], - Union[ - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse, - Awaitable[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse] - ]]: - raise NotImplementedError() - - @property - def get_model_deployment_monitoring_job(self) -> Callable[ - [job_service.GetModelDeploymentMonitoringJobRequest], - Union[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob, - Awaitable[model_deployment_monitoring_job.ModelDeploymentMonitoringJob] - ]]: - raise NotImplementedError() - - @property - def list_model_deployment_monitoring_jobs(self) -> Callable[ - [job_service.ListModelDeploymentMonitoringJobsRequest], - Union[ - job_service.ListModelDeploymentMonitoringJobsResponse, - 
Awaitable[job_service.ListModelDeploymentMonitoringJobsResponse] - ]]: - raise NotImplementedError() - - @property - def update_model_deployment_monitoring_job(self) -> Callable[ - [job_service.UpdateModelDeploymentMonitoringJobRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def delete_model_deployment_monitoring_job(self) -> Callable[ - [job_service.DeleteModelDeploymentMonitoringJobRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def pause_model_deployment_monitoring_job(self) -> Callable[ - [job_service.PauseModelDeploymentMonitoringJobRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def resume_model_deployment_monitoring_job(self) -> Callable[ - [job_service.ResumeModelDeploymentMonitoringJobRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'JobServiceTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py deleted file mode 100644 index 55a5ebcbb2..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py +++ /dev/null @@ -1,1043 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
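# The JobServiceTransport base class deleted above exposes each RPC as a
# property returning a callable and raises NotImplementedError until a
# concrete transport overrides it. That makes a lightweight test double easy
# to sketch (FakeJobServiceTransport is hypothetical, not part of the
# library; AnonymousCredentials avoids touching real auth):
from google.auth.credentials import AnonymousCredentials

from google.cloud.aiplatform_v1beta1.services.job_service.transports.base import (
    JobServiceTransport,
)
from google.cloud.aiplatform_v1beta1.types import custom_job, job_service


class FakeJobServiceTransport(JobServiceTransport):
    """Hypothetical test double that serves canned responses instead of RPCs."""

    @property
    def get_custom_job(self):
        def _call(request: job_service.GetCustomJobRequest, **kwargs):
            # Echo the requested resource name back in a stub CustomJob.
            return custom_job.CustomJob(name=request.name, display_name="stub")
        return _call


fake = FakeJobServiceTransport(credentials=AnonymousCredentials())
request = job_service.GetCustomJobRequest(name="projects/p/locations/l/customJobs/1")
assert fake.get_custom_job(request).display_name == "stub"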
-#
-import warnings
-from typing import Callable, Dict, Optional, Sequence, Tuple, Union
-
-from google.api_core import grpc_helpers  # type: ignore
-from google.api_core import operations_v1  # type: ignore
-from google.api_core import gapic_v1  # type: ignore
-import google.auth  # type: ignore
-from google.auth import credentials as ga_credentials  # type: ignore
-from google.auth.transport.grpc import SslCredentials  # type: ignore
-
-import grpc  # type: ignore
-
-from google.cloud.aiplatform_v1beta1.types import batch_prediction_job
-from google.cloud.aiplatform_v1beta1.types import batch_prediction_job as gca_batch_prediction_job
-from google.cloud.aiplatform_v1beta1.types import custom_job
-from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job
-from google.cloud.aiplatform_v1beta1.types import data_labeling_job
-from google.cloud.aiplatform_v1beta1.types import data_labeling_job as gca_data_labeling_job
-from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job
-from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job
-from google.cloud.aiplatform_v1beta1.types import job_service
-from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job
-from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job
-from google.longrunning import operations_pb2  # type: ignore
-from google.protobuf import empty_pb2  # type: ignore
-from .base import JobServiceTransport, DEFAULT_CLIENT_INFO
-
-
-class JobServiceGrpcTransport(JobServiceTransport):
-    """gRPC backend transport for JobService.
-
-    A service for creating and managing Vertex AI's jobs.
-
-    This class defines the same methods as the primary client, so the
-    primary client can load the underlying transport implementation
-    and call it.
-
-    It sends protocol buffers over the wire using gRPC (which is built on
-    top of HTTP/2); the ``grpcio`` package must be installed.
-    """
-    _stubs: Dict[str, Callable]
-
-    def __init__(self, *,
-            host: str = 'aiplatform.googleapis.com',
-            credentials: ga_credentials.Credentials = None,
-            credentials_file: str = None,
-            scopes: Sequence[str] = None,
-            channel: grpc.Channel = None,
-            api_mtls_endpoint: str = None,
-            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
-            ssl_channel_credentials: grpc.ChannelCredentials = None,
-            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
-            quota_project_id: Optional[str] = None,
-            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
-            always_use_jwt_access: Optional[bool] = False,
-            ) -> None:
-        """Instantiate the transport.
-
-        Args:
-            host (Optional[str]):
-                The hostname to connect to.
-            credentials (Optional[google.auth.credentials.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify the application to the service; if none
-                are specified, the client will attempt to ascertain the
-                credentials from the environment.
-                This argument is ignored if ``channel`` is provided.
-            credentials_file (Optional[str]): A file with credentials that can
-                be loaded with :func:`google.auth.load_credentials_from_file`.
-                This argument is ignored if ``channel`` is provided.
-            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
-                ignored if ``channel`` is provided.
-            channel (Optional[grpc.Channel]): A ``Channel`` instance through
-                which to make calls.
-            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
-                If provided, it overrides the ``host`` argument and tries to create
-                a mutual TLS channel with client SSL credentials from
-                ``client_cert_source`` or application default SSL credentials.
-            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
-                Deprecated. A callback to provide client SSL certificate bytes and
-                private key bytes, both in PEM format. It is ignored if
-                ``api_mtls_endpoint`` is None.
-            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
-                for the grpc channel. It is ignored if ``channel`` is provided.
-            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
-                A callback to provide client certificate bytes and private key bytes,
-                both in PEM format. It is used to configure a mutual TLS channel. It is
-                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
-            quota_project_id (Optional[str]): An optional project to use for billing
-                and quota.
-            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
-                The client info used to send a user-agent string along with
-                API requests. If ``None``, then default info will be used.
-                Generally, you only need to set this if you're developing
-                your own client library.
-            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
-                be used for service account credentials.
-
-        Raises:
-          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
-              creation failed for any reason.
-          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
-              and ``credentials_file`` are passed.
-        """
-        self._grpc_channel = None
-        self._ssl_channel_credentials = ssl_channel_credentials
-        self._stubs: Dict[str, Callable] = {}
-        self._operations_client = None
-
-        if api_mtls_endpoint:
-            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
-        if client_cert_source:
-            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
-
-        if channel:
-            # Ignore credentials if a channel was passed.
-            credentials = False
-            # If a channel was explicitly provided, set it.
-            self._grpc_channel = channel
-            self._ssl_channel_credentials = None
-
-        else:
-            if api_mtls_endpoint:
-                host = api_mtls_endpoint
-
-                # Create SSL credentials with client_cert_source or application
-                # default SSL credentials.
-                if client_cert_source:
-                    cert, key = client_cert_source()
-                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
-                        certificate_chain=cert, private_key=key
-                    )
-                else:
-                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
-
-            else:
-                if client_cert_source_for_mtls and not ssl_channel_credentials:
-                    cert, key = client_cert_source_for_mtls()
-                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
-                        certificate_chain=cert, private_key=key
-                    )
-
-        # The base transport sets the host, credentials and scopes
-        super().__init__(
-            host=host,
-            credentials=credentials,
-            credentials_file=credentials_file,
-            scopes=scopes,
-            quota_project_id=quota_project_id,
-            client_info=client_info,
-            always_use_jwt_access=always_use_jwt_access,
-        )
-
-        if not self._grpc_channel:
-            self._grpc_channel = type(self).create_channel(
-                self._host,
-                credentials=self._credentials,
-                credentials_file=credentials_file,
-                scopes=self._scopes,
-                ssl_credentials=self._ssl_channel_credentials,
-                quota_project_id=quota_project_id,
-                options=[
-                    ("grpc.max_send_message_length", -1),
-                    ("grpc.max_receive_message_length", -1),
-                ],
-            )
-
-        # Wrap messages. This must be done after self._grpc_channel exists
-        self._prep_wrapped_messages(client_info)
-
-    @classmethod
-    def create_channel(cls,
-                       host: str = 'aiplatform.googleapis.com',
-                       credentials: ga_credentials.Credentials = None,
-                       credentials_file: str = None,
-                       scopes: Optional[Sequence[str]] = None,
-                       quota_project_id: Optional[str] = None,
-                       **kwargs) -> grpc.Channel:
-        """Create and return a gRPC channel object.
-        Args:
-            host (Optional[str]): The host for the channel to use.
-            credentials (Optional[~.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify this application to the service. If
-                none are specified, the client will attempt to ascertain
-                the credentials from the environment.
-            credentials_file (Optional[str]): A file with credentials that can
-                be loaded with :func:`google.auth.load_credentials_from_file`.
-                This argument is mutually exclusive with credentials.
-            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
-                service. These are only used when credentials are not specified and
-                are passed to :func:`google.auth.default`.
-            quota_project_id (Optional[str]): An optional project to use for billing
-                and quota.
-            kwargs (Optional[dict]): Keyword arguments, which are passed to the
-                channel creation.
-        Returns:
-            grpc.Channel: A gRPC channel object.
-
-        Raises:
-            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
-                and ``credentials_file`` are passed.
-        """
-
-        return grpc_helpers.create_channel(
-            host,
-            credentials=credentials,
-            credentials_file=credentials_file,
-            quota_project_id=quota_project_id,
-            default_scopes=cls.AUTH_SCOPES,
-            scopes=scopes,
-            default_host=cls.DEFAULT_HOST,
-            **kwargs
-        )
-
-    @property
-    def grpc_channel(self) -> grpc.Channel:
-        """Return the channel designed to connect to this service.
-        """
-        return self._grpc_channel
-
-    @property
-    def operations_client(self) -> operations_v1.OperationsClient:
-        """Create the client designed to process long-running operations.
-
-        This property caches on the instance; repeated calls return the same
-        client.
-        """
-        # Sanity check: Only create a new client if we do not already have one.
-        if self._operations_client is None:
-            self._operations_client = operations_v1.OperationsClient(
-                self.grpc_channel
-            )
-
-        # Return the client from cache.
-        return self._operations_client
-
-    @property
-    def create_custom_job(self) -> Callable[
-            [job_service.CreateCustomJobRequest],
-            gca_custom_job.CustomJob]:
-        r"""Return a callable for the create custom job method over gRPC.
-
-        Creates a CustomJob. The service immediately attempts to run a
-        newly created CustomJob.
-
-        Returns:
-            Callable[[~.CreateCustomJobRequest],
-                    ~.CustomJob]:
-                A function that, when called, will call the underlying RPC
-                on the server.
-        """
-        # Generate a "stub function" on-the-fly which will actually make
-        # the request.
-        # gRPC handles serialization and deserialization, so we just need
-        # to pass in the functions for each.
-        if 'create_custom_job' not in self._stubs:
-            self._stubs['create_custom_job'] = self.grpc_channel.unary_unary(
-                '/google.cloud.aiplatform.v1beta1.JobService/CreateCustomJob',
-                request_serializer=job_service.CreateCustomJobRequest.serialize,
-                response_deserializer=gca_custom_job.CustomJob.deserialize,
-            )
-        return self._stubs['create_custom_job']
-
-    @property
-    def get_custom_job(self) -> Callable[
-            [job_service.GetCustomJobRequest],
-            custom_job.CustomJob]:
-        r"""Return a callable for the get custom job method over gRPC.
- - Gets a CustomJob. - - Returns: - Callable[[~.GetCustomJobRequest], - ~.CustomJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_custom_job' not in self._stubs: - self._stubs['get_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/GetCustomJob', - request_serializer=job_service.GetCustomJobRequest.serialize, - response_deserializer=custom_job.CustomJob.deserialize, - ) - return self._stubs['get_custom_job'] - - @property - def list_custom_jobs(self) -> Callable[ - [job_service.ListCustomJobsRequest], - job_service.ListCustomJobsResponse]: - r"""Return a callable for the list custom jobs method over gRPC. - - Lists CustomJobs in a Location. - - Returns: - Callable[[~.ListCustomJobsRequest], - ~.ListCustomJobsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_custom_jobs' not in self._stubs: - self._stubs['list_custom_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/ListCustomJobs', - request_serializer=job_service.ListCustomJobsRequest.serialize, - response_deserializer=job_service.ListCustomJobsResponse.deserialize, - ) - return self._stubs['list_custom_jobs'] - - @property - def delete_custom_job(self) -> Callable[ - [job_service.DeleteCustomJobRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete custom job method over gRPC. - - Deletes a CustomJob. - - Returns: - Callable[[~.DeleteCustomJobRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_custom_job' not in self._stubs: - self._stubs['delete_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/DeleteCustomJob', - request_serializer=job_service.DeleteCustomJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_custom_job'] - - @property - def cancel_custom_job(self) -> Callable[ - [job_service.CancelCustomJobRequest], - empty_pb2.Empty]: - r"""Return a callable for the cancel custom job method over gRPC. - - Cancels a CustomJob. Starts asynchronous cancellation on the - CustomJob. The server makes a best effort to cancel the job, but - success is not guaranteed. Clients can use - [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On successful - cancellation, the CustomJob is not deleted; instead it becomes a - job with a - [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] - is set to ``CANCELLED``. 
- - Returns: - Callable[[~.CancelCustomJobRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_custom_job' not in self._stubs: - self._stubs['cancel_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CancelCustomJob', - request_serializer=job_service.CancelCustomJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_custom_job'] - - @property - def create_data_labeling_job(self) -> Callable[ - [job_service.CreateDataLabelingJobRequest], - gca_data_labeling_job.DataLabelingJob]: - r"""Return a callable for the create data labeling job method over gRPC. - - Creates a DataLabelingJob. - - Returns: - Callable[[~.CreateDataLabelingJobRequest], - ~.DataLabelingJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_data_labeling_job' not in self._stubs: - self._stubs['create_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CreateDataLabelingJob', - request_serializer=job_service.CreateDataLabelingJobRequest.serialize, - response_deserializer=gca_data_labeling_job.DataLabelingJob.deserialize, - ) - return self._stubs['create_data_labeling_job'] - - @property - def get_data_labeling_job(self) -> Callable[ - [job_service.GetDataLabelingJobRequest], - data_labeling_job.DataLabelingJob]: - r"""Return a callable for the get data labeling job method over gRPC. - - Gets a DataLabelingJob. - - Returns: - Callable[[~.GetDataLabelingJobRequest], - ~.DataLabelingJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_data_labeling_job' not in self._stubs: - self._stubs['get_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/GetDataLabelingJob', - request_serializer=job_service.GetDataLabelingJobRequest.serialize, - response_deserializer=data_labeling_job.DataLabelingJob.deserialize, - ) - return self._stubs['get_data_labeling_job'] - - @property - def list_data_labeling_jobs(self) -> Callable[ - [job_service.ListDataLabelingJobsRequest], - job_service.ListDataLabelingJobsResponse]: - r"""Return a callable for the list data labeling jobs method over gRPC. - - Lists DataLabelingJobs in a Location. - - Returns: - Callable[[~.ListDataLabelingJobsRequest], - ~.ListDataLabelingJobsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
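# Every stub property in this transport repeats the lazy-caching recipe shown
# above: create the unary-unary stub once, store it in self._stubs, and hand
# back the cached callable on later accesses. The same recipe condensed into
# a standalone helper (hypothetical, written for illustration only):
def cached_unary_unary(transport, name, rpc_path, request_serializer, response_deserializer):
    """Create a unary-unary stub on first use and reuse it afterwards."""
    if name not in transport._stubs:
        transport._stubs[name] = transport.grpc_channel.unary_unary(
            rpc_path,
            request_serializer=request_serializer,
            response_deserializer=response_deserializer,
        )
    return transport._stubs[name]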
- if 'list_data_labeling_jobs' not in self._stubs: - self._stubs['list_data_labeling_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/ListDataLabelingJobs', - request_serializer=job_service.ListDataLabelingJobsRequest.serialize, - response_deserializer=job_service.ListDataLabelingJobsResponse.deserialize, - ) - return self._stubs['list_data_labeling_jobs'] - - @property - def delete_data_labeling_job(self) -> Callable[ - [job_service.DeleteDataLabelingJobRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete data labeling job method over gRPC. - - Deletes a DataLabelingJob. - - Returns: - Callable[[~.DeleteDataLabelingJobRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_data_labeling_job' not in self._stubs: - self._stubs['delete_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/DeleteDataLabelingJob', - request_serializer=job_service.DeleteDataLabelingJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_data_labeling_job'] - - @property - def cancel_data_labeling_job(self) -> Callable[ - [job_service.CancelDataLabelingJobRequest], - empty_pb2.Empty]: - r"""Return a callable for the cancel data labeling job method over gRPC. - - Cancels a DataLabelingJob. Success of cancellation is - not guaranteed. - - Returns: - Callable[[~.CancelDataLabelingJobRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_data_labeling_job' not in self._stubs: - self._stubs['cancel_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CancelDataLabelingJob', - request_serializer=job_service.CancelDataLabelingJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_data_labeling_job'] - - @property - def create_hyperparameter_tuning_job(self) -> Callable[ - [job_service.CreateHyperparameterTuningJobRequest], - gca_hyperparameter_tuning_job.HyperparameterTuningJob]: - r"""Return a callable for the create hyperparameter tuning - job method over gRPC. - - Creates a HyperparameterTuningJob - - Returns: - Callable[[~.CreateHyperparameterTuningJobRequest], - ~.HyperparameterTuningJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_hyperparameter_tuning_job' not in self._stubs: - self._stubs['create_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CreateHyperparameterTuningJob', - request_serializer=job_service.CreateHyperparameterTuningJobRequest.serialize, - response_deserializer=gca_hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, - ) - return self._stubs['create_hyperparameter_tuning_job'] - - @property - def get_hyperparameter_tuning_job(self) -> Callable[ - [job_service.GetHyperparameterTuningJobRequest], - hyperparameter_tuning_job.HyperparameterTuningJob]: - r"""Return a callable for the get hyperparameter tuning job method over gRPC. - - Gets a HyperparameterTuningJob - - Returns: - Callable[[~.GetHyperparameterTuningJobRequest], - ~.HyperparameterTuningJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_hyperparameter_tuning_job' not in self._stubs: - self._stubs['get_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/GetHyperparameterTuningJob', - request_serializer=job_service.GetHyperparameterTuningJobRequest.serialize, - response_deserializer=hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, - ) - return self._stubs['get_hyperparameter_tuning_job'] - - @property - def list_hyperparameter_tuning_jobs(self) -> Callable[ - [job_service.ListHyperparameterTuningJobsRequest], - job_service.ListHyperparameterTuningJobsResponse]: - r"""Return a callable for the list hyperparameter tuning - jobs method over gRPC. - - Lists HyperparameterTuningJobs in a Location. - - Returns: - Callable[[~.ListHyperparameterTuningJobsRequest], - ~.ListHyperparameterTuningJobsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_hyperparameter_tuning_jobs' not in self._stubs: - self._stubs['list_hyperparameter_tuning_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/ListHyperparameterTuningJobs', - request_serializer=job_service.ListHyperparameterTuningJobsRequest.serialize, - response_deserializer=job_service.ListHyperparameterTuningJobsResponse.deserialize, - ) - return self._stubs['list_hyperparameter_tuning_jobs'] - - @property - def delete_hyperparameter_tuning_job(self) -> Callable[ - [job_service.DeleteHyperparameterTuningJobRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete hyperparameter tuning - job method over gRPC. - - Deletes a HyperparameterTuningJob. - - Returns: - Callable[[~.DeleteHyperparameterTuningJobRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
-        if 'delete_hyperparameter_tuning_job' not in self._stubs:
-            self._stubs['delete_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary(
-                '/google.cloud.aiplatform.v1beta1.JobService/DeleteHyperparameterTuningJob',
-                request_serializer=job_service.DeleteHyperparameterTuningJobRequest.serialize,
-                response_deserializer=operations_pb2.Operation.FromString,
-            )
-        return self._stubs['delete_hyperparameter_tuning_job']
-
-    @property
-    def cancel_hyperparameter_tuning_job(self) -> Callable[
-            [job_service.CancelHyperparameterTuningJobRequest],
-            empty_pb2.Empty]:
-        r"""Return a callable for the cancel hyperparameter tuning
-        job method over gRPC.
-
-        Cancels a HyperparameterTuningJob. Starts asynchronous
-        cancellation on the HyperparameterTuningJob. The server makes a
-        best effort to cancel the job, but success is not guaranteed.
-        Clients can use
-        [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob]
-        or other methods to check whether the cancellation succeeded or
-        whether the job completed despite cancellation. On successful
-        cancellation, the HyperparameterTuningJob is not deleted;
-        instead it becomes a job with a
-        [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error]
-        value with a [google.rpc.Status.code][google.rpc.Status.code] of
-        1, corresponding to ``Code.CANCELLED``, and
-        [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state]
-        is set to ``CANCELLED``.
-
-        Returns:
-            Callable[[~.CancelHyperparameterTuningJobRequest],
-                    ~.Empty]:
-                A function that, when called, will call the underlying RPC
-                on the server.
-        """
-        # Generate a "stub function" on-the-fly which will actually make
-        # the request.
-        # gRPC handles serialization and deserialization, so we just need
-        # to pass in the functions for each.
-        if 'cancel_hyperparameter_tuning_job' not in self._stubs:
-            self._stubs['cancel_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary(
-                '/google.cloud.aiplatform.v1beta1.JobService/CancelHyperparameterTuningJob',
-                request_serializer=job_service.CancelHyperparameterTuningJobRequest.serialize,
-                response_deserializer=empty_pb2.Empty.FromString,
-            )
-        return self._stubs['cancel_hyperparameter_tuning_job']
-
-    @property
-    def create_batch_prediction_job(self) -> Callable[
-            [job_service.CreateBatchPredictionJobRequest],
-            gca_batch_prediction_job.BatchPredictionJob]:
-        r"""Return a callable for the create batch prediction job method over gRPC.
-
-        Creates a BatchPredictionJob. The service immediately attempts
-        to start a newly created BatchPredictionJob.
-
-        Returns:
-            Callable[[~.CreateBatchPredictionJobRequest],
-                    ~.BatchPredictionJob]:
-                A function that, when called, will call the underlying RPC
-                on the server.
-        """
-        # Generate a "stub function" on-the-fly which will actually make
-        # the request.
-        # gRPC handles serialization and deserialization, so we just need
-        # to pass in the functions for each.
- if 'create_batch_prediction_job' not in self._stubs: - self._stubs['create_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CreateBatchPredictionJob', - request_serializer=job_service.CreateBatchPredictionJobRequest.serialize, - response_deserializer=gca_batch_prediction_job.BatchPredictionJob.deserialize, - ) - return self._stubs['create_batch_prediction_job'] - - @property - def get_batch_prediction_job(self) -> Callable[ - [job_service.GetBatchPredictionJobRequest], - batch_prediction_job.BatchPredictionJob]: - r"""Return a callable for the get batch prediction job method over gRPC. - - Gets a BatchPredictionJob - - Returns: - Callable[[~.GetBatchPredictionJobRequest], - ~.BatchPredictionJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_batch_prediction_job' not in self._stubs: - self._stubs['get_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/GetBatchPredictionJob', - request_serializer=job_service.GetBatchPredictionJobRequest.serialize, - response_deserializer=batch_prediction_job.BatchPredictionJob.deserialize, - ) - return self._stubs['get_batch_prediction_job'] - - @property - def list_batch_prediction_jobs(self) -> Callable[ - [job_service.ListBatchPredictionJobsRequest], - job_service.ListBatchPredictionJobsResponse]: - r"""Return a callable for the list batch prediction jobs method over gRPC. - - Lists BatchPredictionJobs in a Location. - - Returns: - Callable[[~.ListBatchPredictionJobsRequest], - ~.ListBatchPredictionJobsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_batch_prediction_jobs' not in self._stubs: - self._stubs['list_batch_prediction_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/ListBatchPredictionJobs', - request_serializer=job_service.ListBatchPredictionJobsRequest.serialize, - response_deserializer=job_service.ListBatchPredictionJobsResponse.deserialize, - ) - return self._stubs['list_batch_prediction_jobs'] - - @property - def delete_batch_prediction_job(self) -> Callable[ - [job_service.DeleteBatchPredictionJobRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete batch prediction job method over gRPC. - - Deletes a BatchPredictionJob. Can only be called on - jobs that already finished. - - Returns: - Callable[[~.DeleteBatchPredictionJobRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
-        if 'delete_batch_prediction_job' not in self._stubs:
-            self._stubs['delete_batch_prediction_job'] = self.grpc_channel.unary_unary(
-                '/google.cloud.aiplatform.v1beta1.JobService/DeleteBatchPredictionJob',
-                request_serializer=job_service.DeleteBatchPredictionJobRequest.serialize,
-                response_deserializer=operations_pb2.Operation.FromString,
-            )
-        return self._stubs['delete_batch_prediction_job']
-
-    @property
-    def cancel_batch_prediction_job(self) -> Callable[
-            [job_service.CancelBatchPredictionJobRequest],
-            empty_pb2.Empty]:
-        r"""Return a callable for the cancel batch prediction job method over gRPC.
-
-        Cancels a BatchPredictionJob.
-
-        Starts asynchronous cancellation on the BatchPredictionJob. The
-        server makes the best effort to cancel the job, but success is
-        not guaranteed. Clients can use
-        [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob]
-        or other methods to check whether the cancellation succeeded or
-        whether the job completed despite cancellation. On a successful
-        cancellation, the BatchPredictionJob is not deleted; instead its
-        [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state]
-        is set to ``CANCELLED``. Any files already outputted by the job
-        are not deleted.
-
-        Returns:
-            Callable[[~.CancelBatchPredictionJobRequest],
-                    ~.Empty]:
-                A function that, when called, will call the underlying RPC
-                on the server.
-        """
-        # Generate a "stub function" on-the-fly which will actually make
-        # the request.
-        # gRPC handles serialization and deserialization, so we just need
-        # to pass in the functions for each.
-        if 'cancel_batch_prediction_job' not in self._stubs:
-            self._stubs['cancel_batch_prediction_job'] = self.grpc_channel.unary_unary(
-                '/google.cloud.aiplatform.v1beta1.JobService/CancelBatchPredictionJob',
-                request_serializer=job_service.CancelBatchPredictionJobRequest.serialize,
-                response_deserializer=empty_pb2.Empty.FromString,
-            )
-        return self._stubs['cancel_batch_prediction_job']
-
-    @property
-    def create_model_deployment_monitoring_job(self) -> Callable[
-            [job_service.CreateModelDeploymentMonitoringJobRequest],
-            gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob]:
-        r"""Return a callable for the create model deployment
-        monitoring job method over gRPC.
-
-        Creates a ModelDeploymentMonitoringJob. It will run
-        periodically on a configured interval.
-
-        Returns:
-            Callable[[~.CreateModelDeploymentMonitoringJobRequest],
-                    ~.ModelDeploymentMonitoringJob]:
-                A function that, when called, will call the underlying RPC
-                on the server.
-        """
-        # Generate a "stub function" on-the-fly which will actually make
-        # the request.
-        # gRPC handles serialization and deserialization, so we just need
-        # to pass in the functions for each.
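# The cancel_batch_prediction_job docstring above describes best-effort
# cancellation: the RPC returns Empty immediately, and callers poll the job
# until it reaches a terminal state. A sketch of that contract (the job name
# is a hypothetical placeholder; backoff and error handling omitted):
import time

from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.JobServiceClient()
job_name = "projects/my-project/locations/us-central1/batchPredictionJobs/123"  # hypothetical
client.cancel_batch_prediction_job(name=job_name)
terminal = {
    aiplatform_v1beta1.JobState.JOB_STATE_SUCCEEDED,
    aiplatform_v1beta1.JobState.JOB_STATE_FAILED,
    aiplatform_v1beta1.JobState.JOB_STATE_CANCELLED,
}
# The job may land in CANCELLED, or it may finish despite the cancellation.
while client.get_batch_prediction_job(name=job_name).state not in terminal:
    time.sleep(10)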
- if 'create_model_deployment_monitoring_job' not in self._stubs: - self._stubs['create_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CreateModelDeploymentMonitoringJob', - request_serializer=job_service.CreateModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, - ) - return self._stubs['create_model_deployment_monitoring_job'] - - @property - def search_model_deployment_monitoring_stats_anomalies(self) -> Callable[ - [job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest], - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: - r"""Return a callable for the search model deployment - monitoring stats anomalies method over gRPC. - - Searches Model Monitoring Statistics generated within - a given time window. - - Returns: - Callable[[~.SearchModelDeploymentMonitoringStatsAnomaliesRequest], - ~.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'search_model_deployment_monitoring_stats_anomalies' not in self._stubs: - self._stubs['search_model_deployment_monitoring_stats_anomalies'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/SearchModelDeploymentMonitoringStatsAnomalies', - request_serializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest.serialize, - response_deserializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse.deserialize, - ) - return self._stubs['search_model_deployment_monitoring_stats_anomalies'] - - @property - def get_model_deployment_monitoring_job(self) -> Callable[ - [job_service.GetModelDeploymentMonitoringJobRequest], - model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: - r"""Return a callable for the get model deployment - monitoring job method over gRPC. - - Gets a ModelDeploymentMonitoringJob. - - Returns: - Callable[[~.GetModelDeploymentMonitoringJobRequest], - ~.ModelDeploymentMonitoringJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_model_deployment_monitoring_job' not in self._stubs: - self._stubs['get_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/GetModelDeploymentMonitoringJob', - request_serializer=job_service.GetModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, - ) - return self._stubs['get_model_deployment_monitoring_job'] - - @property - def list_model_deployment_monitoring_jobs(self) -> Callable[ - [job_service.ListModelDeploymentMonitoringJobsRequest], - job_service.ListModelDeploymentMonitoringJobsResponse]: - r"""Return a callable for the list model deployment - monitoring jobs method over gRPC. - - Lists ModelDeploymentMonitoringJobs in a Location. 
- - Returns: - Callable[[~.ListModelDeploymentMonitoringJobsRequest], - ~.ListModelDeploymentMonitoringJobsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_model_deployment_monitoring_jobs' not in self._stubs: - self._stubs['list_model_deployment_monitoring_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/ListModelDeploymentMonitoringJobs', - request_serializer=job_service.ListModelDeploymentMonitoringJobsRequest.serialize, - response_deserializer=job_service.ListModelDeploymentMonitoringJobsResponse.deserialize, - ) - return self._stubs['list_model_deployment_monitoring_jobs'] - - @property - def update_model_deployment_monitoring_job(self) -> Callable[ - [job_service.UpdateModelDeploymentMonitoringJobRequest], - operations_pb2.Operation]: - r"""Return a callable for the update model deployment - monitoring job method over gRPC. - - Updates a ModelDeploymentMonitoringJob. - - Returns: - Callable[[~.UpdateModelDeploymentMonitoringJobRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_model_deployment_monitoring_job' not in self._stubs: - self._stubs['update_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/UpdateModelDeploymentMonitoringJob', - request_serializer=job_service.UpdateModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_model_deployment_monitoring_job'] - - @property - def delete_model_deployment_monitoring_job(self) -> Callable[ - [job_service.DeleteModelDeploymentMonitoringJobRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete model deployment - monitoring job method over gRPC. - - Deletes a ModelDeploymentMonitoringJob. - - Returns: - Callable[[~.DeleteModelDeploymentMonitoringJobRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_model_deployment_monitoring_job' not in self._stubs: - self._stubs['delete_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/DeleteModelDeploymentMonitoringJob', - request_serializer=job_service.DeleteModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_model_deployment_monitoring_job'] - - @property - def pause_model_deployment_monitoring_job(self) -> Callable[ - [job_service.PauseModelDeploymentMonitoringJobRequest], - empty_pb2.Empty]: - r"""Return a callable for the pause model deployment - monitoring job method over gRPC. - - Pauses a ModelDeploymentMonitoringJob. If the job is running, - the server makes a best effort to cancel the job. 
Will set - [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state] - to 'PAUSED'. - - Returns: - Callable[[~.PauseModelDeploymentMonitoringJobRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'pause_model_deployment_monitoring_job' not in self._stubs: - self._stubs['pause_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/PauseModelDeploymentMonitoringJob', - request_serializer=job_service.PauseModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['pause_model_deployment_monitoring_job'] - - @property - def resume_model_deployment_monitoring_job(self) -> Callable[ - [job_service.ResumeModelDeploymentMonitoringJobRequest], - empty_pb2.Empty]: - r"""Return a callable for the resume model deployment - monitoring job method over gRPC. - - Resumes a paused ModelDeploymentMonitoringJob. It - will start to run from the next scheduled time. A deleted - ModelDeploymentMonitoringJob can't be resumed. - - Returns: - Callable[[~.ResumeModelDeploymentMonitoringJobRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'resume_model_deployment_monitoring_job' not in self._stubs: - self._stubs['resume_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/ResumeModelDeploymentMonitoringJob', - request_serializer=job_service.ResumeModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['resume_model_deployment_monitoring_job'] - - -__all__ = ( - 'JobServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py deleted file mode 100644 index cc7102577e..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,1047 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
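The pause and resume stubs in the synchronous transport above are normally driven through the generated client rather than called directly. A hedged sketch; the resource name is a placeholder and credentials are assumed to come from the environment:

```python
# Hedged sketch: pause and later resume a ModelDeploymentMonitoringJob via the
# generated client. JOB_NAME is a placeholder resource name.
from google.cloud import aiplatform_v1beta1

JOB_NAME = (
    "projects/my-project/locations/us-central1/"
    "modelDeploymentMonitoringJobs/456"
)

client = aiplatform_v1beta1.JobServiceClient()
client.pause_model_deployment_monitoring_job(name=JOB_NAME)   # state -> PAUSED
client.resume_model_deployment_monitoring_job(name=JOB_NAME)  # runs at the next scheduled time
```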
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1beta1.types import batch_prediction_job -from google.cloud.aiplatform_v1beta1.types import batch_prediction_job as gca_batch_prediction_job -from google.cloud.aiplatform_v1beta1.types import custom_job -from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job -from google.cloud.aiplatform_v1beta1.types import data_labeling_job -from google.cloud.aiplatform_v1beta1.types import data_labeling_job as gca_data_labeling_job -from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job -from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job -from google.cloud.aiplatform_v1beta1.types import job_service -from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job -from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import JobServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import JobServiceGrpcTransport - - -class JobServiceGrpcAsyncIOTransport(JobServiceTransport): - """gRPC AsyncIO backend transport for JobService. - - A service for creating and managing Vertex AI's jobs. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): An optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object.
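The ``create_channel`` helper above can also be called explicitly to build a channel and hand it to the transport. A hedged sketch, assuming the library is installed and Application Default Credentials are available:

```python
# Hedged sketch: constructing an AsyncIO channel via the transport helper.
from google.cloud.aiplatform_v1beta1.services.job_service.transports import (
    JobServiceGrpcAsyncIOTransport,
)

channel = JobServiceGrpcAsyncIOTransport.create_channel(
    'aiplatform.googleapis.com',
    scopes=['https://www.googleapis.com/auth/cloud-platform'],
)
transport = JobServiceGrpcAsyncIOTransport(channel=channel)
```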
- """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_custom_job(self) -> Callable[ - [job_service.CreateCustomJobRequest], - Awaitable[gca_custom_job.CustomJob]]: - r"""Return a callable for the create custom job method over gRPC. - - Creates a CustomJob. A created CustomJob right away - will be attempted to be run. - - Returns: - Callable[[~.CreateCustomJobRequest], - Awaitable[~.CustomJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_custom_job' not in self._stubs: - self._stubs['create_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CreateCustomJob', - request_serializer=job_service.CreateCustomJobRequest.serialize, - response_deserializer=gca_custom_job.CustomJob.deserialize, - ) - return self._stubs['create_custom_job'] - - @property - def get_custom_job(self) -> Callable[ - [job_service.GetCustomJobRequest], - Awaitable[custom_job.CustomJob]]: - r"""Return a callable for the get custom job method over gRPC. - - Gets a CustomJob. - - Returns: - Callable[[~.GetCustomJobRequest], - Awaitable[~.CustomJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_custom_job' not in self._stubs: - self._stubs['get_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/GetCustomJob', - request_serializer=job_service.GetCustomJobRequest.serialize, - response_deserializer=custom_job.CustomJob.deserialize, - ) - return self._stubs['get_custom_job'] - - @property - def list_custom_jobs(self) -> Callable[ - [job_service.ListCustomJobsRequest], - Awaitable[job_service.ListCustomJobsResponse]]: - r"""Return a callable for the list custom jobs method over gRPC. - - Lists CustomJobs in a Location. - - Returns: - Callable[[~.ListCustomJobsRequest], - Awaitable[~.ListCustomJobsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_custom_jobs' not in self._stubs: - self._stubs['list_custom_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/ListCustomJobs', - request_serializer=job_service.ListCustomJobsRequest.serialize, - response_deserializer=job_service.ListCustomJobsResponse.deserialize, - ) - return self._stubs['list_custom_jobs'] - - @property - def delete_custom_job(self) -> Callable[ - [job_service.DeleteCustomJobRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete custom job method over gRPC. - - Deletes a CustomJob. - - Returns: - Callable[[~.DeleteCustomJobRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_custom_job' not in self._stubs: - self._stubs['delete_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/DeleteCustomJob', - request_serializer=job_service.DeleteCustomJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_custom_job'] - - @property - def cancel_custom_job(self) -> Callable[ - [job_service.CancelCustomJobRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the cancel custom job method over gRPC. - - Cancels a CustomJob. Starts asynchronous cancellation on the - CustomJob. The server makes a best effort to cancel the job, but - success is not guaranteed. 
Clients can use - [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On successful - cancellation, the CustomJob is not deleted; instead it becomes a - job with a - [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] - is set to ``CANCELLED``. - - Returns: - Callable[[~.CancelCustomJobRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_custom_job' not in self._stubs: - self._stubs['cancel_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CancelCustomJob', - request_serializer=job_service.CancelCustomJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_custom_job'] - - @property - def create_data_labeling_job(self) -> Callable[ - [job_service.CreateDataLabelingJobRequest], - Awaitable[gca_data_labeling_job.DataLabelingJob]]: - r"""Return a callable for the create data labeling job method over gRPC. - - Creates a DataLabelingJob. - - Returns: - Callable[[~.CreateDataLabelingJobRequest], - Awaitable[~.DataLabelingJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_data_labeling_job' not in self._stubs: - self._stubs['create_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CreateDataLabelingJob', - request_serializer=job_service.CreateDataLabelingJobRequest.serialize, - response_deserializer=gca_data_labeling_job.DataLabelingJob.deserialize, - ) - return self._stubs['create_data_labeling_job'] - - @property - def get_data_labeling_job(self) -> Callable[ - [job_service.GetDataLabelingJobRequest], - Awaitable[data_labeling_job.DataLabelingJob]]: - r"""Return a callable for the get data labeling job method over gRPC. - - Gets a DataLabelingJob. - - Returns: - Callable[[~.GetDataLabelingJobRequest], - Awaitable[~.DataLabelingJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_data_labeling_job' not in self._stubs: - self._stubs['get_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/GetDataLabelingJob', - request_serializer=job_service.GetDataLabelingJobRequest.serialize, - response_deserializer=data_labeling_job.DataLabelingJob.deserialize, - ) - return self._stubs['get_data_labeling_job'] - - @property - def list_data_labeling_jobs(self) -> Callable[ - [job_service.ListDataLabelingJobsRequest], - Awaitable[job_service.ListDataLabelingJobsResponse]]: - r"""Return a callable for the list data labeling jobs method over gRPC. 
- - Lists DataLabelingJobs in a Location. - - Returns: - Callable[[~.ListDataLabelingJobsRequest], - Awaitable[~.ListDataLabelingJobsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_data_labeling_jobs' not in self._stubs: - self._stubs['list_data_labeling_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/ListDataLabelingJobs', - request_serializer=job_service.ListDataLabelingJobsRequest.serialize, - response_deserializer=job_service.ListDataLabelingJobsResponse.deserialize, - ) - return self._stubs['list_data_labeling_jobs'] - - @property - def delete_data_labeling_job(self) -> Callable[ - [job_service.DeleteDataLabelingJobRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete data labeling job method over gRPC. - - Deletes a DataLabelingJob. - - Returns: - Callable[[~.DeleteDataLabelingJobRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_data_labeling_job' not in self._stubs: - self._stubs['delete_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/DeleteDataLabelingJob', - request_serializer=job_service.DeleteDataLabelingJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_data_labeling_job'] - - @property - def cancel_data_labeling_job(self) -> Callable[ - [job_service.CancelDataLabelingJobRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the cancel data labeling job method over gRPC. - - Cancels a DataLabelingJob. Success of cancellation is - not guaranteed. - - Returns: - Callable[[~.CancelDataLabelingJobRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_data_labeling_job' not in self._stubs: - self._stubs['cancel_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CancelDataLabelingJob', - request_serializer=job_service.CancelDataLabelingJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_data_labeling_job'] - - @property - def create_hyperparameter_tuning_job(self) -> Callable[ - [job_service.CreateHyperparameterTuningJobRequest], - Awaitable[gca_hyperparameter_tuning_job.HyperparameterTuningJob]]: - r"""Return a callable for the create hyperparameter tuning - job method over gRPC. - - Creates a HyperparameterTuningJob - - Returns: - Callable[[~.CreateHyperparameterTuningJobRequest], - Awaitable[~.HyperparameterTuningJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
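The list stubs above return one page per call; the generated async client wraps them in a pager so code can iterate items while next-page requests happen transparently. A hedged sketch with a placeholder parent:

```python
# Hedged sketch: iterating an async pager over DataLabelingJobs.
import asyncio

from google.cloud import aiplatform_v1beta1

async def list_data_labeling_jobs() -> None:
    client = aiplatform_v1beta1.JobServiceAsyncClient()
    pager = await client.list_data_labeling_jobs(
        parent='projects/my-project/locations/us-central1',
    )
    async for job in pager:  # each iteration may trigger a next-page RPC
        print(job.name)

asyncio.run(list_data_labeling_jobs())
```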
- if 'create_hyperparameter_tuning_job' not in self._stubs: - self._stubs['create_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CreateHyperparameterTuningJob', - request_serializer=job_service.CreateHyperparameterTuningJobRequest.serialize, - response_deserializer=gca_hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, - ) - return self._stubs['create_hyperparameter_tuning_job'] - - @property - def get_hyperparameter_tuning_job(self) -> Callable[ - [job_service.GetHyperparameterTuningJobRequest], - Awaitable[hyperparameter_tuning_job.HyperparameterTuningJob]]: - r"""Return a callable for the get hyperparameter tuning job method over gRPC. - - Gets a HyperparameterTuningJob - - Returns: - Callable[[~.GetHyperparameterTuningJobRequest], - Awaitable[~.HyperparameterTuningJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_hyperparameter_tuning_job' not in self._stubs: - self._stubs['get_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/GetHyperparameterTuningJob', - request_serializer=job_service.GetHyperparameterTuningJobRequest.serialize, - response_deserializer=hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, - ) - return self._stubs['get_hyperparameter_tuning_job'] - - @property - def list_hyperparameter_tuning_jobs(self) -> Callable[ - [job_service.ListHyperparameterTuningJobsRequest], - Awaitable[job_service.ListHyperparameterTuningJobsResponse]]: - r"""Return a callable for the list hyperparameter tuning - jobs method over gRPC. - - Lists HyperparameterTuningJobs in a Location. - - Returns: - Callable[[~.ListHyperparameterTuningJobsRequest], - Awaitable[~.ListHyperparameterTuningJobsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_hyperparameter_tuning_jobs' not in self._stubs: - self._stubs['list_hyperparameter_tuning_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/ListHyperparameterTuningJobs', - request_serializer=job_service.ListHyperparameterTuningJobsRequest.serialize, - response_deserializer=job_service.ListHyperparameterTuningJobsResponse.deserialize, - ) - return self._stubs['list_hyperparameter_tuning_jobs'] - - @property - def delete_hyperparameter_tuning_job(self) -> Callable[ - [job_service.DeleteHyperparameterTuningJobRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete hyperparameter tuning - job method over gRPC. - - Deletes a HyperparameterTuningJob. - - Returns: - Callable[[~.DeleteHyperparameterTuningJobRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_hyperparameter_tuning_job' not in self._stubs: - self._stubs['delete_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/DeleteHyperparameterTuningJob', - request_serializer=job_service.DeleteHyperparameterTuningJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_hyperparameter_tuning_job'] - - @property - def cancel_hyperparameter_tuning_job(self) -> Callable[ - [job_service.CancelHyperparameterTuningJobRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the cancel hyperparameter tuning - job method over gRPC. - - Cancels a HyperparameterTuningJob. Starts asynchronous - cancellation on the HyperparameterTuningJob. The server makes a - best effort to cancel the job, but success is not guaranteed. - Clients can use - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On successful - cancellation, the HyperparameterTuningJob is not deleted; - instead it becomes a job with a - [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state] - is set to ``CANCELLED``. - - Returns: - Callable[[~.CancelHyperparameterTuningJobRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_hyperparameter_tuning_job' not in self._stubs: - self._stubs['cancel_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CancelHyperparameterTuningJob', - request_serializer=job_service.CancelHyperparameterTuningJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_hyperparameter_tuning_job'] - - @property - def create_batch_prediction_job(self) -> Callable[ - [job_service.CreateBatchPredictionJobRequest], - Awaitable[gca_batch_prediction_job.BatchPredictionJob]]: - r"""Return a callable for the create batch prediction job method over gRPC. - - Creates a BatchPredictionJob. The server will attempt - to start a newly created BatchPredictionJob right away. - - Returns: - Callable[[~.CreateBatchPredictionJobRequest], - Awaitable[~.BatchPredictionJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each.
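Each property on this AsyncIO transport returns an awaitable callable; the generated client layers retries, timeouts, and metadata on top, but the raw call looks roughly like this. A hedged sketch; the job name is a placeholder and Application Default Credentials are assumed:

```python
# Hedged sketch: invoking a transport stub directly (normally the generated
# client does this for you, with retries and metadata added).
import asyncio

from google.cloud.aiplatform_v1beta1.services.job_service.transports import (
    JobServiceGrpcAsyncIOTransport,
)
from google.cloud.aiplatform_v1beta1.types import job_service

async def main() -> None:
    transport = JobServiceGrpcAsyncIOTransport()  # uses ADC by default
    request = job_service.GetBatchPredictionJobRequest(
        name='projects/my-project/locations/us-central1/batchPredictionJobs/123',
    )
    job = await transport.get_batch_prediction_job(request)  # raw stub call
    print(job.state)

asyncio.run(main())
```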
- if 'create_batch_prediction_job' not in self._stubs: - self._stubs['create_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CreateBatchPredictionJob', - request_serializer=job_service.CreateBatchPredictionJobRequest.serialize, - response_deserializer=gca_batch_prediction_job.BatchPredictionJob.deserialize, - ) - return self._stubs['create_batch_prediction_job'] - - @property - def get_batch_prediction_job(self) -> Callable[ - [job_service.GetBatchPredictionJobRequest], - Awaitable[batch_prediction_job.BatchPredictionJob]]: - r"""Return a callable for the get batch prediction job method over gRPC. - - Gets a BatchPredictionJob - - Returns: - Callable[[~.GetBatchPredictionJobRequest], - Awaitable[~.BatchPredictionJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_batch_prediction_job' not in self._stubs: - self._stubs['get_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/GetBatchPredictionJob', - request_serializer=job_service.GetBatchPredictionJobRequest.serialize, - response_deserializer=batch_prediction_job.BatchPredictionJob.deserialize, - ) - return self._stubs['get_batch_prediction_job'] - - @property - def list_batch_prediction_jobs(self) -> Callable[ - [job_service.ListBatchPredictionJobsRequest], - Awaitable[job_service.ListBatchPredictionJobsResponse]]: - r"""Return a callable for the list batch prediction jobs method over gRPC. - - Lists BatchPredictionJobs in a Location. - - Returns: - Callable[[~.ListBatchPredictionJobsRequest], - Awaitable[~.ListBatchPredictionJobsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_batch_prediction_jobs' not in self._stubs: - self._stubs['list_batch_prediction_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/ListBatchPredictionJobs', - request_serializer=job_service.ListBatchPredictionJobsRequest.serialize, - response_deserializer=job_service.ListBatchPredictionJobsResponse.deserialize, - ) - return self._stubs['list_batch_prediction_jobs'] - - @property - def delete_batch_prediction_job(self) -> Callable[ - [job_service.DeleteBatchPredictionJobRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete batch prediction job method over gRPC. - - Deletes a BatchPredictionJob. Can only be called on - jobs that already finished. - - Returns: - Callable[[~.DeleteBatchPredictionJobRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_batch_prediction_job' not in self._stubs: - self._stubs['delete_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/DeleteBatchPredictionJob', - request_serializer=job_service.DeleteBatchPredictionJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_batch_prediction_job'] - - @property - def cancel_batch_prediction_job(self) -> Callable[ - [job_service.CancelBatchPredictionJobRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the cancel batch prediction job method over gRPC. - - Cancels a BatchPredictionJob. - - Starts asynchronous cancellation on the BatchPredictionJob. The - server makes the best effort to cancel the job, but success is - not guaranteed. Clients can use - [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On a successful - cancellation, the BatchPredictionJob is not deleted; instead its - [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state] - is set to ``CANCELLED``. Any files already outputted by the job - are not deleted. - - Returns: - Callable[[~.CancelBatchPredictionJobRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_batch_prediction_job' not in self._stubs: - self._stubs['cancel_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CancelBatchPredictionJob', - request_serializer=job_service.CancelBatchPredictionJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_batch_prediction_job'] - - @property - def create_model_deployment_monitoring_job(self) -> Callable[ - [job_service.CreateModelDeploymentMonitoringJobRequest], - Awaitable[gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob]]: - r"""Return a callable for the create model deployment - monitoring job method over gRPC. - - Creates a ModelDeploymentMonitoringJob. It will run - periodically on a configured interval. - - Returns: - Callable[[~.CreateModelDeploymentMonitoringJobRequest], - Awaitable[~.ModelDeploymentMonitoringJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each.
- if 'create_model_deployment_monitoring_job' not in self._stubs: - self._stubs['create_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CreateModelDeploymentMonitoringJob', - request_serializer=job_service.CreateModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, - ) - return self._stubs['create_model_deployment_monitoring_job'] - - @property - def search_model_deployment_monitoring_stats_anomalies(self) -> Callable[ - [job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest], - Awaitable[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]]: - r"""Return a callable for the search model deployment - monitoring stats anomalies method over gRPC. - - Searches Model Monitoring Statistics generated within - a given time window. - - Returns: - Callable[[~.SearchModelDeploymentMonitoringStatsAnomaliesRequest], - Awaitable[~.SearchModelDeploymentMonitoringStatsAnomaliesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'search_model_deployment_monitoring_stats_anomalies' not in self._stubs: - self._stubs['search_model_deployment_monitoring_stats_anomalies'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/SearchModelDeploymentMonitoringStatsAnomalies', - request_serializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest.serialize, - response_deserializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse.deserialize, - ) - return self._stubs['search_model_deployment_monitoring_stats_anomalies'] - - @property - def get_model_deployment_monitoring_job(self) -> Callable[ - [job_service.GetModelDeploymentMonitoringJobRequest], - Awaitable[model_deployment_monitoring_job.ModelDeploymentMonitoringJob]]: - r"""Return a callable for the get model deployment - monitoring job method over gRPC. - - Gets a ModelDeploymentMonitoringJob. - - Returns: - Callable[[~.GetModelDeploymentMonitoringJobRequest], - Awaitable[~.ModelDeploymentMonitoringJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_model_deployment_monitoring_job' not in self._stubs: - self._stubs['get_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/GetModelDeploymentMonitoringJob', - request_serializer=job_service.GetModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, - ) - return self._stubs['get_model_deployment_monitoring_job'] - - @property - def list_model_deployment_monitoring_jobs(self) -> Callable[ - [job_service.ListModelDeploymentMonitoringJobsRequest], - Awaitable[job_service.ListModelDeploymentMonitoringJobsResponse]]: - r"""Return a callable for the list model deployment - monitoring jobs method over gRPC. - - Lists ModelDeploymentMonitoringJobs in a Location. 
- - Returns: - Callable[[~.ListModelDeploymentMonitoringJobsRequest], - Awaitable[~.ListModelDeploymentMonitoringJobsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_model_deployment_monitoring_jobs' not in self._stubs: - self._stubs['list_model_deployment_monitoring_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/ListModelDeploymentMonitoringJobs', - request_serializer=job_service.ListModelDeploymentMonitoringJobsRequest.serialize, - response_deserializer=job_service.ListModelDeploymentMonitoringJobsResponse.deserialize, - ) - return self._stubs['list_model_deployment_monitoring_jobs'] - - @property - def update_model_deployment_monitoring_job(self) -> Callable[ - [job_service.UpdateModelDeploymentMonitoringJobRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the update model deployment - monitoring job method over gRPC. - - Updates a ModelDeploymentMonitoringJob. - - Returns: - Callable[[~.UpdateModelDeploymentMonitoringJobRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_model_deployment_monitoring_job' not in self._stubs: - self._stubs['update_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/UpdateModelDeploymentMonitoringJob', - request_serializer=job_service.UpdateModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_model_deployment_monitoring_job'] - - @property - def delete_model_deployment_monitoring_job(self) -> Callable[ - [job_service.DeleteModelDeploymentMonitoringJobRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete model deployment - monitoring job method over gRPC. - - Deletes a ModelDeploymentMonitoringJob. - - Returns: - Callable[[~.DeleteModelDeploymentMonitoringJobRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_model_deployment_monitoring_job' not in self._stubs: - self._stubs['delete_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/DeleteModelDeploymentMonitoringJob', - request_serializer=job_service.DeleteModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_model_deployment_monitoring_job'] - - @property - def pause_model_deployment_monitoring_job(self) -> Callable[ - [job_service.PauseModelDeploymentMonitoringJobRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the pause model deployment - monitoring job method over gRPC. - - Pauses a ModelDeploymentMonitoringJob. If the job is running, - the server makes a best effort to cancel the job. 
Will set - [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state] - to 'PAUSED'. - - Returns: - Callable[[~.PauseModelDeploymentMonitoringJobRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'pause_model_deployment_monitoring_job' not in self._stubs: - self._stubs['pause_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/PauseModelDeploymentMonitoringJob', - request_serializer=job_service.PauseModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['pause_model_deployment_monitoring_job'] - - @property - def resume_model_deployment_monitoring_job(self) -> Callable[ - [job_service.ResumeModelDeploymentMonitoringJobRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the resume model deployment - monitoring job method over gRPC. - - Resumes a paused ModelDeploymentMonitoringJob. It - will start to run from the next scheduled time. A deleted - ModelDeploymentMonitoringJob can't be resumed. - - Returns: - Callable[[~.ResumeModelDeploymentMonitoringJobRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'resume_model_deployment_monitoring_job' not in self._stubs: - self._stubs['resume_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/ResumeModelDeploymentMonitoringJob', - request_serializer=job_service.ResumeModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['resume_model_deployment_monitoring_job'] - - -__all__ = ( - 'JobServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/__init__.py deleted file mode 100644 index b0a31fc612..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
-# -from .client import MetadataServiceClient -from .async_client import MetadataServiceAsyncClient - -__all__ = ( - 'MetadataServiceClient', - 'MetadataServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py deleted file mode 100644 index 67ae60978b..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py +++ /dev/null @@ -1,2516 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.metadata_service import pagers -from google.cloud.aiplatform_v1beta1.types import artifact -from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact -from google.cloud.aiplatform_v1beta1.types import context -from google.cloud.aiplatform_v1beta1.types import context as gca_context -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import event -from google.cloud.aiplatform_v1beta1.types import execution -from google.cloud.aiplatform_v1beta1.types import execution as gca_execution -from google.cloud.aiplatform_v1beta1.types import lineage_subgraph -from google.cloud.aiplatform_v1beta1.types import metadata_schema -from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema -from google.cloud.aiplatform_v1beta1.types import metadata_service -from google.cloud.aiplatform_v1beta1.types import metadata_store -from google.cloud.aiplatform_v1beta1.types import metadata_store as gca_metadata_store -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import MetadataServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import MetadataServiceGrpcAsyncIOTransport -from .client import MetadataServiceClient - - -class MetadataServiceAsyncClient: - """Service for reading and writing metadata entries.""" - - _client: MetadataServiceClient - - 
DEFAULT_ENDPOINT = MetadataServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = MetadataServiceClient.DEFAULT_MTLS_ENDPOINT - - artifact_path = staticmethod(MetadataServiceClient.artifact_path) - parse_artifact_path = staticmethod(MetadataServiceClient.parse_artifact_path) - context_path = staticmethod(MetadataServiceClient.context_path) - parse_context_path = staticmethod(MetadataServiceClient.parse_context_path) - execution_path = staticmethod(MetadataServiceClient.execution_path) - parse_execution_path = staticmethod(MetadataServiceClient.parse_execution_path) - metadata_schema_path = staticmethod(MetadataServiceClient.metadata_schema_path) - parse_metadata_schema_path = staticmethod(MetadataServiceClient.parse_metadata_schema_path) - metadata_store_path = staticmethod(MetadataServiceClient.metadata_store_path) - parse_metadata_store_path = staticmethod(MetadataServiceClient.parse_metadata_store_path) - common_billing_account_path = staticmethod(MetadataServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(MetadataServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(MetadataServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(MetadataServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(MetadataServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(MetadataServiceClient.parse_common_organization_path) - common_project_path = staticmethod(MetadataServiceClient.common_project_path) - parse_common_project_path = staticmethod(MetadataServiceClient.parse_common_project_path) - common_location_path = staticmethod(MetadataServiceClient.common_location_path) - parse_common_location_path = staticmethod(MetadataServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - MetadataServiceAsyncClient: The constructed client. - """ - return MetadataServiceClient.from_service_account_info.__func__(MetadataServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - MetadataServiceAsyncClient: The constructed client. - """ - return MetadataServiceClient.from_service_account_file.__func__(MetadataServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> MetadataServiceTransport: - """Returns the transport used by the client instance. - - Returns: - MetadataServiceTransport: The transport used by the client instance. 
- """ - return self._client.transport - - get_transport_class = functools.partial(type(MetadataServiceClient).get_transport_class, type(MetadataServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, MetadataServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the metadata service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.MetadataServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = MetadataServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_metadata_store(self, - request: metadata_service.CreateMetadataStoreRequest = None, - *, - parent: str = None, - metadata_store: gca_metadata_store.MetadataStore = None, - metadata_store_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Initializes a MetadataStore, including allocation of - resources. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.CreateMetadataStoreRequest`): - The request object. Request message for - [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore]. - parent (:class:`str`): - Required. The resource name of the - Location where the MetadataStore should - be created. Format: - projects/{project}/locations/{location}/ - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - metadata_store (:class:`google.cloud.aiplatform_v1beta1.types.MetadataStore`): - Required. The MetadataStore to - create. - - This corresponds to the ``metadata_store`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- metadata_store_id (:class:`str`): - The {metadatastore} portion of the resource name with - the format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - If not provided, the MetadataStore's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are /[a-z][0-9]-/. Must be - unique across all MetadataStores in the parent Location. - (Otherwise the request will fail with ALREADY_EXISTS, or - PERMISSION_DENIED if the caller can't view the - preexisting MetadataStore.) - - This corresponds to the ``metadata_store_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.MetadataStore` Instance of a metadata store. Contains a set of metadata that can be - queried. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, metadata_store, metadata_store_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.CreateMetadataStoreRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if metadata_store is not None: - request.metadata_store = metadata_store - if metadata_store_id is not None: - request.metadata_store_id = metadata_store_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_metadata_store, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_metadata_store.MetadataStore, - metadata_type=metadata_service.CreateMetadataStoreOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_metadata_store(self, - request: metadata_service.GetMetadataStoreRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_store.MetadataStore: - r"""Retrieves a specific MetadataStore. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetMetadataStoreRequest`): - The request object. Request message for - [MetadataService.GetMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataStore]. - name (:class:`str`): - Required. 
The resource name of the - MetadataStore to retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.MetadataStore: - Instance of a metadata store. - Contains a set of metadata that can be - queried. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.GetMetadataStoreRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_metadata_store, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_metadata_stores(self, - request: metadata_service.ListMetadataStoresRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListMetadataStoresAsyncPager: - r"""Lists MetadataStores for a Location. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest`): - The request object. Request message for - [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. - parent (:class:`str`): - Required. The Location whose - MetadataStores should be listed. Format: - projects/{project}/locations/{location} - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataStoresAsyncPager: - Response message for - [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.ListMetadataStoresRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_metadata_stores, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListMetadataStoresAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_metadata_store(self, - request: metadata_service.DeleteMetadataStoreRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a single MetadataStore. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteMetadataStoreRequest`): - The request object. Request message for - [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore]. - name (:class:`str`): - Required. The resource name of the - MetadataStore to delete. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.DeleteMetadataStoreRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_metadata_store, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=metadata_service.DeleteMetadataStoreOperationMetadata, - ) - - # Done; return the response. - return response - - async def create_artifact(self, - request: metadata_service.CreateArtifactRequest = None, - *, - parent: str = None, - artifact: gca_artifact.Artifact = None, - artifact_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_artifact.Artifact: - r"""Creates an Artifact associated with a MetadataStore. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.CreateArtifactRequest`): - The request object. Request message for - [MetadataService.CreateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.CreateArtifact]. - parent (:class:`str`): - Required. The resource name of the - MetadataStore where the Artifact should - be created. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - artifact (:class:`google.cloud.aiplatform_v1beta1.types.Artifact`): - Required. The Artifact to create. - This corresponds to the ``artifact`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - artifact_id (:class:`str`): - The {artifact} portion of the resource name with the - format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} - If not provided, the Artifact's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are /[a-z][0-9]-/. Must be - unique across all Artifacts in the parent MetadataStore. - (Otherwise the request will fail with ALREADY_EXISTS, or - PERMISSION_DENIED if the caller can't view the - preexisting Artifact.) - - This corresponds to the ``artifact_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Artifact: - Instance of a general artifact. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, artifact, artifact_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.CreateArtifactRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if artifact is not None: - request.artifact = artifact - if artifact_id is not None: - request.artifact_id = artifact_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_artifact, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_artifact(self, - request: metadata_service.GetArtifactRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> artifact.Artifact: - r"""Retrieves a specific Artifact. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetArtifactRequest`): - The request object. Request message for - [MetadataService.GetArtifact][google.cloud.aiplatform.v1beta1.MetadataService.GetArtifact]. - name (:class:`str`): - Required. The resource name of the - Artifact to retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Artifact: - Instance of a general artifact. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.GetArtifactRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_artifact, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def list_artifacts(self, - request: metadata_service.ListArtifactsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListArtifactsAsyncPager: - r"""Lists Artifacts in the MetadataStore. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest`): - The request object. Request message for - [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. - parent (:class:`str`): - Required. The MetadataStore whose - Artifacts should be listed. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListArtifactsAsyncPager: - Response message for - [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.ListArtifactsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_artifacts, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListArtifactsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_artifact(self, - request: metadata_service.UpdateArtifactRequest = None, - *, - artifact: gca_artifact.Artifact = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_artifact.Artifact: - r"""Updates a stored Artifact. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateArtifactRequest`): - The request object. Request message for - [MetadataService.UpdateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.UpdateArtifact]. 
- artifact (:class:`google.cloud.aiplatform_v1beta1.types.Artifact`): - Required. The Artifact containing updates. The - Artifact's - [Artifact.name][google.cloud.aiplatform.v1beta1.Artifact.name] - field is used to identify the Artifact to be updated. - Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} - - This corresponds to the ``artifact`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. A FieldMask indicating - which fields should be updated. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Artifact: - Instance of a general artifact. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([artifact, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.UpdateArtifactRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if artifact is not None: - request.artifact = artifact - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_artifact, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("artifact.name", request.artifact.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def create_context(self, - request: metadata_service.CreateContextRequest = None, - *, - parent: str = None, - context: gca_context.Context = None, - context_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_context.Context: - r"""Creates a Context associated with a MetadataStore. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.CreateContextRequest`): - The request object. Request message for - [MetadataService.CreateContext][google.cloud.aiplatform.v1beta1.MetadataService.CreateContext]. - parent (:class:`str`): - Required. The resource name of the - MetadataStore where the Context should - be created. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - context (:class:`google.cloud.aiplatform_v1beta1.types.Context`): - Required. The Context to create. 
- This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - context_id (:class:`str`): - The {context} portion of the resource name with the - format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}. - If not provided, the Context's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are /[a-z][0-9]-/. Must be - unique across all Contexts in the parent MetadataStore. - (Otherwise the request will fail with ALREADY_EXISTS, or - PERMISSION_DENIED if the caller can't view the - preexisting Context.) - - This corresponds to the ``context_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Context: - Instance of a general context. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, context, context_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.CreateContextRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if context is not None: - request.context = context - if context_id is not None: - request.context_id = context_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_context, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_context(self, - request: metadata_service.GetContextRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> context.Context: - r"""Retrieves a specific Context. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetContextRequest`): - The request object. Request message for - [MetadataService.GetContext][google.cloud.aiplatform.v1beta1.MetadataService.GetContext]. - name (:class:`str`): - Required. The resource name of the - Context to retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Context: - Instance of a general context. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.GetContextRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_context, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_contexts(self, - request: metadata_service.ListContextsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListContextsAsyncPager: - r"""Lists Contexts on the MetadataStore. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListContextsRequest`): - The request object. Request message for - [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts] - parent (:class:`str`): - Required. The MetadataStore whose - Contexts should be listed. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListContextsAsyncPager: - Response message for - [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.ListContextsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_contexts, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListContextsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_context(self, - request: metadata_service.UpdateContextRequest = None, - *, - context: gca_context.Context = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_context.Context: - r"""Updates a stored Context. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateContextRequest`): - The request object. Request message for - [MetadataService.UpdateContext][google.cloud.aiplatform.v1beta1.MetadataService.UpdateContext]. - context (:class:`google.cloud.aiplatform_v1beta1.types.Context`): - Required. The Context containing updates. The Context's - [Context.name][google.cloud.aiplatform.v1beta1.Context.name] - field is used to identify the Context to be updated. - Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} - - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. A FieldMask indicating - which fields should be updated. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Context: - Instance of a general context. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([context, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.UpdateContextRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if context is not None: - request.context = context - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_context, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("context.name", request.context.name),
-            )),
-        )
-
-        # Send the request.
-        response = await rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Done; return the response.
-        return response
-
-    async def delete_context(self,
-            request: metadata_service.DeleteContextRequest = None,
-            *,
-            name: str = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> operation_async.AsyncOperation:
-        r"""Deletes a stored Context.
-
-        Args:
-            request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteContextRequest`):
-                The request object. Request message for
-                [MetadataService.DeleteContext][google.cloud.aiplatform.v1beta1.MetadataService.DeleteContext].
-            name (:class:`str`):
-                Required. The resource name of the
-                Context to delete. Format:
-                projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}
-
-                This corresponds to the ``name`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.api_core.operation_async.AsyncOperation:
-                An object representing a long-running operation.
-
-                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
-                   empty messages in your APIs. A typical example is to
-                   use it as the request or the response type of an API
-                   method. For instance:
-
-                      service Foo {
-                         rpc Bar(google.protobuf.Empty) returns
-                         (google.protobuf.Empty);
-
-                      }
-
-                   The JSON representation for Empty is empty JSON
-                   object {}.
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([name])
-        if request is not None and has_flattened_params:
-            raise ValueError("If the `request` argument is set, then none of "
-                             "the individual field arguments should be set.")
-
-        request = metadata_service.DeleteContextRequest(request)
-
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if name is not None:
-            request.name = name
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = gapic_v1.method_async.wrap_method(
-            self._client._transport.delete_context,
-            default_timeout=5.0,
-            client_info=DEFAULT_CLIENT_INFO,
-        )
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("name", request.name),
-            )),
-        )
-
-        # Send the request.
-        response = await rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Wrap the response in an operation future.
-        response = operation_async.from_gapic(
-            response,
-            self._client._transport.operations_client,
-            empty_pb2.Empty,
-            metadata_type=gca_operation.DeleteOperationMetadata,
-        )
-
-        # Done; return the response.
- return response - - async def add_context_artifacts_and_executions(self, - request: metadata_service.AddContextArtifactsAndExecutionsRequest = None, - *, - context: str = None, - artifacts: Sequence[str] = None, - executions: Sequence[str] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_service.AddContextArtifactsAndExecutionsResponse: - r"""Adds a set of Artifacts and Executions to a Context. - If any of the Artifacts or Executions have already been - added to a Context, they are simply skipped. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsRequest`): - The request object. Request message for - [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. - context (:class:`str`): - Required. The resource name of the - Context that the Artifacts and - Executions belong to. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} - - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - artifacts (:class:`Sequence[str]`): - The resource names of the Artifacts - to attribute to the Context. - Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} - - This corresponds to the ``artifacts`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - executions (:class:`Sequence[str]`): - The resource names of the Executions - to associate with the Context. - - Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} - - This corresponds to the ``executions`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsResponse: - Response message for - [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([context, artifacts, executions]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.AddContextArtifactsAndExecutionsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if context is not None: - request.context = context - if artifacts: - request.artifacts.extend(artifacts) - if executions: - request.executions.extend(executions) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.add_context_artifacts_and_executions, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("context", request.context), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def add_context_children(self, - request: metadata_service.AddContextChildrenRequest = None, - *, - context: str = None, - child_contexts: Sequence[str] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_service.AddContextChildrenResponse: - r"""Adds a set of Contexts as children to a parent Context. If any - of the child Contexts have already been added to the parent - Context, they are simply skipped. If this call would create a - cycle or cause any Context to have more than 10 parents, the - request will fail with an INVALID_ARGUMENT error. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.AddContextChildrenRequest`): - The request object. Request message for - [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. - context (:class:`str`): - Required. The resource name of the - parent Context. - Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} - - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - child_contexts (:class:`Sequence[str]`): - The resource names of the child - Contexts. - - This corresponds to the ``child_contexts`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.AddContextChildrenResponse: - Response message for - [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([context, child_contexts]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.AddContextChildrenRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if context is not None: - request.context = context - if child_contexts: - request.child_contexts.extend(child_contexts) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.add_context_children, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("context", request.context), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def query_context_lineage_subgraph(self, - request: metadata_service.QueryContextLineageSubgraphRequest = None, - *, - context: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> lineage_subgraph.LineageSubgraph: - r"""Retrieves Artifacts and Executions within the - specified Context, connected by Event edges and returned - as a LineageSubgraph. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.QueryContextLineageSubgraphRequest`): - The request object. Request message for - [MetadataService.QueryContextLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryContextLineageSubgraph]. - context (:class:`str`): - Required. The resource name of the Context whose - Artifacts and Executions should be retrieved as a - LineageSubgraph. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} - - The request may error with FAILED_PRECONDITION if the - number of Artifacts, the number of Executions, or the - number of Events that would be returned for the Context - exceeds 1000. - - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.LineageSubgraph: - A subgraph of the overall lineage - graph. Event edges connect Artifact and - Execution nodes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([context]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.QueryContextLineageSubgraphRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if context is not None: - request.context = context - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.query_context_lineage_subgraph, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("context", request.context), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def create_execution(self, - request: metadata_service.CreateExecutionRequest = None, - *, - parent: str = None, - execution: gca_execution.Execution = None, - execution_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_execution.Execution: - r"""Creates an Execution associated with a MetadataStore. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.CreateExecutionRequest`): - The request object. 
Request message for - [MetadataService.CreateExecution][google.cloud.aiplatform.v1beta1.MetadataService.CreateExecution]. - parent (:class:`str`): - Required. The resource name of the - MetadataStore where the Execution should - be created. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - execution (:class:`google.cloud.aiplatform_v1beta1.types.Execution`): - Required. The Execution to create. - This corresponds to the ``execution`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - execution_id (:class:`str`): - The {execution} portion of the resource name with the - format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} - If not provided, the Execution's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are /[a-z][0-9]-/. Must be - unique across all Executions in the parent - MetadataStore. (Otherwise the request will fail with - ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't - view the preexisting Execution.) - - This corresponds to the ``execution_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Execution: - Instance of a general execution. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, execution, execution_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.CreateExecutionRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if execution is not None: - request.execution = execution - if execution_id is not None: - request.execution_id = execution_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_execution, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_execution(self, - request: metadata_service.GetExecutionRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> execution.Execution: - r"""Retrieves a specific Execution. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetExecutionRequest`): - The request object. 
Request message for - [MetadataService.GetExecution][google.cloud.aiplatform.v1beta1.MetadataService.GetExecution]. - name (:class:`str`): - Required. The resource name of the - Execution to retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Execution: - Instance of a general execution. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.GetExecutionRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_execution, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_executions(self, - request: metadata_service.ListExecutionsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListExecutionsAsyncPager: - r"""Lists Executions in the MetadataStore. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest`): - The request object. Request message for - [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. - parent (:class:`str`): - Required. The MetadataStore whose - Executions should be listed. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListExecutionsAsyncPager: - Response message for - [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. 
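-        # A caller may pass either a fully-formed request object, e.g.
-        #     request = metadata_service.ListExecutionsRequest(
-        #         parent="projects/my-project/locations/us-central1/metadataStores/default")
-        #     pager = await client.list_executions(request=request)
-        # or the flattened ``parent`` argument alone, never both; the check
-        # below rejects mixed usage. (The resource name above is an
-        # illustrative placeholder.)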
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.ListExecutionsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_executions, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListExecutionsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_execution(self, - request: metadata_service.UpdateExecutionRequest = None, - *, - execution: gca_execution.Execution = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_execution.Execution: - r"""Updates a stored Execution. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateExecutionRequest`): - The request object. Request message for - [MetadataService.UpdateExecution][google.cloud.aiplatform.v1beta1.MetadataService.UpdateExecution]. - execution (:class:`google.cloud.aiplatform_v1beta1.types.Execution`): - Required. The Execution containing updates. The - Execution's - [Execution.name][google.cloud.aiplatform.v1beta1.Execution.name] - field is used to identify the Execution to be updated. - Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} - - This corresponds to the ``execution`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. A FieldMask indicating - which fields should be updated. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Execution: - Instance of a general execution. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
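-        # For illustration, a single field can be updated via the flattened
-        # arguments (the names here are placeholders):
-        #     mask = field_mask_pb2.FieldMask(paths=["display_name"])
-        #     updated = await client.update_execution(
-        #         execution=my_execution, update_mask=mask)
-        # The same exclusivity rule applies: pass flattened fields or a
-        # ``request`` object, not both.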
- has_flattened_params = any([execution, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.UpdateExecutionRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if execution is not None: - request.execution = execution - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_execution, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("execution.name", request.execution.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def add_execution_events(self, - request: metadata_service.AddExecutionEventsRequest = None, - *, - execution: str = None, - events: Sequence[event.Event] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_service.AddExecutionEventsResponse: - r"""Adds Events to the specified Execution. An Event - indicates whether an Artifact was used as an input or - output for an Execution. If an Event already exists - between the Execution and the Artifact, the Event is - skipped. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.AddExecutionEventsRequest`): - The request object. Request message for - [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. - execution (:class:`str`): - Required. The resource name of the - Execution that the Events connect - Artifacts with. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} - - This corresponds to the ``execution`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - events (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.Event]`): - The Events to create and add. - This corresponds to the ``events`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.AddExecutionEventsResponse: - Response message for - [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
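-        # Sketch of typical usage, assuming an existing Artifact and Execution
-        # (resource names are placeholders):
-        #     ev = event.Event(artifact=artifact_name,
-        #                      type_=event.Event.Type.INPUT)
-        #     await client.add_execution_events(
-        #         execution=execution_name, events=[ev])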
- has_flattened_params = any([execution, events]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.AddExecutionEventsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if execution is not None: - request.execution = execution - if events: - request.events.extend(events) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.add_execution_events, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("execution", request.execution), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def query_execution_inputs_and_outputs(self, - request: metadata_service.QueryExecutionInputsAndOutputsRequest = None, - *, - execution: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> lineage_subgraph.LineageSubgraph: - r"""Obtains the set of input and output Artifacts for - this Execution, in the form of LineageSubgraph that also - contains the Execution and connecting Events. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.QueryExecutionInputsAndOutputsRequest`): - The request object. Request message for - [MetadataService.QueryExecutionInputsAndOutputs][google.cloud.aiplatform.v1beta1.MetadataService.QueryExecutionInputsAndOutputs]. - execution (:class:`str`): - Required. The resource name of the - Execution whose input and output - Artifacts should be retrieved as a - LineageSubgraph. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} - - This corresponds to the ``execution`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.LineageSubgraph: - A subgraph of the overall lineage - graph. Event edges connect Artifact and - Execution nodes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([execution]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.QueryExecutionInputsAndOutputsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if execution is not None: - request.execution = execution - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
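-        # The 5-second default below applies only when the caller leaves
-        # ``retry``/``timeout`` at gapic_v1.method.DEFAULT; explicit values
-        # passed to this method take precedence.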
-        rpc = gapic_v1.method_async.wrap_method(
-            self._client._transport.query_execution_inputs_and_outputs,
-            default_timeout=5.0,
-            client_info=DEFAULT_CLIENT_INFO,
-        )
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("execution", request.execution),
-            )),
-        )
-
-        # Send the request.
-        response = await rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Done; return the response.
-        return response
-
-    async def create_metadata_schema(self,
-            request: metadata_service.CreateMetadataSchemaRequest = None,
-            *,
-            parent: str = None,
-            metadata_schema: gca_metadata_schema.MetadataSchema = None,
-            metadata_schema_id: str = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> gca_metadata_schema.MetadataSchema:
-        r"""Creates a MetadataSchema.
-
-        Args:
-            request (:class:`google.cloud.aiplatform_v1beta1.types.CreateMetadataSchemaRequest`):
-                The request object. Request message for
-                [MetadataService.CreateMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataSchema].
-            parent (:class:`str`):
-                Required. The resource name of the
-                MetadataStore where the MetadataSchema
-                should be created. Format:
-                projects/{project}/locations/{location}/metadataStores/{metadatastore}
-
-                This corresponds to the ``parent`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            metadata_schema (:class:`google.cloud.aiplatform_v1beta1.types.MetadataSchema`):
-                Required. The MetadataSchema to
-                create.
-
-                This corresponds to the ``metadata_schema`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            metadata_schema_id (:class:`str`):
-                The {metadata_schema} portion of the resource name with
-                the format:
-                projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}
-                If not provided, the MetadataSchema's ID will be a UUID
-                generated by the service. Must be 4-128 characters in
-                length. Valid characters are /[a-z][0-9]-/. Must be
-                unique across all MetadataSchemas in the parent
-                MetadataStore. (Otherwise the request will fail with
-                ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't
-                view the preexisting MetadataSchema.)
-
-                This corresponds to the ``metadata_schema_id`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.cloud.aiplatform_v1beta1.types.MetadataSchema:
-                Instance of a general MetadataSchema.
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([parent, metadata_schema, metadata_schema_id])
-        if request is not None and has_flattened_params:
-            raise ValueError("If the `request` argument is set, then none of "
-                "the individual field arguments should be set.")
-
-        request = metadata_service.CreateMetadataSchemaRequest(request)
-
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
- if parent is not None: - request.parent = parent - if metadata_schema is not None: - request.metadata_schema = metadata_schema - if metadata_schema_id is not None: - request.metadata_schema_id = metadata_schema_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_metadata_schema, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_metadata_schema(self, - request: metadata_service.GetMetadataSchemaRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_schema.MetadataSchema: - r"""Retrieves a specific MetadataSchema. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetMetadataSchemaRequest`): - The request object. Request message for - [MetadataService.GetMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataSchema]. - name (:class:`str`): - Required. The resource name of the - MetadataSchema to retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema} - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.MetadataSchema: - Instance of a general MetadataSchema. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.GetMetadataSchemaRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_metadata_schema, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def list_metadata_schemas(self, - request: metadata_service.ListMetadataSchemasRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListMetadataSchemasAsyncPager: - r"""Lists MetadataSchemas. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest`): - The request object. Request message for - [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. - parent (:class:`str`): - Required. The MetadataStore whose - MetadataSchemas should be listed. - Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataSchemasAsyncPager: - Response message for - [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.ListMetadataSchemasRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_metadata_schemas, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListMetadataSchemasAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def query_artifact_lineage_subgraph(self, - request: metadata_service.QueryArtifactLineageSubgraphRequest = None, - *, - artifact: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> lineage_subgraph.LineageSubgraph: - r"""Retrieves lineage of an Artifact represented through - Artifacts and Executions connected by Event edges and - returned as a LineageSubgraph. 
- - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.QueryArtifactLineageSubgraphRequest`): - The request object. Request message for - [MetadataService.QueryArtifactLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryArtifactLineageSubgraph]. - artifact (:class:`str`): - Required. The resource name of the Artifact whose - Lineage needs to be retrieved as a LineageSubgraph. - Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} - - The request may error with FAILED_PRECONDITION if the - number of Artifacts, the number of Executions, or the - number of Events that would be returned for the Context - exceeds 1000. - - This corresponds to the ``artifact`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.LineageSubgraph: - A subgraph of the overall lineage - graph. Event edges connect Artifact and - Execution nodes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([artifact]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.QueryArtifactLineageSubgraphRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if artifact is not None: - request.artifact = artifact - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.query_artifact_lineage_subgraph, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("artifact", request.artifact), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "MetadataServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py deleted file mode 100644 index 3ef295ab55..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py +++ /dev/null @@ -1,2739 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from distutils import util -import os -import re -from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.metadata_service import pagers -from google.cloud.aiplatform_v1beta1.types import artifact -from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact -from google.cloud.aiplatform_v1beta1.types import context -from google.cloud.aiplatform_v1beta1.types import context as gca_context -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import event -from google.cloud.aiplatform_v1beta1.types import execution -from google.cloud.aiplatform_v1beta1.types import execution as gca_execution -from google.cloud.aiplatform_v1beta1.types import lineage_subgraph -from google.cloud.aiplatform_v1beta1.types import metadata_schema -from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema -from google.cloud.aiplatform_v1beta1.types import metadata_service -from google.cloud.aiplatform_v1beta1.types import metadata_store -from google.cloud.aiplatform_v1beta1.types import metadata_store as gca_metadata_store -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import MetadataServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import MetadataServiceGrpcTransport -from .transports.grpc_asyncio import MetadataServiceGrpcAsyncIOTransport - - -class MetadataServiceClientMeta(type): - """Metaclass for the MetadataService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[MetadataServiceTransport]] - _transport_registry["grpc"] = MetadataServiceGrpcTransport - _transport_registry["grpc_asyncio"] = MetadataServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[MetadataServiceTransport]: - """Returns an appropriate transport class. 
-
-        Args:
-            label: The name of the desired transport. If none is
-                provided, then the first transport in the registry is used.
-
-        Returns:
-            The transport class to use.
-        """
-        # If a specific transport is requested, return that one.
-        if label:
-            return cls._transport_registry[label]
-
-        # No transport is requested; return the default (that is, the first one
-        # in the dictionary).
-        return next(iter(cls._transport_registry.values()))
-
-
-class MetadataServiceClient(metaclass=MetadataServiceClientMeta):
-    """Service for reading and writing metadata entries."""
-
-    @staticmethod
-    def _get_default_mtls_endpoint(api_endpoint):
-        """Converts api endpoint to mTLS endpoint.
-
-        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
-        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
-        Args:
-            api_endpoint (Optional[str]): the api endpoint to convert.
-        Returns:
-            str: converted mTLS api endpoint.
-        """
-        if not api_endpoint:
-            return api_endpoint
-
-        mtls_endpoint_re = re.compile(
-            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
-        )
-
-        m = mtls_endpoint_re.match(api_endpoint)
-        name, mtls, sandbox, googledomain = m.groups()
-        if mtls or not googledomain:
-            return api_endpoint
-
-        if sandbox:
-            return api_endpoint.replace(
-                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
-            )
-
-        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
-
-    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
-    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
-        DEFAULT_ENDPOINT
-    )
-
-    @classmethod
-    def from_service_account_info(cls, info: dict, *args, **kwargs):
-        """Creates an instance of this client using the provided credentials
-            info.
-
-        Args:
-            info (dict): The service account private key info.
-            args: Additional arguments to pass to the constructor.
-            kwargs: Additional arguments to pass to the constructor.
-
-        Returns:
-            MetadataServiceClient: The constructed client.
-        """
-        credentials = service_account.Credentials.from_service_account_info(info)
-        kwargs["credentials"] = credentials
-        return cls(*args, **kwargs)
-
-    @classmethod
-    def from_service_account_file(cls, filename: str, *args, **kwargs):
-        """Creates an instance of this client using the provided credentials
-            file.
-
-        Args:
-            filename (str): The path to the service account private key json
-                file.
-            args: Additional arguments to pass to the constructor.
-            kwargs: Additional arguments to pass to the constructor.
-
-        Returns:
-            MetadataServiceClient: The constructed client.
-        """
-        credentials = service_account.Credentials.from_service_account_file(
-            filename)
-        kwargs["credentials"] = credentials
-        return cls(*args, **kwargs)
-
-    from_service_account_json = from_service_account_file
-
-    @property
-    def transport(self) -> MetadataServiceTransport:
-        """Returns the transport used by the client instance.
-
-        Returns:
-            MetadataServiceTransport: The transport used by the client
-                instance.
- """ - return self._transport - - @staticmethod - def artifact_path(project: str,location: str,metadata_store: str,artifact: str,) -> str: - """Returns a fully-qualified artifact string.""" - return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(project=project, location=location, metadata_store=metadata_store, artifact=artifact, ) - - @staticmethod - def parse_artifact_path(path: str) -> Dict[str,str]: - """Parses a artifact path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/artifacts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def context_path(project: str,location: str,metadata_store: str,context: str,) -> str: - """Returns a fully-qualified context string.""" - return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(project=project, location=location, metadata_store=metadata_store, context=context, ) - - @staticmethod - def parse_context_path(path: str) -> Dict[str,str]: - """Parses a context path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/contexts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def execution_path(project: str,location: str,metadata_store: str,execution: str,) -> str: - """Returns a fully-qualified execution string.""" - return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(project=project, location=location, metadata_store=metadata_store, execution=execution, ) - - @staticmethod - def parse_execution_path(path: str) -> Dict[str,str]: - """Parses a execution path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/executions/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def metadata_schema_path(project: str,location: str,metadata_store: str,metadata_schema: str,) -> str: - """Returns a fully-qualified metadata_schema string.""" - return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}".format(project=project, location=location, metadata_store=metadata_store, metadata_schema=metadata_schema, ) - - @staticmethod - def parse_metadata_schema_path(path: str) -> Dict[str,str]: - """Parses a metadata_schema path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/metadataSchemas/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def metadata_store_path(project: str,location: str,metadata_store: str,) -> str: - """Returns a fully-qualified metadata_store string.""" - return "projects/{project}/locations/{location}/metadataStores/{metadata_store}".format(project=project, location=location, metadata_store=metadata_store, ) - - @staticmethod - def parse_metadata_store_path(path: str) -> Dict[str,str]: - """Parses a metadata_store path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account 
path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, MetadataServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the metadata service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, MetadataServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, MetadataServiceTransport): - # transport is a MetadataServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - ) - - def create_metadata_store(self, - request: metadata_service.CreateMetadataStoreRequest = None, - *, - parent: str = None, - metadata_store: gca_metadata_store.MetadataStore = None, - metadata_store_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Initializes a MetadataStore, including allocation of - resources. - - Args: - request (google.cloud.aiplatform_v1beta1.types.CreateMetadataStoreRequest): - The request object. Request message for - [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore]. - parent (str): - Required. 
The resource name of the - Location where the MetadataStore should - be created. Format: - projects/{project}/locations/{location}/ - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - metadata_store (google.cloud.aiplatform_v1beta1.types.MetadataStore): - Required. The MetadataStore to - create. - - This corresponds to the ``metadata_store`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - metadata_store_id (str): - The {metadatastore} portion of the resource name with - the format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - If not provided, the MetadataStore's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are /[a-z][0-9]-/. Must be - unique across all MetadataStores in the parent Location. - (Otherwise the request will fail with ALREADY_EXISTS, or - PERMISSION_DENIED if the caller can't view the - preexisting MetadataStore.) - - This corresponds to the ``metadata_store_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.MetadataStore` Instance of a metadata store. Contains a set of metadata that can be - queried. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, metadata_store, metadata_store_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.CreateMetadataStoreRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.CreateMetadataStoreRequest): - request = metadata_service.CreateMetadataStoreRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if metadata_store is not None: - request.metadata_store = metadata_store - if metadata_store_id is not None: - request.metadata_store_id = metadata_store_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_metadata_store] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
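-        # The returned future resolves to the new MetadataStore once the
-        # service finishes provisioning, e.g. (illustrative):
-        #     operation = client.create_metadata_store(
-        #         parent=parent, metadata_store=store)
-        #     metadata_store = operation.result()  # blocks until complete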
- response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_metadata_store.MetadataStore, - metadata_type=metadata_service.CreateMetadataStoreOperationMetadata, - ) - - # Done; return the response. - return response - - def get_metadata_store(self, - request: metadata_service.GetMetadataStoreRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_store.MetadataStore: - r"""Retrieves a specific MetadataStore. - - Args: - request (google.cloud.aiplatform_v1beta1.types.GetMetadataStoreRequest): - The request object. Request message for - [MetadataService.GetMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataStore]. - name (str): - Required. The resource name of the - MetadataStore to retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.MetadataStore: - Instance of a metadata store. - Contains a set of metadata that can be - queried. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.GetMetadataStoreRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.GetMetadataStoreRequest): - request = metadata_service.GetMetadataStoreRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_metadata_store] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_metadata_stores(self, - request: metadata_service.ListMetadataStoresRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListMetadataStoresPager: - r"""Lists MetadataStores for a Location. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest): - The request object. Request message for - [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. - parent (str): - Required. 
The Location whose - MetadataStores should be listed. Format: - projects/{project}/locations/{location} - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataStoresPager: - Response message for - [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.ListMetadataStoresRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.ListMetadataStoresRequest): - request = metadata_service.ListMetadataStoresRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_metadata_stores] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListMetadataStoresPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_metadata_store(self, - request: metadata_service.DeleteMetadataStoreRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a single MetadataStore. - - Args: - request (google.cloud.aiplatform_v1beta1.types.DeleteMetadataStoreRequest): - The request object. Request message for - [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore]. - name (str): - Required. The resource name of the - MetadataStore to delete. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.DeleteMetadataStoreRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.DeleteMetadataStoreRequest): - request = metadata_service.DeleteMetadataStoreRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_metadata_store] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=metadata_service.DeleteMetadataStoreOperationMetadata, - ) - - # Done; return the response. - return response - - def create_artifact(self, - request: metadata_service.CreateArtifactRequest = None, - *, - parent: str = None, - artifact: gca_artifact.Artifact = None, - artifact_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_artifact.Artifact: - r"""Creates an Artifact associated with a MetadataStore. - - Args: - request (google.cloud.aiplatform_v1beta1.types.CreateArtifactRequest): - The request object. Request message for - [MetadataService.CreateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.CreateArtifact]. - parent (str): - Required. The resource name of the - MetadataStore where the Artifact should - be created. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - artifact (google.cloud.aiplatform_v1beta1.types.Artifact): - Required. The Artifact to create. - This corresponds to the ``artifact`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- artifact_id (str): - The {artifact} portion of the resource name with the - format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} - If not provided, the Artifact's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are /[a-z][0-9]-/. Must be - unique across all Artifacts in the parent MetadataStore. - (Otherwise the request will fail with ALREADY_EXISTS, or - PERMISSION_DENIED if the caller can't view the - preexisting Artifact.) - - This corresponds to the ``artifact_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Artifact: - Instance of a general artifact. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, artifact, artifact_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.CreateArtifactRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.CreateArtifactRequest): - request = metadata_service.CreateArtifactRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if artifact is not None: - request.artifact = artifact - if artifact_id is not None: - request.artifact_id = artifact_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_artifact] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_artifact(self, - request: metadata_service.GetArtifactRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> artifact.Artifact: - r"""Retrieves a specific Artifact. - - Args: - request (google.cloud.aiplatform_v1beta1.types.GetArtifactRequest): - The request object. Request message for - [MetadataService.GetArtifact][google.cloud.aiplatform.v1beta1.MetadataService.GetArtifact]. - name (str): - Required. The resource name of the - Artifact to retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
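# A sketch of ``create_artifact`` above using the flattened parameters; the
# names are hypothetical. Passing ``artifact_id`` pins the final segment of
# the resource name (4-128 chars of [a-z][0-9]-), otherwise the service picks
# a UUID. Mixing a ``request`` object with these keyword arguments raises
# ValueError, as the sanity check above enforces.
from google.cloud.aiplatform_v1beta1.services.metadata_service import MetadataServiceClient
from google.cloud.aiplatform_v1beta1.types import Artifact

client = MetadataServiceClient()
artifact = client.create_artifact(
    parent="projects/my-project/locations/us-central1/metadataStores/default",
    artifact=Artifact(display_name="training-dataset", uri="gs://my-bucket/data"),
    artifact_id="training-dataset-v1",
)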
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Artifact: - Instance of a general artifact. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.GetArtifactRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.GetArtifactRequest): - request = metadata_service.GetArtifactRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_artifact] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_artifacts(self, - request: metadata_service.ListArtifactsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListArtifactsPager: - r"""Lists Artifacts in the MetadataStore. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest): - The request object. Request message for - [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. - parent (str): - Required. The MetadataStore whose - Artifacts should be listed. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListArtifactsPager: - Response message for - [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.ListArtifactsRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.ListArtifactsRequest): - request = metadata_service.ListArtifactsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_artifacts] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListArtifactsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_artifact(self, - request: metadata_service.UpdateArtifactRequest = None, - *, - artifact: gca_artifact.Artifact = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_artifact.Artifact: - r"""Updates a stored Artifact. - - Args: - request (google.cloud.aiplatform_v1beta1.types.UpdateArtifactRequest): - The request object. Request message for - [MetadataService.UpdateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.UpdateArtifact]. - artifact (google.cloud.aiplatform_v1beta1.types.Artifact): - Required. The Artifact containing updates. The - Artifact's - [Artifact.name][google.cloud.aiplatform.v1beta1.Artifact.name] - field is used to identify the Artifact to be updated. - Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} - - This corresponds to the ``artifact`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A FieldMask indicating - which fields should be updated. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Artifact: - Instance of a general artifact. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([artifact, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.UpdateArtifactRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
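# Besides plain iteration, GAPIC pagers such as the ``ListArtifactsPager``
# returned above also expose page-level access; a sketch under the same
# hypothetical names:
from google.cloud.aiplatform_v1beta1.services.metadata_service import MetadataServiceClient

client = MetadataServiceClient()
pager = client.list_artifacts(
    parent="projects/my-project/locations/us-central1/metadataStores/default",
)
for page in pager.pages:  # One ListArtifactsResponse per underlying RPC.
    for artifact in page.artifacts:
        print(artifact.display_name)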
- if not isinstance(request, metadata_service.UpdateArtifactRequest): - request = metadata_service.UpdateArtifactRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if artifact is not None: - request.artifact = artifact - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_artifact] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("artifact.name", request.artifact.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def create_context(self, - request: metadata_service.CreateContextRequest = None, - *, - parent: str = None, - context: gca_context.Context = None, - context_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_context.Context: - r"""Creates a Context associated with a MetadataStore. - - Args: - request (google.cloud.aiplatform_v1beta1.types.CreateContextRequest): - The request object. Request message for - [MetadataService.CreateContext][google.cloud.aiplatform.v1beta1.MetadataService.CreateContext]. - parent (str): - Required. The resource name of the - MetadataStore where the Context should - be created. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - context (google.cloud.aiplatform_v1beta1.types.Context): - Required. The Context to create. - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - context_id (str): - The {context} portion of the resource name with the - format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}. - If not provided, the Context's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are /[a-z][0-9]-/. Must be - unique across all Contexts in the parent MetadataStore. - (Otherwise the request will fail with ALREADY_EXISTS, or - PERMISSION_DENIED if the caller can't view the - preexisting Context.) - - This corresponds to the ``context_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Context: - Instance of a general context. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
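# A sketch of the ``update_artifact`` pattern above: the Artifact's ``name``
# identifies the target, and the required FieldMask limits which fields are
# written; the values shown are illustrative assumptions.
from google.protobuf import field_mask_pb2
from google.cloud.aiplatform_v1beta1.services.metadata_service import MetadataServiceClient
from google.cloud.aiplatform_v1beta1.types import Artifact

client = MetadataServiceClient()
updated = client.update_artifact(
    artifact=Artifact(
        name=(
            "projects/my-project/locations/us-central1/"
            "metadataStores/default/artifacts/training-dataset-v1"
        ),
        display_name="training dataset (cleaned)",
    ),
    update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
)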
- has_flattened_params = any([parent, context, context_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.CreateContextRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.CreateContextRequest): - request = metadata_service.CreateContextRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if context is not None: - request.context = context - if context_id is not None: - request.context_id = context_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_context] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_context(self, - request: metadata_service.GetContextRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> context.Context: - r"""Retrieves a specific Context. - - Args: - request (google.cloud.aiplatform_v1beta1.types.GetContextRequest): - The request object. Request message for - [MetadataService.GetContext][google.cloud.aiplatform.v1beta1.MetadataService.GetContext]. - name (str): - Required. The resource name of the - Context to retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Context: - Instance of a general context. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.GetContextRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.GetContextRequest): - request = metadata_service.GetContextRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.get_context] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_contexts(self, - request: metadata_service.ListContextsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListContextsPager: - r"""Lists Contexts on the MetadataStore. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListContextsRequest): - The request object. Request message for - [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts] - parent (str): - Required. The MetadataStore whose - Contexts should be listed. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListContextsPager: - Response message for - [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.ListContextsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.ListContextsRequest): - request = metadata_service.ListContextsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_contexts] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListContextsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
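# The recurring "metadata header" step above injects the routing header that
# lets the backend route by resource name; a small sketch of what
# ``to_grpc_metadata`` yields (the exact percent-encoding is an
# implementation detail of google-api-core):
from google.api_core.gapic_v1 import routing_header

header = routing_header.to_grpc_metadata(
    (("parent", "projects/my-project/locations/us-central1"),)
)
print(header)  # Roughly: ('x-goog-request-params', 'parent=projects%2F...')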
- return response - - def update_context(self, - request: metadata_service.UpdateContextRequest = None, - *, - context: gca_context.Context = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_context.Context: - r"""Updates a stored Context. - - Args: - request (google.cloud.aiplatform_v1beta1.types.UpdateContextRequest): - The request object. Request message for - [MetadataService.UpdateContext][google.cloud.aiplatform.v1beta1.MetadataService.UpdateContext]. - context (google.cloud.aiplatform_v1beta1.types.Context): - Required. The Context containing updates. The Context's - [Context.name][google.cloud.aiplatform.v1beta1.Context.name] - field is used to identify the Context to be updated. - Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} - - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A FieldMask indicating - which fields should be updated. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Context: - Instance of a general context. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([context, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.UpdateContextRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.UpdateContextRequest): - request = metadata_service.UpdateContextRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if context is not None: - request.context = context - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_context] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("context.name", request.context.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_context(self, - request: metadata_service.DeleteContextRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a stored Context. 
- - Args: - request (google.cloud.aiplatform_v1beta1.types.DeleteContextRequest): - The request object. Request message for - [MetadataService.DeleteContext][google.cloud.aiplatform.v1beta1.MetadataService.DeleteContext]. - name (str): - Required. The resource name of the - Context to delete. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.DeleteContextRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.DeleteContextRequest): - request = metadata_service.DeleteContextRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_context] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def add_context_artifacts_and_executions(self, - request: metadata_service.AddContextArtifactsAndExecutionsRequest = None, - *, - context: str = None, - artifacts: Sequence[str] = None, - executions: Sequence[str] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_service.AddContextArtifactsAndExecutionsResponse: - r"""Adds a set of Artifacts and Executions to a Context. - If any of the Artifacts or Executions have already been - added to a Context, they are simply skipped.
- - Args: - request (google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsRequest): - The request object. Request message for - [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. - context (str): - Required. The resource name of the - Context that the Artifacts and - Executions belong to. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} - - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - artifacts (Sequence[str]): - The resource names of the Artifacts - to attribute to the Context. - Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} - - This corresponds to the ``artifacts`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - executions (Sequence[str]): - The resource names of the Executions - to associate with the Context. - - Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} - - This corresponds to the ``executions`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsResponse: - Response message for - [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([context, artifacts, executions]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.AddContextArtifactsAndExecutionsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.AddContextArtifactsAndExecutionsRequest): - request = metadata_service.AddContextArtifactsAndExecutionsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if context is not None: - request.context = context - if artifacts is not None: - request.artifacts = artifacts - if executions is not None: - request.executions = executions - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.add_context_artifacts_and_executions] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("context", request.context), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def add_context_children(self, - request: metadata_service.AddContextChildrenRequest = None, - *, - context: str = None, - child_contexts: Sequence[str] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_service.AddContextChildrenResponse: - r"""Adds a set of Contexts as children to a parent Context. If any - of the child Contexts have already been added to the parent - Context, they are simply skipped. If this call would create a - cycle or cause any Context to have more than 10 parents, the - request will fail with an INVALID_ARGUMENT error. - - Args: - request (google.cloud.aiplatform_v1beta1.types.AddContextChildrenRequest): - The request object. Request message for - [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. - context (str): - Required. The resource name of the - parent Context. - Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} - - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - child_contexts (Sequence[str]): - The resource names of the child - Contexts. - - This corresponds to the ``child_contexts`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.AddContextChildrenResponse: - Response message for - [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([context, child_contexts]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.AddContextChildrenRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.AddContextChildrenRequest): - request = metadata_service.AddContextChildrenRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if context is not None: - request.context = context - if child_contexts is not None: - request.child_contexts = child_contexts - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.add_context_children] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("context", request.context), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
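# A sketch of the two context-linking calls above, with hypothetical resource
# names: attach lineage nodes to a Context, then nest Contexts. Already-linked
# entries are skipped, and ``add_context_children`` fails with
# INVALID_ARGUMENT on cycles or more than 10 parents per Context.
from google.cloud.aiplatform_v1beta1.services.metadata_service import MetadataServiceClient

client = MetadataServiceClient()
prefix = "projects/my-project/locations/us-central1/metadataStores/default"
client.add_context_artifacts_and_executions(
    context=f"{prefix}/contexts/experiment-1",
    artifacts=[f"{prefix}/artifacts/training-dataset-v1"],
    executions=[f"{prefix}/executions/train-model-run-1"],
)
client.add_context_children(
    context=f"{prefix}/contexts/experiment-1",
    child_contexts=[f"{prefix}/contexts/run-1"],
)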
- return response - - def query_context_lineage_subgraph(self, - request: metadata_service.QueryContextLineageSubgraphRequest = None, - *, - context: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> lineage_subgraph.LineageSubgraph: - r"""Retrieves Artifacts and Executions within the - specified Context, connected by Event edges and returned - as a LineageSubgraph. - - Args: - request (google.cloud.aiplatform_v1beta1.types.QueryContextLineageSubgraphRequest): - The request object. Request message for - [MetadataService.QueryContextLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryContextLineageSubgraph]. - context (str): - Required. The resource name of the Context whose - Artifacts and Executions should be retrieved as a - LineageSubgraph. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} - - The request may error with FAILED_PRECONDITION if the - number of Artifacts, the number of Executions, or the - number of Events that would be returned for the Context - exceeds 1000. - - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.LineageSubgraph: - A subgraph of the overall lineage - graph. Event edges connect Artifact and - Execution nodes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([context]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.QueryContextLineageSubgraphRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.QueryContextLineageSubgraphRequest): - request = metadata_service.QueryContextLineageSubgraphRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if context is not None: - request.context = context - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.query_context_lineage_subgraph] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("context", request.context), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
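# A sketch of reading a Context's lineage via the method above; the returned
# LineageSubgraph carries Artifact and Execution nodes plus the Event edges
# connecting them (resource name hypothetical).
from google.cloud.aiplatform_v1beta1.services.metadata_service import MetadataServiceClient

client = MetadataServiceClient()
subgraph = client.query_context_lineage_subgraph(
    context=(
        "projects/my-project/locations/us-central1/"
        "metadataStores/default/contexts/experiment-1"
    ),
)
print(len(subgraph.artifacts), len(subgraph.executions), len(subgraph.events))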
- return response - - def create_execution(self, - request: metadata_service.CreateExecutionRequest = None, - *, - parent: str = None, - execution: gca_execution.Execution = None, - execution_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_execution.Execution: - r"""Creates an Execution associated with a MetadataStore. - - Args: - request (google.cloud.aiplatform_v1beta1.types.CreateExecutionRequest): - The request object. Request message for - [MetadataService.CreateExecution][google.cloud.aiplatform.v1beta1.MetadataService.CreateExecution]. - parent (str): - Required. The resource name of the - MetadataStore where the Execution should - be created. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - execution (google.cloud.aiplatform_v1beta1.types.Execution): - Required. The Execution to create. - This corresponds to the ``execution`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - execution_id (str): - The {execution} portion of the resource name with the - format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} - If not provided, the Execution's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are /[a-z][0-9]-/. Must be - unique across all Executions in the parent - MetadataStore. (Otherwise the request will fail with - ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't - view the preexisting Execution.) - - This corresponds to the ``execution_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Execution: - Instance of a general execution. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, execution, execution_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.CreateExecutionRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.CreateExecutionRequest): - request = metadata_service.CreateExecutionRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if execution is not None: - request.execution = execution - if execution_id is not None: - request.execution_id = execution_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_execution] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_execution(self, - request: metadata_service.GetExecutionRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> execution.Execution: - r"""Retrieves a specific Execution. - - Args: - request (google.cloud.aiplatform_v1beta1.types.GetExecutionRequest): - The request object. Request message for - [MetadataService.GetExecution][google.cloud.aiplatform.v1beta1.MetadataService.GetExecution]. - name (str): - Required. The resource name of the - Execution to retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Execution: - Instance of a general execution. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.GetExecutionRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.GetExecutionRequest): - request = metadata_service.GetExecutionRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_execution] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_executions(self, - request: metadata_service.ListExecutionsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListExecutionsPager: - r"""Lists Executions in the MetadataStore. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest): - The request object. Request message for - [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. - parent (str): - Required. The MetadataStore whose - Executions should be listed. 
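# A sketch of creating and re-fetching an Execution with the methods above
# (names assumed); as with Artifacts, a caller-supplied ID must be 4-128
# characters and unique within the parent MetadataStore.
from google.cloud.aiplatform_v1beta1.services.metadata_service import MetadataServiceClient
from google.cloud.aiplatform_v1beta1.types import Execution

client = MetadataServiceClient()
parent = "projects/my-project/locations/us-central1/metadataStores/default"
execution = client.create_execution(
    parent=parent,
    execution=Execution(display_name="train-model"),
    execution_id="train-model-run-1",
)
fetched = client.get_execution(name=execution.name)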
Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListExecutionsPager: - Response message for - [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.ListExecutionsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.ListExecutionsRequest): - request = metadata_service.ListExecutionsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_executions] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListExecutionsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_execution(self, - request: metadata_service.UpdateExecutionRequest = None, - *, - execution: gca_execution.Execution = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_execution.Execution: - r"""Updates a stored Execution. - - Args: - request (google.cloud.aiplatform_v1beta1.types.UpdateExecutionRequest): - The request object. Request message for - [MetadataService.UpdateExecution][google.cloud.aiplatform.v1beta1.MetadataService.UpdateExecution]. - execution (google.cloud.aiplatform_v1beta1.types.Execution): - Required. The Execution containing updates. The - Execution's - [Execution.name][google.cloud.aiplatform.v1beta1.Execution.name] - field is used to identify the Execution to be updated. - Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} - - This corresponds to the ``execution`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A FieldMask indicating - which fields should be updated. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Execution: - Instance of a general execution. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([execution, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.UpdateExecutionRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.UpdateExecutionRequest): - request = metadata_service.UpdateExecutionRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if execution is not None: - request.execution = execution - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_execution] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("execution.name", request.execution.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def add_execution_events(self, - request: metadata_service.AddExecutionEventsRequest = None, - *, - execution: str = None, - events: Sequence[event.Event] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_service.AddExecutionEventsResponse: - r"""Adds Events to the specified Execution. An Event - indicates whether an Artifact was used as an input or - output for an Execution. If an Event already exists - between the Execution and the Artifact, the Event is - skipped. - - Args: - request (google.cloud.aiplatform_v1beta1.types.AddExecutionEventsRequest): - The request object. Request message for - [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. - execution (str): - Required. The resource name of the - Execution that the Events connect - Artifacts with. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} - - This corresponds to the ``execution`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - events (Sequence[google.cloud.aiplatform_v1beta1.types.Event]): - The Events to create and add. - This corresponds to the ``events`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.AddExecutionEventsResponse: - Response message for - [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([execution, events]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.AddExecutionEventsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.AddExecutionEventsRequest): - request = metadata_service.AddExecutionEventsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if execution is not None: - request.execution = execution - if events is not None: - request.events = events - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.add_execution_events] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("execution", request.execution), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def query_execution_inputs_and_outputs(self, - request: metadata_service.QueryExecutionInputsAndOutputsRequest = None, - *, - execution: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> lineage_subgraph.LineageSubgraph: - r"""Obtains the set of input and output Artifacts for - this Execution, in the form of LineageSubgraph that also - contains the Execution and connecting Events. - - Args: - request (google.cloud.aiplatform_v1beta1.types.QueryExecutionInputsAndOutputsRequest): - The request object. Request message for - [MetadataService.QueryExecutionInputsAndOutputs][google.cloud.aiplatform.v1beta1.MetadataService.QueryExecutionInputsAndOutputs]. - execution (str): - Required. The resource name of the - Execution whose input and output - Artifacts should be retrieved as a - LineageSubgraph. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} - - This corresponds to the ``execution`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.LineageSubgraph: - A subgraph of the overall lineage - graph. Event edges connect Artifact and - Execution nodes. 
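# A sketch of recording lineage with ``add_execution_events`` above: each
# Event marks an Artifact as an INPUT or OUTPUT of the Execution (resource
# names hypothetical; duplicate Events are skipped per the docstring).
from google.cloud.aiplatform_v1beta1.services.metadata_service import MetadataServiceClient
from google.cloud.aiplatform_v1beta1.types import Event

client = MetadataServiceClient()
prefix = "projects/my-project/locations/us-central1/metadataStores/default"
client.add_execution_events(
    execution=f"{prefix}/executions/train-model-run-1",
    events=[
        Event(
            artifact=f"{prefix}/artifacts/training-dataset-v1",
            type_=Event.Type.INPUT,
        ),
    ],
)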
- - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([execution]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.QueryExecutionInputsAndOutputsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.QueryExecutionInputsAndOutputsRequest): - request = metadata_service.QueryExecutionInputsAndOutputsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if execution is not None: - request.execution = execution - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.query_execution_inputs_and_outputs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("execution", request.execution), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def create_metadata_schema(self, - request: metadata_service.CreateMetadataSchemaRequest = None, - *, - parent: str = None, - metadata_schema: gca_metadata_schema.MetadataSchema = None, - metadata_schema_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_metadata_schema.MetadataSchema: - r"""Creates a MetadataSchema. - - Args: - request (google.cloud.aiplatform_v1beta1.types.CreateMetadataSchemaRequest): - The request object. Request message for - [MetadataService.CreateMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataSchema]. - parent (str): - Required. The resource name of the - MetadataStore where the MetadataSchema - should be created. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - metadata_schema (google.cloud.aiplatform_v1beta1.types.MetadataSchema): - Required. The MetadataSchema to - create. - - This corresponds to the ``metadata_schema`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - metadata_schema_id (str): - The {metadata_schema} portion of the resource name with - the format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema} - If not provided, the MetadataStore's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are /[a-z][0-9]-/. Must be - unique across all MetadataSchemas in the parent - Location. (Otherwise the request will fail with - ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't - view the preexisting MetadataSchema.) - - This corresponds to the ``metadata_schema_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.MetadataSchema: - Instance of a general MetadataSchema. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, metadata_schema, metadata_schema_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.CreateMetadataSchemaRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.CreateMetadataSchemaRequest): - request = metadata_service.CreateMetadataSchemaRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if metadata_schema is not None: - request.metadata_schema = metadata_schema - if metadata_schema_id is not None: - request.metadata_schema_id = metadata_schema_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_metadata_schema] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_metadata_schema(self, - request: metadata_service.GetMetadataSchemaRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_schema.MetadataSchema: - r"""Retrieves a specific MetadataSchema. - - Args: - request (google.cloud.aiplatform_v1beta1.types.GetMetadataSchemaRequest): - The request object. Request message for - [MetadataService.GetMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataSchema]. - name (str): - Required. The resource name of the - MetadataSchema to retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema} - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.MetadataSchema: - Instance of a general MetadataSchema. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
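The mutual-exclusion rule that this sanity check enforces is easier to see from the calling side. A minimal usage sketch, assuming the usual package-level exports of the generated library; the project, location, and schema names are illustrative only:

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.MetadataServiceClient()

    # Hypothetical resource name, for illustration only.
    name = (
        "projects/my-project/locations/us-central1/"
        "metadataStores/default/metadataSchemas/my-schema"
    )

    # Either pass the flattened field directly...
    schema = client.get_metadata_schema(name=name)

    # ...or build the request object explicitly.
    request = aiplatform_v1beta1.GetMetadataSchemaRequest(name=name)
    schema = client.get_metadata_schema(request=request)

    # Passing both `request` and a flattened field raises ValueError,
    # which is exactly what the check below guards against.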
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.GetMetadataSchemaRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.GetMetadataSchemaRequest): - request = metadata_service.GetMetadataSchemaRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_metadata_schema] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_metadata_schemas(self, - request: metadata_service.ListMetadataSchemasRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListMetadataSchemasPager: - r"""Lists MetadataSchemas. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest): - The request object. Request message for - [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. - parent (str): - Required. The MetadataStore whose - MetadataSchemas should be listed. - Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataSchemasPager: - Response message for - [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.ListMetadataSchemasRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.ListMetadataSchemasRequest): - request = metadata_service.ListMetadataSchemasRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
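Because this method ultimately returns a pager rather than the raw response, callers never have to touch page tokens. A minimal consumption sketch, reusing the `client` from the previous sketch; the parent name is illustrative:

    parent = (
        "projects/my-project/locations/us-central1/"
        "metadataStores/default"
    )

    # The pager is directly iterable; further pages are fetched lazily.
    for schema in client.list_metadata_schemas(parent=parent):
        print(schema.name)

    # Page-by-page access is also available via the `pages` property
    # (see the pager classes later in this diff).
    for page in client.list_metadata_schemas(parent=parent).pages:
        print(len(page.metadata_schemas))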
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_metadata_schemas] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListMetadataSchemasPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def query_artifact_lineage_subgraph(self, - request: metadata_service.QueryArtifactLineageSubgraphRequest = None, - *, - artifact: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> lineage_subgraph.LineageSubgraph: - r"""Retrieves lineage of an Artifact represented through - Artifacts and Executions connected by Event edges and - returned as a LineageSubgraph. - - Args: - request (google.cloud.aiplatform_v1beta1.types.QueryArtifactLineageSubgraphRequest): - The request object. Request message for - [MetadataService.QueryArtifactLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryArtifactLineageSubgraph]. - artifact (str): - Required. The resource name of the Artifact whose - Lineage needs to be retrieved as a LineageSubgraph. - Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} - - The request may error with FAILED_PRECONDITION if the - number of Artifacts, the number of Executions, or the - number of Events that would be returned for the Context - exceeds 1000. - - This corresponds to the ``artifact`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.LineageSubgraph: - A subgraph of the overall lineage - graph. Event edges connect Artifact and - Execution nodes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([artifact]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.QueryArtifactLineageSubgraphRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.QueryArtifactLineageSubgraphRequest): - request = metadata_service.QueryArtifactLineageSubgraphRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
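The `gapic_v1.routing_header.to_grpc_metadata` call used a few lines below is what turns a request field into the `x-goog-request-params` header that Google's frontend uses to route the call. A standalone sketch of what it produces; the artifact name is illustrative, and the exact percent-encoding is an implementation detail of `google.api_core`:

    from google.api_core import gapic_v1

    artifact_name = (
        "projects/my-project/locations/us-central1/"
        "metadataStores/default/artifacts/my-artifact"
    )
    header = gapic_v1.routing_header.to_grpc_metadata(
        (("artifact", artifact_name),)
    )
    # A single key/value pair, roughly:
    #   ("x-goog-request-params", "artifact=projects%2Fmy-project%2F...")
    print(header)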
- if artifact is not None: - request.artifact = artifact - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.query_artifact_lineage_subgraph] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("artifact", request.artifact), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "MetadataServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/pagers.py deleted file mode 100644 index 1bff1d843a..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/pagers.py +++ /dev/null @@ -1,633 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.cloud.aiplatform_v1beta1.types import artifact -from google.cloud.aiplatform_v1beta1.types import context -from google.cloud.aiplatform_v1beta1.types import execution -from google.cloud.aiplatform_v1beta1.types import metadata_schema -from google.cloud.aiplatform_v1beta1.types import metadata_service -from google.cloud.aiplatform_v1beta1.types import metadata_store - - -class ListMetadataStoresPager: - """A pager for iterating through ``list_metadata_stores`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse` object, and - provides an ``__iter__`` method to iterate through its - ``metadata_stores`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListMetadataStores`` requests and continue to iterate - through the ``metadata_stores`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., metadata_service.ListMetadataStoresResponse], - request: metadata_service.ListMetadataStoresRequest, - response: metadata_service.ListMetadataStoresResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. 
- - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = metadata_service.ListMetadataStoresRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[metadata_service.ListMetadataStoresResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[metadata_store.MetadataStore]: - for page in self.pages: - yield from page.metadata_stores - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListMetadataStoresAsyncPager: - """A pager for iterating through ``list_metadata_stores`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``metadata_stores`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListMetadataStores`` requests and continue to iterate - through the ``metadata_stores`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[metadata_service.ListMetadataStoresResponse]], - request: metadata_service.ListMetadataStoresRequest, - response: metadata_service.ListMetadataStoresResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = metadata_service.ListMetadataStoresRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[metadata_service.ListMetadataStoresResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[metadata_store.MetadataStore]: - async def async_generator(): - async for page in self.pages: - for response in page.metadata_stores: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListArtifactsPager: - """A pager for iterating through ``list_artifacts`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``artifacts`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListArtifacts`` requests and continue to iterate - through the ``artifacts`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., metadata_service.ListArtifactsResponse], - request: metadata_service.ListArtifactsRequest, - response: metadata_service.ListArtifactsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = metadata_service.ListArtifactsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[metadata_service.ListArtifactsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[artifact.Artifact]: - for page in self.pages: - yield from page.artifacts - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListArtifactsAsyncPager: - """A pager for iterating through ``list_artifacts`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``artifacts`` field. 
- - If there are more pages, the ``__aiter__`` method will make additional - ``ListArtifacts`` requests and continue to iterate - through the ``artifacts`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[metadata_service.ListArtifactsResponse]], - request: metadata_service.ListArtifactsRequest, - response: metadata_service.ListArtifactsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = metadata_service.ListArtifactsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[metadata_service.ListArtifactsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[artifact.Artifact]: - async def async_generator(): - async for page in self.pages: - for response in page.artifacts: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListContextsPager: - """A pager for iterating through ``list_contexts`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListContextsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``contexts`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListContexts`` requests and continue to iterate - through the ``contexts`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListContextsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., metadata_service.ListContextsResponse], - request: metadata_service.ListContextsRequest, - response: metadata_service.ListContextsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListContextsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListContextsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = metadata_service.ListContextsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[metadata_service.ListContextsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[context.Context]: - for page in self.pages: - yield from page.contexts - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListContextsAsyncPager: - """A pager for iterating through ``list_contexts`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListContextsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``contexts`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListContexts`` requests and continue to iterate - through the ``contexts`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListContextsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[metadata_service.ListContextsResponse]], - request: metadata_service.ListContextsRequest, - response: metadata_service.ListContextsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListContextsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListContextsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = metadata_service.ListContextsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[metadata_service.ListContextsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[context.Context]: - async def async_generator(): - async for page in self.pages: - for response in page.contexts: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListExecutionsPager: - """A pager for iterating through ``list_executions`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``executions`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListExecutions`` requests and continue to iterate - through the ``executions`` field on the - corresponding responses. 
- - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., metadata_service.ListExecutionsResponse], - request: metadata_service.ListExecutionsRequest, - response: metadata_service.ListExecutionsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = metadata_service.ListExecutionsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[metadata_service.ListExecutionsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[execution.Execution]: - for page in self.pages: - yield from page.executions - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListExecutionsAsyncPager: - """A pager for iterating through ``list_executions`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``executions`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListExecutions`` requests and continue to iterate - through the ``executions`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[metadata_service.ListExecutionsResponse]], - request: metadata_service.ListExecutionsRequest, - response: metadata_service.ListExecutionsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = metadata_service.ListExecutionsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[metadata_service.ListExecutionsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[execution.Execution]: - async def async_generator(): - async for page in self.pages: - for response in page.executions: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListMetadataSchemasPager: - """A pager for iterating through ``list_metadata_schemas`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse` object, and - provides an ``__iter__`` method to iterate through its - ``metadata_schemas`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListMetadataSchemas`` requests and continue to iterate - through the ``metadata_schemas`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., metadata_service.ListMetadataSchemasResponse], - request: metadata_service.ListMetadataSchemasRequest, - response: metadata_service.ListMetadataSchemasResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = metadata_service.ListMetadataSchemasRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[metadata_service.ListMetadataSchemasResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[metadata_schema.MetadataSchema]: - for page in self.pages: - yield from page.metadata_schemas - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListMetadataSchemasAsyncPager: - """A pager for iterating through ``list_metadata_schemas`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``metadata_schemas`` field. 
- - If there are more pages, the ``__aiter__`` method will make additional - ``ListMetadataSchemas`` requests and continue to iterate - through the ``metadata_schemas`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[metadata_service.ListMetadataSchemasResponse]], - request: metadata_service.ListMetadataSchemasRequest, - response: metadata_service.ListMetadataSchemasResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = metadata_service.ListMetadataSchemasRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[metadata_service.ListMetadataSchemasResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[metadata_schema.MetadataSchema]: - async def async_generator(): - async for page in self.pages: - for response in page.metadata_schemas: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/__init__.py deleted file mode 100644 index 688ce8218c..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import MetadataServiceTransport -from .grpc import MetadataServiceGrpcTransport -from .grpc_asyncio import MetadataServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. 
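The registry compiled just below is what lets a client resolve a transport="grpc" or transport="grpc_asyncio" string into a concrete class. A sketch of the lookup; the `get_transport_class` helper is named here only for illustration:

    def get_transport_class(label: str = "grpc"):
        # Resolve a transport label to its class via the registry below.
        return _transport_registry[label]

    transport_cls = get_transport_class("grpc_asyncio")
    # transport_cls is MetadataServiceGrpcAsyncIOTransport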
-_transport_registry = OrderedDict() # type: Dict[str, Type[MetadataServiceTransport]] -_transport_registry['grpc'] = MetadataServiceGrpcTransport -_transport_registry['grpc_asyncio'] = MetadataServiceGrpcAsyncIOTransport - -__all__ = ( - 'MetadataServiceTransport', - 'MetadataServiceGrpcTransport', - 'MetadataServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py deleted file mode 100644 index 388f641b8f..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py +++ /dev/null @@ -1,535 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import artifact -from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact -from google.cloud.aiplatform_v1beta1.types import context -from google.cloud.aiplatform_v1beta1.types import context as gca_context -from google.cloud.aiplatform_v1beta1.types import execution -from google.cloud.aiplatform_v1beta1.types import execution as gca_execution -from google.cloud.aiplatform_v1beta1.types import lineage_subgraph -from google.cloud.aiplatform_v1beta1.types import metadata_schema -from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema -from google.cloud.aiplatform_v1beta1.types import metadata_service -from google.cloud.aiplatform_v1beta1.types import metadata_store -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class MetadataServiceTransport(abc.ABC): - """Abstract transport class for MetadataService.""" - - AUTH_SCOPES = ( - 
'https://www.googleapis.com/auth/cloud-platform',
-    )
-
-    DEFAULT_HOST: str = 'aiplatform.googleapis.com'
-    def __init__(
-            self, *,
-            host: str = DEFAULT_HOST,
-            credentials: ga_credentials.Credentials = None,
-            credentials_file: Optional[str] = None,
-            scopes: Optional[Sequence[str]] = None,
-            quota_project_id: Optional[str] = None,
-            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
-            always_use_jwt_access: Optional[bool] = False,
-            **kwargs,
-            ) -> None:
-        """Instantiate the transport.
-
-        Args:
-            host (Optional[str]):
-                The hostname to connect to.
-            credentials (Optional[google.auth.credentials.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify the application to the service; if none
-                are specified, the client will attempt to ascertain the
-                credentials from the environment.
-            credentials_file (Optional[str]): A file with credentials that can
-                be loaded with :func:`google.auth.load_credentials_from_file`.
-                This argument is mutually exclusive with credentials.
-            scopes (Optional[Sequence[str]]): A list of scopes.
-            quota_project_id (Optional[str]): An optional project to use for billing
-                and quota.
-            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
-                The client info used to send a user-agent string along with
-                API requests. If ``None``, then default info will be used.
-                Generally, you only need to set this if you're developing
-                your own client library.
-            always_use_jwt_access (Optional[bool]): Whether a self-signed JWT should
-                be used for service account credentials.
-        """
-        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
-        if ':' not in host:
-            host += ':443'
-        self._host = host
-
-        scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
-
-        # Save the scopes.
-        self._scopes = scopes
-
-        # If no credentials are provided, then determine the appropriate
-        # defaults.
-        if credentials and credentials_file:
-            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
-
-        if credentials_file is not None:
-            credentials, _ = google.auth.load_credentials_from_file(
-                credentials_file,
-                **scopes_kwargs,
-                quota_project_id=quota_project_id
-            )
-
-        elif credentials is None:
-            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
-
-        # If the credentials are service account credentials, then always try to use a self-signed JWT.
-        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
-            credentials = credentials.with_always_use_jwt_access(True)
-
-        # Save the credentials.
-        self._credentials = credentials
-
-        # TODO(busunkim): This method is in the base transport
-        # to avoid duplicating code across the transport classes. These functions
-        # should be deleted once the minimum required version of google-auth is increased.
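The reason for the `_get_scopes_kwargs` helper defined just below: google-auth only learned the `default_scopes` keyword in 1.25.0, so the transport has to shape its arguments differently per version. A worked sketch of the two shapes, assuming the cloud-platform scope from `AUTH_SCOPES` above and no user-supplied scopes:

    user_scopes = None

    # google-auth >= 1.25.0: user scopes and library defaults are kept
    # separate, so google-auth can fall back correctly.
    scopes_kwargs = {
        "scopes": user_scopes,
        "default_scopes": ("https://www.googleapis.com/auth/cloud-platform",),
    }

    # Older google-auth: the two are collapsed into a single argument.
    scopes_kwargs = {
        "scopes": user_scopes or ("https://www.googleapis.com/auth/cloud-platform",),
    }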
- - # TODO: Remove this function once google-auth >= 1.25.0 is required - @classmethod - def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: - """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" - - scopes_kwargs = {} - - if _GOOGLE_AUTH_VERSION and ( - packaging.version.parse(_GOOGLE_AUTH_VERSION) - >= packaging.version.parse("1.25.0") - ): - scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} - else: - scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} - - return scopes_kwargs - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.create_metadata_store: gapic_v1.method.wrap_method( - self.create_metadata_store, - default_timeout=5.0, - client_info=client_info, - ), - self.get_metadata_store: gapic_v1.method.wrap_method( - self.get_metadata_store, - default_timeout=5.0, - client_info=client_info, - ), - self.list_metadata_stores: gapic_v1.method.wrap_method( - self.list_metadata_stores, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_metadata_store: gapic_v1.method.wrap_method( - self.delete_metadata_store, - default_timeout=5.0, - client_info=client_info, - ), - self.create_artifact: gapic_v1.method.wrap_method( - self.create_artifact, - default_timeout=5.0, - client_info=client_info, - ), - self.get_artifact: gapic_v1.method.wrap_method( - self.get_artifact, - default_timeout=5.0, - client_info=client_info, - ), - self.list_artifacts: gapic_v1.method.wrap_method( - self.list_artifacts, - default_timeout=5.0, - client_info=client_info, - ), - self.update_artifact: gapic_v1.method.wrap_method( - self.update_artifact, - default_timeout=5.0, - client_info=client_info, - ), - self.create_context: gapic_v1.method.wrap_method( - self.create_context, - default_timeout=5.0, - client_info=client_info, - ), - self.get_context: gapic_v1.method.wrap_method( - self.get_context, - default_timeout=5.0, - client_info=client_info, - ), - self.list_contexts: gapic_v1.method.wrap_method( - self.list_contexts, - default_timeout=5.0, - client_info=client_info, - ), - self.update_context: gapic_v1.method.wrap_method( - self.update_context, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_context: gapic_v1.method.wrap_method( - self.delete_context, - default_timeout=5.0, - client_info=client_info, - ), - self.add_context_artifacts_and_executions: gapic_v1.method.wrap_method( - self.add_context_artifacts_and_executions, - default_timeout=5.0, - client_info=client_info, - ), - self.add_context_children: gapic_v1.method.wrap_method( - self.add_context_children, - default_timeout=5.0, - client_info=client_info, - ), - self.query_context_lineage_subgraph: gapic_v1.method.wrap_method( - self.query_context_lineage_subgraph, - default_timeout=5.0, - client_info=client_info, - ), - self.create_execution: gapic_v1.method.wrap_method( - self.create_execution, - default_timeout=5.0, - client_info=client_info, - ), - self.get_execution: gapic_v1.method.wrap_method( - self.get_execution, - default_timeout=5.0, - client_info=client_info, - ), - self.list_executions: gapic_v1.method.wrap_method( - self.list_executions, - default_timeout=5.0, - client_info=client_info, - ), - self.update_execution: gapic_v1.method.wrap_method( - self.update_execution, - default_timeout=5.0, - client_info=client_info, - ), - self.add_execution_events: gapic_v1.method.wrap_method( - self.add_execution_events, - 
default_timeout=5.0, - client_info=client_info, - ), - self.query_execution_inputs_and_outputs: gapic_v1.method.wrap_method( - self.query_execution_inputs_and_outputs, - default_timeout=5.0, - client_info=client_info, - ), - self.create_metadata_schema: gapic_v1.method.wrap_method( - self.create_metadata_schema, - default_timeout=5.0, - client_info=client_info, - ), - self.get_metadata_schema: gapic_v1.method.wrap_method( - self.get_metadata_schema, - default_timeout=5.0, - client_info=client_info, - ), - self.list_metadata_schemas: gapic_v1.method.wrap_method( - self.list_metadata_schemas, - default_timeout=5.0, - client_info=client_info, - ), - self.query_artifact_lineage_subgraph: gapic_v1.method.wrap_method( - self.query_artifact_lineage_subgraph, - default_timeout=None, - client_info=client_info, - ), - } - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_metadata_store(self) -> Callable[ - [metadata_service.CreateMetadataStoreRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_metadata_store(self) -> Callable[ - [metadata_service.GetMetadataStoreRequest], - Union[ - metadata_store.MetadataStore, - Awaitable[metadata_store.MetadataStore] - ]]: - raise NotImplementedError() - - @property - def list_metadata_stores(self) -> Callable[ - [metadata_service.ListMetadataStoresRequest], - Union[ - metadata_service.ListMetadataStoresResponse, - Awaitable[metadata_service.ListMetadataStoresResponse] - ]]: - raise NotImplementedError() - - @property - def delete_metadata_store(self) -> Callable[ - [metadata_service.DeleteMetadataStoreRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def create_artifact(self) -> Callable[ - [metadata_service.CreateArtifactRequest], - Union[ - gca_artifact.Artifact, - Awaitable[gca_artifact.Artifact] - ]]: - raise NotImplementedError() - - @property - def get_artifact(self) -> Callable[ - [metadata_service.GetArtifactRequest], - Union[ - artifact.Artifact, - Awaitable[artifact.Artifact] - ]]: - raise NotImplementedError() - - @property - def list_artifacts(self) -> Callable[ - [metadata_service.ListArtifactsRequest], - Union[ - metadata_service.ListArtifactsResponse, - Awaitable[metadata_service.ListArtifactsResponse] - ]]: - raise NotImplementedError() - - @property - def update_artifact(self) -> Callable[ - [metadata_service.UpdateArtifactRequest], - Union[ - gca_artifact.Artifact, - Awaitable[gca_artifact.Artifact] - ]]: - raise NotImplementedError() - - @property - def create_context(self) -> Callable[ - [metadata_service.CreateContextRequest], - Union[ - gca_context.Context, - Awaitable[gca_context.Context] - ]]: - raise NotImplementedError() - - @property - def get_context(self) -> Callable[ - [metadata_service.GetContextRequest], - Union[ - context.Context, - Awaitable[context.Context] - ]]: - raise NotImplementedError() - - @property - def list_contexts(self) -> Callable[ - [metadata_service.ListContextsRequest], - Union[ - metadata_service.ListContextsResponse, - Awaitable[metadata_service.ListContextsResponse] - ]]: - raise NotImplementedError() - - @property - def update_context(self) -> Callable[ - [metadata_service.UpdateContextRequest], - Union[ - gca_context.Context, - Awaitable[gca_context.Context] - ]]: - raise 
NotImplementedError() - - @property - def delete_context(self) -> Callable[ - [metadata_service.DeleteContextRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def add_context_artifacts_and_executions(self) -> Callable[ - [metadata_service.AddContextArtifactsAndExecutionsRequest], - Union[ - metadata_service.AddContextArtifactsAndExecutionsResponse, - Awaitable[metadata_service.AddContextArtifactsAndExecutionsResponse] - ]]: - raise NotImplementedError() - - @property - def add_context_children(self) -> Callable[ - [metadata_service.AddContextChildrenRequest], - Union[ - metadata_service.AddContextChildrenResponse, - Awaitable[metadata_service.AddContextChildrenResponse] - ]]: - raise NotImplementedError() - - @property - def query_context_lineage_subgraph(self) -> Callable[ - [metadata_service.QueryContextLineageSubgraphRequest], - Union[ - lineage_subgraph.LineageSubgraph, - Awaitable[lineage_subgraph.LineageSubgraph] - ]]: - raise NotImplementedError() - - @property - def create_execution(self) -> Callable[ - [metadata_service.CreateExecutionRequest], - Union[ - gca_execution.Execution, - Awaitable[gca_execution.Execution] - ]]: - raise NotImplementedError() - - @property - def get_execution(self) -> Callable[ - [metadata_service.GetExecutionRequest], - Union[ - execution.Execution, - Awaitable[execution.Execution] - ]]: - raise NotImplementedError() - - @property - def list_executions(self) -> Callable[ - [metadata_service.ListExecutionsRequest], - Union[ - metadata_service.ListExecutionsResponse, - Awaitable[metadata_service.ListExecutionsResponse] - ]]: - raise NotImplementedError() - - @property - def update_execution(self) -> Callable[ - [metadata_service.UpdateExecutionRequest], - Union[ - gca_execution.Execution, - Awaitable[gca_execution.Execution] - ]]: - raise NotImplementedError() - - @property - def add_execution_events(self) -> Callable[ - [metadata_service.AddExecutionEventsRequest], - Union[ - metadata_service.AddExecutionEventsResponse, - Awaitable[metadata_service.AddExecutionEventsResponse] - ]]: - raise NotImplementedError() - - @property - def query_execution_inputs_and_outputs(self) -> Callable[ - [metadata_service.QueryExecutionInputsAndOutputsRequest], - Union[ - lineage_subgraph.LineageSubgraph, - Awaitable[lineage_subgraph.LineageSubgraph] - ]]: - raise NotImplementedError() - - @property - def create_metadata_schema(self) -> Callable[ - [metadata_service.CreateMetadataSchemaRequest], - Union[ - gca_metadata_schema.MetadataSchema, - Awaitable[gca_metadata_schema.MetadataSchema] - ]]: - raise NotImplementedError() - - @property - def get_metadata_schema(self) -> Callable[ - [metadata_service.GetMetadataSchemaRequest], - Union[ - metadata_schema.MetadataSchema, - Awaitable[metadata_schema.MetadataSchema] - ]]: - raise NotImplementedError() - - @property - def list_metadata_schemas(self) -> Callable[ - [metadata_service.ListMetadataSchemasRequest], - Union[ - metadata_service.ListMetadataSchemasResponse, - Awaitable[metadata_service.ListMetadataSchemasResponse] - ]]: - raise NotImplementedError() - - @property - def query_artifact_lineage_subgraph(self) -> Callable[ - [metadata_service.QueryArtifactLineageSubgraphRequest], - Union[ - lineage_subgraph.LineageSubgraph, - Awaitable[lineage_subgraph.LineageSubgraph] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'MetadataServiceTransport', -) diff --git 
a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py deleted file mode 100644 index 5dbecd6d3e..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py +++ /dev/null @@ -1,951 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.api_core import gapic_v1 # type: ignore -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1beta1.types import artifact -from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact -from google.cloud.aiplatform_v1beta1.types import context -from google.cloud.aiplatform_v1beta1.types import context as gca_context -from google.cloud.aiplatform_v1beta1.types import execution -from google.cloud.aiplatform_v1beta1.types import execution as gca_execution -from google.cloud.aiplatform_v1beta1.types import lineage_subgraph -from google.cloud.aiplatform_v1beta1.types import metadata_schema -from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema -from google.cloud.aiplatform_v1beta1.types import metadata_service -from google.cloud.aiplatform_v1beta1.types import metadata_store -from google.longrunning import operations_pb2 # type: ignore -from .base import MetadataServiceTransport, DEFAULT_CLIENT_INFO - - -class MetadataServiceGrpcTransport(MetadataServiceTransport): - """gRPC backend transport for MetadataService. - - Service for reading and writing metadata entries. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. 
- - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self-signed JWTs should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials.
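# A minimal standalone sketch of the SSL-credential selection performed in the
# branch below, assuming only ``grpc`` and ``google-auth``; the helper name
# ``choose_ssl_credentials`` is illustrative, not part of the generated code.
import grpc
from google.auth.transport.grpc import SslCredentials

def choose_ssl_credentials(client_cert_source=None):
    # Prefer an explicit mutual-TLS certificate callback when one is given...
    if client_cert_source:
        cert, key = client_cert_source()  # PEM-encoded chain and private key
        return grpc.ssl_channel_credentials(
            certificate_chain=cert, private_key=key
        )
    # ...otherwise fall back to application-default SSL credentials.
    return SslCredentials().ssl_credentials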
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): An optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache.
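# A hedged usage sketch for the ``create_channel`` classmethod above: build a
# channel against the default endpoint and hand it to the transport. When
# ``channel`` is supplied, the constructor ignores ``credentials``, as its
# docstring notes. Setup only; no RPC is issued here.
from google.cloud.aiplatform_v1beta1.services.metadata_service.transports import (
    MetadataServiceGrpcTransport,
)

channel = MetadataServiceGrpcTransport.create_channel(
    "aiplatform.googleapis.com",
    scopes=None,  # defer to the library's default AUTH_SCOPES
)
transport = MetadataServiceGrpcTransport(channel=channel)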
- return self._operations_client - - @property - def create_metadata_store(self) -> Callable[ - [metadata_service.CreateMetadataStoreRequest], - operations_pb2.Operation]: - r"""Return a callable for the create metadata store method over gRPC. - - Initializes a MetadataStore, including allocation of - resources. - - Returns: - Callable[[~.CreateMetadataStoreRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_metadata_store' not in self._stubs: - self._stubs['create_metadata_store'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/CreateMetadataStore', - request_serializer=metadata_service.CreateMetadataStoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_metadata_store'] - - @property - def get_metadata_store(self) -> Callable[ - [metadata_service.GetMetadataStoreRequest], - metadata_store.MetadataStore]: - r"""Return a callable for the get metadata store method over gRPC. - - Retrieves a specific MetadataStore. - - Returns: - Callable[[~.GetMetadataStoreRequest], - ~.MetadataStore]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_metadata_store' not in self._stubs: - self._stubs['get_metadata_store'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/GetMetadataStore', - request_serializer=metadata_service.GetMetadataStoreRequest.serialize, - response_deserializer=metadata_store.MetadataStore.deserialize, - ) - return self._stubs['get_metadata_store'] - - @property - def list_metadata_stores(self) -> Callable[ - [metadata_service.ListMetadataStoresRequest], - metadata_service.ListMetadataStoresResponse]: - r"""Return a callable for the list metadata stores method over gRPC. - - Lists MetadataStores for a Location. - - Returns: - Callable[[~.ListMetadataStoresRequest], - ~.ListMetadataStoresResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_metadata_stores' not in self._stubs: - self._stubs['list_metadata_stores'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/ListMetadataStores', - request_serializer=metadata_service.ListMetadataStoresRequest.serialize, - response_deserializer=metadata_service.ListMetadataStoresResponse.deserialize, - ) - return self._stubs['list_metadata_stores'] - - @property - def delete_metadata_store(self) -> Callable[ - [metadata_service.DeleteMetadataStoreRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete metadata store method over gRPC. - - Deletes a single MetadataStore. - - Returns: - Callable[[~.DeleteMetadataStoreRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
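# Every RPC property in this transport repeats the lazy pattern shown above:
# build the unary-unary stub once, cache it in ``self._stubs``, and return the
# cached callable. Factored into a sketch (``_cached_stub`` is a hypothetical
# helper, not part of the generated surface):
def _cached_stub(transport, name, path, request_serializer, response_deserializer):
    if name not in transport._stubs:
        transport._stubs[name] = transport.grpc_channel.unary_unary(
            path,
            request_serializer=request_serializer,
            response_deserializer=response_deserializer,
        )
    return transport._stubs[name]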
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_metadata_store' not in self._stubs: - self._stubs['delete_metadata_store'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/DeleteMetadataStore', - request_serializer=metadata_service.DeleteMetadataStoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_metadata_store'] - - @property - def create_artifact(self) -> Callable[ - [metadata_service.CreateArtifactRequest], - gca_artifact.Artifact]: - r"""Return a callable for the create artifact method over gRPC. - - Creates an Artifact associated with a MetadataStore. - - Returns: - Callable[[~.CreateArtifactRequest], - ~.Artifact]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_artifact' not in self._stubs: - self._stubs['create_artifact'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/CreateArtifact', - request_serializer=metadata_service.CreateArtifactRequest.serialize, - response_deserializer=gca_artifact.Artifact.deserialize, - ) - return self._stubs['create_artifact'] - - @property - def get_artifact(self) -> Callable[ - [metadata_service.GetArtifactRequest], - artifact.Artifact]: - r"""Return a callable for the get artifact method over gRPC. - - Retrieves a specific Artifact. - - Returns: - Callable[[~.GetArtifactRequest], - ~.Artifact]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_artifact' not in self._stubs: - self._stubs['get_artifact'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/GetArtifact', - request_serializer=metadata_service.GetArtifactRequest.serialize, - response_deserializer=artifact.Artifact.deserialize, - ) - return self._stubs['get_artifact'] - - @property - def list_artifacts(self) -> Callable[ - [metadata_service.ListArtifactsRequest], - metadata_service.ListArtifactsResponse]: - r"""Return a callable for the list artifacts method over gRPC. - - Lists Artifacts in the MetadataStore. - - Returns: - Callable[[~.ListArtifactsRequest], - ~.ListArtifactsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_artifacts' not in self._stubs: - self._stubs['list_artifacts'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/ListArtifacts', - request_serializer=metadata_service.ListArtifactsRequest.serialize, - response_deserializer=metadata_service.ListArtifactsResponse.deserialize, - ) - return self._stubs['list_artifacts'] - - @property - def update_artifact(self) -> Callable[ - [metadata_service.UpdateArtifactRequest], - gca_artifact.Artifact]: - r"""Return a callable for the update artifact method over gRPC. - - Updates a stored Artifact. 
- - Returns: - Callable[[~.UpdateArtifactRequest], - ~.Artifact]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_artifact' not in self._stubs: - self._stubs['update_artifact'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/UpdateArtifact', - request_serializer=metadata_service.UpdateArtifactRequest.serialize, - response_deserializer=gca_artifact.Artifact.deserialize, - ) - return self._stubs['update_artifact'] - - @property - def create_context(self) -> Callable[ - [metadata_service.CreateContextRequest], - gca_context.Context]: - r"""Return a callable for the create context method over gRPC. - - Creates a Context associated with a MetadataStore. - - Returns: - Callable[[~.CreateContextRequest], - ~.Context]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_context' not in self._stubs: - self._stubs['create_context'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/CreateContext', - request_serializer=metadata_service.CreateContextRequest.serialize, - response_deserializer=gca_context.Context.deserialize, - ) - return self._stubs['create_context'] - - @property - def get_context(self) -> Callable[ - [metadata_service.GetContextRequest], - context.Context]: - r"""Return a callable for the get context method over gRPC. - - Retrieves a specific Context. - - Returns: - Callable[[~.GetContextRequest], - ~.Context]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_context' not in self._stubs: - self._stubs['get_context'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/GetContext', - request_serializer=metadata_service.GetContextRequest.serialize, - response_deserializer=context.Context.deserialize, - ) - return self._stubs['get_context'] - - @property - def list_contexts(self) -> Callable[ - [metadata_service.ListContextsRequest], - metadata_service.ListContextsResponse]: - r"""Return a callable for the list contexts method over gRPC. - - Lists Contexts on the MetadataStore. - - Returns: - Callable[[~.ListContextsRequest], - ~.ListContextsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
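# Because each property returns a plain callable, the transport can be driven
# without the hand-written client. A hedged sketch using the ``transport``
# built earlier (the resource name is a placeholder):
from google.cloud.aiplatform_v1beta1.types import metadata_service

ctx = transport.get_context(
    metadata_service.GetContextRequest(
        name="projects/my-project/locations/us-central1/metadataStores/default/contexts/my-context"
    )
)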
- if 'list_contexts' not in self._stubs: - self._stubs['list_contexts'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/ListContexts', - request_serializer=metadata_service.ListContextsRequest.serialize, - response_deserializer=metadata_service.ListContextsResponse.deserialize, - ) - return self._stubs['list_contexts'] - - @property - def update_context(self) -> Callable[ - [metadata_service.UpdateContextRequest], - gca_context.Context]: - r"""Return a callable for the update context method over gRPC. - - Updates a stored Context. - - Returns: - Callable[[~.UpdateContextRequest], - ~.Context]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_context' not in self._stubs: - self._stubs['update_context'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/UpdateContext', - request_serializer=metadata_service.UpdateContextRequest.serialize, - response_deserializer=gca_context.Context.deserialize, - ) - return self._stubs['update_context'] - - @property - def delete_context(self) -> Callable[ - [metadata_service.DeleteContextRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete context method over gRPC. - - Deletes a stored Context. - - Returns: - Callable[[~.DeleteContextRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_context' not in self._stubs: - self._stubs['delete_context'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/DeleteContext', - request_serializer=metadata_service.DeleteContextRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_context'] - - @property - def add_context_artifacts_and_executions(self) -> Callable[ - [metadata_service.AddContextArtifactsAndExecutionsRequest], - metadata_service.AddContextArtifactsAndExecutionsResponse]: - r"""Return a callable for the add context artifacts and - executions method over gRPC. - - Adds a set of Artifacts and Executions to a Context. - If any of the Artifacts or Executions have already been - added to a Context, they are simply skipped. - - Returns: - Callable[[~.AddContextArtifactsAndExecutionsRequest], - ~.AddContextArtifactsAndExecutionsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'add_context_artifacts_and_executions' not in self._stubs: - self._stubs['add_context_artifacts_and_executions'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/AddContextArtifactsAndExecutions', - request_serializer=metadata_service.AddContextArtifactsAndExecutionsRequest.serialize, - response_deserializer=metadata_service.AddContextArtifactsAndExecutionsResponse.deserialize, - ) - return self._stubs['add_context_artifacts_and_executions'] - - @property - def add_context_children(self) -> Callable[ - [metadata_service.AddContextChildrenRequest], - metadata_service.AddContextChildrenResponse]: - r"""Return a callable for the add context children method over gRPC. - - Adds a set of Contexts as children to a parent Context. If any - of the child Contexts have already been added to the parent - Context, they are simply skipped. If this call would create a - cycle or cause any Context to have more than 10 parents, the - request will fail with an INVALID_ARGUMENT error. - - Returns: - Callable[[~.AddContextChildrenRequest], - ~.AddContextChildrenResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'add_context_children' not in self._stubs: - self._stubs['add_context_children'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/AddContextChildren', - request_serializer=metadata_service.AddContextChildrenRequest.serialize, - response_deserializer=metadata_service.AddContextChildrenResponse.deserialize, - ) - return self._stubs['add_context_children'] - - @property - def query_context_lineage_subgraph(self) -> Callable[ - [metadata_service.QueryContextLineageSubgraphRequest], - lineage_subgraph.LineageSubgraph]: - r"""Return a callable for the query context lineage subgraph method over gRPC. - - Retrieves Artifacts and Executions within the - specified Context, connected by Event edges and returned - as a LineageSubgraph. - - Returns: - Callable[[~.QueryContextLineageSubgraphRequest], - ~.LineageSubgraph]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'query_context_lineage_subgraph' not in self._stubs: - self._stubs['query_context_lineage_subgraph'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/QueryContextLineageSubgraph', - request_serializer=metadata_service.QueryContextLineageSubgraphRequest.serialize, - response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, - ) - return self._stubs['query_context_lineage_subgraph'] - - @property - def create_execution(self) -> Callable[ - [metadata_service.CreateExecutionRequest], - gca_execution.Execution]: - r"""Return a callable for the create execution method over gRPC. - - Creates an Execution associated with a MetadataStore. - - Returns: - Callable[[~.CreateExecutionRequest], - ~.Execution]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
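# As the AddContextChildren docstring above notes, introducing a cycle or an
# eleventh parent fails with INVALID_ARGUMENT. At this raw-stub layer that
# surfaces as a ``grpc.RpcError``; a hedged guard, with placeholder resource
# names and the ``transport`` from the earlier sketch:
import grpc
from google.cloud.aiplatform_v1beta1.types import metadata_service

parent_name = "projects/p/locations/l/metadataStores/default/contexts/parent"  # placeholder
child_name = "projects/p/locations/l/metadataStores/default/contexts/child"    # placeholder
try:
    transport.add_context_children(
        metadata_service.AddContextChildrenRequest(
            context=parent_name,
            child_contexts=[child_name],
        )
    )
except grpc.RpcError as err:
    if err.code() == grpc.StatusCode.INVALID_ARGUMENT:
        pass  # cycle detected or parent limit exceeded; handle as appropriate
    else:
        raise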
- if 'create_execution' not in self._stubs: - self._stubs['create_execution'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/CreateExecution', - request_serializer=metadata_service.CreateExecutionRequest.serialize, - response_deserializer=gca_execution.Execution.deserialize, - ) - return self._stubs['create_execution'] - - @property - def get_execution(self) -> Callable[ - [metadata_service.GetExecutionRequest], - execution.Execution]: - r"""Return a callable for the get execution method over gRPC. - - Retrieves a specific Execution. - - Returns: - Callable[[~.GetExecutionRequest], - ~.Execution]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_execution' not in self._stubs: - self._stubs['get_execution'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/GetExecution', - request_serializer=metadata_service.GetExecutionRequest.serialize, - response_deserializer=execution.Execution.deserialize, - ) - return self._stubs['get_execution'] - - @property - def list_executions(self) -> Callable[ - [metadata_service.ListExecutionsRequest], - metadata_service.ListExecutionsResponse]: - r"""Return a callable for the list executions method over gRPC. - - Lists Executions in the MetadataStore. - - Returns: - Callable[[~.ListExecutionsRequest], - ~.ListExecutionsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_executions' not in self._stubs: - self._stubs['list_executions'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/ListExecutions', - request_serializer=metadata_service.ListExecutionsRequest.serialize, - response_deserializer=metadata_service.ListExecutionsResponse.deserialize, - ) - return self._stubs['list_executions'] - - @property - def update_execution(self) -> Callable[ - [metadata_service.UpdateExecutionRequest], - gca_execution.Execution]: - r"""Return a callable for the update execution method over gRPC. - - Updates a stored Execution. - - Returns: - Callable[[~.UpdateExecutionRequest], - ~.Execution]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_execution' not in self._stubs: - self._stubs['update_execution'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/UpdateExecution', - request_serializer=metadata_service.UpdateExecutionRequest.serialize, - response_deserializer=gca_execution.Execution.deserialize, - ) - return self._stubs['update_execution'] - - @property - def add_execution_events(self) -> Callable[ - [metadata_service.AddExecutionEventsRequest], - metadata_service.AddExecutionEventsResponse]: - r"""Return a callable for the add execution events method over gRPC. - - Adds Events to the specified Execution. An Event - indicates whether an Artifact was used as an input or - output for an Execution. 
If an Event already exists - between the Execution and the Artifact, the Event is - skipped. - - Returns: - Callable[[~.AddExecutionEventsRequest], - ~.AddExecutionEventsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'add_execution_events' not in self._stubs: - self._stubs['add_execution_events'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/AddExecutionEvents', - request_serializer=metadata_service.AddExecutionEventsRequest.serialize, - response_deserializer=metadata_service.AddExecutionEventsResponse.deserialize, - ) - return self._stubs['add_execution_events'] - - @property - def query_execution_inputs_and_outputs(self) -> Callable[ - [metadata_service.QueryExecutionInputsAndOutputsRequest], - lineage_subgraph.LineageSubgraph]: - r"""Return a callable for the query execution inputs and - outputs method over gRPC. - - Obtains the set of input and output Artifacts for - this Execution, in the form of LineageSubgraph that also - contains the Execution and connecting Events. - - Returns: - Callable[[~.QueryExecutionInputsAndOutputsRequest], - ~.LineageSubgraph]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'query_execution_inputs_and_outputs' not in self._stubs: - self._stubs['query_execution_inputs_and_outputs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/QueryExecutionInputsAndOutputs', - request_serializer=metadata_service.QueryExecutionInputsAndOutputsRequest.serialize, - response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, - ) - return self._stubs['query_execution_inputs_and_outputs'] - - @property - def create_metadata_schema(self) -> Callable[ - [metadata_service.CreateMetadataSchemaRequest], - gca_metadata_schema.MetadataSchema]: - r"""Return a callable for the create metadata schema method over gRPC. - - Creates a MetadataSchema. - - Returns: - Callable[[~.CreateMetadataSchemaRequest], - ~.MetadataSchema]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_metadata_schema' not in self._stubs: - self._stubs['create_metadata_schema'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/CreateMetadataSchema', - request_serializer=metadata_service.CreateMetadataSchemaRequest.serialize, - response_deserializer=gca_metadata_schema.MetadataSchema.deserialize, - ) - return self._stubs['create_metadata_schema'] - - @property - def get_metadata_schema(self) -> Callable[ - [metadata_service.GetMetadataSchemaRequest], - metadata_schema.MetadataSchema]: - r"""Return a callable for the get metadata schema method over gRPC. - - Retrieves a specific MetadataSchema. - - Returns: - Callable[[~.GetMetadataSchemaRequest], - ~.MetadataSchema]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_metadata_schema' not in self._stubs: - self._stubs['get_metadata_schema'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/GetMetadataSchema', - request_serializer=metadata_service.GetMetadataSchemaRequest.serialize, - response_deserializer=metadata_schema.MetadataSchema.deserialize, - ) - return self._stubs['get_metadata_schema'] - - @property - def list_metadata_schemas(self) -> Callable[ - [metadata_service.ListMetadataSchemasRequest], - metadata_service.ListMetadataSchemasResponse]: - r"""Return a callable for the list metadata schemas method over gRPC. - - Lists MetadataSchemas. - - Returns: - Callable[[~.ListMetadataSchemasRequest], - ~.ListMetadataSchemasResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_metadata_schemas' not in self._stubs: - self._stubs['list_metadata_schemas'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/ListMetadataSchemas', - request_serializer=metadata_service.ListMetadataSchemasRequest.serialize, - response_deserializer=metadata_service.ListMetadataSchemasResponse.deserialize, - ) - return self._stubs['list_metadata_schemas'] - - @property - def query_artifact_lineage_subgraph(self) -> Callable[ - [metadata_service.QueryArtifactLineageSubgraphRequest], - lineage_subgraph.LineageSubgraph]: - r"""Return a callable for the query artifact lineage - subgraph method over gRPC. - - Retrieves lineage of an Artifact represented through - Artifacts and Executions connected by Event edges and - returned as a LineageSubgraph. - - Returns: - Callable[[~.QueryArtifactLineageSubgraphRequest], - ~.LineageSubgraph]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'query_artifact_lineage_subgraph' not in self._stubs: - self._stubs['query_artifact_lineage_subgraph'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/QueryArtifactLineageSubgraph', - request_serializer=metadata_service.QueryArtifactLineageSubgraphRequest.serialize, - response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, - ) - return self._stubs['query_artifact_lineage_subgraph'] - - -__all__ = ( - 'MetadataServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py deleted file mode 100644 index 419be7161d..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,955 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1beta1.types import artifact -from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact -from google.cloud.aiplatform_v1beta1.types import context -from google.cloud.aiplatform_v1beta1.types import context as gca_context -from google.cloud.aiplatform_v1beta1.types import execution -from google.cloud.aiplatform_v1beta1.types import execution as gca_execution -from google.cloud.aiplatform_v1beta1.types import lineage_subgraph -from google.cloud.aiplatform_v1beta1.types import metadata_schema -from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema -from google.cloud.aiplatform_v1beta1.types import metadata_service -from google.cloud.aiplatform_v1beta1.types import metadata_store -from google.longrunning import operations_pb2 # type: ignore -from .base import MetadataServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import MetadataServiceGrpcTransport - - -class MetadataServiceGrpcAsyncIOTransport(MetadataServiceTransport): - """gRPC AsyncIO backend transport for MetadataService. - - Service for reading and writing metadata entries. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): An optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`.
- quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): An optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self-signed JWTs should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason.
- google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_metadata_store(self) -> Callable[ - [metadata_service.CreateMetadataStoreRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create metadata store method over gRPC. - - Initializes a MetadataStore, including allocation of - resources. - - Returns: - Callable[[~.CreateMetadataStoreRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
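# In this AsyncIO variant each cached stub returns an awaitable, so calls are
# driven from a coroutine. A minimal usage sketch against an already-built
# ``async_transport`` (MetadataServiceGrpcAsyncIOTransport; the resource name
# is a placeholder):
from google.cloud.aiplatform_v1beta1.types import metadata_service

async def fetch_store(async_transport):
    # Awaiting the unary-unary call yields the deserialized MetadataStore.
    return await async_transport.get_metadata_store(
        metadata_service.GetMetadataStoreRequest(
            name="projects/my-project/locations/us-central1/metadataStores/default"
        )
    )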
- if 'create_metadata_store' not in self._stubs: - self._stubs['create_metadata_store'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/CreateMetadataStore', - request_serializer=metadata_service.CreateMetadataStoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_metadata_store'] - - @property - def get_metadata_store(self) -> Callable[ - [metadata_service.GetMetadataStoreRequest], - Awaitable[metadata_store.MetadataStore]]: - r"""Return a callable for the get metadata store method over gRPC. - - Retrieves a specific MetadataStore. - - Returns: - Callable[[~.GetMetadataStoreRequest], - Awaitable[~.MetadataStore]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_metadata_store' not in self._stubs: - self._stubs['get_metadata_store'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/GetMetadataStore', - request_serializer=metadata_service.GetMetadataStoreRequest.serialize, - response_deserializer=metadata_store.MetadataStore.deserialize, - ) - return self._stubs['get_metadata_store'] - - @property - def list_metadata_stores(self) -> Callable[ - [metadata_service.ListMetadataStoresRequest], - Awaitable[metadata_service.ListMetadataStoresResponse]]: - r"""Return a callable for the list metadata stores method over gRPC. - - Lists MetadataStores for a Location. - - Returns: - Callable[[~.ListMetadataStoresRequest], - Awaitable[~.ListMetadataStoresResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_metadata_stores' not in self._stubs: - self._stubs['list_metadata_stores'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/ListMetadataStores', - request_serializer=metadata_service.ListMetadataStoresRequest.serialize, - response_deserializer=metadata_service.ListMetadataStoresResponse.deserialize, - ) - return self._stubs['list_metadata_stores'] - - @property - def delete_metadata_store(self) -> Callable[ - [metadata_service.DeleteMetadataStoreRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete metadata store method over gRPC. - - Deletes a single MetadataStore. - - Returns: - Callable[[~.DeleteMetadataStoreRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
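# CreateMetadataStore and DeleteMetadataStore hand back raw
# ``google.longrunning`` Operation messages at this layer; the cached
# ``operations_client`` above is what lets a caller poll them. A hedged sketch
# of waiting for completion (the hand-written client wraps this for you):
import asyncio

async def wait_until_done(async_transport, op):
    ops = async_transport.operations_client  # OperationsAsyncClient
    while not op.done:
        await asyncio.sleep(1.0)
        op = await ops.get_operation(op.name)  # re-fetch by resource name
    return op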
- if 'delete_metadata_store' not in self._stubs: - self._stubs['delete_metadata_store'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/DeleteMetadataStore', - request_serializer=metadata_service.DeleteMetadataStoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_metadata_store'] - - @property - def create_artifact(self) -> Callable[ - [metadata_service.CreateArtifactRequest], - Awaitable[gca_artifact.Artifact]]: - r"""Return a callable for the create artifact method over gRPC. - - Creates an Artifact associated with a MetadataStore. - - Returns: - Callable[[~.CreateArtifactRequest], - Awaitable[~.Artifact]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_artifact' not in self._stubs: - self._stubs['create_artifact'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/CreateArtifact', - request_serializer=metadata_service.CreateArtifactRequest.serialize, - response_deserializer=gca_artifact.Artifact.deserialize, - ) - return self._stubs['create_artifact'] - - @property - def get_artifact(self) -> Callable[ - [metadata_service.GetArtifactRequest], - Awaitable[artifact.Artifact]]: - r"""Return a callable for the get artifact method over gRPC. - - Retrieves a specific Artifact. - - Returns: - Callable[[~.GetArtifactRequest], - Awaitable[~.Artifact]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_artifact' not in self._stubs: - self._stubs['get_artifact'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/GetArtifact', - request_serializer=metadata_service.GetArtifactRequest.serialize, - response_deserializer=artifact.Artifact.deserialize, - ) - return self._stubs['get_artifact'] - - @property - def list_artifacts(self) -> Callable[ - [metadata_service.ListArtifactsRequest], - Awaitable[metadata_service.ListArtifactsResponse]]: - r"""Return a callable for the list artifacts method over gRPC. - - Lists Artifacts in the MetadataStore. - - Returns: - Callable[[~.ListArtifactsRequest], - Awaitable[~.ListArtifactsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_artifacts' not in self._stubs: - self._stubs['list_artifacts'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/ListArtifacts', - request_serializer=metadata_service.ListArtifactsRequest.serialize, - response_deserializer=metadata_service.ListArtifactsResponse.deserialize, - ) - return self._stubs['list_artifacts'] - - @property - def update_artifact(self) -> Callable[ - [metadata_service.UpdateArtifactRequest], - Awaitable[gca_artifact.Artifact]]: - r"""Return a callable for the update artifact method over gRPC. - - Updates a stored Artifact. 
- - Returns: - Callable[[~.UpdateArtifactRequest], - Awaitable[~.Artifact]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_artifact' not in self._stubs: - self._stubs['update_artifact'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/UpdateArtifact', - request_serializer=metadata_service.UpdateArtifactRequest.serialize, - response_deserializer=gca_artifact.Artifact.deserialize, - ) - return self._stubs['update_artifact'] - - @property - def create_context(self) -> Callable[ - [metadata_service.CreateContextRequest], - Awaitable[gca_context.Context]]: - r"""Return a callable for the create context method over gRPC. - - Creates a Context associated with a MetadataStore. - - Returns: - Callable[[~.CreateContextRequest], - Awaitable[~.Context]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_context' not in self._stubs: - self._stubs['create_context'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/CreateContext', - request_serializer=metadata_service.CreateContextRequest.serialize, - response_deserializer=gca_context.Context.deserialize, - ) - return self._stubs['create_context'] - - @property - def get_context(self) -> Callable[ - [metadata_service.GetContextRequest], - Awaitable[context.Context]]: - r"""Return a callable for the get context method over gRPC. - - Retrieves a specific Context. - - Returns: - Callable[[~.GetContextRequest], - Awaitable[~.Context]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_context' not in self._stubs: - self._stubs['get_context'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/GetContext', - request_serializer=metadata_service.GetContextRequest.serialize, - response_deserializer=context.Context.deserialize, - ) - return self._stubs['get_context'] - - @property - def list_contexts(self) -> Callable[ - [metadata_service.ListContextsRequest], - Awaitable[metadata_service.ListContextsResponse]]: - r"""Return a callable for the list contexts method over gRPC. - - Lists Contexts on the MetadataStore. - - Returns: - Callable[[~.ListContextsRequest], - Awaitable[~.ListContextsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_contexts' not in self._stubs: - self._stubs['list_contexts'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/ListContexts', - request_serializer=metadata_service.ListContextsRequest.serialize, - response_deserializer=metadata_service.ListContextsResponse.deserialize, - ) - return self._stubs['list_contexts'] - - @property - def update_context(self) -> Callable[ - [metadata_service.UpdateContextRequest], - Awaitable[gca_context.Context]]: - r"""Return a callable for the update context method over gRPC. - - Updates a stored Context. - - Returns: - Callable[[~.UpdateContextRequest], - Awaitable[~.Context]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_context' not in self._stubs: - self._stubs['update_context'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/UpdateContext', - request_serializer=metadata_service.UpdateContextRequest.serialize, - response_deserializer=gca_context.Context.deserialize, - ) - return self._stubs['update_context'] - - @property - def delete_context(self) -> Callable[ - [metadata_service.DeleteContextRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete context method over gRPC. - - Deletes a stored Context. - - Returns: - Callable[[~.DeleteContextRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_context' not in self._stubs: - self._stubs['delete_context'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/DeleteContext', - request_serializer=metadata_service.DeleteContextRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_context'] - - @property - def add_context_artifacts_and_executions(self) -> Callable[ - [metadata_service.AddContextArtifactsAndExecutionsRequest], - Awaitable[metadata_service.AddContextArtifactsAndExecutionsResponse]]: - r"""Return a callable for the add context artifacts and - executions method over gRPC. - - Adds a set of Artifacts and Executions to a Context. - If any of the Artifacts or Executions have already been - added to a Context, they are simply skipped. - - Returns: - Callable[[~.AddContextArtifactsAndExecutionsRequest], - Awaitable[~.AddContextArtifactsAndExecutionsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'add_context_artifacts_and_executions' not in self._stubs: - self._stubs['add_context_artifacts_and_executions'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/AddContextArtifactsAndExecutions', - request_serializer=metadata_service.AddContextArtifactsAndExecutionsRequest.serialize, - response_deserializer=metadata_service.AddContextArtifactsAndExecutionsResponse.deserialize, - ) - return self._stubs['add_context_artifacts_and_executions'] - - @property - def add_context_children(self) -> Callable[ - [metadata_service.AddContextChildrenRequest], - Awaitable[metadata_service.AddContextChildrenResponse]]: - r"""Return a callable for the add context children method over gRPC. - - Adds a set of Contexts as children to a parent Context. If any - of the child Contexts have already been added to the parent - Context, they are simply skipped. If this call would create a - cycle or cause any Context to have more than 10 parents, the - request will fail with an INVALID_ARGUMENT error. - - Returns: - Callable[[~.AddContextChildrenRequest], - Awaitable[~.AddContextChildrenResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'add_context_children' not in self._stubs: - self._stubs['add_context_children'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/AddContextChildren', - request_serializer=metadata_service.AddContextChildrenRequest.serialize, - response_deserializer=metadata_service.AddContextChildrenResponse.deserialize, - ) - return self._stubs['add_context_children'] - - @property - def query_context_lineage_subgraph(self) -> Callable[ - [metadata_service.QueryContextLineageSubgraphRequest], - Awaitable[lineage_subgraph.LineageSubgraph]]: - r"""Return a callable for the query context lineage subgraph method over gRPC. - - Retrieves Artifacts and Executions within the - specified Context, connected by Event edges and returned - as a LineageSubgraph. - - Returns: - Callable[[~.QueryContextLineageSubgraphRequest], - Awaitable[~.LineageSubgraph]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'query_context_lineage_subgraph' not in self._stubs: - self._stubs['query_context_lineage_subgraph'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/QueryContextLineageSubgraph', - request_serializer=metadata_service.QueryContextLineageSubgraphRequest.serialize, - response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, - ) - return self._stubs['query_context_lineage_subgraph'] - - @property - def create_execution(self) -> Callable[ - [metadata_service.CreateExecutionRequest], - Awaitable[gca_execution.Execution]]: - r"""Return a callable for the create execution method over gRPC. - - Creates an Execution associated with a MetadataStore. - - Returns: - Callable[[~.CreateExecutionRequest], - Awaitable[~.Execution]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_execution' not in self._stubs: - self._stubs['create_execution'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/CreateExecution', - request_serializer=metadata_service.CreateExecutionRequest.serialize, - response_deserializer=gca_execution.Execution.deserialize, - ) - return self._stubs['create_execution'] - - @property - def get_execution(self) -> Callable[ - [metadata_service.GetExecutionRequest], - Awaitable[execution.Execution]]: - r"""Return a callable for the get execution method over gRPC. - - Retrieves a specific Execution. - - Returns: - Callable[[~.GetExecutionRequest], - Awaitable[~.Execution]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_execution' not in self._stubs: - self._stubs['get_execution'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/GetExecution', - request_serializer=metadata_service.GetExecutionRequest.serialize, - response_deserializer=execution.Execution.deserialize, - ) - return self._stubs['get_execution'] - - @property - def list_executions(self) -> Callable[ - [metadata_service.ListExecutionsRequest], - Awaitable[metadata_service.ListExecutionsResponse]]: - r"""Return a callable for the list executions method over gRPC. - - Lists Executions in the MetadataStore. - - Returns: - Callable[[~.ListExecutionsRequest], - Awaitable[~.ListExecutionsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_executions' not in self._stubs: - self._stubs['list_executions'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/ListExecutions', - request_serializer=metadata_service.ListExecutionsRequest.serialize, - response_deserializer=metadata_service.ListExecutionsResponse.deserialize, - ) - return self._stubs['list_executions'] - - @property - def update_execution(self) -> Callable[ - [metadata_service.UpdateExecutionRequest], - Awaitable[gca_execution.Execution]]: - r"""Return a callable for the update execution method over gRPC. - - Updates a stored Execution. - - Returns: - Callable[[~.UpdateExecutionRequest], - Awaitable[~.Execution]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_execution' not in self._stubs: - self._stubs['update_execution'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/UpdateExecution', - request_serializer=metadata_service.UpdateExecutionRequest.serialize, - response_deserializer=gca_execution.Execution.deserialize, - ) - return self._stubs['update_execution'] - - @property - def add_execution_events(self) -> Callable[ - [metadata_service.AddExecutionEventsRequest], - Awaitable[metadata_service.AddExecutionEventsResponse]]: - r"""Return a callable for the add execution events method over gRPC. - - Adds Events to the specified Execution. An Event - indicates whether an Artifact was used as an input or - output for an Execution. If an Event already exists - between the Execution and the Artifact, the Event is - skipped. - - Returns: - Callable[[~.AddExecutionEventsRequest], - Awaitable[~.AddExecutionEventsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'add_execution_events' not in self._stubs: - self._stubs['add_execution_events'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/AddExecutionEvents', - request_serializer=metadata_service.AddExecutionEventsRequest.serialize, - response_deserializer=metadata_service.AddExecutionEventsResponse.deserialize, - ) - return self._stubs['add_execution_events'] - - @property - def query_execution_inputs_and_outputs(self) -> Callable[ - [metadata_service.QueryExecutionInputsAndOutputsRequest], - Awaitable[lineage_subgraph.LineageSubgraph]]: - r"""Return a callable for the query execution inputs and - outputs method over gRPC. - - Obtains the set of input and output Artifacts for - this Execution, in the form of LineageSubgraph that also - contains the Execution and connecting Events. - - Returns: - Callable[[~.QueryExecutionInputsAndOutputsRequest], - Awaitable[~.LineageSubgraph]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'query_execution_inputs_and_outputs' not in self._stubs: - self._stubs['query_execution_inputs_and_outputs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/QueryExecutionInputsAndOutputs', - request_serializer=metadata_service.QueryExecutionInputsAndOutputsRequest.serialize, - response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, - ) - return self._stubs['query_execution_inputs_and_outputs'] - - @property - def create_metadata_schema(self) -> Callable[ - [metadata_service.CreateMetadataSchemaRequest], - Awaitable[gca_metadata_schema.MetadataSchema]]: - r"""Return a callable for the create metadata schema method over gRPC. - - Creates a MetadataSchema. - - Returns: - Callable[[~.CreateMetadataSchemaRequest], - Awaitable[~.MetadataSchema]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_metadata_schema' not in self._stubs: - self._stubs['create_metadata_schema'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/CreateMetadataSchema', - request_serializer=metadata_service.CreateMetadataSchemaRequest.serialize, - response_deserializer=gca_metadata_schema.MetadataSchema.deserialize, - ) - return self._stubs['create_metadata_schema'] - - @property - def get_metadata_schema(self) -> Callable[ - [metadata_service.GetMetadataSchemaRequest], - Awaitable[metadata_schema.MetadataSchema]]: - r"""Return a callable for the get metadata schema method over gRPC. - - Retrieves a specific MetadataSchema. - - Returns: - Callable[[~.GetMetadataSchemaRequest], - Awaitable[~.MetadataSchema]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_metadata_schema' not in self._stubs: - self._stubs['get_metadata_schema'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/GetMetadataSchema', - request_serializer=metadata_service.GetMetadataSchemaRequest.serialize, - response_deserializer=metadata_schema.MetadataSchema.deserialize, - ) - return self._stubs['get_metadata_schema'] - - @property - def list_metadata_schemas(self) -> Callable[ - [metadata_service.ListMetadataSchemasRequest], - Awaitable[metadata_service.ListMetadataSchemasResponse]]: - r"""Return a callable for the list metadata schemas method over gRPC. - - Lists MetadataSchemas. - - Returns: - Callable[[~.ListMetadataSchemasRequest], - Awaitable[~.ListMetadataSchemasResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_metadata_schemas' not in self._stubs: - self._stubs['list_metadata_schemas'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/ListMetadataSchemas', - request_serializer=metadata_service.ListMetadataSchemasRequest.serialize, - response_deserializer=metadata_service.ListMetadataSchemasResponse.deserialize, - ) - return self._stubs['list_metadata_schemas'] - - @property - def query_artifact_lineage_subgraph(self) -> Callable[ - [metadata_service.QueryArtifactLineageSubgraphRequest], - Awaitable[lineage_subgraph.LineageSubgraph]]: - r"""Return a callable for the query artifact lineage - subgraph method over gRPC. - - Retrieves lineage of an Artifact represented through - Artifacts and Executions connected by Event edges and - returned as a LineageSubgraph. - - Returns: - Callable[[~.QueryArtifactLineageSubgraphRequest], - Awaitable[~.LineageSubgraph]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
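Each of these properties returns a plain callable that can be awaited directly with a request message; gRPC applies the registered (de)serializers on the wire. A hedged sketch of driving one callable by hand (normally the generated client layers retry, timeout, and routing metadata on top of this):

    from google.cloud.aiplatform_v1beta1.types import metadata_service

    async def fetch_schema(transport, name: str):
        # `transport` is assumed to be an already-constructed
        # MetadataServiceGrpcAsyncIOTransport with an open channel.
        request = metadata_service.GetMetadataSchemaRequest(name=name)
        return await transport.get_metadata_schema(request)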
- if 'query_artifact_lineage_subgraph' not in self._stubs: - self._stubs['query_artifact_lineage_subgraph'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/QueryArtifactLineageSubgraph', - request_serializer=metadata_service.QueryArtifactLineageSubgraphRequest.serialize, - response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, - ) - return self._stubs['query_artifact_lineage_subgraph'] - - -__all__ = ( - 'MetadataServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/__init__.py deleted file mode 100644 index b32b10b1d7..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import MigrationServiceClient -from .async_client import MigrationServiceAsyncClient - -__all__ = ( - 'MigrationServiceClient', - 'MigrationServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py deleted file mode 100644 index 123a0fd369..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py +++ /dev/null @@ -1,376 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.migration_service import pagers -from google.cloud.aiplatform_v1beta1.types import migratable_resource -from google.cloud.aiplatform_v1beta1.types import migration_service -from .transports.base import MigrationServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import MigrationServiceGrpcAsyncIOTransport -from .client import MigrationServiceClient - - -class MigrationServiceAsyncClient: - """A service that migrates resources from automl.googleapis.com, - datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. - """ - - _client: MigrationServiceClient - - DEFAULT_ENDPOINT = MigrationServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = MigrationServiceClient.DEFAULT_MTLS_ENDPOINT - - annotated_dataset_path = staticmethod(MigrationServiceClient.annotated_dataset_path) - parse_annotated_dataset_path = staticmethod(MigrationServiceClient.parse_annotated_dataset_path) - dataset_path = staticmethod(MigrationServiceClient.dataset_path) - parse_dataset_path = staticmethod(MigrationServiceClient.parse_dataset_path) - dataset_path = staticmethod(MigrationServiceClient.dataset_path) - parse_dataset_path = staticmethod(MigrationServiceClient.parse_dataset_path) - dataset_path = staticmethod(MigrationServiceClient.dataset_path) - parse_dataset_path = staticmethod(MigrationServiceClient.parse_dataset_path) - model_path = staticmethod(MigrationServiceClient.model_path) - parse_model_path = staticmethod(MigrationServiceClient.parse_model_path) - model_path = staticmethod(MigrationServiceClient.model_path) - parse_model_path = staticmethod(MigrationServiceClient.parse_model_path) - version_path = staticmethod(MigrationServiceClient.version_path) - parse_version_path = staticmethod(MigrationServiceClient.parse_version_path) - common_billing_account_path = staticmethod(MigrationServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(MigrationServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(MigrationServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(MigrationServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(MigrationServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(MigrationServiceClient.parse_common_organization_path) - common_project_path = staticmethod(MigrationServiceClient.common_project_path) - parse_common_project_path = staticmethod(MigrationServiceClient.parse_common_project_path) - common_location_path = staticmethod(MigrationServiceClient.common_location_path) - parse_common_location_path = staticmethod(MigrationServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. 
- - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - MigrationServiceAsyncClient: The constructed client. - """ - return MigrationServiceClient.from_service_account_info.__func__(MigrationServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - MigrationServiceAsyncClient: The constructed client. - """ - return MigrationServiceClient.from_service_account_file.__func__(MigrationServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> MigrationServiceTransport: - """Returns the transport used by the client instance. - - Returns: - MigrationServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(MigrationServiceClient).get_transport_class, type(MigrationServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, MigrationServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the migration service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.MigrationServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. 
- """ - self._client = MigrationServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def search_migratable_resources(self, - request: migration_service.SearchMigratableResourcesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.SearchMigratableResourcesAsyncPager: - r"""Searches all of the resources in - automl.googleapis.com, datalabeling.googleapis.com and - ml.googleapis.com that can be migrated to Vertex AI's - given location. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest`): - The request object. Request message for - [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. - parent (:class:`str`): - Required. The location that the migratable resources - should be searched from. It's the Vertex AI location - that the resources can be migrated to, not the - resources' original location. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.migration_service.pagers.SearchMigratableResourcesAsyncPager: - Response message for - [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = migration_service.SearchMigratableResourcesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.search_migratable_resources, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.SearchMigratableResourcesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def batch_migrate_resources(self, - request: migration_service.BatchMigrateResourcesRequest = None, - *, - parent: str = None, - migrate_resource_requests: Sequence[migration_service.MigrateResourceRequest] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Batch migrates resources from ml.googleapis.com, - automl.googleapis.com, and datalabeling.googleapis.com - to Vertex AI. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesRequest`): - The request object. Request message for - [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. - parent (:class:`str`): - Required. The location of the migrated resource will - live in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - migrate_resource_requests (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest]`): - Required. The request messages - specifying the resources to migrate. - They must be in the same location as the - destination. Up to 50 resources can be - migrated in one batch. - - This corresponds to the ``migrate_resource_requests`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesResponse` - Response message for - [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, migrate_resource_requests]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = migration_service.BatchMigrateResourcesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if migrate_resource_requests: - request.migrate_resource_requests.extend(migrate_resource_requests) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.batch_migrate_resources, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
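Taken together with the constructor above, end-to-end use of this async client would look roughly like the sketch below; the project and location values are placeholders, and credentials are assumed to come from the environment:

    import asyncio

    from google.cloud.aiplatform_v1beta1.services.migration_service import (
        MigrationServiceAsyncClient,
    )

    async def main():
        client = MigrationServiceAsyncClient()  # ADC credentials assumed
        parent = "projects/my-project/locations/us-central1"  # placeholder

        # The async pager resolves additional pages lazily during iteration.
        pager = await client.search_migratable_resources(parent=parent)
        resources = [r async for r in pager]

        # batch_migrate_resources returns a long-running operation whose
        # result() coroutine yields the BatchMigrateResourcesResponse.
        op = await client.batch_migrate_resources(
            parent=parent,
            migrate_resource_requests=[
                # MigrateResourceRequest entries would go here (placeholder).
            ],
        )
        response = await op.result()
        print(len(resources), response)

    asyncio.run(main())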
- response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - migration_service.BatchMigrateResourcesResponse, - metadata_type=migration_service.BatchMigrateResourcesOperationMetadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "MigrationServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/client.py deleted file mode 100644 index bdd4821e25..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/client.py +++ /dev/null @@ -1,617 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from distutils import util -import os -import re -from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.migration_service import pagers -from google.cloud.aiplatform_v1beta1.types import migratable_resource -from google.cloud.aiplatform_v1beta1.types import migration_service -from .transports.base import MigrationServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import MigrationServiceGrpcTransport -from .transports.grpc_asyncio import MigrationServiceGrpcAsyncIOTransport - - -class MigrationServiceClientMeta(type): - """Metaclass for the MigrationService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[MigrationServiceTransport]] - _transport_registry["grpc"] = MigrationServiceGrpcTransport - _transport_registry["grpc_asyncio"] = MigrationServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[MigrationServiceTransport]: - """Returns an appropriate transport class. 
- - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class MigrationServiceClient(metaclass=MigrationServiceClientMeta): - """A service that migrates resources from automl.googleapis.com, - datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - MigrationServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - MigrationServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> MigrationServiceTransport: - """Returns the transport used by the client instance. - - Returns: - MigrationServiceTransport: The transport used by the client - instance.
""" - return self._transport - - @staticmethod - def annotated_dataset_path(project: str,dataset: str,annotated_dataset: str,) -> str: - """Returns a fully-qualified annotated_dataset string.""" - return "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}".format(project=project, dataset=dataset, annotated_dataset=annotated_dataset, ) - - @staticmethod - def parse_annotated_dataset_path(path: str) -> Dict[str,str]: - """Parses a annotated_dataset path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)/annotatedDatasets/(?P<annotated_dataset>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def dataset_path(project: str,dataset: str,) -> str: - """Returns a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format(project=project, dataset=dataset, ) - - @staticmethod - def parse_dataset_path(path: str) -> Dict[str,str]: - """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def dataset_path(project: str,location: str,dataset: str,) -> str: - """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - - @staticmethod - def parse_dataset_path(path: str) -> Dict[str,str]: - """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def dataset_path(project: str,location: str,dataset: str,) -> str: - """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - - @staticmethod - def parse_dataset_path(path: str) -> Dict[str,str]: - """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_path(project: str,location: str,model: str,) -> str: - """Returns a fully-qualified model string.""" - return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - - @staticmethod - def parse_model_path(path: str) -> Dict[str,str]: - """Parses a model path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_path(project: str,location: str,model: str,) -> str: - """Returns a fully-qualified model string.""" - return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - - @staticmethod - def parse_model_path(path: str) -> Dict[str,str]: - """Parses a model path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def version_path(project: str,model: str,version: str,) -> str: - """Returns a fully-qualified version string.""" - return "projects/{project}/models/{model}/versions/{version}".format(project=project, model=model, version=version, ) - - @staticmethod - def parse_version_path(path: str) -> Dict[str,str]: - """Parses a version path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/models/(?P<model>.+?)/versions/(?P<version>.+?)$", path) - return m.groupdict() if m else {}
- - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P<folder>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P<organization>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, MigrationServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the migration service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, MigrationServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client.
GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, MigrationServiceTransport): - # transport is a MigrationServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - ) - - def search_migratable_resources(self, - request: migration_service.SearchMigratableResourcesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.SearchMigratableResourcesPager: - r"""Searches all of the resources in - automl.googleapis.com, datalabeling.googleapis.com and - ml.googleapis.com that can be migrated to Vertex AI's - given location. - - Args: - request (google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest): - The request object. Request message for - [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. - parent (str): - Required. The location that the migratable resources - should be searched from. It's the Vertex AI location - that the resources can be migrated to, not the - resources' original location. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.migration_service.pagers.SearchMigratableResourcesPager: - Response message for - [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a migration_service.SearchMigratableResourcesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, migration_service.SearchMigratableResourcesRequest): - request = migration_service.SearchMigratableResourcesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.search_migratable_resources] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.SearchMigratableResourcesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def batch_migrate_resources(self, - request: migration_service.BatchMigrateResourcesRequest = None, - *, - parent: str = None, - migrate_resource_requests: Sequence[migration_service.MigrateResourceRequest] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Batch migrates resources from ml.googleapis.com, - automl.googleapis.com, and datalabeling.googleapis.com - to Vertex AI. - - Args: - request (google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesRequest): - The request object. Request message for - [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. - parent (str): - Required. The location of the migrated resource will - live in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - migrate_resource_requests (Sequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest]): - Required. The request messages - specifying the resources to migrate. - They must be in the same location as the - destination. Up to 50 resources can be - migrated in one batch. - - This corresponds to the ``migrate_resource_requests`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesResponse` - Response message for - [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, migrate_resource_requests]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a migration_service.BatchMigrateResourcesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, migration_service.BatchMigrateResourcesRequest): - request = migration_service.BatchMigrateResourcesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
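The synchronous pager wrapped above behaves like a plain iterable, fetching subsequent pages transparently as iteration crosses page boundaries; a short usage sketch with a placeholder parent:

    from google.cloud.aiplatform_v1beta1.services.migration_service import (
        MigrationServiceClient,
    )

    client = MigrationServiceClient()  # credentials resolved from environment
    for resource in client.search_migratable_resources(
        parent="projects/my-project/locations/us-central1",  # placeholder
    ):
        print(resource)  # each item is a MigratableResource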
- if parent is not None: - request.parent = parent - if migrate_resource_requests is not None: - request.migrate_resource_requests = migrate_resource_requests - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.batch_migrate_resources] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - migration_service.BatchMigrateResourcesResponse, - metadata_type=migration_service.BatchMigrateResourcesOperationMetadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "MigrationServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/pagers.py deleted file mode 100644 index c15610a912..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/pagers.py +++ /dev/null @@ -1,141 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.cloud.aiplatform_v1beta1.types import migratable_resource -from google.cloud.aiplatform_v1beta1.types import migration_service - - -class SearchMigratableResourcesPager: - """A pager for iterating through ``search_migratable_resources`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``migratable_resources`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``SearchMigratableResources`` requests and continue to iterate - through the ``migratable_resources`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., migration_service.SearchMigratableResourcesResponse], - request: migration_service.SearchMigratableResourcesRequest, - response: migration_service.SearchMigratableResourcesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = migration_service.SearchMigratableResourcesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[migration_service.SearchMigratableResourcesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[migratable_resource.MigratableResource]: - for page in self.pages: - yield from page.migratable_resources - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class SearchMigratableResourcesAsyncPager: - """A pager for iterating through ``search_migratable_resources`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``migratable_resources`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``SearchMigratableResources`` requests and continue to iterate - through the ``migratable_resources`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[migration_service.SearchMigratableResourcesResponse]], - request: migration_service.SearchMigratableResourcesRequest, - response: migration_service.SearchMigratableResourcesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = migration_service.SearchMigratableResourcesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[migration_service.SearchMigratableResourcesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[migratable_resource.MigratableResource]: - async def async_generator(): - async for page in self.pages: - for response in page.migratable_resources: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/__init__.py deleted file mode 100644 index 8f036c410e..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import MigrationServiceTransport -from .grpc import MigrationServiceGrpcTransport -from .grpc_asyncio import MigrationServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[MigrationServiceTransport]] -_transport_registry['grpc'] = MigrationServiceGrpcTransport -_transport_registry['grpc_asyncio'] = MigrationServiceGrpcAsyncIOTransport - -__all__ = ( - 'MigrationServiceTransport', - 'MigrationServiceGrpcTransport', - 'MigrationServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py deleted file mode 100644 index 2d86b81d6b..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py +++ /dev/null @@ -1,189 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import migration_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class MigrationServiceTransport(abc.ABC): - """Abstract transport class for MigrationService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. 
-        if credentials and credentials_file:
-            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
-
-        if credentials_file is not None:
-            credentials, _ = google.auth.load_credentials_from_file(
-                                credentials_file,
-                                **scopes_kwargs,
-                                quota_project_id=quota_project_id
-                            )
-
-        elif credentials is None:
-            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
-
-        # If the credentials are service account credentials, then always try to use self signed JWT.
-        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
-            credentials = credentials.with_always_use_jwt_access(True)
-
-        # Save the credentials.
-        self._credentials = credentials
-
-    # TODO(busunkim): This method is in the base transport
-    # to avoid duplicating code across the transport classes. These functions
-    # should be deleted once the minimum required version of google-auth is increased.
-
-    # TODO: Remove this function once google-auth >= 1.25.0 is required
-    @classmethod
-    def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
-        """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
-
-        scopes_kwargs = {}
-
-        if _GOOGLE_AUTH_VERSION and (
-            packaging.version.parse(_GOOGLE_AUTH_VERSION)
-            >= packaging.version.parse("1.25.0")
-        ):
-            scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
-        else:
-            scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
-
-        return scopes_kwargs
-
-    def _prep_wrapped_messages(self, client_info):
-        # Precompute the wrapped methods.
-        self._wrapped_methods = {
-            self.search_migratable_resources: gapic_v1.method.wrap_method(
-                self.search_migratable_resources,
-                default_timeout=None,
-                client_info=client_info,
-            ),
-            self.batch_migrate_resources: gapic_v1.method.wrap_method(
-                self.batch_migrate_resources,
-                default_timeout=None,
-                client_info=client_info,
-            ),
-        }
-
-    @property
-    def operations_client(self) -> operations_v1.OperationsClient:
-        """Return the client designed to process long-running operations."""
-        raise NotImplementedError()
-
-    @property
-    def search_migratable_resources(self) -> Callable[
-        [migration_service.SearchMigratableResourcesRequest],
-        Union[
-            migration_service.SearchMigratableResourcesResponse,
-            Awaitable[migration_service.SearchMigratableResourcesResponse]
-        ]]:
-        raise NotImplementedError()
-
-    @property
-    def batch_migrate_resources(self) -> Callable[
-        [migration_service.BatchMigrateResourcesRequest],
-        Union[
-            operations_pb2.Operation,
-            Awaitable[operations_pb2.Operation]
-        ]]:
-        raise NotImplementedError()
-
-
-__all__ = (
-    'MigrationServiceTransport',
-)
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py
deleted file mode 100644
index 4729745415..0000000000
--- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py
+++ /dev/null
@@ -1,303 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
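The ``_get_scopes_kwargs`` helper above is a compatibility shim: google-auth gained a ``default_scopes`` argument in 1.25.0, so older releases must receive the merged ``scopes`` value instead. A standalone sketch of the same version gate (the function name is local to this example):

    import packaging.version

    AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)

    def scopes_kwargs(google_auth_version, scopes=None):
        # google-auth >= 1.25.0 understands default_scopes; older versions do not.
        if google_auth_version and (
            packaging.version.parse(google_auth_version)
            >= packaging.version.parse("1.25.0")
        ):
            return {"scopes": scopes, "default_scopes": AUTH_SCOPES}
        return {"scopes": scopes or AUTH_SCOPES}

    print(scopes_kwargs("1.30.0"))  # passes default_scopes separately
    print(scopes_kwargs("1.20.0"))  # falls back to merging into scopes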
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import warnings
-from typing import Callable, Dict, Optional, Sequence, Tuple, Union
-
-from google.api_core import grpc_helpers  # type: ignore
-from google.api_core import operations_v1  # type: ignore
-from google.api_core import gapic_v1  # type: ignore
-import google.auth  # type: ignore
-from google.auth import credentials as ga_credentials  # type: ignore
-from google.auth.transport.grpc import SslCredentials  # type: ignore
-
-import grpc  # type: ignore
-
-from google.cloud.aiplatform_v1beta1.types import migration_service
-from google.longrunning import operations_pb2  # type: ignore
-from .base import MigrationServiceTransport, DEFAULT_CLIENT_INFO
-
-
-class MigrationServiceGrpcTransport(MigrationServiceTransport):
-    """gRPC backend transport for MigrationService.
-
-    A service that migrates resources from automl.googleapis.com,
-    datalabeling.googleapis.com and ml.googleapis.com to Vertex AI.
-
-    This class defines the same methods as the primary client, so the
-    primary client can load the underlying transport implementation
-    and call it.
-
-    It sends protocol buffers over the wire using gRPC (which is built on
-    top of HTTP/2); the ``grpcio`` package must be installed.
-    """
-    _stubs: Dict[str, Callable]
-
-    def __init__(self, *,
-            host: str = 'aiplatform.googleapis.com',
-            credentials: ga_credentials.Credentials = None,
-            credentials_file: str = None,
-            scopes: Sequence[str] = None,
-            channel: grpc.Channel = None,
-            api_mtls_endpoint: str = None,
-            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
-            ssl_channel_credentials: grpc.ChannelCredentials = None,
-            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
-            quota_project_id: Optional[str] = None,
-            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
-            always_use_jwt_access: Optional[bool] = False,
-            ) -> None:
-        """Instantiate the transport.
-
-        Args:
-            host (Optional[str]):
-                 The hostname to connect to.
-            credentials (Optional[google.auth.credentials.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify the application to the service; if none
-                are specified, the client will attempt to ascertain the
-                credentials from the environment.
-                This argument is ignored if ``channel`` is provided.
-            credentials_file (Optional[str]): A file with credentials that can
-                be loaded with :func:`google.auth.load_credentials_from_file`.
-                This argument is ignored if ``channel`` is provided.
-            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
-                ignored if ``channel`` is provided.
-            channel (Optional[grpc.Channel]): A ``Channel`` instance through
-                which to make calls.
-            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
-                If provided, it overrides the ``host`` argument and tries to create
-                a mutual TLS channel with client SSL credentials from
-                ``client_cert_source`` or application default SSL credentials.
-            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
-                Deprecated. A callback to provide client SSL certificate bytes and
-                private key bytes, both in PEM format.
It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. 
-        Args:
-            host (Optional[str]): The host for the channel to use.
-            credentials (Optional[~.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify this application to the service. If
-                none are specified, the client will attempt to ascertain
-                the credentials from the environment.
-            credentials_file (Optional[str]): A file with credentials that can
-                be loaded with :func:`google.auth.load_credentials_from_file`.
-                This argument is mutually exclusive with credentials.
-            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
-                service. These are only used when credentials are not specified and
-                are passed to :func:`google.auth.default`.
-            quota_project_id (Optional[str]): An optional project to use for billing
-                and quota.
-            kwargs (Optional[dict]): Keyword arguments, which are passed to the
-                channel creation.
-        Returns:
-            grpc.Channel: A gRPC channel object.
-
-        Raises:
-            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
-                and ``credentials_file`` are passed.
-        """
-
-        return grpc_helpers.create_channel(
-            host,
-            credentials=credentials,
-            credentials_file=credentials_file,
-            quota_project_id=quota_project_id,
-            default_scopes=cls.AUTH_SCOPES,
-            scopes=scopes,
-            default_host=cls.DEFAULT_HOST,
-            **kwargs
-        )
-
-    @property
-    def grpc_channel(self) -> grpc.Channel:
-        """Return the channel designed to connect to this service.
-        """
-        return self._grpc_channel
-
-    @property
-    def operations_client(self) -> operations_v1.OperationsClient:
-        """Create the client designed to process long-running operations.
-
-        This property caches on the instance; repeated calls return the same
-        client.
-        """
-        # Sanity check: Only create a new client if we do not already have one.
-        if self._operations_client is None:
-            self._operations_client = operations_v1.OperationsClient(
-                self.grpc_channel
-            )
-
-        # Return the client from cache.
-        return self._operations_client
-
-    @property
-    def search_migratable_resources(self) -> Callable[
-            [migration_service.SearchMigratableResourcesRequest],
-            migration_service.SearchMigratableResourcesResponse]:
-        r"""Return a callable for the search migratable resources method over gRPC.
-
-        Searches all of the resources in
-        automl.googleapis.com, datalabeling.googleapis.com and
-        ml.googleapis.com that can be migrated to Vertex AI's
-        given location.
-
-        Returns:
-            Callable[[~.SearchMigratableResourcesRequest],
-                    ~.SearchMigratableResourcesResponse]:
-                A function that, when called, will call the underlying RPC
-                on the server.
-        """
-        # Generate a "stub function" on-the-fly which will actually make
-        # the request.
-        # gRPC handles serialization and deserialization, so we just need
-        # to pass in the functions for each.
-        if 'search_migratable_resources' not in self._stubs:
-            self._stubs['search_migratable_resources'] = self.grpc_channel.unary_unary(
-                '/google.cloud.aiplatform.v1beta1.MigrationService/SearchMigratableResources',
-                request_serializer=migration_service.SearchMigratableResourcesRequest.serialize,
-                response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize,
-            )
-        return self._stubs['search_migratable_resources']
-
-    @property
-    def batch_migrate_resources(self) -> Callable[
-            [migration_service.BatchMigrateResourcesRequest],
-            operations_pb2.Operation]:
-        r"""Return a callable for the batch migrate resources method over gRPC.
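The property above shows the transport's stub convention: the ``unary_unary`` callable is built once, on first access, then served from ``self._stubs``. The same memoized-property pattern in isolation, with a hypothetical stand-in for the channel:

    from typing import Callable, Dict

    class FakeChannel:
        # Hypothetical stand-in for grpc.Channel, for illustration only.
        def unary_unary(self, method, request_serializer=None, response_deserializer=None):
            return lambda request: f"rpc to {method}"

    class StubHolder:
        def __init__(self, channel):
            self.grpc_channel = channel
            self._stubs: Dict[str, Callable] = {}

        @property
        def echo(self) -> Callable:
            # Create the stub lazily and cache it, as the transport does.
            if "echo" not in self._stubs:
                self._stubs["echo"] = self.grpc_channel.unary_unary("/example.Echo/Echo")
            return self._stubs["echo"]

    holder = StubHolder(FakeChannel())
    assert holder.echo is holder.echo  # created once, reused thereafter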
- - Batch migrates resources from ml.googleapis.com, - automl.googleapis.com, and datalabeling.googleapis.com - to Vertex AI. - - Returns: - Callable[[~.BatchMigrateResourcesRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'batch_migrate_resources' not in self._stubs: - self._stubs['batch_migrate_resources'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MigrationService/BatchMigrateResources', - request_serializer=migration_service.BatchMigrateResourcesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['batch_migrate_resources'] - - -__all__ = ( - 'MigrationServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py deleted file mode 100644 index 5643065d24..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,307 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1beta1.types import migration_service -from google.longrunning import operations_pb2 # type: ignore -from .base import MigrationServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import MigrationServiceGrpcTransport - - -class MigrationServiceGrpcAsyncIOTransport(MigrationServiceTransport): - """gRPC AsyncIO backend transport for MigrationService. - - A service that migrates resources from automl.googleapis.com, - datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. 
A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. 
- return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def search_migratable_resources(self) -> Callable[ - [migration_service.SearchMigratableResourcesRequest], - Awaitable[migration_service.SearchMigratableResourcesResponse]]: - r"""Return a callable for the search migratable resources method over gRPC. - - Searches all of the resources in - automl.googleapis.com, datalabeling.googleapis.com and - ml.googleapis.com that can be migrated to Vertex AI's - given location. - - Returns: - Callable[[~.SearchMigratableResourcesRequest], - Awaitable[~.SearchMigratableResourcesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'search_migratable_resources' not in self._stubs: - self._stubs['search_migratable_resources'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MigrationService/SearchMigratableResources', - request_serializer=migration_service.SearchMigratableResourcesRequest.serialize, - response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize, - ) - return self._stubs['search_migratable_resources'] - - @property - def batch_migrate_resources(self) -> Callable[ - [migration_service.BatchMigrateResourcesRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the batch migrate resources method over gRPC. - - Batch migrates resources from ml.googleapis.com, - automl.googleapis.com, and datalabeling.googleapis.com - to Vertex AI. - - Returns: - Callable[[~.BatchMigrateResourcesRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'batch_migrate_resources' not in self._stubs: - self._stubs['batch_migrate_resources'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MigrationService/BatchMigrateResources', - request_serializer=migration_service.BatchMigrateResourcesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['batch_migrate_resources'] - - -__all__ = ( - 'MigrationServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/__init__.py deleted file mode 100644 index 5c4d570d15..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
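Because the async constructor above short-circuits credential resolution whenever a ``channel`` is supplied (``credentials`` is set to ``False``), tests can inject a plain channel directly. A sketch under that assumption; the endpoint is hypothetical:

    from grpc.experimental import aio
    from google.cloud.aiplatform_v1beta1.services.migration_service.transports import (
        MigrationServiceGrpcAsyncIOTransport,
    )

    # No credentials are looked up when a channel is passed explicitly.
    channel = aio.insecure_channel("localhost:8080")  # hypothetical test endpoint
    transport = MigrationServiceGrpcAsyncIOTransport(channel=channel)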
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import ModelServiceClient -from .async_client import ModelServiceAsyncClient - -__all__ = ( - 'ModelServiceClient', - 'ModelServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py deleted file mode 100644 index 3d71a64ff8..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py +++ /dev/null @@ -1,1060 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.model_service import pagers -from google.cloud.aiplatform_v1beta1.types import deployed_model_ref -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import explanation -from google.cloud.aiplatform_v1beta1.types import model -from google.cloud.aiplatform_v1beta1.types import model as gca_model -from google.cloud.aiplatform_v1beta1.types import model_evaluation -from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice -from google.cloud.aiplatform_v1beta1.types import model_service -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import ModelServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport -from .client import ModelServiceClient - - -class ModelServiceAsyncClient: - """A service for managing Vertex AI's machine learning Models.""" - - _client: ModelServiceClient - - DEFAULT_ENDPOINT = ModelServiceClient.DEFAULT_ENDPOINT - 
DEFAULT_MTLS_ENDPOINT = ModelServiceClient.DEFAULT_MTLS_ENDPOINT - - endpoint_path = staticmethod(ModelServiceClient.endpoint_path) - parse_endpoint_path = staticmethod(ModelServiceClient.parse_endpoint_path) - model_path = staticmethod(ModelServiceClient.model_path) - parse_model_path = staticmethod(ModelServiceClient.parse_model_path) - model_evaluation_path = staticmethod(ModelServiceClient.model_evaluation_path) - parse_model_evaluation_path = staticmethod(ModelServiceClient.parse_model_evaluation_path) - model_evaluation_slice_path = staticmethod(ModelServiceClient.model_evaluation_slice_path) - parse_model_evaluation_slice_path = staticmethod(ModelServiceClient.parse_model_evaluation_slice_path) - training_pipeline_path = staticmethod(ModelServiceClient.training_pipeline_path) - parse_training_pipeline_path = staticmethod(ModelServiceClient.parse_training_pipeline_path) - common_billing_account_path = staticmethod(ModelServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(ModelServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(ModelServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(ModelServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(ModelServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(ModelServiceClient.parse_common_organization_path) - common_project_path = staticmethod(ModelServiceClient.common_project_path) - parse_common_project_path = staticmethod(ModelServiceClient.parse_common_project_path) - common_location_path = staticmethod(ModelServiceClient.common_location_path) - parse_common_location_path = staticmethod(ModelServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ModelServiceAsyncClient: The constructed client. - """ - return ModelServiceClient.from_service_account_info.__func__(ModelServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ModelServiceAsyncClient: The constructed client. - """ - return ModelServiceClient.from_service_account_file.__func__(ModelServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> ModelServiceTransport: - """Returns the transport used by the client instance. - - Returns: - ModelServiceTransport: The transport used by the client instance. 
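The two factory classmethods above delegate to the synchronous client's implementations via ``__func__`` so that an async client instance is returned. Usage follows the usual service-account pattern; the key file path below is hypothetical:

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.ModelServiceAsyncClient.from_service_account_file(
        "service-account.json",  # hypothetical key file path
    )
    # from_service_account_json is an alias for the same factory.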
- """ - return self._client.transport - - get_transport_class = functools.partial(type(ModelServiceClient).get_transport_class, type(ModelServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, ModelServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the model service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.ModelServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = ModelServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def upload_model(self, - request: model_service.UploadModelRequest = None, - *, - parent: str = None, - model: gca_model.Model = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Uploads a Model artifact into Vertex AI. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.UploadModelRequest`): - The request object. Request message for - [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel]. - parent (:class:`str`): - Required. The resource name of the Location into which - to upload the Model. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - model (:class:`google.cloud.aiplatform_v1beta1.types.Model`): - Required. The Model to create. - This corresponds to the ``model`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.UploadModelResponse` - Response message of - [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] - operation. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, model]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.UploadModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if model is not None: - request.model = model - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.upload_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - model_service.UploadModelResponse, - metadata_type=model_service.UploadModelOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_model(self, - request: model_service.GetModelRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model.Model: - r"""Gets a Model. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetModelRequest`): - The request object. Request message for - [ModelService.GetModel][google.cloud.aiplatform.v1beta1.ModelService.GetModel]. - name (:class:`str`): - Required. The name of the Model resource. Format: - ``projects/{project}/locations/{location}/models/{model}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Model: - A trained machine learning Model. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.GetModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
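``upload_model`` above enforces the GAPIC convention that the ``request`` object and the flattened ``parent``/``model`` arguments are mutually exclusive, and it wraps the raw Operation in an awaitable future. A hedged usage sketch; the project, location, and model are hypothetical:

    import asyncio
    from google.cloud import aiplatform_v1beta1

    async def upload_example():
        client = aiplatform_v1beta1.ModelServiceAsyncClient()
        lro = await client.upload_model(
            parent="projects/example-project/locations/us-central1",
            model=aiplatform_v1beta1.Model(display_name="example-model"),
        )
        # Mixing request= with parent=/model= would raise ValueError instead.
        response = await lro.result()  # waits for the long-running upload
        print(response.model)

    asyncio.run(upload_example())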
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_models(self, - request: model_service.ListModelsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelsAsyncPager: - r"""Lists Models in a Location. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListModelsRequest`): - The request object. Request message for - [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels]. - parent (:class:`str`): - Required. The resource name of the Location to list the - Models from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelsAsyncPager: - Response message for - [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.ListModelsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_models, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListModelsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def update_model(self, - request: model_service.UpdateModelRequest = None, - *, - model: gca_model.Model = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_model.Model: - r"""Updates a Model. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateModelRequest`): - The request object. Request message for - [ModelService.UpdateModel][google.cloud.aiplatform.v1beta1.ModelService.UpdateModel]. - model (:class:`google.cloud.aiplatform_v1beta1.types.Model`): - Required. The Model which replaces - the resource on the server. - - This corresponds to the ``model`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The update mask applies to the resource. For - the ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Model: - A trained machine learning Model. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([model, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.UpdateModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if model is not None: - request.model = model - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("model.name", request.model.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_model(self, - request: model_service.DeleteModelRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a Model. - Note: Model can only be deleted if there are no - DeployedModels created from it. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteModelRequest`): - The request object. Request message for - [ModelService.DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel]. - name (:class:`str`): - Required. The name of the Model resource to be deleted. 
- Format: - ``projects/{project}/locations/{location}/models/{model}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.DeleteModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def export_model(self, - request: model_service.ExportModelRequest = None, - *, - name: str = None, - output_config: model_service.ExportModelRequest.OutputConfig = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Exports a trained, exportable, Model to a location specified by - the user. A Model is considered to be exportable if it has at - least one [supported export - format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats]. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ExportModelRequest`): - The request object. Request message for - [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel]. - name (:class:`str`): - Required. The resource name of the Model to export. - Format: - ``projects/{project}/locations/{location}/models/{model}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
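``delete_model`` above returns a long-running operation whose terminal result is ``google.protobuf.Empty``; awaiting it is still worthwhile so that server-side failures surface as exceptions. A minimal sketch with a hypothetical model name:

    import asyncio
    from google.cloud import aiplatform_v1beta1

    async def delete_example():
        client = aiplatform_v1beta1.ModelServiceAsyncClient()
        lro = await client.delete_model(
            name="projects/example-project/locations/us-central1/models/123",
        )
        await lro.result()  # resolves to Empty; raises if the deletion failed

    asyncio.run(delete_example())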
- output_config (:class:`google.cloud.aiplatform_v1beta1.types.ExportModelRequest.OutputConfig`): - Required. The desired output location - and configuration. - - This corresponds to the ``output_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.ExportModelResponse` - Response message of - [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel] - operation. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, output_config]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.ExportModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if output_config is not None: - request.output_config = output_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.export_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - model_service.ExportModelResponse, - metadata_type=model_service.ExportModelOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_model_evaluation(self, - request: model_service.GetModelEvaluationRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model_evaluation.ModelEvaluation: - r"""Gets a ModelEvaluation. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetModelEvaluationRequest`): - The request object. Request message for - [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation]. - name (:class:`str`): - Required. The name of the ModelEvaluation resource. - Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.types.ModelEvaluation: - A collection of metrics calculated by - comparing Model's predictions on all of - the test data against annotations from - the test data. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.GetModelEvaluationRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_model_evaluation, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_model_evaluations(self, - request: model_service.ListModelEvaluationsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelEvaluationsAsyncPager: - r"""Lists ModelEvaluations in a Model. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest`): - The request object. Request message for - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. - parent (:class:`str`): - Required. The resource name of the Model to list the - ModelEvaluations from. Format: - ``projects/{project}/locations/{location}/models/{model}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationsAsyncPager: - Response message for - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.ListModelEvaluationsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
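-        # Illustrative only (not in the generated source): a flattened call such as
-        #   await client.list_model_evaluations(parent="projects/my-project/locations/us-central1/models/123")
-        # (placeholder resource name) relies on the assignment below to populate the request.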
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_model_evaluations, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListModelEvaluationsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_model_evaluation_slice(self, - request: model_service.GetModelEvaluationSliceRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model_evaluation_slice.ModelEvaluationSlice: - r"""Gets a ModelEvaluationSlice. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetModelEvaluationSliceRequest`): - The request object. Request message for - [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluationSlice]. - name (:class:`str`): - Required. The name of the ModelEvaluationSlice resource. - Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice: - A collection of metrics calculated by - comparing Model's predictions on a slice - of the test data against ground truth - annotations. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.GetModelEvaluationSliceRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_model_evaluation_slice, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def list_model_evaluation_slices(self, - request: model_service.ListModelEvaluationSlicesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelEvaluationSlicesAsyncPager: - r"""Lists ModelEvaluationSlices in a ModelEvaluation. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest`): - The request object. Request message for - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. - parent (:class:`str`): - Required. The resource name of the ModelEvaluation to - list the ModelEvaluationSlices from. Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationSlicesAsyncPager: - Response message for - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.ListModelEvaluationSlicesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_model_evaluation_slices, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListModelEvaluationSlicesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "ModelServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/client.py deleted file mode 100644 index 565b9f0598..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/client.py +++ /dev/null @@ -1,1283 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from distutils import util -import os -import re -from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.model_service import pagers -from google.cloud.aiplatform_v1beta1.types import deployed_model_ref -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import explanation -from google.cloud.aiplatform_v1beta1.types import model -from google.cloud.aiplatform_v1beta1.types import model as gca_model -from google.cloud.aiplatform_v1beta1.types import model_evaluation -from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice -from google.cloud.aiplatform_v1beta1.types import model_service -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import ModelServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import ModelServiceGrpcTransport -from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport - - -class ModelServiceClientMeta(type): - """Metaclass for the ModelService client. - - This provides class-level methods for building and retrieving - support objects (e.g. 
transport) without polluting the client instance
-    objects.
-    """
-    _transport_registry = OrderedDict()  # type: Dict[str, Type[ModelServiceTransport]]
-    _transport_registry["grpc"] = ModelServiceGrpcTransport
-    _transport_registry["grpc_asyncio"] = ModelServiceGrpcAsyncIOTransport
-
-    def get_transport_class(cls,
-            label: str = None,
-        ) -> Type[ModelServiceTransport]:
-        """Returns an appropriate transport class.
-
-        Args:
-            label: The name of the desired transport. If none is
-                provided, then the first transport in the registry is used.
-
-        Returns:
-            The transport class to use.
-        """
-        # If a specific transport is requested, return that one.
-        if label:
-            return cls._transport_registry[label]
-
-        # No transport is requested; return the default (that is, the first one
-        # in the dictionary).
-        return next(iter(cls._transport_registry.values()))
-
-
-class ModelServiceClient(metaclass=ModelServiceClientMeta):
-    """A service for managing Vertex AI's machine learning Models."""
-
-    @staticmethod
-    def _get_default_mtls_endpoint(api_endpoint):
-        """Converts api endpoint to mTLS endpoint.
-
-        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
-        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
-        Args:
-            api_endpoint (Optional[str]): the api endpoint to convert.
-        Returns:
-            str: converted mTLS api endpoint.
-        """
-        if not api_endpoint:
-            return api_endpoint
-
-        mtls_endpoint_re = re.compile(
-            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
-        )
-
-        m = mtls_endpoint_re.match(api_endpoint)
-        name, mtls, sandbox, googledomain = m.groups()
-        if mtls or not googledomain:
-            return api_endpoint
-
-        if sandbox:
-            return api_endpoint.replace(
-                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
-            )
-
-        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
-
-    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
-    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
-        DEFAULT_ENDPOINT
-    )
-
-    @classmethod
-    def from_service_account_info(cls, info: dict, *args, **kwargs):
-        """Creates an instance of this client using the provided credentials
-            info.
-
-        Args:
-            info (dict): The service account private key info.
-            args: Additional arguments to pass to the constructor.
-            kwargs: Additional arguments to pass to the constructor.
-
-        Returns:
-            ModelServiceClient: The constructed client.
-        """
-        credentials = service_account.Credentials.from_service_account_info(info)
-        kwargs["credentials"] = credentials
-        return cls(*args, **kwargs)
-
-    @classmethod
-    def from_service_account_file(cls, filename: str, *args, **kwargs):
-        """Creates an instance of this client using the provided credentials
-            file.
-
-        Args:
-            filename (str): The path to the service account private key json
-                file.
-            args: Additional arguments to pass to the constructor.
-            kwargs: Additional arguments to pass to the constructor.
-
-        Returns:
-            ModelServiceClient: The constructed client.
-        """
-        credentials = service_account.Credentials.from_service_account_file(
-            filename)
-        kwargs["credentials"] = credentials
-        return cls(*args, **kwargs)
-
-    from_service_account_json = from_service_account_file
-
-    @property
-    def transport(self) -> ModelServiceTransport:
-        """Returns the transport used by the client instance.
-
-        Returns:
-            ModelServiceTransport: The transport used by the client
-                instance.
-        """
-        return self._transport
-
-    @staticmethod
-    def endpoint_path(project: str,location: str,endpoint: str,) -> str:
-        """Returns a fully-qualified endpoint string."""
-        return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, )
-
-    @staticmethod
-    def parse_endpoint_path(path: str) -> Dict[str,str]:
-        """Parses an endpoint path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/endpoints/(?P<endpoint>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def model_path(project: str,location: str,model: str,) -> str:
-        """Returns a fully-qualified model string."""
-        return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, )
-
-    @staticmethod
-    def parse_model_path(path: str) -> Dict[str,str]:
-        """Parses a model path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def model_evaluation_path(project: str,location: str,model: str,evaluation: str,) -> str:
-        """Returns a fully-qualified model_evaluation string."""
-        return "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}".format(project=project, location=location, model=model, evaluation=evaluation, )
-
-    @staticmethod
-    def parse_model_evaluation_path(path: str) -> Dict[str,str]:
-        """Parses a model_evaluation path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)/evaluations/(?P<evaluation>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def model_evaluation_slice_path(project: str,location: str,model: str,evaluation: str,slice: str,) -> str:
-        """Returns a fully-qualified model_evaluation_slice string."""
-        return "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}".format(project=project, location=location, model=model, evaluation=evaluation, slice=slice, )
-
-    @staticmethod
-    def parse_model_evaluation_slice_path(path: str) -> Dict[str,str]:
-        """Parses a model_evaluation_slice path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)/evaluations/(?P<evaluation>.+?)/slices/(?P<slice>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def training_pipeline_path(project: str,location: str,training_pipeline: str,) -> str:
-        """Returns a fully-qualified training_pipeline string."""
-        return "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, )
-
-    @staticmethod
-    def parse_training_pipeline_path(path: str) -> Dict[str,str]:
-        """Parses a training_pipeline path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/trainingPipelines/(?P<training_pipeline>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_billing_account_path(billing_account: str, ) -> str:
-        """Returns a fully-qualified billing_account string."""
-        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
-
-    @staticmethod
-    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
-        """Parse a billing_account path into its component segments."""
-        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_folder_path(folder: str, ) -> str:
-        """Returns a fully-qualified folder string."""
-        return "folders/{folder}".format(folder=folder, )
-
-    @staticmethod
-    def parse_common_folder_path(path: str) -> Dict[str,str]:
-        """Parse a folder path into its component segments."""
-        m = re.match(r"^folders/(?P<folder>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_organization_path(organization: str, ) -> str:
-        """Returns a fully-qualified organization string."""
-        return "organizations/{organization}".format(organization=organization, )
-
-    @staticmethod
-    def parse_common_organization_path(path: str) -> Dict[str,str]:
-        """Parse an organization path into its component segments."""
-        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_project_path(project: str, ) -> str:
-        """Returns a fully-qualified project string."""
-        return "projects/{project}".format(project=project, )
-
-    @staticmethod
-    def parse_common_project_path(path: str) -> Dict[str,str]:
-        """Parse a project path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_location_path(project: str, location: str, ) -> str:
-        """Returns a fully-qualified location string."""
-        return "projects/{project}/locations/{location}".format(project=project, location=location, )
-
-    @staticmethod
-    def parse_common_location_path(path: str) -> Dict[str,str]:
-        """Parse a location path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    def __init__(self, *,
-            credentials: Optional[ga_credentials.Credentials] = None,
-            transport: Union[str, ModelServiceTransport, None] = None,
-            client_options: Optional[client_options_lib.ClientOptions] = None,
-            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
-            ) -> None:
-        """Instantiates the model service client.
-
-        Args:
-            credentials (Optional[google.auth.credentials.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify the application to the service; if none
-                are specified, the client will attempt to ascertain the
-                credentials from the environment.
-            transport (Union[str, ModelServiceTransport]): The
-                transport to use. If set to None, a transport is chosen
-                automatically.
-            client_options (google.api_core.client_options.ClientOptions): Custom options for the
-                client. It won't take effect if a ``transport`` instance is provided.
-                (1) The ``api_endpoint`` property can be used to override the
-                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
-                environment variable can also be used to override the endpoint:
-                "always" (always use the default mTLS endpoint), "never" (always
-                use the default regular endpoint) and "auto" (auto switch to the
-                default mTLS endpoint if client certificate is present, this is
-                the default value). However, the ``api_endpoint`` property takes
-                precedence if provided.
-                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
-                is "true", then the ``client_cert_source`` property can be used
-                to provide client certificate for mutual TLS transport. If
-                not provided, the default SSL client certificate will be used if
-                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
-                set, no client certificate will be used.
-            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
-                The client info used to send a user-agent string along with
-                API requests.
If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, ModelServiceTransport): - # transport is a ModelServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - ) - - def upload_model(self, - request: model_service.UploadModelRequest = None, - *, - parent: str = None, - model: gca_model.Model = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Uploads a Model artifact into Vertex AI. - - Args: - request (google.cloud.aiplatform_v1beta1.types.UploadModelRequest): - The request object. Request message for - [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel]. - parent (str): - Required. The resource name of the Location into which - to upload the Model. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - model (google.cloud.aiplatform_v1beta1.types.Model): - Required. The Model to create. 
- This corresponds to the ``model`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.UploadModelResponse` - Response message of - [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] - operation. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, model]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.UploadModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.UploadModelRequest): - request = model_service.UploadModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if model is not None: - request.model = model - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.upload_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - model_service.UploadModelResponse, - metadata_type=model_service.UploadModelOperationMetadata, - ) - - # Done; return the response. - return response - - def get_model(self, - request: model_service.GetModelRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model.Model: - r"""Gets a Model. - - Args: - request (google.cloud.aiplatform_v1beta1.types.GetModelRequest): - The request object. Request message for - [ModelService.GetModel][google.cloud.aiplatform.v1beta1.ModelService.GetModel]. - name (str): - Required. The name of the Model resource. Format: - ``projects/{project}/locations/{location}/models/{model}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Model: - A trained machine learning Model. - """ - # Create or coerce a protobuf request object. 
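-        # Illustrative only: ``request`` may arrive as a plain dict, e.g.
-        #   {"name": "projects/my-project/locations/us-central1/models/123"}
-        # (placeholder name), which the isinstance check below converts into a
-        # model_service.GetModelRequest.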
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.GetModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.GetModelRequest): - request = model_service.GetModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_models(self, - request: model_service.ListModelsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelsPager: - r"""Lists Models in a Location. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListModelsRequest): - The request object. Request message for - [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels]. - parent (str): - Required. The resource name of the Location to list the - Models from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelsPager: - Response message for - [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.ListModelsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.ListModelsRequest): - request = model_service.ListModelsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_models] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListModelsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_model(self, - request: model_service.UpdateModelRequest = None, - *, - model: gca_model.Model = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_model.Model: - r"""Updates a Model. - - Args: - request (google.cloud.aiplatform_v1beta1.types.UpdateModelRequest): - The request object. Request message for - [ModelService.UpdateModel][google.cloud.aiplatform.v1beta1.ModelService.UpdateModel]. - model (google.cloud.aiplatform_v1beta1.types.Model): - Required. The Model which replaces - the resource on the server. - - This corresponds to the ``model`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the resource. For - the ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Model: - A trained machine learning Model. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([model, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.UpdateModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.UpdateModelRequest): - request = model_service.UpdateModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if model is not None: - request.model = model - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_model] - - # Certain fields should be provided within the metadata header; - # add these here. 
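-        # Illustrative only: the routing header assembled below looks like
-        #   ("x-goog-request-params", "model.name=projects/my-project/locations/us-central1/models/123")
-        # (placeholder name), which lets the backend route the call to the right resource.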
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("model.name", request.model.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_model(self, - request: model_service.DeleteModelRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a Model. - Note: Model can only be deleted if there are no - DeployedModels created from it. - - Args: - request (google.cloud.aiplatform_v1beta1.types.DeleteModelRequest): - The request object. Request message for - [ModelService.DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel]. - name (str): - Required. The name of the Model resource to be deleted. - Format: - ``projects/{project}/locations/{location}/models/{model}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.DeleteModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.DeleteModelRequest): - request = model_service.DeleteModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
- return response - - def export_model(self, - request: model_service.ExportModelRequest = None, - *, - name: str = None, - output_config: model_service.ExportModelRequest.OutputConfig = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Exports a trained, exportable, Model to a location specified by - the user. A Model is considered to be exportable if it has at - least one [supported export - format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats]. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ExportModelRequest): - The request object. Request message for - [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel]. - name (str): - Required. The resource name of the Model to export. - Format: - ``projects/{project}/locations/{location}/models/{model}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - output_config (google.cloud.aiplatform_v1beta1.types.ExportModelRequest.OutputConfig): - Required. The desired output location - and configuration. - - This corresponds to the ``output_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.ExportModelResponse` - Response message of - [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel] - operation. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, output_config]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.ExportModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.ExportModelRequest): - request = model_service.ExportModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if output_config is not None: - request.output_config = output_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.export_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
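-        # Illustrative only: the raw RPC result is a google.longrunning.Operation;
-        # after wrapping, ``response.result()`` blocks until the export finishes and
-        # returns a model_service.ExportModelResponse, while ``response.metadata``
-        # carries the ExportModelOperationMetadata.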
- response = gac_operation.from_gapic( - response, - self._transport.operations_client, - model_service.ExportModelResponse, - metadata_type=model_service.ExportModelOperationMetadata, - ) - - # Done; return the response. - return response - - def get_model_evaluation(self, - request: model_service.GetModelEvaluationRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model_evaluation.ModelEvaluation: - r"""Gets a ModelEvaluation. - - Args: - request (google.cloud.aiplatform_v1beta1.types.GetModelEvaluationRequest): - The request object. Request message for - [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation]. - name (str): - Required. The name of the ModelEvaluation resource. - Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.ModelEvaluation: - A collection of metrics calculated by - comparing Model's predictions on all of - the test data against annotations from - the test data. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.GetModelEvaluationRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.GetModelEvaluationRequest): - request = model_service.GetModelEvaluationRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_model_evaluation] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_model_evaluations(self, - request: model_service.ListModelEvaluationsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelEvaluationsPager: - r"""Lists ModelEvaluations in a Model. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest): - The request object. Request message for - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. - parent (str): - Required. 
The resource name of the Model to list the - ModelEvaluations from. Format: - ``projects/{project}/locations/{location}/models/{model}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationsPager: - Response message for - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.ListModelEvaluationsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.ListModelEvaluationsRequest): - request = model_service.ListModelEvaluationsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_model_evaluations] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListModelEvaluationsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_model_evaluation_slice(self, - request: model_service.GetModelEvaluationSliceRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model_evaluation_slice.ModelEvaluationSlice: - r"""Gets a ModelEvaluationSlice. - - Args: - request (google.cloud.aiplatform_v1beta1.types.GetModelEvaluationSliceRequest): - The request object. Request message for - [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluationSlice]. - name (str): - Required. The name of the ModelEvaluationSlice resource. - Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice: - A collection of metrics calculated by - comparing Model's predictions on a slice - of the test data against ground truth - annotations. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.GetModelEvaluationSliceRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.GetModelEvaluationSliceRequest): - request = model_service.GetModelEvaluationSliceRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_model_evaluation_slice] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_model_evaluation_slices(self, - request: model_service.ListModelEvaluationSlicesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelEvaluationSlicesPager: - r"""Lists ModelEvaluationSlices in a ModelEvaluation. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest): - The request object. Request message for - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. - parent (str): - Required. The resource name of the ModelEvaluation to - list the ModelEvaluationSlices from. Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationSlicesPager: - Response message for - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
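-        # Illustrative only: supplying both call styles at once, e.g.
-        #   client.list_model_evaluation_slices(request=req, parent="projects/my-project/...")
-        # (placeholder name), trips the ValueError below; ``request`` and the
-        # flattened ``parent`` argument are mutually exclusive.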
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.ListModelEvaluationSlicesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.ListModelEvaluationSlicesRequest): - request = model_service.ListModelEvaluationSlicesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_model_evaluation_slices] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListModelEvaluationSlicesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "ModelServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/pagers.py deleted file mode 100644 index 1825a3c784..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/pagers.py +++ /dev/null @@ -1,387 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.cloud.aiplatform_v1beta1.types import model -from google.cloud.aiplatform_v1beta1.types import model_evaluation -from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice -from google.cloud.aiplatform_v1beta1.types import model_service - - -class ListModelsPager: - """A pager for iterating through ``list_models`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListModelsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``models`` field. 
- - If there are more pages, the ``__iter__`` method will make additional - ``ListModels`` requests and continue to iterate - through the ``models`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., model_service.ListModelsResponse], - request: model_service.ListModelsRequest, - response: model_service.ListModelsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListModelsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListModelsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = model_service.ListModelsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[model_service.ListModelsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[model.Model]: - for page in self.pages: - yield from page.models - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelsAsyncPager: - """A pager for iterating through ``list_models`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListModelsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``models`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListModels`` requests and continue to iterate - through the ``models`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[model_service.ListModelsResponse]], - request: model_service.ListModelsRequest, - response: model_service.ListModelsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListModelsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListModelsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = model_service.ListModelsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[model_service.ListModelsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[model.Model]: - async def async_generator(): - async for page in self.pages: - for response in page.models: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelEvaluationsPager: - """A pager for iterating through ``list_model_evaluations`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``model_evaluations`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListModelEvaluations`` requests and continue to iterate - through the ``model_evaluations`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., model_service.ListModelEvaluationsResponse], - request: model_service.ListModelEvaluationsRequest, - response: model_service.ListModelEvaluationsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = model_service.ListModelEvaluationsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[model_service.ListModelEvaluationsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[model_evaluation.ModelEvaluation]: - for page in self.pages: - yield from page.model_evaluations - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelEvaluationsAsyncPager: - """A pager for iterating through ``list_model_evaluations`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``model_evaluations`` field. 
- - If there are more pages, the ``__aiter__`` method will make additional - ``ListModelEvaluations`` requests and continue to iterate - through the ``model_evaluations`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[model_service.ListModelEvaluationsResponse]], - request: model_service.ListModelEvaluationsRequest, - response: model_service.ListModelEvaluationsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = model_service.ListModelEvaluationsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[model_service.ListModelEvaluationsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[model_evaluation.ModelEvaluation]: - async def async_generator(): - async for page in self.pages: - for response in page.model_evaluations: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelEvaluationSlicesPager: - """A pager for iterating through ``list_model_evaluation_slices`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``model_evaluation_slices`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListModelEvaluationSlices`` requests and continue to iterate - through the ``model_evaluation_slices`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., model_service.ListModelEvaluationSlicesResponse], - request: model_service.ListModelEvaluationSlicesRequest, - response: model_service.ListModelEvaluationSlicesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse): - The initial response object. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = model_service.ListModelEvaluationSlicesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[model_service.ListModelEvaluationSlicesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[model_evaluation_slice.ModelEvaluationSlice]: - for page in self.pages: - yield from page.model_evaluation_slices - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelEvaluationSlicesAsyncPager: - """A pager for iterating through ``list_model_evaluation_slices`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``model_evaluation_slices`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListModelEvaluationSlices`` requests and continue to iterate - through the ``model_evaluation_slices`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[model_service.ListModelEvaluationSlicesResponse]], - request: model_service.ListModelEvaluationSlicesRequest, - response: model_service.ListModelEvaluationSlicesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = model_service.ListModelEvaluationSlicesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[model_service.ListModelEvaluationSlicesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[model_evaluation_slice.ModelEvaluationSlice]: - async def async_generator(): - async for page in self.pages: - for response in page.model_evaluation_slices: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/__init__.py deleted file mode 100644 index 0f09224d3c..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import ModelServiceTransport -from .grpc import ModelServiceGrpcTransport -from .grpc_asyncio import ModelServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[ModelServiceTransport]] -_transport_registry['grpc'] = ModelServiceGrpcTransport -_transport_registry['grpc_asyncio'] = ModelServiceGrpcAsyncIOTransport - -__all__ = ( - 'ModelServiceTransport', - 'ModelServiceGrpcTransport', - 'ModelServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py deleted file mode 100644 index f8b2d01811..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py +++ /dev/null @@ -1,305 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import model -from google.cloud.aiplatform_v1beta1.types import model as gca_model -from google.cloud.aiplatform_v1beta1.types import model_evaluation -from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice -from google.cloud.aiplatform_v1beta1.types import model_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class ModelServiceTransport(abc.ABC): - """Abstract transport class for ModelService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. 
- self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # TODO(busunkim): This method is in the base transport - # to avoid duplicating code across the transport classes. These functions - # should be deleted once the minimum required version of google-auth is increased. - - # TODO: Remove this function once google-auth >= 1.25.0 is required - @classmethod - def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: - """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" - - scopes_kwargs = {} - - if _GOOGLE_AUTH_VERSION and ( - packaging.version.parse(_GOOGLE_AUTH_VERSION) - >= packaging.version.parse("1.25.0") - ): - scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} - else: - scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} - - return scopes_kwargs - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
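# [Editor's note -- illustrative, not generated code] gapic_v1.method.wrap_method
# returns a callable that applies the default_timeout declared below and attaches
# the client_info user-agent metadata; explicit per-call arguments still win,
# e.g. `wrapped(request, timeout=30.0)` overrides the 5.0-second default.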
- self._wrapped_methods = { - self.upload_model: gapic_v1.method.wrap_method( - self.upload_model, - default_timeout=5.0, - client_info=client_info, - ), - self.get_model: gapic_v1.method.wrap_method( - self.get_model, - default_timeout=5.0, - client_info=client_info, - ), - self.list_models: gapic_v1.method.wrap_method( - self.list_models, - default_timeout=5.0, - client_info=client_info, - ), - self.update_model: gapic_v1.method.wrap_method( - self.update_model, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_model: gapic_v1.method.wrap_method( - self.delete_model, - default_timeout=5.0, - client_info=client_info, - ), - self.export_model: gapic_v1.method.wrap_method( - self.export_model, - default_timeout=5.0, - client_info=client_info, - ), - self.get_model_evaluation: gapic_v1.method.wrap_method( - self.get_model_evaluation, - default_timeout=5.0, - client_info=client_info, - ), - self.list_model_evaluations: gapic_v1.method.wrap_method( - self.list_model_evaluations, - default_timeout=5.0, - client_info=client_info, - ), - self.get_model_evaluation_slice: gapic_v1.method.wrap_method( - self.get_model_evaluation_slice, - default_timeout=5.0, - client_info=client_info, - ), - self.list_model_evaluation_slices: gapic_v1.method.wrap_method( - self.list_model_evaluation_slices, - default_timeout=5.0, - client_info=client_info, - ), - } - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def upload_model(self) -> Callable[ - [model_service.UploadModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_model(self) -> Callable[ - [model_service.GetModelRequest], - Union[ - model.Model, - Awaitable[model.Model] - ]]: - raise NotImplementedError() - - @property - def list_models(self) -> Callable[ - [model_service.ListModelsRequest], - Union[ - model_service.ListModelsResponse, - Awaitable[model_service.ListModelsResponse] - ]]: - raise NotImplementedError() - - @property - def update_model(self) -> Callable[ - [model_service.UpdateModelRequest], - Union[ - gca_model.Model, - Awaitable[gca_model.Model] - ]]: - raise NotImplementedError() - - @property - def delete_model(self) -> Callable[ - [model_service.DeleteModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def export_model(self) -> Callable[ - [model_service.ExportModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_model_evaluation(self) -> Callable[ - [model_service.GetModelEvaluationRequest], - Union[ - model_evaluation.ModelEvaluation, - Awaitable[model_evaluation.ModelEvaluation] - ]]: - raise NotImplementedError() - - @property - def list_model_evaluations(self) -> Callable[ - [model_service.ListModelEvaluationsRequest], - Union[ - model_service.ListModelEvaluationsResponse, - Awaitable[model_service.ListModelEvaluationsResponse] - ]]: - raise NotImplementedError() - - @property - def get_model_evaluation_slice(self) -> Callable[ - [model_service.GetModelEvaluationSliceRequest], - Union[ - model_evaluation_slice.ModelEvaluationSlice, - Awaitable[model_evaluation_slice.ModelEvaluationSlice] - ]]: - raise NotImplementedError() - - @property - def list_model_evaluation_slices(self) -> Callable[ - 
[model_service.ListModelEvaluationSlicesRequest], - Union[ - model_service.ListModelEvaluationSlicesResponse, - Awaitable[model_service.ListModelEvaluationSlicesResponse] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'ModelServiceTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py deleted file mode 100644 index e699f90533..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py +++ /dev/null @@ -1,514 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.api_core import gapic_v1 # type: ignore -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1beta1.types import model -from google.cloud.aiplatform_v1beta1.types import model as gca_model -from google.cloud.aiplatform_v1beta1.types import model_evaluation -from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice -from google.cloud.aiplatform_v1beta1.types import model_service -from google.longrunning import operations_pb2 # type: ignore -from .base import ModelServiceTransport, DEFAULT_CLIENT_INFO - - -class ModelServiceGrpcTransport(ModelServiceTransport): - """gRPC backend transport for ModelService. - - A service for managing Vertex AI's machine learning Models. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
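# [Editor's aside -- hypothetical callback names, not generated code] a caller
# opting into mutual TLS without the deprecated arguments would instead pass:
#
#     transport = ModelServiceGrpcTransport(
#         client_cert_source_for_mtls=lambda: (cert_pem_bytes, key_pem_bytes),
#     )
#
# which is consumed by the `client_cert_source_for_mtls` branch a few lines below.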
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): An optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. 
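# [Editor's note -- illustrative] upload_model, delete_model and export_model
# return google.longrunning Operations; callers poll them through this cached
# client, e.g. `op = operations_client.get_operation(name)` until `op.done` is True.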
- return self._operations_client - - @property - def upload_model(self) -> Callable[ - [model_service.UploadModelRequest], - operations_pb2.Operation]: - r"""Return a callable for the upload model method over gRPC. - - Uploads a Model artifact into Vertex AI. - - Returns: - Callable[[~.UploadModelRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'upload_model' not in self._stubs: - self._stubs['upload_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/UploadModel', - request_serializer=model_service.UploadModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['upload_model'] - - @property - def get_model(self) -> Callable[ - [model_service.GetModelRequest], - model.Model]: - r"""Return a callable for the get model method over gRPC. - - Gets a Model. - - Returns: - Callable[[~.GetModelRequest], - ~.Model]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_model' not in self._stubs: - self._stubs['get_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/GetModel', - request_serializer=model_service.GetModelRequest.serialize, - response_deserializer=model.Model.deserialize, - ) - return self._stubs['get_model'] - - @property - def list_models(self) -> Callable[ - [model_service.ListModelsRequest], - model_service.ListModelsResponse]: - r"""Return a callable for the list models method over gRPC. - - Lists Models in a Location. - - Returns: - Callable[[~.ListModelsRequest], - ~.ListModelsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_models' not in self._stubs: - self._stubs['list_models'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/ListModels', - request_serializer=model_service.ListModelsRequest.serialize, - response_deserializer=model_service.ListModelsResponse.deserialize, - ) - return self._stubs['list_models'] - - @property - def update_model(self) -> Callable[ - [model_service.UpdateModelRequest], - gca_model.Model]: - r"""Return a callable for the update model method over gRPC. - - Updates a Model. - - Returns: - Callable[[~.UpdateModelRequest], - ~.Model]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
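# [Editor's note -- illustrative] each stub property below follows the same
# memoization pattern: the unary-unary callable is created once per channel,
# stored in self._stubs, and served from that cache on every later access.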
- if 'update_model' not in self._stubs: - self._stubs['update_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/UpdateModel', - request_serializer=model_service.UpdateModelRequest.serialize, - response_deserializer=gca_model.Model.deserialize, - ) - return self._stubs['update_model'] - - @property - def delete_model(self) -> Callable[ - [model_service.DeleteModelRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete model method over gRPC. - - Deletes a Model. - Note: Model can only be deleted if there are no - DeployedModels created from it. - - Returns: - Callable[[~.DeleteModelRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_model' not in self._stubs: - self._stubs['delete_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/DeleteModel', - request_serializer=model_service.DeleteModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_model'] - - @property - def export_model(self) -> Callable[ - [model_service.ExportModelRequest], - operations_pb2.Operation]: - r"""Return a callable for the export model method over gRPC. - - Exports a trained, exportable Model to a location specified by - the user. A Model is considered to be exportable if it has at - least one [supported export - format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats]. - - Returns: - Callable[[~.ExportModelRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'export_model' not in self._stubs: - self._stubs['export_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/ExportModel', - request_serializer=model_service.ExportModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_model'] - - @property - def get_model_evaluation(self) -> Callable[ - [model_service.GetModelEvaluationRequest], - model_evaluation.ModelEvaluation]: - r"""Return a callable for the get model evaluation method over gRPC. - - Gets a ModelEvaluation. - - Returns: - Callable[[~.GetModelEvaluationRequest], - ~.ModelEvaluation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_model_evaluation' not in self._stubs: - self._stubs['get_model_evaluation'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/GetModelEvaluation', - request_serializer=model_service.GetModelEvaluationRequest.serialize, - response_deserializer=model_evaluation.ModelEvaluation.deserialize, - ) - return self._stubs['get_model_evaluation'] - - @property - def list_model_evaluations(self) -> Callable[ - [model_service.ListModelEvaluationsRequest], - model_service.ListModelEvaluationsResponse]: - r"""Return a callable for the list model evaluations method over gRPC. - - Lists ModelEvaluations in a Model. - - Returns: - Callable[[~.ListModelEvaluationsRequest], - ~.ListModelEvaluationsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_model_evaluations' not in self._stubs: - self._stubs['list_model_evaluations'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/ListModelEvaluations', - request_serializer=model_service.ListModelEvaluationsRequest.serialize, - response_deserializer=model_service.ListModelEvaluationsResponse.deserialize, - ) - return self._stubs['list_model_evaluations'] - - @property - def get_model_evaluation_slice(self) -> Callable[ - [model_service.GetModelEvaluationSliceRequest], - model_evaluation_slice.ModelEvaluationSlice]: - r"""Return a callable for the get model evaluation slice method over gRPC. - - Gets a ModelEvaluationSlice. - - Returns: - Callable[[~.GetModelEvaluationSliceRequest], - ~.ModelEvaluationSlice]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_model_evaluation_slice' not in self._stubs: - self._stubs['get_model_evaluation_slice'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/GetModelEvaluationSlice', - request_serializer=model_service.GetModelEvaluationSliceRequest.serialize, - response_deserializer=model_evaluation_slice.ModelEvaluationSlice.deserialize, - ) - return self._stubs['get_model_evaluation_slice'] - - @property - def list_model_evaluation_slices(self) -> Callable[ - [model_service.ListModelEvaluationSlicesRequest], - model_service.ListModelEvaluationSlicesResponse]: - r"""Return a callable for the list model evaluation slices method over gRPC. - - Lists ModelEvaluationSlices in a ModelEvaluation. - - Returns: - Callable[[~.ListModelEvaluationSlicesRequest], - ~.ListModelEvaluationSlicesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_model_evaluation_slices' not in self._stubs: - self._stubs['list_model_evaluation_slices'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/ListModelEvaluationSlices', - request_serializer=model_service.ListModelEvaluationSlicesRequest.serialize, - response_deserializer=model_service.ListModelEvaluationSlicesResponse.deserialize, - ) - return self._stubs['list_model_evaluation_slices'] - - -__all__ = ( - 'ModelServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py deleted file mode 100644 index 0fdae7300c..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,518 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1beta1.types import model -from google.cloud.aiplatform_v1beta1.types import model as gca_model -from google.cloud.aiplatform_v1beta1.types import model_evaluation -from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice -from google.cloud.aiplatform_v1beta1.types import model_service -from google.longrunning import operations_pb2 # type: ignore -from .base import ModelServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import ModelServiceGrpcTransport - - -class ModelServiceGrpcAsyncIOTransport(ModelServiceTransport): - """gRPC AsyncIO backend transport for ModelService. - - A service for managing Vertex AI's machine learning Models. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. 
- credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): An optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): An optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. 
It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. 
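# [Editor's note -- illustrative] unlike the sync transport earlier in this
# patch, OperationsAsyncClient exposes coroutines, so polling a long-running
# operation becomes e.g. `op = await operations_client.get_operation(name)`.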
- return self._operations_client - - @property - def upload_model(self) -> Callable[ - [model_service.UploadModelRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the upload model method over gRPC. - - Uploads a Model artifact into Vertex AI. - - Returns: - Callable[[~.UploadModelRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'upload_model' not in self._stubs: - self._stubs['upload_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/UploadModel', - request_serializer=model_service.UploadModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['upload_model'] - - @property - def get_model(self) -> Callable[ - [model_service.GetModelRequest], - Awaitable[model.Model]]: - r"""Return a callable for the get model method over gRPC. - - Gets a Model. - - Returns: - Callable[[~.GetModelRequest], - Awaitable[~.Model]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_model' not in self._stubs: - self._stubs['get_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/GetModel', - request_serializer=model_service.GetModelRequest.serialize, - response_deserializer=model.Model.deserialize, - ) - return self._stubs['get_model'] - - @property - def list_models(self) -> Callable[ - [model_service.ListModelsRequest], - Awaitable[model_service.ListModelsResponse]]: - r"""Return a callable for the list models method over gRPC. - - Lists Models in a Location. - - Returns: - Callable[[~.ListModelsRequest], - Awaitable[~.ListModelsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_models' not in self._stubs: - self._stubs['list_models'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/ListModels', - request_serializer=model_service.ListModelsRequest.serialize, - response_deserializer=model_service.ListModelsResponse.deserialize, - ) - return self._stubs['list_models'] - - @property - def update_model(self) -> Callable[ - [model_service.UpdateModelRequest], - Awaitable[gca_model.Model]]: - r"""Return a callable for the update model method over gRPC. - - Updates a Model. - - Returns: - Callable[[~.UpdateModelRequest], - Awaitable[~.Model]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_model' not in self._stubs: - self._stubs['update_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/UpdateModel', - request_serializer=model_service.UpdateModelRequest.serialize, - response_deserializer=gca_model.Model.deserialize, - ) - return self._stubs['update_model'] - - @property - def delete_model(self) -> Callable[ - [model_service.DeleteModelRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete model method over gRPC. - - Deletes a Model. - Note: Model can only be deleted if there are no - DeployedModels created from it. - - Returns: - Callable[[~.DeleteModelRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_model' not in self._stubs: - self._stubs['delete_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/DeleteModel', - request_serializer=model_service.DeleteModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_model'] - - @property - def export_model(self) -> Callable[ - [model_service.ExportModelRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the export model method over gRPC. - - Exports a trained, exportable Model to a location specified by - the user. A Model is considered to be exportable if it has at - least one [supported export - format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats]. - - Returns: - Callable[[~.ExportModelRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'export_model' not in self._stubs: - self._stubs['export_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/ExportModel', - request_serializer=model_service.ExportModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_model'] - - @property - def get_model_evaluation(self) -> Callable[ - [model_service.GetModelEvaluationRequest], - Awaitable[model_evaluation.ModelEvaluation]]: - r"""Return a callable for the get model evaluation method over gRPC. - - Gets a ModelEvaluation. - - Returns: - Callable[[~.GetModelEvaluationRequest], - Awaitable[~.ModelEvaluation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each.
- if 'get_model_evaluation' not in self._stubs: - self._stubs['get_model_evaluation'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/GetModelEvaluation', - request_serializer=model_service.GetModelEvaluationRequest.serialize, - response_deserializer=model_evaluation.ModelEvaluation.deserialize, - ) - return self._stubs['get_model_evaluation'] - - @property - def list_model_evaluations(self) -> Callable[ - [model_service.ListModelEvaluationsRequest], - Awaitable[model_service.ListModelEvaluationsResponse]]: - r"""Return a callable for the list model evaluations method over gRPC. - - Lists ModelEvaluations in a Model. - - Returns: - Callable[[~.ListModelEvaluationsRequest], - Awaitable[~.ListModelEvaluationsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_model_evaluations' not in self._stubs: - self._stubs['list_model_evaluations'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/ListModelEvaluations', - request_serializer=model_service.ListModelEvaluationsRequest.serialize, - response_deserializer=model_service.ListModelEvaluationsResponse.deserialize, - ) - return self._stubs['list_model_evaluations'] - - @property - def get_model_evaluation_slice(self) -> Callable[ - [model_service.GetModelEvaluationSliceRequest], - Awaitable[model_evaluation_slice.ModelEvaluationSlice]]: - r"""Return a callable for the get model evaluation slice method over gRPC. - - Gets a ModelEvaluationSlice. - - Returns: - Callable[[~.GetModelEvaluationSliceRequest], - Awaitable[~.ModelEvaluationSlice]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_model_evaluation_slice' not in self._stubs: - self._stubs['get_model_evaluation_slice'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/GetModelEvaluationSlice', - request_serializer=model_service.GetModelEvaluationSliceRequest.serialize, - response_deserializer=model_evaluation_slice.ModelEvaluationSlice.deserialize, - ) - return self._stubs['get_model_evaluation_slice'] - - @property - def list_model_evaluation_slices(self) -> Callable[ - [model_service.ListModelEvaluationSlicesRequest], - Awaitable[model_service.ListModelEvaluationSlicesResponse]]: - r"""Return a callable for the list model evaluation slices method over gRPC. - - Lists ModelEvaluationSlices in a ModelEvaluation. - - Returns: - Callable[[~.ListModelEvaluationSlicesRequest], - Awaitable[~.ListModelEvaluationSlicesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_model_evaluation_slices' not in self._stubs: - self._stubs['list_model_evaluation_slices'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/ListModelEvaluationSlices', - request_serializer=model_service.ListModelEvaluationSlicesRequest.serialize, - response_deserializer=model_service.ListModelEvaluationSlicesResponse.deserialize, - ) - return self._stubs['list_model_evaluation_slices'] - - -__all__ = ( - 'ModelServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/__init__.py deleted file mode 100644 index 539616023d..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import PipelineServiceClient -from .async_client import PipelineServiceAsyncClient - -__all__ = ( - 'PipelineServiceClient', - 'PipelineServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py deleted file mode 100644 index 98a7435d45..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py +++ /dev/null @@ -1,1069 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
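As a usage sketch for the transport shown above (assuming Application Default Credentials are available in the environment): construction takes no required arguments, and the stub properties are built lazily and cached in self._stubs on first access.

    from google.cloud.aiplatform_v1beta1.services.model_service.transports.grpc_asyncio import (
        ModelServiceGrpcAsyncIOTransport,
    )

    # With no arguments, credentials are resolved from the environment and a
    # channel to aiplatform.googleapis.com is created.
    transport = ModelServiceGrpcAsyncIOTransport()
    upload_model_rpc = transport.upload_model  # created once, then cached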
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.pipeline_service import pagers -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import model -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.cloud.aiplatform_v1beta1.types import pipeline_job -from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job -from google.cloud.aiplatform_v1beta1.types import pipeline_service -from google.cloud.aiplatform_v1beta1.types import pipeline_state -from google.cloud.aiplatform_v1beta1.types import training_pipeline -from google.cloud.aiplatform_v1beta1.types import training_pipeline as gca_training_pipeline -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from .transports.base import PipelineServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import PipelineServiceGrpcAsyncIOTransport -from .client import PipelineServiceClient - - -class PipelineServiceAsyncClient: - """A service for creating and managing Vertex AI's pipelines. This - includes both ``TrainingPipeline`` resources (used for AutoML and - custom training) and ``PipelineJob`` resources (used for Vertex - Pipelines). 
- """ - - _client: PipelineServiceClient - - DEFAULT_ENDPOINT = PipelineServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = PipelineServiceClient.DEFAULT_MTLS_ENDPOINT - - artifact_path = staticmethod(PipelineServiceClient.artifact_path) - parse_artifact_path = staticmethod(PipelineServiceClient.parse_artifact_path) - context_path = staticmethod(PipelineServiceClient.context_path) - parse_context_path = staticmethod(PipelineServiceClient.parse_context_path) - custom_job_path = staticmethod(PipelineServiceClient.custom_job_path) - parse_custom_job_path = staticmethod(PipelineServiceClient.parse_custom_job_path) - endpoint_path = staticmethod(PipelineServiceClient.endpoint_path) - parse_endpoint_path = staticmethod(PipelineServiceClient.parse_endpoint_path) - execution_path = staticmethod(PipelineServiceClient.execution_path) - parse_execution_path = staticmethod(PipelineServiceClient.parse_execution_path) - model_path = staticmethod(PipelineServiceClient.model_path) - parse_model_path = staticmethod(PipelineServiceClient.parse_model_path) - network_path = staticmethod(PipelineServiceClient.network_path) - parse_network_path = staticmethod(PipelineServiceClient.parse_network_path) - pipeline_job_path = staticmethod(PipelineServiceClient.pipeline_job_path) - parse_pipeline_job_path = staticmethod(PipelineServiceClient.parse_pipeline_job_path) - training_pipeline_path = staticmethod(PipelineServiceClient.training_pipeline_path) - parse_training_pipeline_path = staticmethod(PipelineServiceClient.parse_training_pipeline_path) - common_billing_account_path = staticmethod(PipelineServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(PipelineServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(PipelineServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(PipelineServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(PipelineServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(PipelineServiceClient.parse_common_organization_path) - common_project_path = staticmethod(PipelineServiceClient.common_project_path) - parse_common_project_path = staticmethod(PipelineServiceClient.parse_common_project_path) - common_location_path = staticmethod(PipelineServiceClient.common_location_path) - parse_common_location_path = staticmethod(PipelineServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PipelineServiceAsyncClient: The constructed client. - """ - return PipelineServiceClient.from_service_account_info.__func__(PipelineServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PipelineServiceAsyncClient: The constructed client. 
- """ - return PipelineServiceClient.from_service_account_file.__func__(PipelineServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> PipelineServiceTransport: - """Returns the transport used by the client instance. - - Returns: - PipelineServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(PipelineServiceClient).get_transport_class, type(PipelineServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, PipelineServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the pipeline service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.PipelineServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = PipelineServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_training_pipeline(self, - request: pipeline_service.CreateTrainingPipelineRequest = None, - *, - parent: str = None, - training_pipeline: gca_training_pipeline.TrainingPipeline = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_training_pipeline.TrainingPipeline: - r"""Creates a TrainingPipeline. A created - TrainingPipeline right away will be attempted to be run. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.CreateTrainingPipelineRequest`): - The request object. Request message for - [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline]. - parent (:class:`str`): - Required. The resource name of the Location to create - the TrainingPipeline in. 
Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - training_pipeline (:class:`google.cloud.aiplatform_v1beta1.types.TrainingPipeline`): - Required. The TrainingPipeline to - create. - - This corresponds to the ``training_pipeline`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TrainingPipeline: - The TrainingPipeline orchestrates tasks associated with training a Model. It - always executes the training task, and optionally may - also export data from Vertex AI's Dataset which - becomes the training input, - [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] - the Model to Vertex AI, and evaluate the Model. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, training_pipeline]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.CreateTrainingPipelineRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if training_pipeline is not None: - request.training_pipeline = training_pipeline - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_training_pipeline, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_training_pipeline(self, - request: pipeline_service.GetTrainingPipelineRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> training_pipeline.TrainingPipeline: - r"""Gets a TrainingPipeline. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetTrainingPipelineRequest`): - The request object. Request message for - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline]. - name (:class:`str`): - Required. The name of the TrainingPipeline resource. - Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TrainingPipeline: - The TrainingPipeline orchestrates tasks associated with training a Model. It - always executes the training task, and optionally may - also export data from Vertex AI's Dataset which - becomes the training input, - [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] - the Model to Vertex AI, and evaluate the Model. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.GetTrainingPipelineRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_training_pipeline, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_training_pipelines(self, - request: pipeline_service.ListTrainingPipelinesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTrainingPipelinesAsyncPager: - r"""Lists TrainingPipelines in a Location. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesRequest`): - The request object. Request message for - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines]. - parent (:class:`str`): - Required. The resource name of the Location to list the - TrainingPipelines from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListTrainingPipelinesAsyncPager: - Response message for - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.ListTrainingPipelinesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_training_pipelines, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListTrainingPipelinesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_training_pipeline(self, - request: pipeline_service.DeleteTrainingPipelineRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a TrainingPipeline. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteTrainingPipelineRequest`): - The request object. Request message for - [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline]. - name (:class:`str`): - Required. The name of the TrainingPipeline resource to - be deleted. Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.DeleteTrainingPipelineRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_training_pipeline, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def cancel_training_pipeline(self, - request: pipeline_service.CancelTrainingPipelineRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a TrainingPipeline. Starts asynchronous cancellation on - the TrainingPipeline. The server makes a best effort to cancel - the pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. On - successful cancellation, the TrainingPipeline is not deleted; - instead it becomes a pipeline with a - [TrainingPipeline.error][google.cloud.aiplatform.v1beta1.TrainingPipeline.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state] - is set to ``CANCELLED``. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.CancelTrainingPipelineRequest`): - The request object. Request message for - [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline]. - name (:class:`str`): - Required. The name of the TrainingPipeline to cancel. - Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.CancelTrainingPipelineRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.cancel_training_pipeline, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def create_pipeline_job(self, - request: pipeline_service.CreatePipelineJobRequest = None, - *, - parent: str = None, - pipeline_job: gca_pipeline_job.PipelineJob = None, - pipeline_job_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_pipeline_job.PipelineJob: - r"""Creates a PipelineJob. A PipelineJob will run - immediately when created. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.CreatePipelineJobRequest`): - The request object. Request message for - [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CreatePipelineJob]. - parent (:class:`str`): - Required. The resource name of the Location to create - the PipelineJob in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - pipeline_job (:class:`google.cloud.aiplatform_v1beta1.types.PipelineJob`): - Required. The PipelineJob to create. - This corresponds to the ``pipeline_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - pipeline_job_id (:class:`str`): - The ID to use for the PipelineJob, which will become the - final component of the PipelineJob name. If not - provided, an ID will be automatically generated. - - This value should be less than 128 characters, and valid - characters are /[a-z][0-9]-/. - - This corresponds to the ``pipeline_job_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.PipelineJob: - An instance of a machine learning - PipelineJob. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
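A sketch of the flattened calling convention for create_pipeline_job; the display name, parent, and job ID are placeholders, and a real PipelineJob needs a full pipeline spec:

    from google.cloud.aiplatform_v1beta1.types import PipelineJob

    async def create_job(client, parent):
        job = PipelineJob(display_name="demo-job")
        return await client.create_pipeline_job(
            parent=parent,
            pipeline_job=job,
            # Under 128 characters, matching /[a-z][0-9]-/.
            pipeline_job_id="demo-job-001",
        )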
- has_flattened_params = any([parent, pipeline_job, pipeline_job_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.CreatePipelineJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if pipeline_job is not None: - request.pipeline_job = pipeline_job - if pipeline_job_id is not None: - request.pipeline_job_id = pipeline_job_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_pipeline_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_pipeline_job(self, - request: pipeline_service.GetPipelineJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pipeline_job.PipelineJob: - r"""Gets a PipelineJob. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetPipelineJobRequest`): - The request object. Request message for - [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob]. - name (:class:`str`): - Required. The name of the PipelineJob resource. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.PipelineJob: - An instance of a machine learning - PipelineJob. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.GetPipelineJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_pipeline_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_pipeline_jobs(self, - request: pipeline_service.ListPipelineJobsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListPipelineJobsAsyncPager: - r"""Lists PipelineJobs in a Location. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest`): - The request object. Request message for - [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs]. - parent (:class:`str`): - Required. The resource name of the Location to list the - PipelineJobs from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListPipelineJobsAsyncPager: - Response message for - [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.ListPipelineJobsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_pipeline_jobs, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListPipelineJobsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_pipeline_job(self, - request: pipeline_service.DeletePipelineJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a PipelineJob. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.DeletePipelineJobRequest`): - The request object. 
Request message for - [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.DeletePipelineJob]. - name (:class:`str`): - Required. The name of the PipelineJob resource to be - deleted. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.DeletePipelineJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_pipeline_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def cancel_pipeline_job(self, - request: pipeline_service.CancelPipelineJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a PipelineJob. Starts asynchronous cancellation on the - PipelineJob. The server makes a best effort to cancel the - pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. 
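Since cancellation is best-effort, a caller that needs to know the outcome can poll get_pipeline_job until a terminal state is reached; a sketch with a placeholder name and an arbitrary poll interval:

    import asyncio

    from google.cloud.aiplatform_v1beta1.types import PipelineState

    _TERMINAL = (
        PipelineState.PIPELINE_STATE_SUCCEEDED,
        PipelineState.PIPELINE_STATE_FAILED,
        PipelineState.PIPELINE_STATE_CANCELLED,
    )

    async def cancel_and_wait(client, name):
        await client.cancel_pipeline_job(name=name)
        job = await client.get_pipeline_job(name=name)
        while job.state not in _TERMINAL:
            await asyncio.sleep(10)  # arbitrary interval
            job = await client.get_pipeline_job(name=name)
        return job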
On - successful cancellation, the PipelineJob is not deleted; instead - it becomes a pipeline with a - [PipelineJob.error][google.cloud.aiplatform.v1beta1.PipelineJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [PipelineJob.state][google.cloud.aiplatform.v1beta1.PipelineJob.state] - is set to ``CANCELLED``. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.CancelPipelineJobRequest`): - The request object. Request message for - [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CancelPipelineJob]. - name (:class:`str`): - Required. The name of the PipelineJob to cancel. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.CancelPipelineJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.cancel_pipeline_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "PipelineServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py deleted file mode 100644 index df8276c2e8..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py +++ /dev/null @@ -1,1328 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from distutils import util -import os -import re -from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.pipeline_service import pagers -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import model -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.cloud.aiplatform_v1beta1.types import pipeline_job -from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job -from google.cloud.aiplatform_v1beta1.types import pipeline_service -from google.cloud.aiplatform_v1beta1.types import pipeline_state -from google.cloud.aiplatform_v1beta1.types import training_pipeline -from google.cloud.aiplatform_v1beta1.types import training_pipeline as gca_training_pipeline -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from .transports.base import PipelineServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import PipelineServiceGrpcTransport -from .transports.grpc_asyncio import PipelineServiceGrpcAsyncIOTransport - - -class PipelineServiceClientMeta(type): - """Metaclass for the PipelineService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[PipelineServiceTransport]] - _transport_registry["grpc"] = PipelineServiceGrpcTransport - _transport_registry["grpc_asyncio"] = PipelineServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[PipelineServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class PipelineServiceClient(metaclass=PipelineServiceClientMeta): - """A service for creating and managing Vertex AI's pipelines. This - includes both ``TrainingPipeline`` resources (used for AutoML and - custom training) and ``PipelineJob`` resources (used for Vertex - Pipelines). 
- """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PipelineServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PipelineServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> PipelineServiceTransport: - """Returns the transport used by the client instance. - - Returns: - PipelineServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def artifact_path(project: str,location: str,metadata_store: str,artifact: str,) -> str: - """Returns a fully-qualified artifact string.""" - return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(project=project, location=location, metadata_store=metadata_store, artifact=artifact, ) - - @staticmethod - def parse_artifact_path(path: str) -> Dict[str,str]: - """Parses a artifact path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/artifacts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def context_path(project: str,location: str,metadata_store: str,context: str,) -> str: - """Returns a fully-qualified context string.""" - return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(project=project, location=location, metadata_store=metadata_store, context=context, ) - - @staticmethod - def parse_context_path(path: str) -> Dict[str,str]: - """Parses a context path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/contexts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def custom_job_path(project: str,location: str,custom_job: str,) -> str: - """Returns a fully-qualified custom_job string.""" - return "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) - - @staticmethod - def parse_custom_job_path(path: str) -> Dict[str,str]: - """Parses a custom_job path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/customJobs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def endpoint_path(project: str,location: str,endpoint: str,) -> str: - """Returns a fully-qualified endpoint string.""" - return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - - @staticmethod - def parse_endpoint_path(path: str) -> Dict[str,str]: - """Parses a endpoint path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def execution_path(project: str,location: str,metadata_store: str,execution: str,) -> str: - """Returns a fully-qualified execution string.""" - return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(project=project, location=location, metadata_store=metadata_store, execution=execution, ) - - @staticmethod - def parse_execution_path(path: str) -> Dict[str,str]: - """Parses a execution path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/executions/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_path(project: str,location: str,model: str,) -> str: - """Returns a fully-qualified model string.""" - return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - - @staticmethod - def parse_model_path(path: str) -> Dict[str,str]: - """Parses a model path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def network_path(project: str,network: str,) -> 
str: - """Returns a fully-qualified network string.""" - return "projects/{project}/global/networks/{network}".format(project=project, network=network, ) - - @staticmethod - def parse_network_path(path: str) -> Dict[str,str]: - """Parses a network path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/global/networks/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def pipeline_job_path(project: str,location: str,pipeline_job: str,) -> str: - """Returns a fully-qualified pipeline_job string.""" - return "projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}".format(project=project, location=location, pipeline_job=pipeline_job, ) - - @staticmethod - def parse_pipeline_job_path(path: str) -> Dict[str,str]: - """Parses a pipeline_job path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/pipelineJobs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def training_pipeline_path(project: str,location: str,training_pipeline: str,) -> str: - """Returns a fully-qualified training_pipeline string.""" - return "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, ) - - @staticmethod - def parse_training_pipeline_path(path: str) -> Dict[str,str]: - """Parses a training_pipeline path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/trainingPipelines/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> 
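A quick sketch of how the build/parse helpers above pair up; the identifiers are hypothetical:

    path = PipelineServiceClient.training_pipeline_path(
        project="my-project",
        location="us-central1",
        training_pipeline="12345",
    )
    # path == "projects/my-project/locations/us-central1/trainingPipelines/12345"

    segments = PipelineServiceClient.parse_training_pipeline_path(path)
    # segments == {"project": "my-project", "location": "us-central1",
    #              "training_pipeline": "12345"}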
-    def __init__(self, *,
-            credentials: Optional[ga_credentials.Credentials] = None,
-            transport: Union[str, PipelineServiceTransport, None] = None,
-            client_options: Optional[client_options_lib.ClientOptions] = None,
-            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
-            ) -> None:
-        """Instantiates the pipeline service client.
-
-        Args:
-            credentials (Optional[google.auth.credentials.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify the application to the service; if none
-                are specified, the client will attempt to ascertain the
-                credentials from the environment.
-            transport (Union[str, PipelineServiceTransport]): The
-                transport to use. If set to None, a transport is chosen
-                automatically.
-            client_options (google.api_core.client_options.ClientOptions): Custom options for the
-                client. It won't take effect if a ``transport`` instance is provided.
-                (1) The ``api_endpoint`` property can be used to override the
-                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
-                environment variable can also be used to override the endpoint:
-                "always" (always use the default mTLS endpoint), "never" (always
-                use the default regular endpoint) and "auto" (auto switch to the
-                default mTLS endpoint if client certificate is present, this is
-                the default value). However, the ``api_endpoint`` property takes
-                precedence if provided.
-                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
-                is "true", then the ``client_cert_source`` property can be used
-                to provide client certificate for mutual TLS transport. If
-                not provided, the default SSL client certificate will be used if
-                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
-                set, no client certificate will be used.
-            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
-                The client info used to send a user-agent string along with
-                API requests. If ``None``, then default info will be used.
-                Generally, you only need to set this if you're developing
-                your own client library.
-
-        Raises:
-            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
-                creation failed for any reason.
-        """
-        if isinstance(client_options, dict):
-            client_options = client_options_lib.from_dict(client_options)
-        if client_options is None:
-            client_options = client_options_lib.ClientOptions()
-
-        # Create SSL credentials for mutual TLS if needed.
-        use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
-
-        client_cert_source_func = None
-        is_mtls = False
-        if use_client_cert:
-            if client_options.client_cert_source:
-                is_mtls = True
-                client_cert_source_func = client_options.client_cert_source
-            else:
-                is_mtls = mtls.has_default_client_cert_source()
-                if is_mtls:
-                    client_cert_source_func = mtls.default_client_cert_source()
-                else:
-                    client_cert_source_func = None
-
-        # Figure out which api endpoint to use.
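        # For reference, the branches below resolve as follows (an editorial
        # summary of the code, not generated output):
        #   - an explicit client_options.api_endpoint always wins;
        #   - GOOGLE_API_USE_MTLS_ENDPOINT="never"  -> DEFAULT_ENDPOINT;
        #   - GOOGLE_API_USE_MTLS_ENDPOINT="always" -> DEFAULT_MTLS_ENDPOINT;
        #   - GOOGLE_API_USE_MTLS_ENDPOINT="auto"   -> DEFAULT_MTLS_ENDPOINT only
        #     when a client certificate was detected above, else DEFAULT_ENDPOINT;
        #   - any other value raises MutualTLSChannelError.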
- if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, PipelineServiceTransport): - # transport is a PipelineServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - ) - - def create_training_pipeline(self, - request: pipeline_service.CreateTrainingPipelineRequest = None, - *, - parent: str = None, - training_pipeline: gca_training_pipeline.TrainingPipeline = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_training_pipeline.TrainingPipeline: - r"""Creates a TrainingPipeline. A created - TrainingPipeline right away will be attempted to be run. - - Args: - request (google.cloud.aiplatform_v1beta1.types.CreateTrainingPipelineRequest): - The request object. Request message for - [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline]. - parent (str): - Required. The resource name of the Location to create - the TrainingPipeline in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - training_pipeline (google.cloud.aiplatform_v1beta1.types.TrainingPipeline): - Required. The TrainingPipeline to - create. - - This corresponds to the ``training_pipeline`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TrainingPipeline: - The TrainingPipeline orchestrates tasks associated with training a Model. It - always executes the training task, and optionally may - also export data from Vertex AI's Dataset which - becomes the training input, - [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] - the Model to Vertex AI, and evaluate the Model. - - """ - # Create or coerce a protobuf request object. 
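        # Illustrative call shapes for the check below (names hypothetical):
        #
        #   client.create_training_pipeline(
        #       parent="projects/my-project/locations/us-central1",
        #       training_pipeline=my_pipeline,          # flattened fields
        #   )
        #
        #   client.create_training_pipeline(
        #       request=pipeline_service.CreateTrainingPipelineRequest(...),
        #   )
        #
        # Passing `request` together with flattened fields raises ValueError.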
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, training_pipeline]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.CreateTrainingPipelineRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.CreateTrainingPipelineRequest): - request = pipeline_service.CreateTrainingPipelineRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if training_pipeline is not None: - request.training_pipeline = training_pipeline - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_training_pipeline] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_training_pipeline(self, - request: pipeline_service.GetTrainingPipelineRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> training_pipeline.TrainingPipeline: - r"""Gets a TrainingPipeline. - - Args: - request (google.cloud.aiplatform_v1beta1.types.GetTrainingPipelineRequest): - The request object. Request message for - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline]. - name (str): - Required. The name of the TrainingPipeline resource. - Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TrainingPipeline: - The TrainingPipeline orchestrates tasks associated with training a Model. It - always executes the training task, and optionally may - also export data from Vertex AI's Dataset which - becomes the training input, - [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] - the Model to Vertex AI, and evaluate the Model. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.GetTrainingPipelineRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.GetTrainingPipelineRequest): - request = pipeline_service.GetTrainingPipelineRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_training_pipeline] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_training_pipelines(self, - request: pipeline_service.ListTrainingPipelinesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTrainingPipelinesPager: - r"""Lists TrainingPipelines in a Location. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesRequest): - The request object. Request message for - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines]. - parent (str): - Required. The resource name of the Location to list the - TrainingPipelines from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListTrainingPipelinesPager: - Response message for - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.ListTrainingPipelinesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.ListTrainingPipelinesRequest): - request = pipeline_service.ListTrainingPipelinesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
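        # The wrapped method already carries the transport's default retry and
        # timeout; callers may override either per call, e.g. (values are
        # illustrative):
        #
        #   pager = client.list_training_pipelines(parent=parent, timeout=30.0)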
- rpc = self._transport._wrapped_methods[self._transport.list_training_pipelines] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListTrainingPipelinesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_training_pipeline(self, - request: pipeline_service.DeleteTrainingPipelineRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a TrainingPipeline. - - Args: - request (google.cloud.aiplatform_v1beta1.types.DeleteTrainingPipelineRequest): - The request object. Request message for - [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline]. - name (str): - Required. The name of the TrainingPipeline resource to - be deleted. Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.DeleteTrainingPipelineRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.DeleteTrainingPipelineRequest): - request = pipeline_service.DeleteTrainingPipelineRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_training_pipeline] - - # Certain fields should be provided within the metadata header; - # add these here. 
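        # The routing header assembled below travels as gRPC metadata, e.g.
        # (resource name is illustrative):
        #
        #   ("x-goog-request-params",
        #    "name=projects/my-project/locations/us-central1/trainingPipelines/123")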
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def cancel_training_pipeline(self, - request: pipeline_service.CancelTrainingPipelineRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a TrainingPipeline. Starts asynchronous cancellation on - the TrainingPipeline. The server makes a best effort to cancel - the pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. On - successful cancellation, the TrainingPipeline is not deleted; - instead it becomes a pipeline with a - [TrainingPipeline.error][google.cloud.aiplatform.v1beta1.TrainingPipeline.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state] - is set to ``CANCELLED``. - - Args: - request (google.cloud.aiplatform_v1beta1.types.CancelTrainingPipelineRequest): - The request object. Request message for - [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline]. - name (str): - Required. The name of the TrainingPipeline to cancel. - Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.CancelTrainingPipelineRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.CancelTrainingPipelineRequest): - request = pipeline_service.CancelTrainingPipelineRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.cancel_training_pipeline] - - # Certain fields should be provided within the metadata header; - # add these here. 
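        # Cancellation is fire-and-forget: the RPC below returns None. To
        # observe the outcome, poll the pipeline afterwards (hypothetical
        # sketch):
        #
        #   client.cancel_training_pipeline(name=name)
        #   pipeline = client.get_training_pipeline(name=name)
        #   # pipeline.state eventually reports CANCELLED, per the docstring.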
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def create_pipeline_job(self, - request: pipeline_service.CreatePipelineJobRequest = None, - *, - parent: str = None, - pipeline_job: gca_pipeline_job.PipelineJob = None, - pipeline_job_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_pipeline_job.PipelineJob: - r"""Creates a PipelineJob. A PipelineJob will run - immediately when created. - - Args: - request (google.cloud.aiplatform_v1beta1.types.CreatePipelineJobRequest): - The request object. Request message for - [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CreatePipelineJob]. - parent (str): - Required. The resource name of the Location to create - the PipelineJob in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - pipeline_job (google.cloud.aiplatform_v1beta1.types.PipelineJob): - Required. The PipelineJob to create. - This corresponds to the ``pipeline_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - pipeline_job_id (str): - The ID to use for the PipelineJob, which will become the - final component of the PipelineJob name. If not - provided, an ID will be automatically generated. - - This value should be less than 128 characters, and valid - characters are /[a-z][0-9]-/. - - This corresponds to the ``pipeline_job_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.PipelineJob: - An instance of a machine learning - PipelineJob. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, pipeline_job, pipeline_job_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.CreatePipelineJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.CreatePipelineJobRequest): - request = pipeline_service.CreatePipelineJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if pipeline_job is not None: - request.pipeline_job = pipeline_job - if pipeline_job_id is not None: - request.pipeline_job_id = pipeline_job_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_pipeline_job] - - # Certain fields should be provided within the metadata header; - # add these here. 
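        # Illustrative flattened call (identifiers hypothetical); per the
        # docstring above, the ID must match /[a-z][0-9]-/ and stay under
        # 128 characters:
        #
        #   job = client.create_pipeline_job(
        #       parent="projects/my-project/locations/us-central1",
        #       pipeline_job=my_pipeline_job,
        #       pipeline_job_id="my-run-001",
        #   )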
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_pipeline_job(self, - request: pipeline_service.GetPipelineJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pipeline_job.PipelineJob: - r"""Gets a PipelineJob. - - Args: - request (google.cloud.aiplatform_v1beta1.types.GetPipelineJobRequest): - The request object. Request message for - [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob]. - name (str): - Required. The name of the PipelineJob resource. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.PipelineJob: - An instance of a machine learning - PipelineJob. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.GetPipelineJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.GetPipelineJobRequest): - request = pipeline_service.GetPipelineJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_pipeline_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_pipeline_jobs(self, - request: pipeline_service.ListPipelineJobsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListPipelineJobsPager: - r"""Lists PipelineJobs in a Location. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest): - The request object. Request message for - [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs]. - parent (str): - Required. The resource name of the Location to list the - PipelineJobs from. 
Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListPipelineJobsPager: - Response message for - [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.ListPipelineJobsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.ListPipelineJobsRequest): - request = pipeline_service.ListPipelineJobsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_pipeline_jobs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListPipelineJobsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_pipeline_job(self, - request: pipeline_service.DeletePipelineJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a PipelineJob. - - Args: - request (google.cloud.aiplatform_v1beta1.types.DeletePipelineJobRequest): - The request object. Request message for - [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.DeletePipelineJob]. - name (str): - Required. The name of the PipelineJob resource to be - deleted. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.DeletePipelineJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.DeletePipelineJobRequest): - request = pipeline_service.DeletePipelineJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_pipeline_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def cancel_pipeline_job(self, - request: pipeline_service.CancelPipelineJobRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a PipelineJob. Starts asynchronous cancellation on the - PipelineJob. The server makes a best effort to cancel the - pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. On - successful cancellation, the PipelineJob is not deleted; instead - it becomes a pipeline with a - [PipelineJob.error][google.cloud.aiplatform.v1beta1.PipelineJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [PipelineJob.state][google.cloud.aiplatform.v1beta1.PipelineJob.state] - is set to ``CANCELLED``. - - Args: - request (google.cloud.aiplatform_v1beta1.types.CancelPipelineJobRequest): - The request object. Request message for - [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CancelPipelineJob]. - name (str): - Required. The name of the PipelineJob to cancel. 
Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.CancelPipelineJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.CancelPipelineJobRequest): - request = pipeline_service.CancelPipelineJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.cancel_pipeline_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "PipelineServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/pagers.py deleted file mode 100644 index b997210da0..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/pagers.py +++ /dev/null @@ -1,264 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.cloud.aiplatform_v1beta1.types import pipeline_job -from google.cloud.aiplatform_v1beta1.types import pipeline_service -from google.cloud.aiplatform_v1beta1.types import training_pipeline - - -class ListTrainingPipelinesPager: - """A pager for iterating through ``list_training_pipelines`` requests. 
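    Illustrative usage (names are hypothetical)::

        pager = client.list_training_pipelines(parent=parent)
        for training_pipeline in pager:   # later pages are fetched lazily
            print(training_pipeline.display_name)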
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``training_pipelines`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListTrainingPipelines`` requests and continue to iterate - through the ``training_pipelines`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., pipeline_service.ListTrainingPipelinesResponse], - request: pipeline_service.ListTrainingPipelinesRequest, - response: pipeline_service.ListTrainingPipelinesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = pipeline_service.ListTrainingPipelinesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[pipeline_service.ListTrainingPipelinesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[training_pipeline.TrainingPipeline]: - for page in self.pages: - yield from page.training_pipelines - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTrainingPipelinesAsyncPager: - """A pager for iterating through ``list_training_pipelines`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``training_pipelines`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListTrainingPipelines`` requests and continue to iterate - through the ``training_pipelines`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[pipeline_service.ListTrainingPipelinesResponse]], - request: pipeline_service.ListTrainingPipelinesRequest, - response: pipeline_service.ListTrainingPipelinesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesRequest): - The initial request object. 
- response (google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = pipeline_service.ListTrainingPipelinesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[pipeline_service.ListTrainingPipelinesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[training_pipeline.TrainingPipeline]: - async def async_generator(): - async for page in self.pages: - for response in page.training_pipelines: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListPipelineJobsPager: - """A pager for iterating through ``list_pipeline_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListPipelineJobsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``pipeline_jobs`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListPipelineJobs`` requests and continue to iterate - through the ``pipeline_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListPipelineJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., pipeline_service.ListPipelineJobsResponse], - request: pipeline_service.ListPipelineJobsRequest, - response: pipeline_service.ListPipelineJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListPipelineJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = pipeline_service.ListPipelineJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[pipeline_service.ListPipelineJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[pipeline_job.PipelineJob]: - for page in self.pages: - yield from page.pipeline_jobs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListPipelineJobsAsyncPager: - """A pager for iterating through ``list_pipeline_jobs`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListPipelineJobsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``pipeline_jobs`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListPipelineJobs`` requests and continue to iterate - through the ``pipeline_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListPipelineJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[pipeline_service.ListPipelineJobsResponse]], - request: pipeline_service.ListPipelineJobsRequest, - response: pipeline_service.ListPipelineJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListPipelineJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = pipeline_service.ListPipelineJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[pipeline_service.ListPipelineJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[pipeline_job.PipelineJob]: - async def async_generator(): - async for page in self.pages: - for response in page.pipeline_jobs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/__init__.py deleted file mode 100644 index 77051d8254..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import PipelineServiceTransport -from .grpc import PipelineServiceGrpcTransport -from .grpc_asyncio import PipelineServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. 
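# For reference (illustrative), the client metaclass keeps an identical
# registry and resolves transports by these same keys:
#
#   transport_cls = PipelineServiceClient.get_transport_class("grpc_asyncio")
#   # -> PipelineServiceGrpcAsyncIOTransport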
-_transport_registry = OrderedDict() # type: Dict[str, Type[PipelineServiceTransport]] -_transport_registry['grpc'] = PipelineServiceGrpcTransport -_transport_registry['grpc_asyncio'] = PipelineServiceGrpcAsyncIOTransport - -__all__ = ( - 'PipelineServiceTransport', - 'PipelineServiceGrpcTransport', - 'PipelineServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py deleted file mode 100644 index a67aad8787..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py +++ /dev/null @@ -1,306 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import pipeline_job -from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job -from google.cloud.aiplatform_v1beta1.types import pipeline_service -from google.cloud.aiplatform_v1beta1.types import training_pipeline -from google.cloud.aiplatform_v1beta1.types import training_pipeline as gca_training_pipeline -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class PipelineServiceTransport(abc.ABC): - """Abstract transport class for PipelineService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: 
gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials is service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # TODO(busunkim): This method is in the base transport - # to avoid duplicating code across the transport classes. These functions - # should be deleted once the minimum required versions of google-auth is increased. - - # TODO: Remove this function once google-auth >= 1.25.0 is required - @classmethod - def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: - """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" - - scopes_kwargs = {} - - if _GOOGLE_AUTH_VERSION and ( - packaging.version.parse(_GOOGLE_AUTH_VERSION) - >= packaging.version.parse("1.25.0") - ): - scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} - else: - scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} - - return scopes_kwargs - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
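The base transport `__init__` above resolves credentials in a fixed order: an explicit file, then explicit credentials, then Application Default Credentials, raising if both explicit forms are passed. A condensed standalone sketch of that order (`resolve_credentials` is an illustrative name, not part of the patch):

```python
# Standalone sketch of the credential-resolution order implemented by the
# deleted base transport. Raises ValueError where the transport raises
# core_exceptions.DuplicateCredentialArgs.
from typing import Optional, Sequence

import google.auth
from google.auth import credentials as ga_credentials


def resolve_credentials(
    credentials: Optional[ga_credentials.Credentials] = None,
    credentials_file: Optional[str] = None,
    scopes: Optional[Sequence[str]] = None,
) -> ga_credentials.Credentials:
    if credentials and credentials_file:
        raise ValueError(
            "'credentials_file' and 'credentials' are mutually exclusive")
    if credentials_file is not None:
        # Load credentials (e.g. a service account key) from a JSON file.
        credentials, _ = google.auth.load_credentials_from_file(
            credentials_file, scopes=scopes)
    elif credentials is None:
        # Fall back to Application Default Credentials.
        credentials, _ = google.auth.default(scopes=scopes)
    return credentials
```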
- self._wrapped_methods = { - self.create_training_pipeline: gapic_v1.method.wrap_method( - self.create_training_pipeline, - default_timeout=5.0, - client_info=client_info, - ), - self.get_training_pipeline: gapic_v1.method.wrap_method( - self.get_training_pipeline, - default_timeout=5.0, - client_info=client_info, - ), - self.list_training_pipelines: gapic_v1.method.wrap_method( - self.list_training_pipelines, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_training_pipeline: gapic_v1.method.wrap_method( - self.delete_training_pipeline, - default_timeout=5.0, - client_info=client_info, - ), - self.cancel_training_pipeline: gapic_v1.method.wrap_method( - self.cancel_training_pipeline, - default_timeout=5.0, - client_info=client_info, - ), - self.create_pipeline_job: gapic_v1.method.wrap_method( - self.create_pipeline_job, - default_timeout=None, - client_info=client_info, - ), - self.get_pipeline_job: gapic_v1.method.wrap_method( - self.get_pipeline_job, - default_timeout=None, - client_info=client_info, - ), - self.list_pipeline_jobs: gapic_v1.method.wrap_method( - self.list_pipeline_jobs, - default_timeout=None, - client_info=client_info, - ), - self.delete_pipeline_job: gapic_v1.method.wrap_method( - self.delete_pipeline_job, - default_timeout=None, - client_info=client_info, - ), - self.cancel_pipeline_job: gapic_v1.method.wrap_method( - self.cancel_pipeline_job, - default_timeout=None, - client_info=client_info, - ), - } - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_training_pipeline(self) -> Callable[ - [pipeline_service.CreateTrainingPipelineRequest], - Union[ - gca_training_pipeline.TrainingPipeline, - Awaitable[gca_training_pipeline.TrainingPipeline] - ]]: - raise NotImplementedError() - - @property - def get_training_pipeline(self) -> Callable[ - [pipeline_service.GetTrainingPipelineRequest], - Union[ - training_pipeline.TrainingPipeline, - Awaitable[training_pipeline.TrainingPipeline] - ]]: - raise NotImplementedError() - - @property - def list_training_pipelines(self) -> Callable[ - [pipeline_service.ListTrainingPipelinesRequest], - Union[ - pipeline_service.ListTrainingPipelinesResponse, - Awaitable[pipeline_service.ListTrainingPipelinesResponse] - ]]: - raise NotImplementedError() - - @property - def delete_training_pipeline(self) -> Callable[ - [pipeline_service.DeleteTrainingPipelineRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def cancel_training_pipeline(self) -> Callable[ - [pipeline_service.CancelTrainingPipelineRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def create_pipeline_job(self) -> Callable[ - [pipeline_service.CreatePipelineJobRequest], - Union[ - gca_pipeline_job.PipelineJob, - Awaitable[gca_pipeline_job.PipelineJob] - ]]: - raise NotImplementedError() - - @property - def get_pipeline_job(self) -> Callable[ - [pipeline_service.GetPipelineJobRequest], - Union[ - pipeline_job.PipelineJob, - Awaitable[pipeline_job.PipelineJob] - ]]: - raise NotImplementedError() - - @property - def list_pipeline_jobs(self) -> Callable[ - [pipeline_service.ListPipelineJobsRequest], - Union[ - pipeline_service.ListPipelineJobsResponse, - Awaitable[pipeline_service.ListPipelineJobsResponse] - ]]: - raise NotImplementedError() - - @property - 
def delete_pipeline_job(self) -> Callable[ - [pipeline_service.DeletePipelineJobRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def cancel_pipeline_job(self) -> Callable[ - [pipeline_service.CancelPipelineJobRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'PipelineServiceTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py deleted file mode 100644 index 22b2faf143..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py +++ /dev/null @@ -1,539 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.api_core import gapic_v1 # type: ignore -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1beta1.types import pipeline_job -from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job -from google.cloud.aiplatform_v1beta1.types import pipeline_service -from google.cloud.aiplatform_v1beta1.types import training_pipeline -from google.cloud.aiplatform_v1beta1.types import training_pipeline as gca_training_pipeline -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import PipelineServiceTransport, DEFAULT_CLIENT_INFO - - -class PipelineServiceGrpcTransport(PipelineServiceTransport): - """gRPC backend transport for PipelineService. - - A service for creating and managing Vertex AI's pipelines. This - includes both ``TrainingPipeline`` resources (used for AutoML and - custom training) and ``PipelineJob`` resources (used for Vertex - Pipelines). - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. 
- credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. 
- - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_training_pipeline(self) -> Callable[ - [pipeline_service.CreateTrainingPipelineRequest], - gca_training_pipeline.TrainingPipeline]: - r"""Return a callable for the create training pipeline method over gRPC. - - Creates a TrainingPipeline. A created - TrainingPipeline right away will be attempted to be run. - - Returns: - Callable[[~.CreateTrainingPipelineRequest], - ~.TrainingPipeline]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_training_pipeline' not in self._stubs: - self._stubs['create_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/CreateTrainingPipeline', - request_serializer=pipeline_service.CreateTrainingPipelineRequest.serialize, - response_deserializer=gca_training_pipeline.TrainingPipeline.deserialize, - ) - return self._stubs['create_training_pipeline'] - - @property - def get_training_pipeline(self) -> Callable[ - [pipeline_service.GetTrainingPipelineRequest], - training_pipeline.TrainingPipeline]: - r"""Return a callable for the get training pipeline method over gRPC. - - Gets a TrainingPipeline. - - Returns: - Callable[[~.GetTrainingPipelineRequest], - ~.TrainingPipeline]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_training_pipeline' not in self._stubs: - self._stubs['get_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/GetTrainingPipeline', - request_serializer=pipeline_service.GetTrainingPipelineRequest.serialize, - response_deserializer=training_pipeline.TrainingPipeline.deserialize, - ) - return self._stubs['get_training_pipeline'] - - @property - def list_training_pipelines(self) -> Callable[ - [pipeline_service.ListTrainingPipelinesRequest], - pipeline_service.ListTrainingPipelinesResponse]: - r"""Return a callable for the list training pipelines method over gRPC. - - Lists TrainingPipelines in a Location. - - Returns: - Callable[[~.ListTrainingPipelinesRequest], - ~.ListTrainingPipelinesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
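Every stub property in this transport repeats the same create-once-then-cache pattern over `grpc_channel.unary_unary`. Factored out as a generic sketch (`get_stub` and its parameters are illustrative names):

```python
# Generic sketch of the lazy stub-caching pattern used by each property:
# build the unary-unary stub on first access, then serve it from the cache.
from typing import Any, Callable, Dict

import grpc


def get_stub(stubs: Dict[str, Callable], channel: grpc.Channel, name: str,
             path: str, serializer: Callable[[Any], bytes],
             deserializer: Callable[[bytes], Any]) -> Callable:
    if name not in stubs:
        stubs[name] = channel.unary_unary(
            path,
            request_serializer=serializer,
            response_deserializer=deserializer,
        )
    return stubs[name]
```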
- if 'list_training_pipelines' not in self._stubs: - self._stubs['list_training_pipelines'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/ListTrainingPipelines', - request_serializer=pipeline_service.ListTrainingPipelinesRequest.serialize, - response_deserializer=pipeline_service.ListTrainingPipelinesResponse.deserialize, - ) - return self._stubs['list_training_pipelines'] - - @property - def delete_training_pipeline(self) -> Callable[ - [pipeline_service.DeleteTrainingPipelineRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete training pipeline method over gRPC. - - Deletes a TrainingPipeline. - - Returns: - Callable[[~.DeleteTrainingPipelineRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_training_pipeline' not in self._stubs: - self._stubs['delete_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/DeleteTrainingPipeline', - request_serializer=pipeline_service.DeleteTrainingPipelineRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_training_pipeline'] - - @property - def cancel_training_pipeline(self) -> Callable[ - [pipeline_service.CancelTrainingPipelineRequest], - empty_pb2.Empty]: - r"""Return a callable for the cancel training pipeline method over gRPC. - - Cancels a TrainingPipeline. Starts asynchronous cancellation on - the TrainingPipeline. The server makes a best effort to cancel - the pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. On - successful cancellation, the TrainingPipeline is not deleted; - instead it becomes a pipeline with a - [TrainingPipeline.error][google.cloud.aiplatform.v1beta1.TrainingPipeline.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state] - is set to ``CANCELLED``. - - Returns: - Callable[[~.CancelTrainingPipelineRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_training_pipeline' not in self._stubs: - self._stubs['cancel_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/CancelTrainingPipeline', - request_serializer=pipeline_service.CancelTrainingPipelineRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_training_pipeline'] - - @property - def create_pipeline_job(self) -> Callable[ - [pipeline_service.CreatePipelineJobRequest], - gca_pipeline_job.PipelineJob]: - r"""Return a callable for the create pipeline job method over gRPC. - - Creates a PipelineJob. A PipelineJob will run - immediately when created. 
- - Returns: - Callable[[~.CreatePipelineJobRequest], - ~.PipelineJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_pipeline_job' not in self._stubs: - self._stubs['create_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/CreatePipelineJob', - request_serializer=pipeline_service.CreatePipelineJobRequest.serialize, - response_deserializer=gca_pipeline_job.PipelineJob.deserialize, - ) - return self._stubs['create_pipeline_job'] - - @property - def get_pipeline_job(self) -> Callable[ - [pipeline_service.GetPipelineJobRequest], - pipeline_job.PipelineJob]: - r"""Return a callable for the get pipeline job method over gRPC. - - Gets a PipelineJob. - - Returns: - Callable[[~.GetPipelineJobRequest], - ~.PipelineJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_pipeline_job' not in self._stubs: - self._stubs['get_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/GetPipelineJob', - request_serializer=pipeline_service.GetPipelineJobRequest.serialize, - response_deserializer=pipeline_job.PipelineJob.deserialize, - ) - return self._stubs['get_pipeline_job'] - - @property - def list_pipeline_jobs(self) -> Callable[ - [pipeline_service.ListPipelineJobsRequest], - pipeline_service.ListPipelineJobsResponse]: - r"""Return a callable for the list pipeline jobs method over gRPC. - - Lists PipelineJobs in a Location. - - Returns: - Callable[[~.ListPipelineJobsRequest], - ~.ListPipelineJobsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_pipeline_jobs' not in self._stubs: - self._stubs['list_pipeline_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/ListPipelineJobs', - request_serializer=pipeline_service.ListPipelineJobsRequest.serialize, - response_deserializer=pipeline_service.ListPipelineJobsResponse.deserialize, - ) - return self._stubs['list_pipeline_jobs'] - - @property - def delete_pipeline_job(self) -> Callable[ - [pipeline_service.DeletePipelineJobRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete pipeline job method over gRPC. - - Deletes a PipelineJob. - - Returns: - Callable[[~.DeletePipelineJobRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_pipeline_job' not in self._stubs: - self._stubs['delete_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/DeletePipelineJob', - request_serializer=pipeline_service.DeletePipelineJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_pipeline_job'] - - @property - def cancel_pipeline_job(self) -> Callable[ - [pipeline_service.CancelPipelineJobRequest], - empty_pb2.Empty]: - r"""Return a callable for the cancel pipeline job method over gRPC. - - Cancels a PipelineJob. Starts asynchronous cancellation on the - PipelineJob. The server makes a best effort to cancel the - pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. On - successful cancellation, the PipelineJob is not deleted; instead - it becomes a pipeline with a - [PipelineJob.error][google.cloud.aiplatform.v1beta1.PipelineJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [PipelineJob.state][google.cloud.aiplatform.v1beta1.PipelineJob.state] - is set to ``CANCELLED``. - - Returns: - Callable[[~.CancelPipelineJobRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_pipeline_job' not in self._stubs: - self._stubs['cancel_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/CancelPipelineJob', - request_serializer=pipeline_service.CancelPipelineJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_pipeline_job'] - - -__all__ = ( - 'PipelineServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py deleted file mode 100644 index a2929cb5dd..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,543 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
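Both the synchronous transport above and the asyncio transport that follows delegate channel construction to api-core's helpers with the same host, scope, and message-size defaults. A sketch under those defaults (assumes Application Default Credentials are configured):

```python
# Sketch of the channel the transport builds when none is passed in.
# Relies on Application Default Credentials being available.
from google.api_core import grpc_helpers

channel = grpc_helpers.create_channel(
    "aiplatform.googleapis.com:443",
    scopes=["https://www.googleapis.com/auth/cloud-platform"],
    options=[
        ("grpc.max_send_message_length", -1),
        ("grpc.max_receive_message_length", -1),
    ],
)
```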
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1beta1.types import pipeline_job -from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job -from google.cloud.aiplatform_v1beta1.types import pipeline_service -from google.cloud.aiplatform_v1beta1.types import training_pipeline -from google.cloud.aiplatform_v1beta1.types import training_pipeline as gca_training_pipeline -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import PipelineServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import PipelineServiceGrpcTransport - - -class PipelineServiceGrpcAsyncIOTransport(PipelineServiceTransport): - """gRPC AsyncIO backend transport for PipelineService. - - A service for creating and managing Vertex AI's pipelines. This - includes both ``TrainingPipeline`` resources (used for AutoML and - custom training) and ``PipelineJob`` resources (used for Vertex - Pipelines). - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. 
- """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_training_pipeline(self) -> Callable[ - [pipeline_service.CreateTrainingPipelineRequest], - Awaitable[gca_training_pipeline.TrainingPipeline]]: - r"""Return a callable for the create training pipeline method over gRPC. - - Creates a TrainingPipeline. A created - TrainingPipeline right away will be attempted to be run. - - Returns: - Callable[[~.CreateTrainingPipelineRequest], - Awaitable[~.TrainingPipeline]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_training_pipeline' not in self._stubs: - self._stubs['create_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/CreateTrainingPipeline', - request_serializer=pipeline_service.CreateTrainingPipelineRequest.serialize, - response_deserializer=gca_training_pipeline.TrainingPipeline.deserialize, - ) - return self._stubs['create_training_pipeline'] - - @property - def get_training_pipeline(self) -> Callable[ - [pipeline_service.GetTrainingPipelineRequest], - Awaitable[training_pipeline.TrainingPipeline]]: - r"""Return a callable for the get training pipeline method over gRPC. - - Gets a TrainingPipeline. - - Returns: - Callable[[~.GetTrainingPipelineRequest], - Awaitable[~.TrainingPipeline]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_training_pipeline' not in self._stubs: - self._stubs['get_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/GetTrainingPipeline', - request_serializer=pipeline_service.GetTrainingPipelineRequest.serialize, - response_deserializer=training_pipeline.TrainingPipeline.deserialize, - ) - return self._stubs['get_training_pipeline'] - - @property - def list_training_pipelines(self) -> Callable[ - [pipeline_service.ListTrainingPipelinesRequest], - Awaitable[pipeline_service.ListTrainingPipelinesResponse]]: - r"""Return a callable for the list training pipelines method over gRPC. - - Lists TrainingPipelines in a Location. - - Returns: - Callable[[~.ListTrainingPipelinesRequest], - Awaitable[~.ListTrainingPipelinesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_training_pipelines' not in self._stubs: - self._stubs['list_training_pipelines'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/ListTrainingPipelines', - request_serializer=pipeline_service.ListTrainingPipelinesRequest.serialize, - response_deserializer=pipeline_service.ListTrainingPipelinesResponse.deserialize, - ) - return self._stubs['list_training_pipelines'] - - @property - def delete_training_pipeline(self) -> Callable[ - [pipeline_service.DeleteTrainingPipelineRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete training pipeline method over gRPC. - - Deletes a TrainingPipeline. - - Returns: - Callable[[~.DeleteTrainingPipelineRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_training_pipeline' not in self._stubs: - self._stubs['delete_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/DeleteTrainingPipeline', - request_serializer=pipeline_service.DeleteTrainingPipelineRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_training_pipeline'] - - @property - def cancel_training_pipeline(self) -> Callable[ - [pipeline_service.CancelTrainingPipelineRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the cancel training pipeline method over gRPC. - - Cancels a TrainingPipeline. Starts asynchronous cancellation on - the TrainingPipeline. The server makes a best effort to cancel - the pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. On - successful cancellation, the TrainingPipeline is not deleted; - instead it becomes a pipeline with a - [TrainingPipeline.error][google.cloud.aiplatform.v1beta1.TrainingPipeline.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state] - is set to ``CANCELLED``. - - Returns: - Callable[[~.CancelTrainingPipelineRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_training_pipeline' not in self._stubs: - self._stubs['cancel_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/CancelTrainingPipeline', - request_serializer=pipeline_service.CancelTrainingPipelineRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_training_pipeline'] - - @property - def create_pipeline_job(self) -> Callable[ - [pipeline_service.CreatePipelineJobRequest], - Awaitable[gca_pipeline_job.PipelineJob]]: - r"""Return a callable for the create pipeline job method over gRPC. - - Creates a PipelineJob. A PipelineJob will run - immediately when created. - - Returns: - Callable[[~.CreatePipelineJobRequest], - Awaitable[~.PipelineJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_pipeline_job' not in self._stubs: - self._stubs['create_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/CreatePipelineJob', - request_serializer=pipeline_service.CreatePipelineJobRequest.serialize, - response_deserializer=gca_pipeline_job.PipelineJob.deserialize, - ) - return self._stubs['create_pipeline_job'] - - @property - def get_pipeline_job(self) -> Callable[ - [pipeline_service.GetPipelineJobRequest], - Awaitable[pipeline_job.PipelineJob]]: - r"""Return a callable for the get pipeline job method over gRPC. - - Gets a PipelineJob. 
- - Returns: - Callable[[~.GetPipelineJobRequest], - Awaitable[~.PipelineJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_pipeline_job' not in self._stubs: - self._stubs['get_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/GetPipelineJob', - request_serializer=pipeline_service.GetPipelineJobRequest.serialize, - response_deserializer=pipeline_job.PipelineJob.deserialize, - ) - return self._stubs['get_pipeline_job'] - - @property - def list_pipeline_jobs(self) -> Callable[ - [pipeline_service.ListPipelineJobsRequest], - Awaitable[pipeline_service.ListPipelineJobsResponse]]: - r"""Return a callable for the list pipeline jobs method over gRPC. - - Lists PipelineJobs in a Location. - - Returns: - Callable[[~.ListPipelineJobsRequest], - Awaitable[~.ListPipelineJobsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_pipeline_jobs' not in self._stubs: - self._stubs['list_pipeline_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/ListPipelineJobs', - request_serializer=pipeline_service.ListPipelineJobsRequest.serialize, - response_deserializer=pipeline_service.ListPipelineJobsResponse.deserialize, - ) - return self._stubs['list_pipeline_jobs'] - - @property - def delete_pipeline_job(self) -> Callable[ - [pipeline_service.DeletePipelineJobRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete pipeline job method over gRPC. - - Deletes a PipelineJob. - - Returns: - Callable[[~.DeletePipelineJobRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_pipeline_job' not in self._stubs: - self._stubs['delete_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/DeletePipelineJob', - request_serializer=pipeline_service.DeletePipelineJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_pipeline_job'] - - @property - def cancel_pipeline_job(self) -> Callable[ - [pipeline_service.CancelPipelineJobRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the cancel pipeline job method over gRPC. - - Cancels a PipelineJob. Starts asynchronous cancellation on the - PipelineJob. The server makes a best effort to cancel the - pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. 
On - successful cancellation, the PipelineJob is not deleted; instead - it becomes a pipeline with a - [PipelineJob.error][google.cloud.aiplatform.v1beta1.PipelineJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [PipelineJob.state][google.cloud.aiplatform.v1beta1.PipelineJob.state] - is set to ``CANCELLED``. - - Returns: - Callable[[~.CancelPipelineJobRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_pipeline_job' not in self._stubs: - self._stubs['cancel_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/CancelPipelineJob', - request_serializer=pipeline_service.CancelPipelineJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_pipeline_job'] - - -__all__ = ( - 'PipelineServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/__init__.py deleted file mode 100644 index 13c5d11c66..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import PredictionServiceClient -from .async_client import PredictionServiceAsyncClient - -__all__ = ( - 'PredictionServiceClient', - 'PredictionServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py deleted file mode 100644 index 37e645c1d0..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py +++ /dev/null @@ -1,404 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
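The async prediction client defined below mirrors the sync client's constructors. A short sketch of the two common construction paths (the key file name is a placeholder):

```python
# Sketch of the two construction paths the deleted async client exposes.
from google.cloud import aiplatform_v1beta1

# Application Default Credentials.
client = aiplatform_v1beta1.PredictionServiceAsyncClient()

# Or explicit service-account credentials (placeholder path).
client = aiplatform_v1beta1.PredictionServiceAsyncClient.from_service_account_file(
    "service-account.json")
```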
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import explanation -from google.cloud.aiplatform_v1beta1.types import prediction_service -from google.protobuf import struct_pb2 # type: ignore -from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport -from .client import PredictionServiceClient - - -class PredictionServiceAsyncClient: - """A service for online predictions and explanations.""" - - _client: PredictionServiceClient - - DEFAULT_ENDPOINT = PredictionServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = PredictionServiceClient.DEFAULT_MTLS_ENDPOINT - - endpoint_path = staticmethod(PredictionServiceClient.endpoint_path) - parse_endpoint_path = staticmethod(PredictionServiceClient.parse_endpoint_path) - common_billing_account_path = staticmethod(PredictionServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(PredictionServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(PredictionServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(PredictionServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(PredictionServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(PredictionServiceClient.parse_common_organization_path) - common_project_path = staticmethod(PredictionServiceClient.common_project_path) - parse_common_project_path = staticmethod(PredictionServiceClient.parse_common_project_path) - common_location_path = staticmethod(PredictionServiceClient.common_location_path) - parse_common_location_path = staticmethod(PredictionServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PredictionServiceAsyncClient: The constructed client. - """ - return PredictionServiceClient.from_service_account_info.__func__(PredictionServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PredictionServiceAsyncClient: The constructed client. 
- """ - return PredictionServiceClient.from_service_account_file.__func__(PredictionServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> PredictionServiceTransport: - """Returns the transport used by the client instance. - - Returns: - PredictionServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(PredictionServiceClient).get_transport_class, type(PredictionServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, PredictionServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the prediction service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.PredictionServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = PredictionServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def predict(self, - request: prediction_service.PredictRequest = None, - *, - endpoint: str = None, - instances: Sequence[struct_pb2.Value] = None, - parameters: struct_pb2.Value = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> prediction_service.PredictResponse: - r"""Perform an online prediction. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.PredictRequest`): - The request object. Request message for - [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. - endpoint (:class:`str`): - Required. The name of the Endpoint requested to serve - the prediction. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- instances (:class:`Sequence[google.protobuf.struct_pb2.Value]`): - Required. The instances that are the input to the - prediction call. A DeployedModel may have an upper limit - on the number of instances it supports per request, and - when it is exceeded the prediction call errors in case - of AutoML Models, or, in case of customer created - Models, the behaviour is as documented by that Model. - The schema of any single instance may be specified via - Endpoint's DeployedModels' - [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. - - This corresponds to the ``instances`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - parameters (:class:`google.protobuf.struct_pb2.Value`): - The parameters that govern the prediction. The schema of - the parameters may be specified via Endpoint's - DeployedModels' [Model's - ][google.cloud.aiplatform.v1beta1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. - - This corresponds to the ``parameters`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.PredictResponse: - Response message for - [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, instances, parameters]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = prediction_service.PredictRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if parameters is not None: - request.parameters = parameters - if instances: - request.instances.extend(instances) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.predict, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def explain(self, - request: prediction_service.ExplainRequest = None, - *, - endpoint: str = None, - instances: Sequence[struct_pb2.Value] = None, - parameters: struct_pb2.Value = None, - deployed_model_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> prediction_service.ExplainResponse: - r"""Perform an online explanation. - - If - [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] - is specified, the corresponding DeployModel must have - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] - populated. If - [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] - is not specified, all DeployedModels must have - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] - populated. Only deployed AutoML tabular Models have - explanation_spec. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ExplainRequest`): - The request object. Request message for - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. - endpoint (:class:`str`): - Required. The name of the Endpoint requested to serve - the explanation. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - instances (:class:`Sequence[google.protobuf.struct_pb2.Value]`): - Required. The instances that are the input to the - explanation call. A DeployedModel may have an upper - limit on the number of instances it supports per - request, and when it is exceeded the explanation call - errors in case of AutoML Models, or, in case of customer - created Models, the behaviour is as documented by that - Model. The schema of any single instance may be - specified via Endpoint's DeployedModels' - [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. - - This corresponds to the ``instances`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - parameters (:class:`google.protobuf.struct_pb2.Value`): - The parameters that govern the prediction. The schema of - the parameters may be specified via Endpoint's - DeployedModels' [Model's - ][google.cloud.aiplatform.v1beta1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. - - This corresponds to the ``parameters`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_model_id (:class:`str`): - If specified, this ExplainRequest will be served by the - chosen DeployedModel, overriding - [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split]. - - This corresponds to the ``deployed_model_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.types.ExplainResponse: - Response message for - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, instances, parameters, deployed_model_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = prediction_service.ExplainRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if parameters is not None: - request.parameters = parameters - if deployed_model_id is not None: - request.deployed_model_id = deployed_model_id - if instances: - request.instances.extend(instances) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.explain, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "PredictionServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py deleted file mode 100644 index d71300114e..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py +++ /dev/null @@ -1,591 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
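For reference, a minimal usage sketch of the flattened ``predict`` surface on the async client above. The project, location, and endpoint ID are placeholder values, Application Default Credentials are assumed to be available, and the snippet is an illustration rather than part of the generated patch:

    import asyncio

    from google.cloud.aiplatform_v1beta1 import PredictionServiceAsyncClient
    from google.protobuf import json_format, struct_pb2

    async def main() -> None:
        client = PredictionServiceAsyncClient()
        endpoint = client.endpoint_path(
            project="my-project", location="us-central1", endpoint="1234567890"
        )
        # Each instance is a google.protobuf.Value; ParseDict converts a plain dict.
        instance = json_format.ParseDict({"feature": 1.0}, struct_pb2.Value())
        response = await client.predict(endpoint=endpoint, instances=[instance])
        print(response.predictions)

    asyncio.run(main())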
-#
-from collections import OrderedDict
-from distutils import util
-import os
-import re
-from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
-import pkg_resources
-
-from google.api_core import client_options as client_options_lib  # type: ignore
-from google.api_core import exceptions as core_exceptions  # type: ignore
-from google.api_core import gapic_v1  # type: ignore
-from google.api_core import retry as retries  # type: ignore
-from google.auth import credentials as ga_credentials  # type: ignore
-from google.auth.transport import mtls  # type: ignore
-from google.auth.transport.grpc import SslCredentials  # type: ignore
-from google.auth.exceptions import MutualTLSChannelError  # type: ignore
-from google.oauth2 import service_account  # type: ignore
-
-from google.cloud.aiplatform_v1beta1.types import explanation
-from google.cloud.aiplatform_v1beta1.types import prediction_service
-from google.protobuf import struct_pb2  # type: ignore
-from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO
-from .transports.grpc import PredictionServiceGrpcTransport
-from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport
-
-
-class PredictionServiceClientMeta(type):
-    """Metaclass for the PredictionService client.
-
-    This provides class-level methods for building and retrieving
-    support objects (e.g. transport) without polluting the client instance
-    objects.
-    """
-    _transport_registry = OrderedDict()  # type: Dict[str, Type[PredictionServiceTransport]]
-    _transport_registry["grpc"] = PredictionServiceGrpcTransport
-    _transport_registry["grpc_asyncio"] = PredictionServiceGrpcAsyncIOTransport
-
-    def get_transport_class(cls,
-            label: str = None,
-        ) -> Type[PredictionServiceTransport]:
-        """Returns an appropriate transport class.
-
-        Args:
-            label: The name of the desired transport. If none is
-                provided, then the first transport in the registry is used.
-
-        Returns:
-            The transport class to use.
-        """
-        # If a specific transport is requested, return that one.
-        if label:
-            return cls._transport_registry[label]
-
-        # No transport is requested; return the default (that is, the first one
-        # in the dictionary).
-        return next(iter(cls._transport_registry.values()))
-
-
-class PredictionServiceClient(metaclass=PredictionServiceClientMeta):
-    """A service for online predictions and explanations."""
-
-    @staticmethod
-    def _get_default_mtls_endpoint(api_endpoint):
-        """Converts api endpoint to mTLS endpoint.
-
-        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
-        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
-        Args:
-            api_endpoint (Optional[str]): the api endpoint to convert.
-        Returns:
-            str: converted mTLS api endpoint.
-        """
-        if not api_endpoint:
-            return api_endpoint
-
-        mtls_endpoint_re = re.compile(
-            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
-        )
-
-        m = mtls_endpoint_re.match(api_endpoint)
-        name, mtls, sandbox, googledomain = m.groups()
-        if mtls or not googledomain:
-            return api_endpoint
-
-        if sandbox:
-            return api_endpoint.replace(
-                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
-            )
-
-        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
-
-    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
-    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
-        DEFAULT_ENDPOINT
-    )
-
-    @classmethod
-    def from_service_account_info(cls, info: dict, *args, **kwargs):
-        """Creates an instance of this client using the provided credentials
-        info.
-
-        Args:
-            info (dict): The service account private key info.
-            args: Additional arguments to pass to the constructor.
-            kwargs: Additional arguments to pass to the constructor.
-
-        Returns:
-            PredictionServiceClient: The constructed client.
-        """
-        credentials = service_account.Credentials.from_service_account_info(info)
-        kwargs["credentials"] = credentials
-        return cls(*args, **kwargs)
-
-    @classmethod
-    def from_service_account_file(cls, filename: str, *args, **kwargs):
-        """Creates an instance of this client using the provided credentials
-        file.
-
-        Args:
-            filename (str): The path to the service account private key json
-                file.
-            args: Additional arguments to pass to the constructor.
-            kwargs: Additional arguments to pass to the constructor.
-
-        Returns:
-            PredictionServiceClient: The constructed client.
-        """
-        credentials = service_account.Credentials.from_service_account_file(
-            filename)
-        kwargs["credentials"] = credentials
-        return cls(*args, **kwargs)
-
-    from_service_account_json = from_service_account_file
-
-    @property
-    def transport(self) -> PredictionServiceTransport:
-        """Returns the transport used by the client instance.
-
-        Returns:
-            PredictionServiceTransport: The transport used by the client
-                instance.
-        """
-        return self._transport
-
-    @staticmethod
-    def endpoint_path(project: str,location: str,endpoint: str,) -> str:
-        """Returns a fully-qualified endpoint string."""
-        return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, )
-
-    @staticmethod
-    def parse_endpoint_path(path: str) -> Dict[str,str]:
-        """Parses an endpoint path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/endpoints/(?P<endpoint>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_billing_account_path(billing_account: str, ) -> str:
-        """Returns a fully-qualified billing_account string."""
-        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
-
-    @staticmethod
-    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
-        """Parse a billing_account path into its component segments."""
-        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_folder_path(folder: str, ) -> str:
-        """Returns a fully-qualified folder string."""
-        return "folders/{folder}".format(folder=folder, )
-
-    @staticmethod
-    def parse_common_folder_path(path: str) -> Dict[str,str]:
-        """Parse a folder path into its component segments."""
-        m = re.match(r"^folders/(?P<folder>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_organization_path(organization: str, ) -> str:
-        """Returns a fully-qualified organization string."""
-        return "organizations/{organization}".format(organization=organization, )
-
-    @staticmethod
-    def parse_common_organization_path(path: str) -> Dict[str,str]:
-        """Parse an organization path into its component segments."""
-        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_project_path(project: str, ) -> str:
-        """Returns a fully-qualified project string."""
-        return "projects/{project}".format(project=project, )
-
-    @staticmethod
-    def parse_common_project_path(path: str) -> Dict[str,str]:
-        """Parse a project path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_location_path(project: str,
location: str, ) -> str:
-        """Returns a fully-qualified location string."""
-        return "projects/{project}/locations/{location}".format(project=project, location=location, )
-
-    @staticmethod
-    def parse_common_location_path(path: str) -> Dict[str,str]:
-        """Parse a location path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    def __init__(self, *,
-            credentials: Optional[ga_credentials.Credentials] = None,
-            transport: Union[str, PredictionServiceTransport, None] = None,
-            client_options: Optional[client_options_lib.ClientOptions] = None,
-            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
-            ) -> None:
-        """Instantiates the prediction service client.
-
-        Args:
-            credentials (Optional[google.auth.credentials.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify the application to the service; if none
-                are specified, the client will attempt to ascertain the
-                credentials from the environment.
-            transport (Union[str, PredictionServiceTransport]): The
-                transport to use. If set to None, a transport is chosen
-                automatically.
-            client_options (google.api_core.client_options.ClientOptions): Custom options for the
-                client. It won't take effect if a ``transport`` instance is provided.
-                (1) The ``api_endpoint`` property can be used to override the
-                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
-                environment variable can also be used to override the endpoint:
-                "always" (always use the default mTLS endpoint), "never" (always
-                use the default regular endpoint) and "auto" (auto switch to the
-                default mTLS endpoint if client certificate is present, this is
-                the default value). However, the ``api_endpoint`` property takes
-                precedence if provided.
-                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
-                is "true", then the ``client_cert_source`` property can be used
-                to provide client certificate for mutual TLS transport. If
-                not provided, the default SSL client certificate will be used if
-                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
-                set, no client certificate will be used.
-            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
-                The client info used to send a user-agent string along with
-                API requests. If ``None``, then default info will be used.
-                Generally, you only need to set this if you're developing
-                your own client library.
-
-        Raises:
-            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
-                creation failed for any reason.
-        """
-        if isinstance(client_options, dict):
-            client_options = client_options_lib.from_dict(client_options)
-        if client_options is None:
-            client_options = client_options_lib.ClientOptions()
-
-        # Create SSL credentials for mutual TLS if needed.
-        use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
-
-        client_cert_source_func = None
-        is_mtls = False
-        if use_client_cert:
-            if client_options.client_cert_source:
-                is_mtls = True
-                client_cert_source_func = client_options.client_cert_source
-            else:
-                is_mtls = mtls.has_default_client_cert_source()
-                if is_mtls:
-                    client_cert_source_func = mtls.default_client_cert_source()
-                else:
-                    client_cert_source_func = None
-
-        # Figure out which api endpoint to use.
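        # Illustration only (annotation, not generated code): the endpoint
        # selection below can be driven from the environment alone. With the
        # hypothetical settings
        #
        #   GOOGLE_API_USE_CLIENT_CERTIFICATE=true   # allow client certificates
        #   GOOGLE_API_USE_MTLS_ENDPOINT=always      # force the mTLS endpoint
        #
        # PredictionServiceClient() targets DEFAULT_MTLS_ENDPOINT
        # ("aiplatform.mtls.googleapis.com") even when no api_endpoint override
        # is supplied.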
- if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, PredictionServiceTransport): - # transport is a PredictionServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - ) - - def predict(self, - request: prediction_service.PredictRequest = None, - *, - endpoint: str = None, - instances: Sequence[struct_pb2.Value] = None, - parameters: struct_pb2.Value = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> prediction_service.PredictResponse: - r"""Perform an online prediction. - - Args: - request (google.cloud.aiplatform_v1beta1.types.PredictRequest): - The request object. Request message for - [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. - endpoint (str): - Required. The name of the Endpoint requested to serve - the prediction. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - instances (Sequence[google.protobuf.struct_pb2.Value]): - Required. The instances that are the input to the - prediction call. A DeployedModel may have an upper limit - on the number of instances it supports per request, and - when it is exceeded the prediction call errors in case - of AutoML Models, or, in case of customer created - Models, the behaviour is as documented by that Model. - The schema of any single instance may be specified via - Endpoint's DeployedModels' - [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. - - This corresponds to the ``instances`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - parameters (google.protobuf.struct_pb2.Value): - The parameters that govern the prediction. 
The schema of - the parameters may be specified via Endpoint's - DeployedModels' [Model's - ][google.cloud.aiplatform.v1beta1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. - - This corresponds to the ``parameters`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.PredictResponse: - Response message for - [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, instances, parameters]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a prediction_service.PredictRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, prediction_service.PredictRequest): - request = prediction_service.PredictRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if instances is not None: - request.instances = instances - if parameters is not None: - request.parameters = parameters - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.predict] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def explain(self, - request: prediction_service.ExplainRequest = None, - *, - endpoint: str = None, - instances: Sequence[struct_pb2.Value] = None, - parameters: struct_pb2.Value = None, - deployed_model_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> prediction_service.ExplainResponse: - r"""Perform an online explanation. - - If - [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] - is specified, the corresponding DeployModel must have - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] - populated. If - [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] - is not specified, all DeployedModels must have - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] - populated. Only deployed AutoML tabular Models have - explanation_spec. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ExplainRequest): - The request object. 
Request message for - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. - endpoint (str): - Required. The name of the Endpoint requested to serve - the explanation. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - instances (Sequence[google.protobuf.struct_pb2.Value]): - Required. The instances that are the input to the - explanation call. A DeployedModel may have an upper - limit on the number of instances it supports per - request, and when it is exceeded the explanation call - errors in case of AutoML Models, or, in case of customer - created Models, the behaviour is as documented by that - Model. The schema of any single instance may be - specified via Endpoint's DeployedModels' - [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. - - This corresponds to the ``instances`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - parameters (google.protobuf.struct_pb2.Value): - The parameters that govern the prediction. The schema of - the parameters may be specified via Endpoint's - DeployedModels' [Model's - ][google.cloud.aiplatform.v1beta1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. - - This corresponds to the ``parameters`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_model_id (str): - If specified, this ExplainRequest will be served by the - chosen DeployedModel, overriding - [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split]. - - This corresponds to the ``deployed_model_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.ExplainResponse: - Response message for - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, instances, parameters, deployed_model_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a prediction_service.ExplainRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, prediction_service.ExplainRequest): - request = prediction_service.ExplainRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if endpoint is not None: - request.endpoint = endpoint - if instances is not None: - request.instances = instances - if parameters is not None: - request.parameters = parameters - if deployed_model_id is not None: - request.deployed_model_id = deployed_model_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.explain] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "PredictionServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/__init__.py deleted file mode 100644 index d747de2ce9..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import PredictionServiceTransport -from .grpc import PredictionServiceGrpcTransport -from .grpc_asyncio import PredictionServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[PredictionServiceTransport]] -_transport_registry['grpc'] = PredictionServiceGrpcTransport -_transport_registry['grpc_asyncio'] = PredictionServiceGrpcAsyncIOTransport - -__all__ = ( - 'PredictionServiceTransport', - 'PredictionServiceGrpcTransport', - 'PredictionServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py deleted file mode 100644 index 285f122090..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py +++ /dev/null @@ -1,182 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import prediction_service - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class PredictionServiceTransport(abc.ABC): - """Abstract transport class for PredictionService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. 
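        # Illustrative values (annotation, not generated code):
        #   "aiplatform.googleapis.com"  ->  "aiplatform.googleapis.com:443"
        #   "localhost:8080"             ->  "localhost:8080"  (unchanged)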
-        if ':' not in host:
-            host += ':443'
-        self._host = host
-
-        scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
-
-        # Save the scopes.
-        self._scopes = scopes
-
-        # If no credentials are provided, then determine the appropriate
-        # defaults.
-        if credentials and credentials_file:
-            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
-
-        if credentials_file is not None:
-            credentials, _ = google.auth.load_credentials_from_file(
-                credentials_file,
-                **scopes_kwargs,
-                quota_project_id=quota_project_id
-            )
-
-        elif credentials is None:
-            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
-
-        # If the credentials are service account credentials, then always try to use self signed JWT.
-        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
-            credentials = credentials.with_always_use_jwt_access(True)
-
-        # Save the credentials.
-        self._credentials = credentials
-
-    # TODO(busunkim): This method is in the base transport
-    # to avoid duplicating code across the transport classes. These functions
-    # should be deleted once the minimum required version of google-auth is increased.
-
-    # TODO: Remove this function once google-auth >= 1.25.0 is required
-    @classmethod
-    def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
-        """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
-
-        scopes_kwargs = {}
-
-        if _GOOGLE_AUTH_VERSION and (
-            packaging.version.parse(_GOOGLE_AUTH_VERSION)
-            >= packaging.version.parse("1.25.0")
-        ):
-            scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
-        else:
-            scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
-
-        return scopes_kwargs
-
-    def _prep_wrapped_messages(self, client_info):
-        # Precompute the wrapped methods.
-        self._wrapped_methods = {
-            self.predict: gapic_v1.method.wrap_method(
-                self.predict,
-                default_timeout=5.0,
-                client_info=client_info,
-            ),
-            self.explain: gapic_v1.method.wrap_method(
-                self.explain,
-                default_timeout=5.0,
-                client_info=client_info,
-            ),
-        }
-
-    @property
-    def predict(self) -> Callable[
-            [prediction_service.PredictRequest],
-            Union[
-                prediction_service.PredictResponse,
-                Awaitable[prediction_service.PredictResponse]
-            ]]:
-        raise NotImplementedError()
-
-    @property
-    def explain(self) -> Callable[
-            [prediction_service.ExplainRequest],
-            Union[
-                prediction_service.ExplainResponse,
-                Awaitable[prediction_service.ExplainResponse]
-            ]]:
-        raise NotImplementedError()
-
-
-__all__ = (
-    'PredictionServiceTransport',
-)
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py
deleted file mode 100644
index 1e95e16b24..0000000000
--- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py
+++ /dev/null
@@ -1,289 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import warnings
-from typing import Callable, Dict, Optional, Sequence, Tuple, Union
-
-from google.api_core import grpc_helpers  # type: ignore
-from google.api_core import gapic_v1  # type: ignore
-import google.auth  # type: ignore
-from google.auth import credentials as ga_credentials  # type: ignore
-from google.auth.transport.grpc import SslCredentials  # type: ignore
-
-import grpc  # type: ignore
-
-from google.cloud.aiplatform_v1beta1.types import prediction_service
-from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO
-
-
-class PredictionServiceGrpcTransport(PredictionServiceTransport):
-    """gRPC backend transport for PredictionService.
-
-    A service for online predictions and explanations.
-
-    This class defines the same methods as the primary client, so the
-    primary client can load the underlying transport implementation
-    and call it.
-
-    It sends protocol buffers over the wire using gRPC (which is built on
-    top of HTTP/2); the ``grpcio`` package must be installed.
-    """
-    _stubs: Dict[str, Callable]
-
-    def __init__(self, *,
-            host: str = 'aiplatform.googleapis.com',
-            credentials: ga_credentials.Credentials = None,
-            credentials_file: str = None,
-            scopes: Sequence[str] = None,
-            channel: grpc.Channel = None,
-            api_mtls_endpoint: str = None,
-            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
-            ssl_channel_credentials: grpc.ChannelCredentials = None,
-            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
-            quota_project_id: Optional[str] = None,
-            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
-            always_use_jwt_access: Optional[bool] = False,
-            ) -> None:
-        """Instantiate the transport.
-
-        Args:
-            host (Optional[str]):
-                 The hostname to connect to.
-            credentials (Optional[google.auth.credentials.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify the application to the service; if none
-                are specified, the client will attempt to ascertain the
-                credentials from the environment.
-                This argument is ignored if ``channel`` is provided.
-            credentials_file (Optional[str]): A file with credentials that can
-                be loaded with :func:`google.auth.load_credentials_from_file`.
-                This argument is ignored if ``channel`` is provided.
-            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
-                ignored if ``channel`` is provided.
-            channel (Optional[grpc.Channel]): A ``Channel`` instance through
-                which to make calls.
-            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
-                If provided, it overrides the ``host`` argument and tries to create
-                a mutual TLS channel with client SSL credentials from
-                ``client_cert_source`` or application default SSL credentials.
-            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
-                Deprecated. A callback to provide client SSL certificate bytes and
-                private key bytes, both in PEM format. It is ignored if
-                ``api_mtls_endpoint`` is None.
-            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
-                for the grpc channel. It is ignored if ``channel`` is provided.
- client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. 
If
-                none are specified, the client will attempt to ascertain
-                the credentials from the environment.
-            credentials_file (Optional[str]): A file with credentials that can
-                be loaded with :func:`google.auth.load_credentials_from_file`.
-                This argument is mutually exclusive with credentials.
-            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
-                service. These are only used when credentials are not specified and
-                are passed to :func:`google.auth.default`.
-            quota_project_id (Optional[str]): An optional project to use for billing
-                and quota.
-            kwargs (Optional[dict]): Keyword arguments, which are passed to the
-                channel creation.
-        Returns:
-            grpc.Channel: A gRPC channel object.
-
-        Raises:
-            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
-                and ``credentials_file`` are passed.
-        """
-
-        return grpc_helpers.create_channel(
-            host,
-            credentials=credentials,
-            credentials_file=credentials_file,
-            quota_project_id=quota_project_id,
-            default_scopes=cls.AUTH_SCOPES,
-            scopes=scopes,
-            default_host=cls.DEFAULT_HOST,
-            **kwargs
-        )
-
-    @property
-    def grpc_channel(self) -> grpc.Channel:
-        """Return the channel designed to connect to this service.
-        """
-        return self._grpc_channel
-
-    @property
-    def predict(self) -> Callable[
-            [prediction_service.PredictRequest],
-            prediction_service.PredictResponse]:
-        r"""Return a callable for the predict method over gRPC.
-
-        Perform an online prediction.
-
-        Returns:
-            Callable[[~.PredictRequest],
-                    ~.PredictResponse]:
-                A function that, when called, will call the underlying RPC
-                on the server.
-        """
-        # Generate a "stub function" on-the-fly which will actually make
-        # the request.
-        # gRPC handles serialization and deserialization, so we just need
-        # to pass in the functions for each.
-        if 'predict' not in self._stubs:
-            self._stubs['predict'] = self.grpc_channel.unary_unary(
-                '/google.cloud.aiplatform.v1beta1.PredictionService/Predict',
-                request_serializer=prediction_service.PredictRequest.serialize,
-                response_deserializer=prediction_service.PredictResponse.deserialize,
-            )
-        return self._stubs['predict']
-
-    @property
-    def explain(self) -> Callable[
-            [prediction_service.ExplainRequest],
-            prediction_service.ExplainResponse]:
-        r"""Return a callable for the explain method over gRPC.
-
-        Perform an online explanation.
-
-        If
-        [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id]
-        is specified, the corresponding DeployedModel must have
-        [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec]
-        populated. If
-        [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id]
-        is not specified, all DeployedModels must have
-        [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec]
-        populated. Only deployed AutoML tabular Models have
-        explanation_spec.
-
-        Returns:
-            Callable[[~.ExplainRequest],
-                    ~.ExplainResponse]:
-                A function that, when called, will call the underlying RPC
-                on the server.
-        """
-        # Generate a "stub function" on-the-fly which will actually make
-        # the request.
-        # gRPC handles serialization and deserialization, so we just need
-        # to pass in the functions for each.
- if 'explain' not in self._stubs: - self._stubs['explain'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PredictionService/Explain', - request_serializer=prediction_service.ExplainRequest.serialize, - response_deserializer=prediction_service.ExplainResponse.deserialize, - ) - return self._stubs['explain'] - - -__all__ = ( - 'PredictionServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py deleted file mode 100644 index 2c43614249..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,293 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1beta1.types import prediction_service -from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import PredictionServiceGrpcTransport - - -class PredictionServiceGrpcAsyncIOTransport(PredictionServiceTransport): - """gRPC AsyncIO backend transport for PredictionService. - - A service for online predictions and explanations. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. 
-            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
-                service. These are only used when credentials are not specified and
-                are passed to :func:`google.auth.default`.
-            quota_project_id (Optional[str]): An optional project to use for billing
-                and quota.
-            kwargs (Optional[dict]): Keyword arguments, which are passed to the
-                channel creation.
-        Returns:
-            aio.Channel: A gRPC AsyncIO channel object.
-        """
-
-        return grpc_helpers_async.create_channel(
-            host,
-            credentials=credentials,
-            credentials_file=credentials_file,
-            quota_project_id=quota_project_id,
-            default_scopes=cls.AUTH_SCOPES,
-            scopes=scopes,
-            default_host=cls.DEFAULT_HOST,
-            **kwargs
-        )
-
-    def __init__(self, *,
-            host: str = 'aiplatform.googleapis.com',
-            credentials: ga_credentials.Credentials = None,
-            credentials_file: Optional[str] = None,
-            scopes: Optional[Sequence[str]] = None,
-            channel: aio.Channel = None,
-            api_mtls_endpoint: str = None,
-            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
-            ssl_channel_credentials: grpc.ChannelCredentials = None,
-            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
-            quota_project_id=None,
-            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
-            always_use_jwt_access: Optional[bool] = False,
-            ) -> None:
-        """Instantiate the transport.
-
-        Args:
-            host (Optional[str]):
-                 The hostname to connect to.
-            credentials (Optional[google.auth.credentials.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify the application to the service; if none
-                are specified, the client will attempt to ascertain the
-                credentials from the environment.
-                This argument is ignored if ``channel`` is provided.
-            credentials_file (Optional[str]): A file with credentials that can
-                be loaded with :func:`google.auth.load_credentials_from_file`.
-                This argument is ignored if ``channel`` is provided.
-            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
-                service. These are only used when credentials are not specified and
-                are passed to :func:`google.auth.default`.
-            channel (Optional[aio.Channel]): A ``Channel`` instance through
-                which to make calls.
-            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
-                If provided, it overrides the ``host`` argument and tries to create
-                a mutual TLS channel with client SSL credentials from
-                ``client_cert_source`` or application default SSL credentials.
-            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
-                Deprecated. A callback to provide client SSL certificate bytes and
-                private key bytes, both in PEM format. It is ignored if
-                ``api_mtls_endpoint`` is None.
-            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
-                for the grpc channel. It is ignored if ``channel`` is provided.
-            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
-                A callback to provide client certificate bytes and private key bytes,
-                both in PEM format. It is used to configure mutual TLS channel. It is
-                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
-            quota_project_id (Optional[str]): An optional project to use for billing
-                and quota.
-            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
-                The client info used to send a user-agent string along with
-                API requests. If ``None``, then default info will be used.
-                Generally, you only need to set this if you're developing
-                your own client library.
- always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def predict(self) -> Callable[ - [prediction_service.PredictRequest], - Awaitable[prediction_service.PredictResponse]]: - r"""Return a callable for the predict method over gRPC. - - Perform an online prediction. - - Returns: - Callable[[~.PredictRequest], - Awaitable[~.PredictResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
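For illustration, the channel plumbing above can be exercised directly, although the client normally builds the transport itself. This sketch assumes Application Default Credentials are available; without them the base transport's credential lookup will fail.

import asyncio

from google.cloud.aiplatform_v1beta1.services.prediction_service.transports.grpc_asyncio import (
    PredictionServiceGrpcAsyncIOTransport,
)

async def main():
    # With no explicit channel, the constructor falls back to create_channel(),
    # configured above with unlimited send/receive message sizes.
    transport = PredictionServiceGrpcAsyncIOTransport(host="aiplatform.googleapis.com")
    channel = transport.grpc_channel
    assert channel is transport.grpc_channel  # the property caches the channel
    await channel.close()

asyncio.run(main())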
- if 'predict' not in self._stubs: - self._stubs['predict'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PredictionService/Predict', - request_serializer=prediction_service.PredictRequest.serialize, - response_deserializer=prediction_service.PredictResponse.deserialize, - ) - return self._stubs['predict'] - - @property - def explain(self) -> Callable[ - [prediction_service.ExplainRequest], - Awaitable[prediction_service.ExplainResponse]]: - r"""Return a callable for the explain method over gRPC. - - Perform an online explanation. - - If - [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] - is specified, the corresponding DeployModel must have - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] - populated. If - [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] - is not specified, all DeployedModels must have - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] - populated. Only deployed AutoML tabular Models have - explanation_spec. - - Returns: - Callable[[~.ExplainRequest], - Awaitable[~.ExplainResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'explain' not in self._stubs: - self._stubs['explain'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PredictionService/Explain', - request_serializer=prediction_service.ExplainRequest.serialize, - response_deserializer=prediction_service.ExplainResponse.deserialize, - ) - return self._stubs['explain'] - - -__all__ = ( - 'PredictionServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/__init__.py deleted file mode 100644 index 04af59e5fa..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
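The predict and explain properties above share one pattern: build each stub on first access, then serve it from the _stubs cache. A minimal, self-contained sketch of that memoization, where make_stub is a hypothetical stand-in for grpc_channel.unary_unary:

from typing import Callable, Dict

class LazyStubs:
    """Create each RPC stub once, on first use, then reuse it."""

    def __init__(self, make_stub: Callable[[str], Callable]):
        self._make_stub = make_stub
        self._stubs: Dict[str, Callable] = {}

    def get(self, method_path: str) -> Callable:
        if method_path not in self._stubs:
            self._stubs[method_path] = self._make_stub(method_path)
        return self._stubs[method_path]

stubs = LazyStubs(lambda path: (lambda request: f"called {path}"))
predict = stubs.get("/google.cloud.aiplatform.v1beta1.PredictionService/Predict")
assert predict is stubs.get("/google.cloud.aiplatform.v1beta1.PredictionService/Predict")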
-# -from .client import SpecialistPoolServiceClient -from .async_client import SpecialistPoolServiceAsyncClient - -__all__ = ( - 'SpecialistPoolServiceClient', - 'SpecialistPoolServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py deleted file mode 100644 index 2a8321c773..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py +++ /dev/null @@ -1,650 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import pagers -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.cloud.aiplatform_v1beta1.types import specialist_pool -from google.cloud.aiplatform_v1beta1.types import specialist_pool as gca_specialist_pool -from google.cloud.aiplatform_v1beta1.types import specialist_pool_service -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from .transports.base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport -from .client import SpecialistPoolServiceClient - - -class SpecialistPoolServiceAsyncClient: - """A service for creating and managing Customer SpecialistPools. - When customers start Data Labeling jobs, they can reuse/create - Specialist Pools to bring their own Specialists to label the - data. Customers can add/remove Managers for the Specialist Pool - on Cloud console, then Managers will get email notifications to - manage Specialists and tasks on CrowdCompute console. 
- """ - - _client: SpecialistPoolServiceClient - - DEFAULT_ENDPOINT = SpecialistPoolServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = SpecialistPoolServiceClient.DEFAULT_MTLS_ENDPOINT - - specialist_pool_path = staticmethod(SpecialistPoolServiceClient.specialist_pool_path) - parse_specialist_pool_path = staticmethod(SpecialistPoolServiceClient.parse_specialist_pool_path) - common_billing_account_path = staticmethod(SpecialistPoolServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(SpecialistPoolServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(SpecialistPoolServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(SpecialistPoolServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(SpecialistPoolServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(SpecialistPoolServiceClient.parse_common_organization_path) - common_project_path = staticmethod(SpecialistPoolServiceClient.common_project_path) - parse_common_project_path = staticmethod(SpecialistPoolServiceClient.parse_common_project_path) - common_location_path = staticmethod(SpecialistPoolServiceClient.common_location_path) - parse_common_location_path = staticmethod(SpecialistPoolServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - SpecialistPoolServiceAsyncClient: The constructed client. - """ - return SpecialistPoolServiceClient.from_service_account_info.__func__(SpecialistPoolServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - SpecialistPoolServiceAsyncClient: The constructed client. - """ - return SpecialistPoolServiceClient.from_service_account_file.__func__(SpecialistPoolServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> SpecialistPoolServiceTransport: - """Returns the transport used by the client instance. - - Returns: - SpecialistPoolServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(SpecialistPoolServiceClient).get_transport_class, type(SpecialistPoolServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, SpecialistPoolServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the specialist pool service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.SpecialistPoolServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = SpecialistPoolServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_specialist_pool(self, - request: specialist_pool_service.CreateSpecialistPoolRequest = None, - *, - parent: str = None, - specialist_pool: gca_specialist_pool.SpecialistPool = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a SpecialistPool. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.CreateSpecialistPoolRequest`): - The request object. Request message for - [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool]. - parent (:class:`str`): - Required. The parent Project name for the new - SpecialistPool. The form is - ``projects/{project}/locations/{location}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - specialist_pool (:class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool`): - Required. The SpecialistPool to - create. - - This corresponds to the ``specialist_pool`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data - labeling jobs. It includes a group of specialist - managers who are responsible for managing the - labelers in this pool as well as customers' data - labeling jobs associated with this pool. 
Customers - create specialist pool as well as start data labeling - jobs on Cloud, managers and labelers work with the - jobs using CrowdCompute console. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, specialist_pool]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = specialist_pool_service.CreateSpecialistPoolRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if specialist_pool is not None: - request.specialist_pool = specialist_pool - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_specialist_pool, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_specialist_pool.SpecialistPool, - metadata_type=specialist_pool_service.CreateSpecialistPoolOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_specialist_pool(self, - request: specialist_pool_service.GetSpecialistPoolRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> specialist_pool.SpecialistPool: - r"""Gets a SpecialistPool. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetSpecialistPoolRequest`): - The request object. Request message for - [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool]. - name (:class:`str`): - Required. The name of the SpecialistPool resource. The - form is - ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.SpecialistPool: - SpecialistPool represents customers' - own workforce to work on their data - labeling jobs. It includes a group of - specialist managers who are responsible - for managing the labelers in this pool - as well as customers' data labeling jobs - associated with this pool. - Customers create specialist pool as well - as start data labeling jobs on Cloud, - managers and labelers work with the jobs - using CrowdCompute console. - - """ - # Create or coerce a protobuf request object. 
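From user code, the long-running create call above might look like this sketch; the project, location, and display name are placeholders, and working credentials are assumed:

import asyncio

from google.cloud import aiplatform_v1beta1

async def create_pool():
    client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient()
    operation = await client.create_specialist_pool(
        parent="projects/my-project/locations/us-central1",  # placeholder
        specialist_pool=aiplatform_v1beta1.SpecialistPool(
            display_name="my-pool",  # placeholder
        ),
    )
    # The AsyncOperation resolves to the created SpecialistPool.
    pool = await operation.result()
    print(pool.name)

asyncio.run(create_pool())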
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = specialist_pool_service.GetSpecialistPoolRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_specialist_pool, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_specialist_pools(self, - request: specialist_pool_service.ListSpecialistPoolsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListSpecialistPoolsAsyncPager: - r"""Lists SpecialistPools in a Location. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest`): - The request object. Request message for - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. - parent (:class:`str`): - Required. The name of the SpecialistPool's parent - resource. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.specialist_pool_service.pagers.ListSpecialistPoolsAsyncPager: - Response message for - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = specialist_pool_service.ListSpecialistPoolsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_specialist_pools, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListSpecialistPoolsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_specialist_pool(self, - request: specialist_pool_service.DeleteSpecialistPoolRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a SpecialistPool as well as all Specialists - in the pool. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteSpecialistPoolRequest`): - The request object. Request message for - [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool]. - name (:class:`str`): - Required. The resource name of the SpecialistPool to - delete. Format: - ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = specialist_pool_service.DeleteSpecialistPoolRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_specialist_pool, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
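The async pager returned above fetches follow-up pages transparently; a sketch, with the same placeholder resource names and credentials assumed:

import asyncio

from google.cloud import aiplatform_v1beta1

async def list_pools():
    client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient()
    pager = await client.list_specialist_pools(
        parent="projects/my-project/locations/us-central1",  # placeholder
    )
    async for pool in pager:  # __aiter__ resolves additional pages as needed
        print(pool.name)

asyncio.run(list_pools())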
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def update_specialist_pool(self, - request: specialist_pool_service.UpdateSpecialistPoolRequest = None, - *, - specialist_pool: gca_specialist_pool.SpecialistPool = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Updates a SpecialistPool. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateSpecialistPoolRequest`): - The request object. Request message for - [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool]. - specialist_pool (:class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool`): - Required. The SpecialistPool which - replaces the resource on the server. - - This corresponds to the ``specialist_pool`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The update mask applies to - the resource. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data - labeling jobs. It includes a group of specialist - managers who are responsible for managing the - labelers in this pool as well as customers' data - labeling jobs associated with this pool. Customers - create specialist pool as well as start data labeling - jobs on Cloud, managers and labelers work with the - jobs using CrowdCompute console. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([specialist_pool, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = specialist_pool_service.UpdateSpecialistPoolRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if specialist_pool is not None: - request.specialist_pool = specialist_pool - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_specialist_pool, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("specialist_pool.name", request.specialist_pool.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_specialist_pool.SpecialistPool, - metadata_type=specialist_pool_service.UpdateSpecialistPoolOperationMetadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "SpecialistPoolServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py deleted file mode 100644 index 43ff07d422..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py +++ /dev/null @@ -1,837 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
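A sketch of the update path above: only the fields named in the FieldMask are replaced on the server. The resource name and display name are placeholders:

import asyncio

from google.cloud import aiplatform_v1beta1
from google.protobuf import field_mask_pb2

async def rename_pool():
    client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient()
    pool = aiplatform_v1beta1.SpecialistPool(
        name="projects/my-project/locations/us-central1/specialistPools/123",  # placeholder
        display_name="renamed-pool",  # placeholder
    )
    operation = await client.update_specialist_pool(
        specialist_pool=pool,
        update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
    )
    print(await operation.result())

asyncio.run(rename_pool())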
-# -from collections import OrderedDict -from distutils import util -import os -import re -from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import pagers -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.cloud.aiplatform_v1beta1.types import specialist_pool -from google.cloud.aiplatform_v1beta1.types import specialist_pool as gca_specialist_pool -from google.cloud.aiplatform_v1beta1.types import specialist_pool_service -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from .transports.base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import SpecialistPoolServiceGrpcTransport -from .transports.grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport - - -class SpecialistPoolServiceClientMeta(type): - """Metaclass for the SpecialistPoolService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[SpecialistPoolServiceTransport]] - _transport_registry["grpc"] = SpecialistPoolServiceGrpcTransport - _transport_registry["grpc_asyncio"] = SpecialistPoolServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[SpecialistPoolServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class SpecialistPoolServiceClient(metaclass=SpecialistPoolServiceClientMeta): - """A service for creating and managing Customer SpecialistPools. - When customers start Data Labeling jobs, they can reuse/create - Specialist Pools to bring their own Specialists to label the - data. Customers can add/remove Managers for the Specialist Pool - on Cloud console, then Managers will get email notifications to - manage Specialists and tasks on CrowdCompute console. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. 
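The metaclass above reduces transport selection to a dictionary lookup; because get_transport_class lives on the metaclass, it can be called directly on the client class:

from google.cloud.aiplatform_v1beta1.services.specialist_pool_service.client import (
    SpecialistPoolServiceClient,
)

# An explicit label returns that transport; no label returns the first
# registered entry ("grpc"), which is therefore the default.
grpc_cls = SpecialistPoolServiceClient.get_transport_class("grpc")
assert SpecialistPoolServiceClient.get_transport_class() is grpc_cls
print(SpecialistPoolServiceClient.get_transport_class("grpc_asyncio").__name__)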
-        Returns: -            str: converted mTLS api endpoint. -        """ -        if not api_endpoint: -            return api_endpoint - -        mtls_endpoint_re = re.compile( -            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" -        ) - -        m = mtls_endpoint_re.match(api_endpoint) -        name, mtls, sandbox, googledomain = m.groups() -        if mtls or not googledomain: -            return api_endpoint - -        if sandbox: -            return api_endpoint.replace( -                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" -            ) - -        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - -    DEFAULT_ENDPOINT = "aiplatform.googleapis.com" -    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore -        DEFAULT_ENDPOINT -    ) - -    @classmethod -    def from_service_account_info(cls, info: dict, *args, **kwargs): -        """Creates an instance of this client using the provided credentials -            info. - -        Args: -            info (dict): The service account private key info. -            args: Additional arguments to pass to the constructor. -            kwargs: Additional arguments to pass to the constructor. - -        Returns: -            SpecialistPoolServiceClient: The constructed client. -        """ -        credentials = service_account.Credentials.from_service_account_info(info) -        kwargs["credentials"] = credentials -        return cls(*args, **kwargs) - -    @classmethod -    def from_service_account_file(cls, filename: str, *args, **kwargs): -        """Creates an instance of this client using the provided credentials -            file. - -        Args: -            filename (str): The path to the service account private key json -                file. -            args: Additional arguments to pass to the constructor. -            kwargs: Additional arguments to pass to the constructor. - -        Returns: -            SpecialistPoolServiceClient: The constructed client. -        """ -        credentials = service_account.Credentials.from_service_account_file( -            filename) -        kwargs["credentials"] = credentials -        return cls(*args, **kwargs) - -    from_service_account_json = from_service_account_file - -    @property -    def transport(self) -> SpecialistPoolServiceTransport: -        """Returns the transport used by the client instance. - -        Returns: -            SpecialistPoolServiceTransport: The transport used by the client -                instance.
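The endpoint-rewriting helper above is a pure function, so its behavior can be pinned down with a few assertions (it is a private helper, exercised here for illustration only):

from google.cloud.aiplatform_v1beta1.services.specialist_pool_service.client import (
    SpecialistPoolServiceClient,
)

convert = SpecialistPoolServiceClient._get_default_mtls_endpoint
assert convert("aiplatform.googleapis.com") == "aiplatform.mtls.googleapis.com"
assert convert("aiplatform.sandbox.googleapis.com") == "aiplatform.mtls.sandbox.googleapis.com"
# Already-mTLS endpoints, non-Google hosts, and empty values pass through unchanged.
assert convert("aiplatform.mtls.googleapis.com") == "aiplatform.mtls.googleapis.com"
assert convert("example.com") == "example.com"
assert convert(None) is None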
- """ - return self._transport - - @staticmethod - def specialist_pool_path(project: str,location: str,specialist_pool: str,) -> str: - """Returns a fully-qualified specialist_pool string.""" - return "projects/{project}/locations/{location}/specialistPools/{specialist_pool}".format(project=project, location=location, specialist_pool=specialist_pool, ) - - @staticmethod - def parse_specialist_pool_path(path: str) -> Dict[str,str]: - """Parses a specialist_pool path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/specialistPools/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, SpecialistPoolServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the specialist pool service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, SpecialistPoolServiceTransport]): The - transport to use. 
If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, SpecialistPoolServiceTransport): - # transport is a SpecialistPoolServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - ) - - def create_specialist_pool(self, - request: specialist_pool_service.CreateSpecialistPoolRequest = None, - *, - parent: str = None, - specialist_pool: gca_specialist_pool.SpecialistPool = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates a SpecialistPool. - - Args: - request (google.cloud.aiplatform_v1beta1.types.CreateSpecialistPoolRequest): - The request object. Request message for - [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool]. - parent (str): - Required. The parent Project name for the new - SpecialistPool. The form is - ``projects/{project}/locations/{location}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - specialist_pool (google.cloud.aiplatform_v1beta1.types.SpecialistPool): - Required. The SpecialistPool to - create. - - This corresponds to the ``specialist_pool`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data - labeling jobs. It includes a group of specialist - managers who are responsible for managing the - labelers in this pool as well as customers' data - labeling jobs associated with this pool. Customers - create specialist pool as well as start data labeling - jobs on Cloud, managers and labelers work with the - jobs using CrowdCompute console. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, specialist_pool]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a specialist_pool_service.CreateSpecialistPoolRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, specialist_pool_service.CreateSpecialistPoolRequest): - request = specialist_pool_service.CreateSpecialistPoolRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
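Recapping the endpoint selection implemented above: an explicit api_endpoint always wins; otherwise GOOGLE_API_USE_MTLS_ENDPOINT decides. A simplified, standalone mirror of that decision table (it raises ValueError where the client raises MutualTLSChannelError, and the final assertions assume the environment variable is unset):

import os

DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
DEFAULT_MTLS_ENDPOINT = "aiplatform.mtls.googleapis.com"

def resolve_endpoint(api_endpoint=None, is_mtls=False):
    if api_endpoint is not None:
        return api_endpoint  # an explicit setting takes precedence
    use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
    if use_mtls_env == "never":
        return DEFAULT_ENDPOINT
    if use_mtls_env == "always":
        return DEFAULT_MTLS_ENDPOINT
    if use_mtls_env == "auto":
        return DEFAULT_MTLS_ENDPOINT if is_mtls else DEFAULT_ENDPOINT
    raise ValueError("Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value.")

assert resolve_endpoint() == DEFAULT_ENDPOINT
assert resolve_endpoint(is_mtls=True) == DEFAULT_MTLS_ENDPOINT
assert resolve_endpoint(api_endpoint="custom.example.com") == "custom.example.com"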
- if parent is not None: - request.parent = parent - if specialist_pool is not None: - request.specialist_pool = specialist_pool - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_specialist_pool] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_specialist_pool.SpecialistPool, - metadata_type=specialist_pool_service.CreateSpecialistPoolOperationMetadata, - ) - - # Done; return the response. - return response - - def get_specialist_pool(self, - request: specialist_pool_service.GetSpecialistPoolRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> specialist_pool.SpecialistPool: - r"""Gets a SpecialistPool. - - Args: - request (google.cloud.aiplatform_v1beta1.types.GetSpecialistPoolRequest): - The request object. Request message for - [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool]. - name (str): - Required. The name of the SpecialistPool resource. The - form is - ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.SpecialistPool: - SpecialistPool represents customers' - own workforce to work on their data - labeling jobs. It includes a group of - specialist managers who are responsible - for managing the labelers in this pool - as well as customers' data labeling jobs - associated with this pool. - Customers create specialist pool as well - as start data labeling jobs on Cloud, - managers and labelers work with the jobs - using CrowdCompute console. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a specialist_pool_service.GetSpecialistPoolRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, specialist_pool_service.GetSpecialistPoolRequest): - request = specialist_pool_service.GetSpecialistPoolRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.get_specialist_pool] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_specialist_pools(self, - request: specialist_pool_service.ListSpecialistPoolsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListSpecialistPoolsPager: - r"""Lists SpecialistPools in a Location. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest): - The request object. Request message for - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. - parent (str): - Required. The name of the SpecialistPool's parent - resource. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.specialist_pool_service.pagers.ListSpecialistPoolsPager: - Response message for - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a specialist_pool_service.ListSpecialistPoolsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, specialist_pool_service.ListSpecialistPoolsRequest): - request = specialist_pool_service.ListSpecialistPoolsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_specialist_pools] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. 
- response = pagers.ListSpecialistPoolsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_specialist_pool(self, - request: specialist_pool_service.DeleteSpecialistPoolRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a SpecialistPool as well as all Specialists - in the pool. - - Args: - request (google.cloud.aiplatform_v1beta1.types.DeleteSpecialistPoolRequest): - The request object. Request message for - [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool]. - name (str): - Required. The resource name of the SpecialistPool to - delete. Format: - ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a specialist_pool_service.DeleteSpecialistPoolRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, specialist_pool_service.DeleteSpecialistPoolRequest): - request = specialist_pool_service.DeleteSpecialistPoolRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_specialist_pool] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
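The synchronous pager assembled above mirrors its async counterpart; a sketch of item-level iteration, with placeholders and credentials assumed as before:

from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.SpecialistPoolServiceClient()
pager = client.list_specialist_pools(
    parent="projects/my-project/locations/us-central1",  # placeholder
)
for pool in pager:  # __iter__ walks every page; pager.pages yields raw responses
    print(pool.name)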
- return response - - def update_specialist_pool(self, - request: specialist_pool_service.UpdateSpecialistPoolRequest = None, - *, - specialist_pool: gca_specialist_pool.SpecialistPool = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Updates a SpecialistPool. - - Args: - request (google.cloud.aiplatform_v1beta1.types.UpdateSpecialistPoolRequest): - The request object. Request message for - [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool]. - specialist_pool (google.cloud.aiplatform_v1beta1.types.SpecialistPool): - Required. The SpecialistPool which - replaces the resource on the server. - - This corresponds to the ``specialist_pool`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to - the resource. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data - labeling jobs. It includes a group of specialist - managers who are responsible for managing the - labelers in this pool as well as customers' data - labeling jobs associated with this pool. Customers - create specialist pool as well as start data labeling - jobs on Cloud, managers and labelers work with the - jobs using CrowdCompute console. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([specialist_pool, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a specialist_pool_service.UpdateSpecialistPoolRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, specialist_pool_service.UpdateSpecialistPoolRequest): - request = specialist_pool_service.UpdateSpecialistPoolRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if specialist_pool is not None: - request.specialist_pool = specialist_pool - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_specialist_pool] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("specialist_pool.name", request.specialist_pool.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_specialist_pool.SpecialistPool, - metadata_type=specialist_pool_service.UpdateSpecialistPoolOperationMetadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "SpecialistPoolServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/pagers.py deleted file mode 100644 index cac2a57671..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/pagers.py +++ /dev/null @@ -1,141 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.cloud.aiplatform_v1beta1.types import specialist_pool -from google.cloud.aiplatform_v1beta1.types import specialist_pool_service - - -class ListSpecialistPoolsPager: - """A pager for iterating through ``list_specialist_pools`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``specialist_pools`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListSpecialistPools`` requests and continue to iterate - through the ``specialist_pools`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., specialist_pool_service.ListSpecialistPoolsResponse], - request: specialist_pool_service.ListSpecialistPoolsRequest, - response: specialist_pool_service.ListSpecialistPoolsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest): - The initial request object. 
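For reference, the removed client methods all share one shape: coerce the request, apply flattened fields, attach routing metadata, then send. A minimal usage sketch of that surface, assuming application default credentials are configured; the project, location, and pool IDs below are placeholders, not values from this patch:

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.SpecialistPoolServiceClient()
    parent = "projects/my-project/locations/us-central1"

    # Iterating the returned pager yields SpecialistPool messages across pages.
    for pool in client.list_specialist_pools(parent=parent):
        print(pool.name, pool.display_name)

    # delete_specialist_pool returns a long-running operation future;
    # result() blocks until the server-side deletion finishes.
    operation = client.delete_specialist_pool(
        name=f"{parent}/specialistPools/my-pool"  # placeholder pool ID
    )
    operation.result()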
- response (google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = specialist_pool_service.ListSpecialistPoolsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[specialist_pool_service.ListSpecialistPoolsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[specialist_pool.SpecialistPool]: - for page in self.pages: - yield from page.specialist_pools - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListSpecialistPoolsAsyncPager: - """A pager for iterating through ``list_specialist_pools`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``specialist_pools`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListSpecialistPools`` requests and continue to iterate - through the ``specialist_pools`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[specialist_pool_service.ListSpecialistPoolsResponse]], - request: specialist_pool_service.ListSpecialistPoolsRequest, - response: specialist_pool_service.ListSpecialistPoolsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = specialist_pool_service.ListSpecialistPoolsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[specialist_pool_service.ListSpecialistPoolsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[specialist_pool.SpecialistPool]: - async def async_generator(): - async for page in self.pages: - for response in page.specialist_pools: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/__init__.py deleted file mode 100644 index ba8c9d7eb5..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import SpecialistPoolServiceTransport -from .grpc import SpecialistPoolServiceGrpcTransport -from .grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[SpecialistPoolServiceTransport]] -_transport_registry['grpc'] = SpecialistPoolServiceGrpcTransport -_transport_registry['grpc_asyncio'] = SpecialistPoolServiceGrpcAsyncIOTransport - -__all__ = ( - 'SpecialistPoolServiceTransport', - 'SpecialistPoolServiceGrpcTransport', - 'SpecialistPoolServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py deleted file mode 100644 index fe83634f38..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py +++ /dev/null @@ -1,232 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import specialist_pool -from google.cloud.aiplatform_v1beta1.types import specialist_pool_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class SpecialistPoolServiceTransport(abc.ABC): - """Abstract transport class for SpecialistPoolService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. 
- """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials is service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # TODO(busunkim): This method is in the base transport - # to avoid duplicating code across the transport classes. These functions - # should be deleted once the minimum required versions of google-auth is increased. - - # TODO: Remove this function once google-auth >= 1.25.0 is required - @classmethod - def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: - """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" - - scopes_kwargs = {} - - if _GOOGLE_AUTH_VERSION and ( - packaging.version.parse(_GOOGLE_AUTH_VERSION) - >= packaging.version.parse("1.25.0") - ): - scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} - else: - scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} - - return scopes_kwargs - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
- self._wrapped_methods = { - self.create_specialist_pool: gapic_v1.method.wrap_method( - self.create_specialist_pool, - default_timeout=5.0, - client_info=client_info, - ), - self.get_specialist_pool: gapic_v1.method.wrap_method( - self.get_specialist_pool, - default_timeout=5.0, - client_info=client_info, - ), - self.list_specialist_pools: gapic_v1.method.wrap_method( - self.list_specialist_pools, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_specialist_pool: gapic_v1.method.wrap_method( - self.delete_specialist_pool, - default_timeout=5.0, - client_info=client_info, - ), - self.update_specialist_pool: gapic_v1.method.wrap_method( - self.update_specialist_pool, - default_timeout=5.0, - client_info=client_info, - ), - } - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_specialist_pool(self) -> Callable[ - [specialist_pool_service.CreateSpecialistPoolRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_specialist_pool(self) -> Callable[ - [specialist_pool_service.GetSpecialistPoolRequest], - Union[ - specialist_pool.SpecialistPool, - Awaitable[specialist_pool.SpecialistPool] - ]]: - raise NotImplementedError() - - @property - def list_specialist_pools(self) -> Callable[ - [specialist_pool_service.ListSpecialistPoolsRequest], - Union[ - specialist_pool_service.ListSpecialistPoolsResponse, - Awaitable[specialist_pool_service.ListSpecialistPoolsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_specialist_pool(self) -> Callable[ - [specialist_pool_service.DeleteSpecialistPoolRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def update_specialist_pool(self) -> Callable[ - [specialist_pool_service.UpdateSpecialistPoolRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'SpecialistPoolServiceTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py deleted file mode 100644 index 341de9c299..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py +++ /dev/null @@ -1,382 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
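The `_get_scopes_kwargs` gate in the base transport above exists because `google.auth.default` only accepts a separate `default_scopes` keyword from google-auth 1.25.0 onward. A standalone sketch of the same check, assuming a google-auth recent enough to expose `__version__` and configured application default credentials:

    import google.auth
    import packaging.version

    CLOUD_PLATFORM = "https://www.googleapis.com/auth/cloud-platform"

    def scopes_kwargs(scopes):
        # Newer google-auth takes user scopes and library default scopes
        # separately; older versions take a single merged list.
        new_enough = packaging.version.parse(
            google.auth.__version__
        ) >= packaging.version.parse("1.25.0")
        if new_enough:
            return {"scopes": scopes, "default_scopes": (CLOUD_PLATFORM,)}
        return {"scopes": scopes or (CLOUD_PLATFORM,)}

    credentials, project_id = google.auth.default(**scopes_kwargs(None))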
-#
-import warnings
-from typing import Callable, Dict, Optional, Sequence, Tuple, Union
-
-from google.api_core import grpc_helpers  # type: ignore
-from google.api_core import operations_v1  # type: ignore
-from google.api_core import gapic_v1  # type: ignore
-import google.auth  # type: ignore
-from google.auth import credentials as ga_credentials  # type: ignore
-from google.auth.transport.grpc import SslCredentials  # type: ignore
-
-import grpc  # type: ignore
-
-from google.cloud.aiplatform_v1beta1.types import specialist_pool
-from google.cloud.aiplatform_v1beta1.types import specialist_pool_service
-from google.longrunning import operations_pb2  # type: ignore
-from .base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO
-
-
-class SpecialistPoolServiceGrpcTransport(SpecialistPoolServiceTransport):
-    """gRPC backend transport for SpecialistPoolService.
-
-    A service for creating and managing Customer SpecialistPools.
-    When customers start Data Labeling jobs, they can reuse/create
-    Specialist Pools to bring their own Specialists to label the
-    data. Customers can add/remove Managers for the Specialist Pool
-    on Cloud console, then Managers will get email notifications to
-    manage Specialists and tasks on CrowdCompute console.
-
-    This class defines the same methods as the primary client, so the
-    primary client can load the underlying transport implementation
-    and call it.
-
-    It sends protocol buffers over the wire using gRPC (which is built on
-    top of HTTP/2); the ``grpcio`` package must be installed.
-    """
-    _stubs: Dict[str, Callable]
-
-    def __init__(self, *,
-            host: str = 'aiplatform.googleapis.com',
-            credentials: ga_credentials.Credentials = None,
-            credentials_file: str = None,
-            scopes: Sequence[str] = None,
-            channel: grpc.Channel = None,
-            api_mtls_endpoint: str = None,
-            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
-            ssl_channel_credentials: grpc.ChannelCredentials = None,
-            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
-            quota_project_id: Optional[str] = None,
-            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
-            always_use_jwt_access: Optional[bool] = False,
-            ) -> None:
-        """Instantiate the transport.
-
-        Args:
-            host (Optional[str]):
-                 The hostname to connect to.
-            credentials (Optional[google.auth.credentials.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify the application to the service; if none
-                are specified, the client will attempt to ascertain the
-                credentials from the environment.
-                This argument is ignored if ``channel`` is provided.
-            credentials_file (Optional[str]): A file with credentials that can
-                be loaded with :func:`google.auth.load_credentials_from_file`.
-                This argument is ignored if ``channel`` is provided.
-            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
-                ignored if ``channel`` is provided.
-            channel (Optional[grpc.Channel]): A ``Channel`` instance through
-                which to make calls.
-            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
-                If provided, it overrides the ``host`` argument and tries to create
-                a mutual TLS channel with client SSL credentials from
-                ``client_cert_source`` or application default SSL credentials.
-            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
-                Deprecated. A callback to provide client SSL certificate bytes and
-                private key bytes, both in PEM format. It is ignored if
-                ``api_mtls_endpoint`` is None.
- ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. 
-            credentials (Optional[~.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify this application to the service. If
-                none are specified, the client will attempt to ascertain
-                the credentials from the environment.
-            credentials_file (Optional[str]): A file with credentials that can
-                be loaded with :func:`google.auth.load_credentials_from_file`.
-                This argument is mutually exclusive with credentials.
-            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
-                service. These are only used when credentials are not specified and
-                are passed to :func:`google.auth.default`.
-            quota_project_id (Optional[str]): An optional project to use for billing
-                and quota.
-            kwargs (Optional[dict]): Keyword arguments, which are passed to the
-                channel creation.
-        Returns:
-            grpc.Channel: A gRPC channel object.
-
-        Raises:
-            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
-                and ``credentials_file`` are passed.
-        """
-
-        return grpc_helpers.create_channel(
-            host,
-            credentials=credentials,
-            credentials_file=credentials_file,
-            quota_project_id=quota_project_id,
-            default_scopes=cls.AUTH_SCOPES,
-            scopes=scopes,
-            default_host=cls.DEFAULT_HOST,
-            **kwargs
-        )
-
-    @property
-    def grpc_channel(self) -> grpc.Channel:
-        """Return the channel designed to connect to this service.
-        """
-        return self._grpc_channel
-
-    @property
-    def operations_client(self) -> operations_v1.OperationsClient:
-        """Create the client designed to process long-running operations.
-
-        This property caches on the instance; repeated calls return the same
-        client.
-        """
-        # Sanity check: Only create a new client if we do not already have one.
-        if self._operations_client is None:
-            self._operations_client = operations_v1.OperationsClient(
-                self.grpc_channel
-            )
-
-        # Return the client from cache.
-        return self._operations_client
-
-    @property
-    def create_specialist_pool(self) -> Callable[
-            [specialist_pool_service.CreateSpecialistPoolRequest],
-            operations_pb2.Operation]:
-        r"""Return a callable for the create specialist pool method over gRPC.
-
-        Creates a SpecialistPool.
-
-        Returns:
-            Callable[[~.CreateSpecialistPoolRequest],
-                    ~.Operation]:
-                A function that, when called, will call the underlying RPC
-                on the server.
-        """
-        # Generate a "stub function" on-the-fly which will actually make
-        # the request.
-        # gRPC handles serialization and deserialization, so we just need
-        # to pass in the functions for each.
-        if 'create_specialist_pool' not in self._stubs:
-            self._stubs['create_specialist_pool'] = self.grpc_channel.unary_unary(
-                '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/CreateSpecialistPool',
-                request_serializer=specialist_pool_service.CreateSpecialistPoolRequest.serialize,
-                response_deserializer=operations_pb2.Operation.FromString,
-            )
-        return self._stubs['create_specialist_pool']
-
-    @property
-    def get_specialist_pool(self) -> Callable[
-            [specialist_pool_service.GetSpecialistPoolRequest],
-            specialist_pool.SpecialistPool]:
-        r"""Return a callable for the get specialist pool method over gRPC.
-
-        Gets a SpecialistPool.
-
-        Returns:
-            Callable[[~.GetSpecialistPoolRequest],
-                    ~.SpecialistPool]:
-                A function that, when called, will call the underlying RPC
-                on the server.
-        """
-        # Generate a "stub function" on-the-fly which will actually make
-        # the request.
-        # gRPC handles serialization and deserialization, so we just need
-        # to pass in the functions for each.
- if 'get_specialist_pool' not in self._stubs: - self._stubs['get_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/GetSpecialistPool', - request_serializer=specialist_pool_service.GetSpecialistPoolRequest.serialize, - response_deserializer=specialist_pool.SpecialistPool.deserialize, - ) - return self._stubs['get_specialist_pool'] - - @property - def list_specialist_pools(self) -> Callable[ - [specialist_pool_service.ListSpecialistPoolsRequest], - specialist_pool_service.ListSpecialistPoolsResponse]: - r"""Return a callable for the list specialist pools method over gRPC. - - Lists SpecialistPools in a Location. - - Returns: - Callable[[~.ListSpecialistPoolsRequest], - ~.ListSpecialistPoolsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_specialist_pools' not in self._stubs: - self._stubs['list_specialist_pools'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/ListSpecialistPools', - request_serializer=specialist_pool_service.ListSpecialistPoolsRequest.serialize, - response_deserializer=specialist_pool_service.ListSpecialistPoolsResponse.deserialize, - ) - return self._stubs['list_specialist_pools'] - - @property - def delete_specialist_pool(self) -> Callable[ - [specialist_pool_service.DeleteSpecialistPoolRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete specialist pool method over gRPC. - - Deletes a SpecialistPool as well as all Specialists - in the pool. - - Returns: - Callable[[~.DeleteSpecialistPoolRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_specialist_pool' not in self._stubs: - self._stubs['delete_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/DeleteSpecialistPool', - request_serializer=specialist_pool_service.DeleteSpecialistPoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_specialist_pool'] - - @property - def update_specialist_pool(self) -> Callable[ - [specialist_pool_service.UpdateSpecialistPoolRequest], - operations_pb2.Operation]: - r"""Return a callable for the update specialist pool method over gRPC. - - Updates a SpecialistPool. - - Returns: - Callable[[~.UpdateSpecialistPoolRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_specialist_pool' not in self._stubs: - self._stubs['update_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/UpdateSpecialistPool', - request_serializer=specialist_pool_service.UpdateSpecialistPoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_specialist_pool'] - - -__all__ = ( - 'SpecialistPoolServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py deleted file mode 100644 index df1f92c5fa..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,386 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1beta1.types import specialist_pool -from google.cloud.aiplatform_v1beta1.types import specialist_pool_service -from google.longrunning import operations_pb2 # type: ignore -from .base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import SpecialistPoolServiceGrpcTransport - - -class SpecialistPoolServiceGrpcAsyncIOTransport(SpecialistPoolServiceTransport): - """gRPC AsyncIO backend transport for SpecialistPoolService. - - A service for creating and managing Customer SpecialistPools. - When customers start Data Labeling jobs, they can reuse/create - Specialist Pools to bring their own Specialists to label the - data. Customers can add/remove Managers for the Specialist Pool - on Cloud console, then Managers will get email notifications to - manage Specialists and tasks on CrowdCompute console. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
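Each RPC property in the gRPC transport above builds its stub on first access and caches it in `self._stubs`. The same memoization pattern in isolation, a sketch with `unary_unary` standing in for the generated serializer pairs:

    import grpc

    class LazyStubs:
        """Memoize one gRPC callable per fully-qualified method path."""

        def __init__(self, channel: grpc.Channel):
            self._channel = channel
            self._stubs = {}

        def unary_unary(self, method_path, request_serializer, response_deserializer):
            # Create the stub on first access only; later calls hit the cache.
            if method_path not in self._stubs:
                self._stubs[method_path] = self._channel.unary_unary(
                    method_path,
                    request_serializer=request_serializer,
                    response_deserializer=response_deserializer,
                )
            return self._stubs[method_path]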
- """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. 
A callback to provide client SSL certificate bytes and
-                private key bytes, both in PEM format. It is ignored if
-                ``api_mtls_endpoint`` is None.
-            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
-                for grpc channel. It is ignored if ``channel`` is provided.
-            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
-                A callback to provide client certificate bytes and private key bytes,
-                both in PEM format. It is used to configure mutual TLS channel. It is
-                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
-            quota_project_id (Optional[str]): An optional project to use for billing
-                and quota.
-            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
-                The client info used to send a user-agent string along with
-                API requests. If ``None``, then default info will be used.
-                Generally, you only need to set this if you're developing
-                your own client library.
-            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
-                be used for service account credentials.
-
-        Raises:
-            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
-                creation failed for any reason.
-            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
-                and ``credentials_file`` are passed.
-        """
-        self._grpc_channel = None
-        self._ssl_channel_credentials = ssl_channel_credentials
-        self._stubs: Dict[str, Callable] = {}
-        self._operations_client = None
-
-        if api_mtls_endpoint:
-            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
-        if client_cert_source:
-            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
-
-        if channel:
-            # Ignore credentials if a channel was passed.
-            credentials = False
-            # If a channel was explicitly provided, set it.
-            self._grpc_channel = channel
-            self._ssl_channel_credentials = None
-        else:
-            if api_mtls_endpoint:
-                host = api_mtls_endpoint
-
-                # Create SSL credentials with client_cert_source or application
-                # default SSL credentials.
-                if client_cert_source:
-                    cert, key = client_cert_source()
-                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
-                        certificate_chain=cert, private_key=key
-                    )
-                else:
-                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
-
-            else:
-                if client_cert_source_for_mtls and not ssl_channel_credentials:
-                    cert, key = client_cert_source_for_mtls()
-                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
-                        certificate_chain=cert, private_key=key
-                    )
-
-        # The base transport sets the host, credentials and scopes
-        super().__init__(
-            host=host,
-            credentials=credentials,
-            credentials_file=credentials_file,
-            scopes=scopes,
-            quota_project_id=quota_project_id,
-            client_info=client_info,
-            always_use_jwt_access=always_use_jwt_access,
-        )
-
-        if not self._grpc_channel:
-            self._grpc_channel = type(self).create_channel(
-                self._host,
-                credentials=self._credentials,
-                credentials_file=credentials_file,
-                scopes=self._scopes,
-                ssl_credentials=self._ssl_channel_credentials,
-                quota_project_id=quota_project_id,
-                options=[
-                    ("grpc.max_send_message_length", -1),
-                    ("grpc.max_receive_message_length", -1),
-                ],
-            )
-
-        # Wrap messages. This must be done after self._grpc_channel exists
-        self._prep_wrapped_messages(client_info)
-
-    @property
-    def grpc_channel(self) -> aio.Channel:
-        """Create the channel designed to connect to this service.
-
-        This property caches on the instance; repeated calls return
-        the same channel.
-        """
-        # Return the channel from cache.
- return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_specialist_pool(self) -> Callable[ - [specialist_pool_service.CreateSpecialistPoolRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create specialist pool method over gRPC. - - Creates a SpecialistPool. - - Returns: - Callable[[~.CreateSpecialistPoolRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_specialist_pool' not in self._stubs: - self._stubs['create_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/CreateSpecialistPool', - request_serializer=specialist_pool_service.CreateSpecialistPoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_specialist_pool'] - - @property - def get_specialist_pool(self) -> Callable[ - [specialist_pool_service.GetSpecialistPoolRequest], - Awaitable[specialist_pool.SpecialistPool]]: - r"""Return a callable for the get specialist pool method over gRPC. - - Gets a SpecialistPool. - - Returns: - Callable[[~.GetSpecialistPoolRequest], - Awaitable[~.SpecialistPool]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_specialist_pool' not in self._stubs: - self._stubs['get_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/GetSpecialistPool', - request_serializer=specialist_pool_service.GetSpecialistPoolRequest.serialize, - response_deserializer=specialist_pool.SpecialistPool.deserialize, - ) - return self._stubs['get_specialist_pool'] - - @property - def list_specialist_pools(self) -> Callable[ - [specialist_pool_service.ListSpecialistPoolsRequest], - Awaitable[specialist_pool_service.ListSpecialistPoolsResponse]]: - r"""Return a callable for the list specialist pools method over gRPC. - - Lists SpecialistPools in a Location. - - Returns: - Callable[[~.ListSpecialistPoolsRequest], - Awaitable[~.ListSpecialistPoolsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_specialist_pools' not in self._stubs: - self._stubs['list_specialist_pools'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/ListSpecialistPools', - request_serializer=specialist_pool_service.ListSpecialistPoolsRequest.serialize, - response_deserializer=specialist_pool_service.ListSpecialistPoolsResponse.deserialize, - ) - return self._stubs['list_specialist_pools'] - - @property - def delete_specialist_pool(self) -> Callable[ - [specialist_pool_service.DeleteSpecialistPoolRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete specialist pool method over gRPC. - - Deletes a SpecialistPool as well as all Specialists - in the pool. - - Returns: - Callable[[~.DeleteSpecialistPoolRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_specialist_pool' not in self._stubs: - self._stubs['delete_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/DeleteSpecialistPool', - request_serializer=specialist_pool_service.DeleteSpecialistPoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_specialist_pool'] - - @property - def update_specialist_pool(self) -> Callable[ - [specialist_pool_service.UpdateSpecialistPoolRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the update specialist pool method over gRPC. - - Updates a SpecialistPool. - - Returns: - Callable[[~.UpdateSpecialistPoolRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_specialist_pool' not in self._stubs: - self._stubs['update_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/UpdateSpecialistPool', - request_serializer=specialist_pool_service.UpdateSpecialistPoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_specialist_pool'] - - -__all__ = ( - 'SpecialistPoolServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/__init__.py deleted file mode 100644 index fa8edec482..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
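With the asyncio transport removed above, the flattened client methods are coroutines and list results arrive as an async pager. A hedged sketch, again with placeholder project and location values and assuming application default credentials:

    import asyncio

    from google.cloud import aiplatform_v1beta1

    async def main():
        client = aiplatform_v1beta1.SpecialistPoolServiceAsyncClient()
        parent = "projects/my-project/locations/us-central1"

        # Awaiting the call returns an async pager; `async for` then pulls
        # additional pages through the grpc_asyncio channel as needed.
        pager = await client.list_specialist_pools(parent=parent)
        async for pool in pager:
            print(pool.name)

    asyncio.run(main())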
-# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import TensorboardServiceClient -from .async_client import TensorboardServiceAsyncClient - -__all__ = ( - 'TensorboardServiceClient', - 'TensorboardServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py deleted file mode 100644 index af1d3fe6ec..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py +++ /dev/null @@ -1,2348 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import functools -import re -from typing import Dict, AsyncIterable, Awaitable, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.tensorboard_service import pagers -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.cloud.aiplatform_v1beta1.types import tensorboard -from google.cloud.aiplatform_v1beta1.types import tensorboard as gca_tensorboard -from google.cloud.aiplatform_v1beta1.types import tensorboard_data -from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment -from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment as gca_tensorboard_experiment -from google.cloud.aiplatform_v1beta1.types import tensorboard_run -from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run -from google.cloud.aiplatform_v1beta1.types import tensorboard_service -from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series -from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series as gca_tensorboard_time_series -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import TensorboardServiceGrpcAsyncIOTransport -from .client import TensorboardServiceClient - - -class TensorboardServiceAsyncClient: - """TensorboardService""" - - _client: TensorboardServiceClient - - DEFAULT_ENDPOINT = 
TensorboardServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = TensorboardServiceClient.DEFAULT_MTLS_ENDPOINT - - tensorboard_path = staticmethod(TensorboardServiceClient.tensorboard_path) - parse_tensorboard_path = staticmethod(TensorboardServiceClient.parse_tensorboard_path) - tensorboard_experiment_path = staticmethod(TensorboardServiceClient.tensorboard_experiment_path) - parse_tensorboard_experiment_path = staticmethod(TensorboardServiceClient.parse_tensorboard_experiment_path) - tensorboard_run_path = staticmethod(TensorboardServiceClient.tensorboard_run_path) - parse_tensorboard_run_path = staticmethod(TensorboardServiceClient.parse_tensorboard_run_path) - tensorboard_time_series_path = staticmethod(TensorboardServiceClient.tensorboard_time_series_path) - parse_tensorboard_time_series_path = staticmethod(TensorboardServiceClient.parse_tensorboard_time_series_path) - common_billing_account_path = staticmethod(TensorboardServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(TensorboardServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(TensorboardServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(TensorboardServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(TensorboardServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(TensorboardServiceClient.parse_common_organization_path) - common_project_path = staticmethod(TensorboardServiceClient.common_project_path) - parse_common_project_path = staticmethod(TensorboardServiceClient.parse_common_project_path) - common_location_path = staticmethod(TensorboardServiceClient.common_location_path) - parse_common_location_path = staticmethod(TensorboardServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - TensorboardServiceAsyncClient: The constructed client. - """ - return TensorboardServiceClient.from_service_account_info.__func__(TensorboardServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - TensorboardServiceAsyncClient: The constructed client. - """ - return TensorboardServiceClient.from_service_account_file.__func__(TensorboardServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> TensorboardServiceTransport: - """Returns the transport used by the client instance. - - Returns: - TensorboardServiceTransport: The transport used by the client instance. 
- """ - return self._client.transport - - get_transport_class = functools.partial(type(TensorboardServiceClient).get_transport_class, type(TensorboardServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, TensorboardServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the tensorboard service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.TensorboardServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = TensorboardServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_tensorboard(self, - request: tensorboard_service.CreateTensorboardRequest = None, - *, - parent: str = None, - tensorboard: gca_tensorboard.Tensorboard = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a Tensorboard. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.CreateTensorboardRequest`): - The request object. Request message for - [TensorboardService.CreateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboard]. - parent (:class:`str`): - Required. The resource name of the Location to create - the Tensorboard in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard (:class:`google.cloud.aiplatform_v1beta1.types.Tensorboard`): - Required. The Tensorboard to create. - This corresponds to the ``tensorboard`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. - A default Tensorboard is provided in each region of a - GCP project. If needed users can also create extra - Tensorboards in their projects. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, tensorboard]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.CreateTensorboardRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if tensorboard is not None: - request.tensorboard = tensorboard - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_tensorboard, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_tensorboard.Tensorboard, - metadata_type=tensorboard_service.CreateTensorboardOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_tensorboard(self, - request: tensorboard_service.GetTensorboardRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard.Tensorboard: - r"""Gets a Tensorboard. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetTensorboardRequest`): - The request object. Request message for - [TensorboardService.GetTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboard]. - name (:class:`str`): - Required. The name of the Tensorboard resource. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Tensorboard: - Tensorboard is a physical database - that stores users' training metrics. A - default Tensorboard is provided in each - region of a GCP project. If needed users - can also create extra Tensorboards in - their projects. - - """ - # Create or coerce a protobuf request object. 
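- # The flattened ``name`` argument and the ``request`` object are mutually
- # exclusive calling conventions. A minimal usage sketch, assuming
- # placeholder project/location/tensorboard values:
- #
- #   client = TensorboardServiceAsyncClient()
- #   tb = await client.get_tensorboard(
- #       name="projects/my-project/locations/us-central1/tensorboards/123",
- #   )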
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.GetTensorboardRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_tensorboard, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_tensorboard(self, - request: tensorboard_service.UpdateTensorboardRequest = None, - *, - tensorboard: gca_tensorboard.Tensorboard = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Updates a Tensorboard. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRequest`): - The request object. Request message for - [TensorboardService.UpdateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboard]. - tensorboard (:class:`google.cloud.aiplatform_v1beta1.types.Tensorboard`): - Required. The Tensorboard's ``name`` field is used to - identify the Tensorboard to be updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - - This corresponds to the ``tensorboard`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. Field mask is used to specify the fields to be - overwritten in the Tensorboard resource by the update. - The fields specified in the update_mask are relative to - the resource, not the full request. A field will be - overwritten if it is in the mask. If the user does not - provide a mask then all fields will be overwritten if - new values are specified. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. - A default Tensorboard is provided in each region of a - GCP project. If needed users can also create extra - Tensorboards in their projects. - - """ - # Create or coerce a protobuf request object. 
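- # Updates run as long-running operations. A hedged sketch of the
- # field-mask pattern (``display_name`` is a real Tensorboard field; the
- # resource name is a placeholder):
- #
- #   tb = gca_tensorboard.Tensorboard(
- #       name="projects/p/locations/l/tensorboards/123",
- #       display_name="renamed",
- #   )
- #   op = await client.update_tensorboard(
- #       tensorboard=tb,
- #       update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
- #   )
- #   updated = await op.result()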
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.UpdateTensorboardRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard is not None: - request.tensorboard = tensorboard - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_tensorboard, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard.name", request.tensorboard.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_tensorboard.Tensorboard, - metadata_type=tensorboard_service.UpdateTensorboardOperationMetadata, - ) - - # Done; return the response. - return response - - async def list_tensorboards(self, - request: tensorboard_service.ListTensorboardsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTensorboardsAsyncPager: - r"""Lists Tensorboards in a Location. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest`): - The request object. Request message for - [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. - parent (:class:`str`): - Required. The resource name of the - Location to list Tensorboards. Format: - 'projects/{project}/locations/{location}' - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardsAsyncPager: - Response message for - [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.ListTensorboardsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
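- # The pager returned by this method supports ``async for``; a sketch with
- # a placeholder parent:
- #
- #   pager = await client.list_tensorboards(
- #       parent="projects/my-project/locations/us-central1",
- #   )
- #   async for tb in pager:
- #       print(tb.name)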
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_tensorboards, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListTensorboardsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_tensorboard(self, - request: tensorboard_service.DeleteTensorboardRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a Tensorboard. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRequest`): - The request object. Request message for - [TensorboardService.DeleteTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboard]. - name (:class:`str`): - Required. The name of the Tensorboard to be deleted. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.DeleteTensorboardRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_tensorboard, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
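- # to_grpc_metadata() emits the ``x-goog-request-params`` header (here
- # carrying ``name=<resource name>``), which the backend uses to route
- # the call to the right location.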
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def create_tensorboard_experiment(self, - request: tensorboard_service.CreateTensorboardExperimentRequest = None, - *, - parent: str = None, - tensorboard_experiment: gca_tensorboard_experiment.TensorboardExperiment = None, - tensorboard_experiment_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_experiment.TensorboardExperiment: - r"""Creates a TensorboardExperiment. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.CreateTensorboardExperimentRequest`): - The request object. Request message for - [TensorboardService.CreateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardExperiment]. - parent (:class:`str`): - Required. The resource name of the Tensorboard to create - the TensorboardExperiment in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_experiment (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardExperiment`): - The TensorboardExperiment to create. - This corresponds to the ``tensorboard_experiment`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_experiment_id (:class:`str`): - Required. The ID to use for the Tensorboard experiment, - which will become the final component of the Tensorboard - experiment's resource name. - - This value should be 1-128 characters, and valid - characters are /[a-z][0-9]-/. - - This corresponds to the ``tensorboard_experiment_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: - A TensorboardExperiment is a group of - TensorboardRuns, that are typically the - results of a training job run, in a - Tensorboard. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, tensorboard_experiment, tensorboard_experiment_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.CreateTensorboardExperimentRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
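- # A hedged creation sketch (the parent and experiment ID are
- # placeholders; per this surface the experiment payload itself may be
- # left unset):
- #
- #   exp = await client.create_tensorboard_experiment(
- #       parent="projects/p/locations/l/tensorboards/123",
- #       tensorboard_experiment_id="my-experiment",
- #   )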
- if parent is not None: - request.parent = parent - if tensorboard_experiment is not None: - request.tensorboard_experiment = tensorboard_experiment - if tensorboard_experiment_id is not None: - request.tensorboard_experiment_id = tensorboard_experiment_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_tensorboard_experiment, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_tensorboard_experiment(self, - request: tensorboard_service.GetTensorboardExperimentRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_experiment.TensorboardExperiment: - r"""Gets a TensorboardExperiment. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetTensorboardExperimentRequest`): - The request object. Request message for - [TensorboardService.GetTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardExperiment]. - name (:class:`str`): - Required. The name of the TensorboardExperiment - resource. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: - A TensorboardExperiment is a group of - TensorboardRuns, that are typically the - results of a training job run, in a - Tensorboard. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.GetTensorboardExperimentRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_tensorboard_experiment, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def update_tensorboard_experiment(self, - request: tensorboard_service.UpdateTensorboardExperimentRequest = None, - *, - tensorboard_experiment: gca_tensorboard_experiment.TensorboardExperiment = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_experiment.TensorboardExperiment: - r"""Updates a TensorboardExperiment. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateTensorboardExperimentRequest`): - The request object. Request message for - [TensorboardService.UpdateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardExperiment]. - tensorboard_experiment (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardExperiment`): - Required. The TensorboardExperiment's ``name`` field is - used to identify the TensorboardExperiment to be - updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - - This corresponds to the ``tensorboard_experiment`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. Field mask is used to specify the fields to be - overwritten in the TensorboardExperiment resource by the - update. The fields specified in the update_mask are - relative to the resource, not the full request. A field - will be overwritten if it is in the mask. If the user - does not provide a mask then all fields will be - overwritten if new values are specified. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: - A TensorboardExperiment is a group of - TensorboardRuns, that are typically the - results of a training job run, in a - Tensorboard. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard_experiment, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.UpdateTensorboardExperimentRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_experiment is not None: - request.tensorboard_experiment = tensorboard_experiment - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_tensorboard_experiment, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
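- # Routing here keys on the nested resource name,
- # ``tensorboard_experiment.name``, rather than a top-level request field.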
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_experiment.name", request.tensorboard_experiment.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_tensorboard_experiments(self, - request: tensorboard_service.ListTensorboardExperimentsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTensorboardExperimentsAsyncPager: - r"""Lists TensorboardExperiments in a Location. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest`): - The request object. Request message for - [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]. - parent (:class:`str`): - Required. The resource name of the - Tensorboard to list - TensorboardExperiments. Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}' - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardExperimentsAsyncPager: - Response message for - [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.ListTensorboardExperimentsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_tensorboard_experiments, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListTensorboardExperimentsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def delete_tensorboard_experiment(self, - request: tensorboard_service.DeleteTensorboardExperimentRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a TensorboardExperiment. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteTensorboardExperimentRequest`): - The request object. Request message for - [TensorboardService.DeleteTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardExperiment]. - name (:class:`str`): - Required. The name of the TensorboardExperiment to be - deleted. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.DeleteTensorboardExperimentRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_tensorboard_experiment, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
- return response - - async def create_tensorboard_run(self, - request: tensorboard_service.CreateTensorboardRunRequest = None, - *, - parent: str = None, - tensorboard_run: gca_tensorboard_run.TensorboardRun = None, - tensorboard_run_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_run.TensorboardRun: - r"""Creates a TensorboardRun. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest`): - The request object. Request message for - [TensorboardService.CreateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun]. - parent (:class:`str`): - Required. The resource name of the Tensorboard to create - the TensorboardRun in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_run (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardRun`): - Required. The TensorboardRun to - create. - - This corresponds to the ``tensorboard_run`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_run_id (:class:`str`): - Required. The ID to use for the Tensorboard run, which - will become the final component of the Tensorboard run's - resource name. - - This value should be 1-128 characters, and valid - characters are /[a-z][0-9]-/. - - This corresponds to the ``tensorboard_run_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardRun: - TensorboardRun maps to a specific - execution of a training job with a given - set of hyperparameter values, model - definition, dataset, etc - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, tensorboard_run, tensorboard_run_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.CreateTensorboardRunRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if tensorboard_run is not None: - request.tensorboard_run = tensorboard_run - if tensorboard_run_id is not None: - request.tensorboard_run_id = tensorboard_run_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_tensorboard_run, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
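- # Unlike create_tensorboard, this RPC is unary: awaiting it yields the
- # TensorboardRun proto directly, not an operation future.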
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_tensorboard_run(self, - request: tensorboard_service.GetTensorboardRunRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_run.TensorboardRun: - r"""Gets a TensorboardRun. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetTensorboardRunRequest`): - The request object. Request message for - [TensorboardService.GetTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardRun]. - name (:class:`str`): - Required. The name of the TensorboardRun resource. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardRun: - TensorboardRun maps to a specific - execution of a training job with a given - set of hyperparameter values, model - definition, dataset, etc - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.GetTensorboardRunRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_tensorboard_run, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_tensorboard_run(self, - request: tensorboard_service.UpdateTensorboardRunRequest = None, - *, - tensorboard_run: gca_tensorboard_run.TensorboardRun = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_run.TensorboardRun: - r"""Updates a TensorboardRun. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRunRequest`): - The request object. Request message for - [TensorboardService.UpdateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardRun]. - tensorboard_run (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardRun`): - Required. 
The TensorboardRun's ``name`` field is used to - identify the TensorboardRun to be updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - - This corresponds to the ``tensorboard_run`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. Field mask is used to specify the fields to be - overwritten in the TensorboardRun resource by the - update. The fields specified in the update_mask are - relative to the resource, not the full request. A field - will be overwritten if it is in the mask. If the user - does not provide a mask then all fields will be - overwritten if new values are specified. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardRun: - TensorboardRun maps to a specific - execution of a training job with a given - set of hyperparameter values, model - definition, dataset, etc - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard_run, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.UpdateTensorboardRunRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_run is not None: - request.tensorboard_run = tensorboard_run - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_tensorboard_run, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_run.name", request.tensorboard_run.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_tensorboard_runs(self, - request: tensorboard_service.ListTensorboardRunsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTensorboardRunsAsyncPager: - r"""Lists TensorboardRuns in a Location. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest`): - The request object. Request message for - [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]. - parent (:class:`str`): - Required. The resource name of the - Tensorboard to list TensorboardRuns. 
- Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}' - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardRunsAsyncPager: - Response message for - [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.ListTensorboardRunsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_tensorboard_runs, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListTensorboardRunsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_tensorboard_run(self, - request: tensorboard_service.DeleteTensorboardRunRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a TensorboardRun. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRunRequest`): - The request object. Request message for - [TensorboardService.DeleteTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardRun]. - name (:class:`str`): - Required. The name of the TensorboardRun to be deleted. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.DeleteTensorboardRunRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_tensorboard_run, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def create_tensorboard_time_series(self, - request: tensorboard_service.CreateTensorboardTimeSeriesRequest = None, - *, - parent: str = None, - tensorboard_time_series: gca_tensorboard_time_series.TensorboardTimeSeries = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_time_series.TensorboardTimeSeries: - r"""Creates a TensorboardTimeSeries. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest`): - The request object. Request message for - [TensorboardService.CreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardTimeSeries]. - parent (:class:`str`): - Required. The resource name of the TensorboardRun to - create the TensorboardTimeSeries in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_time_series (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries`): - Required. The TensorboardTimeSeries - to create. - - This corresponds to the ``tensorboard_time_series`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries: - TensorboardTimeSeries maps to times - series produced in training runs - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, tensorboard_time_series]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.CreateTensorboardTimeSeriesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if tensorboard_time_series is not None: - request.tensorboard_time_series = tensorboard_time_series - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_tensorboard_time_series, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_tensorboard_time_series(self, - request: tensorboard_service.GetTensorboardTimeSeriesRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_time_series.TensorboardTimeSeries: - r"""Gets a TensorboardTimeSeries. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetTensorboardTimeSeriesRequest`): - The request object. Request message for - [TensorboardService.GetTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardTimeSeries]. - name (:class:`str`): - Required. The name of the TensorboardTimeSeries - resource. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries: - TensorboardTimeSeries maps to times - series produced in training runs - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
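- # ``any([...])`` is truthy when at least one flattened argument was
- # passed; combining that with an explicit ``request`` is rejected below.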
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.GetTensorboardTimeSeriesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_tensorboard_time_series, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_tensorboard_time_series(self, - request: tensorboard_service.UpdateTensorboardTimeSeriesRequest = None, - *, - tensorboard_time_series: gca_tensorboard_time_series.TensorboardTimeSeries = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_time_series.TensorboardTimeSeries: - r"""Updates a TensorboardTimeSeries. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.UpdateTensorboardTimeSeriesRequest`): - The request object. Request message for - [TensorboardService.UpdateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardTimeSeries]. - tensorboard_time_series (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries`): - Required. The TensorboardTimeSeries' ``name`` field is - used to identify the TensorboardTimeSeries to be - updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - - This corresponds to the ``tensorboard_time_series`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. Field mask is used to specify the fields to be - overwritten in the TensorboardTimeSeries resource by the - update. The fields specified in the update_mask are - relative to the resource, not the full request. A field - will be overwritten if it is in the mask. If the user - does not provide a mask then all fields will be - overwritten if new values are specified. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries: - TensorboardTimeSeries maps to times - series produced in training runs - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
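- # A hedged sketch of a time-series update (the resource name is a
- # placeholder; ``display_name`` is a real TensorboardTimeSeries field):
- #
- #   ts = gca_tensorboard_time_series.TensorboardTimeSeries(
- #       name="projects/p/locations/l/tensorboards/1/experiments/e/runs/r/timeSeries/5",
- #       display_name="loss",
- #   )
- #   ts = await client.update_tensorboard_time_series(
- #       tensorboard_time_series=ts,
- #       update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
- #   )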
- has_flattened_params = any([tensorboard_time_series, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.UpdateTensorboardTimeSeriesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_time_series is not None: - request.tensorboard_time_series = tensorboard_time_series - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_tensorboard_time_series, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_time_series.name", request.tensorboard_time_series.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_tensorboard_time_series(self, - request: tensorboard_service.ListTensorboardTimeSeriesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTensorboardTimeSeriesAsyncPager: - r"""Lists TensorboardTimeSeries in a Location. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest`): - The request object. Request message for - [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries]. - parent (:class:`str`): - Required. The resource name of the - TensorboardRun to list - TensorboardTimeSeries. Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}' - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardTimeSeriesAsyncPager: - Response message for - [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.ListTensorboardTimeSeriesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_tensorboard_time_series, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListTensorboardTimeSeriesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_tensorboard_time_series(self, - request: tensorboard_service.DeleteTensorboardTimeSeriesRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a TensorboardTimeSeries. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteTensorboardTimeSeriesRequest`): - The request object. Request message for - [TensorboardService.DeleteTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardTimeSeries]. - name (:class:`str`): - Required. The name of the TensorboardTimeSeries to be - deleted. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.DeleteTensorboardTimeSeriesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_tensorboard_time_series, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def read_tensorboard_time_series_data(self, - request: tensorboard_service.ReadTensorboardTimeSeriesDataRequest = None, - *, - tensorboard_time_series: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_service.ReadTensorboardTimeSeriesDataResponse: - r"""Reads a TensorboardTimeSeries' data. Data is returned in - paginated responses. By default, if the number of data points - stored is less than 1000, all data will be returned. Otherwise, - 1000 data points will be randomly selected from this time series - and returned. This value can be changed by changing - max_data_points. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataRequest`): - The request object. Request message for - [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. - tensorboard_time_series (:class:`str`): - Required. The resource name of the TensorboardTimeSeries - to read data from. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - - This corresponds to the ``tensorboard_time_series`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataResponse: - Response message for - [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard_time_series]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_time_series is not None: - request.tensorboard_time_series = tensorboard_time_series - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.read_tensorboard_time_series_data, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_time_series", request.tensorboard_time_series), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def read_tensorboard_blob_data(self, - request: tensorboard_service.ReadTensorboardBlobDataRequest = None, - *, - time_series: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> Awaitable[AsyncIterable[tensorboard_service.ReadTensorboardBlobDataResponse]]: - r"""Gets bytes of TensorboardBlobs. - This is to allow reading blob data stored in consumer - project's Cloud Storage bucket without users having to - obtain Cloud Storage access permission. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataRequest`): - The request object. Request message for - [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData]. - time_series (:class:`str`): - Required. The resource name of the TensorboardTimeSeries - to list Blobs. Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}' - - This corresponds to the ``time_series`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - AsyncIterable[google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataResponse]: - Response message for - [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([time_series]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.ReadTensorboardBlobDataRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if time_series is not None: - request.time_series = time_series - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.read_tensorboard_blob_data, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("time_series", request.time_series), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
-        return response
-
-    async def write_tensorboard_run_data(self,
-            request: tensorboard_service.WriteTensorboardRunDataRequest = None,
-            *,
-            tensorboard_run: str = None,
-            time_series_data: Sequence[tensorboard_data.TimeSeriesData] = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> tensorboard_service.WriteTensorboardRunDataResponse:
-        r"""Write time series data points into multiple
-        TensorboardTimeSeries under a TensorboardRun. If any
-        data fail to be ingested, an error will be returned.
-
-        Args:
-            request (:class:`google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest`):
-                The request object. Request message for
-                [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData].
-            tensorboard_run (:class:`str`):
-                Required. The resource name of the TensorboardRun to
-                write data to. Format:
-                ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}``
-
-                This corresponds to the ``tensorboard_run`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            time_series_data (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesData]`):
-                Required. The TensorboardTimeSeries
-                data to write. Values within a time
-                series are indexed by their step value.
-                Repeated writes to the same step will
-                overwrite the existing value for that
-                step.
-                The upper limit of data points per write
-                request is 5000.
-
-                This corresponds to the ``time_series_data`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataResponse:
-                Response message for
-                [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData].
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([tensorboard_run, time_series_data])
-        if request is not None and has_flattened_params:
-            raise ValueError("If the `request` argument is set, then none of "
-                             "the individual field arguments should be set.")
-
-        request = tensorboard_service.WriteTensorboardRunDataRequest(request)
-
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if tensorboard_run is not None:
-            request.tensorboard_run = tensorboard_run
-        if time_series_data:
-            request.time_series_data.extend(time_series_data)
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = gapic_v1.method_async.wrap_method(
-            self._client._transport.write_tensorboard_run_data,
-            default_timeout=None,
-            client_info=DEFAULT_CLIENT_INFO,
-        )
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("tensorboard_run", request.tensorboard_run),
-            )),
-        )
-
-        # Send the request.
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def export_tensorboard_time_series_data(self, - request: tensorboard_service.ExportTensorboardTimeSeriesDataRequest = None, - *, - tensorboard_time_series: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ExportTensorboardTimeSeriesDataAsyncPager: - r"""Exports a TensorboardTimeSeries' data. Data is - returned in paginated responses. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest`): - The request object. Request message for - [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. - tensorboard_time_series (:class:`str`): - Required. The resource name of the TensorboardTimeSeries - to export data from. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - - This corresponds to the ``tensorboard_time_series`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ExportTensorboardTimeSeriesDataAsyncPager: - Response message for - [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard_time_series]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_time_series is not None: - request.tensorboard_time_series = tensorboard_time_series - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.export_tensorboard_time_series_data, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_time_series", request.tensorboard_time_series), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ExportTensorboardTimeSeriesDataAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
-        return response
-
-
-
-
-
-try:
-    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
-        gapic_version=pkg_resources.get_distribution(
-            "google-cloud-aiplatform",
-        ).version,
-    )
-except pkg_resources.DistributionNotFound:
-    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
-
-
-__all__ = (
-    "TensorboardServiceAsyncClient",
-)
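The async client deleted above exposes write_tensorboard_run_data, whose docstring notes that values within a series are indexed by step, that rewriting a step overwrites it, and that one request carries at most 5000 points. A hedged sketch of driving that call end to end; the project and run names are hypothetical, while TimeSeriesData, TimeSeriesDataPoint, and Scalar are the aiplatform_v1beta1 types the request expects:

    from google.cloud import aiplatform_v1beta1

    async def log_scalars():
        client = aiplatform_v1beta1.TensorboardServiceAsyncClient()
        run = (  # hypothetical TensorboardRun resource name
            "projects/my-project/locations/us-central1/"
            "tensorboards/123/experiments/my-exp/runs/my-run"
        )
        data = aiplatform_v1beta1.TimeSeriesData(
            tensorboard_time_series_id="456",  # hypothetical series ID
            value_type=aiplatform_v1beta1.TensorboardTimeSeries.ValueType.SCALAR,
            values=[
                # Points are indexed by step; writing the same step again
                # overwrites it, and one request holds at most 5000 points.
                aiplatform_v1beta1.TimeSeriesDataPoint(
                    step=step, scalar=aiplatform_v1beta1.Scalar(value=loss),
                )
                for step, loss in enumerate([0.9, 0.5, 0.3])
            ],
        )
        await client.write_tensorboard_run_data(
            tensorboard_run=run, time_series_data=[data],
        )
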
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py
deleted file mode 100644
index 140d9569e3..0000000000
--- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py
+++ /dev/null
@@ -1,2562 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-from collections import OrderedDict
-from distutils import util
-import os
-import re
-from typing import Callable, Dict, Optional, Iterable, Sequence, Tuple, Type, Union
-import pkg_resources
-
-from google.api_core import client_options as client_options_lib  # type: ignore
-from google.api_core import exceptions as core_exceptions  # type: ignore
-from google.api_core import gapic_v1  # type: ignore
-from google.api_core import retry as retries  # type: ignore
-from google.auth import credentials as ga_credentials  # type: ignore
-from google.auth.transport import mtls  # type: ignore
-from google.auth.transport.grpc import SslCredentials  # type: ignore
-from google.auth.exceptions import MutualTLSChannelError  # type: ignore
-from google.oauth2 import service_account  # type: ignore
-
-from google.api_core import operation as gac_operation  # type: ignore
-from google.api_core import operation_async  # type: ignore
-from google.cloud.aiplatform_v1beta1.services.tensorboard_service import pagers
-from google.cloud.aiplatform_v1beta1.types import encryption_spec
-from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
-from google.cloud.aiplatform_v1beta1.types import tensorboard
-from google.cloud.aiplatform_v1beta1.types import tensorboard as gca_tensorboard
-from google.cloud.aiplatform_v1beta1.types import tensorboard_data
-from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment
-from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment as gca_tensorboard_experiment
-from google.cloud.aiplatform_v1beta1.types import tensorboard_run
-from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run
-from google.cloud.aiplatform_v1beta1.types import tensorboard_service
-from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series
-from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series as gca_tensorboard_time_series
-from google.protobuf import empty_pb2  # type: ignore
-from google.protobuf import field_mask_pb2  # type: ignore
-from google.protobuf import timestamp_pb2  # type: ignore
-from .transports.base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO
-from .transports.grpc import TensorboardServiceGrpcTransport
-from .transports.grpc_asyncio import TensorboardServiceGrpcAsyncIOTransport
-
-
-class TensorboardServiceClientMeta(type):
-    """Metaclass for the TensorboardService client.
-
-    This provides class-level methods for building and retrieving
-    support objects (e.g. transport) without polluting the client instance
-    objects.
-    """
-    _transport_registry = OrderedDict()  # type: Dict[str, Type[TensorboardServiceTransport]]
-    _transport_registry["grpc"] = TensorboardServiceGrpcTransport
-    _transport_registry["grpc_asyncio"] = TensorboardServiceGrpcAsyncIOTransport
-
-    def get_transport_class(cls,
-            label: str = None,
-        ) -> Type[TensorboardServiceTransport]:
-        """Returns an appropriate transport class.
-
-        Args:
-            label: The name of the desired transport. If none is
-                provided, then the first transport in the registry is used.
-
-        Returns:
-            The transport class to use.
-        """
-        # If a specific transport is requested, return that one.
-        if label:
-            return cls._transport_registry[label]
-
-        # No transport is requested; return the default (that is, the first one
-        # in the dictionary).
-        return next(iter(cls._transport_registry.values()))
-
-
-class TensorboardServiceClient(metaclass=TensorboardServiceClientMeta):
-    """TensorboardService"""
-
-    @staticmethod
-    def _get_default_mtls_endpoint(api_endpoint):
-        """Converts api endpoint to mTLS endpoint.
-
-        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
-        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
-        Args:
-            api_endpoint (Optional[str]): the api endpoint to convert.
-        Returns:
-            str: converted mTLS api endpoint.
-        """
-        if not api_endpoint:
-            return api_endpoint
-
-        mtls_endpoint_re = re.compile(
-            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
-        )
-
-        m = mtls_endpoint_re.match(api_endpoint)
-        name, mtls, sandbox, googledomain = m.groups()
-        if mtls or not googledomain:
-            return api_endpoint
-
-        if sandbox:
-            return api_endpoint.replace(
-                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
-            )
-
-        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
-
-    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
-    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
-        DEFAULT_ENDPOINT
-    )
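Tracing the helper just deleted, the regex and the two replace calls imply this behavior (illustrative inputs, not official documentation):

    # Behavior implied by _get_default_mtls_endpoint:
    TensorboardServiceClient._get_default_mtls_endpoint("aiplatform.googleapis.com")
    # -> "aiplatform.mtls.googleapis.com"
    TensorboardServiceClient._get_default_mtls_endpoint("aiplatform.sandbox.googleapis.com")
    # -> "aiplatform.mtls.sandbox.googleapis.com"
    TensorboardServiceClient._get_default_mtls_endpoint("localhost:7469")
    # -> "localhost:7469" (no *.googleapis.com suffix, returned unchanged)
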
- """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> TensorboardServiceTransport: - """Returns the transport used by the client instance. - - Returns: - TensorboardServiceTransport: The transport used by the client - instance. - """ - return self._transport - - @staticmethod - def tensorboard_path(project: str,location: str,tensorboard: str,) -> str: - """Returns a fully-qualified tensorboard string.""" - return "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(project=project, location=location, tensorboard=tensorboard, ) - - @staticmethod - def parse_tensorboard_path(path: str) -> Dict[str,str]: - """Parses a tensorboard path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def tensorboard_experiment_path(project: str,location: str,tensorboard: str,experiment: str,) -> str: - """Returns a fully-qualified tensorboard_experiment string.""" - return "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, ) - - @staticmethod - def parse_tensorboard_experiment_path(path: str) -> Dict[str,str]: - """Parses a tensorboard_experiment path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)/experiments/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def tensorboard_run_path(project: str,location: str,tensorboard: str,experiment: str,run: str,) -> str: - """Returns a fully-qualified tensorboard_run string.""" - return "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, run=run, ) - - @staticmethod - def parse_tensorboard_run_path(path: str) -> Dict[str,str]: - """Parses a tensorboard_run path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)/experiments/(?P.+?)/runs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def tensorboard_time_series_path(project: str,location: str,tensorboard: str,experiment: str,run: str,time_series: str,) -> str: - """Returns a fully-qualified tensorboard_time_series string.""" - return "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, run=run, time_series=time_series, ) - - @staticmethod - def parse_tensorboard_time_series_path(path: str) -> Dict[str,str]: - """Parses a tensorboard_time_series path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)/experiments/(?P.+?)/runs/(?P.+?)/timeSeries/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account 
-
-    @staticmethod
-    def common_billing_account_path(billing_account: str, ) -> str:
-        """Returns a fully-qualified billing_account string."""
-        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
-
-    @staticmethod
-    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
-        """Parse a billing_account path into its component segments."""
-        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_folder_path(folder: str, ) -> str:
-        """Returns a fully-qualified folder string."""
-        return "folders/{folder}".format(folder=folder, )
-
-    @staticmethod
-    def parse_common_folder_path(path: str) -> Dict[str,str]:
-        """Parse a folder path into its component segments."""
-        m = re.match(r"^folders/(?P<folder>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_organization_path(organization: str, ) -> str:
-        """Returns a fully-qualified organization string."""
-        return "organizations/{organization}".format(organization=organization, )
-
-    @staticmethod
-    def parse_common_organization_path(path: str) -> Dict[str,str]:
-        """Parse an organization path into its component segments."""
-        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_project_path(project: str, ) -> str:
-        """Returns a fully-qualified project string."""
-        return "projects/{project}".format(project=project, )
-
-    @staticmethod
-    def parse_common_project_path(path: str) -> Dict[str,str]:
-        """Parse a project path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_location_path(project: str, location: str, ) -> str:
-        """Returns a fully-qualified location string."""
-        return "projects/{project}/locations/{location}".format(project=project, location=location, )
-
-    @staticmethod
-    def parse_common_location_path(path: str) -> Dict[str,str]:
-        """Parse a location path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    def __init__(self, *,
-            credentials: Optional[ga_credentials.Credentials] = None,
-            transport: Union[str, TensorboardServiceTransport, None] = None,
-            client_options: Optional[client_options_lib.ClientOptions] = None,
-            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
-            ) -> None:
-        """Instantiates the tensorboard service client.
-
-        Args:
-            credentials (Optional[google.auth.credentials.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify the application to the service; if none
-                are specified, the client will attempt to ascertain the
-                credentials from the environment.
-            transport (Union[str, TensorboardServiceTransport]): The
-                transport to use. If set to None, a transport is chosen
-                automatically.
-            client_options (google.api_core.client_options.ClientOptions): Custom options for the
-                client. It won't take effect if a ``transport`` instance is provided.
-                (1) The ``api_endpoint`` property can be used to override the
-                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
-                environment variable can also be used to override the endpoint:
-                "always" (always use the default mTLS endpoint), "never" (always
-                use the default regular endpoint) and "auto" (auto switch to the
-                default mTLS endpoint if client certificate is present, this is
-                the default value). However, the ``api_endpoint`` property takes
-                precedence if provided.
-                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
-                is "true", then the ``client_cert_source`` property can be used
-                to provide client certificate for mutual TLS transport. If
-                not provided, the default SSL client certificate will be used if
-                present.
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, TensorboardServiceTransport): - # transport is a TensorboardServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - ) - - def create_tensorboard(self, - request: tensorboard_service.CreateTensorboardRequest = None, - *, - parent: str = None, - tensorboard: gca_tensorboard.Tensorboard = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates a Tensorboard. - - Args: - request (google.cloud.aiplatform_v1beta1.types.CreateTensorboardRequest): - The request object. Request message for - [TensorboardService.CreateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboard]. - parent (str): - Required. The resource name of the Location to create - the Tensorboard in. 
Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard (google.cloud.aiplatform_v1beta1.types.Tensorboard): - Required. The Tensorboard to create. - This corresponds to the ``tensorboard`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. - A default Tensorboard is provided in each region of a - GCP project. If needed users can also create extra - Tensorboards in their projects. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, tensorboard]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.CreateTensorboardRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.CreateTensorboardRequest): - request = tensorboard_service.CreateTensorboardRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if tensorboard is not None: - request.tensorboard = tensorboard - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_tensorboard] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_tensorboard.Tensorboard, - metadata_type=tensorboard_service.CreateTensorboardOperationMetadata, - ) - - # Done; return the response. - return response - - def get_tensorboard(self, - request: tensorboard_service.GetTensorboardRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard.Tensorboard: - r"""Gets a Tensorboard. - - Args: - request (google.cloud.aiplatform_v1beta1.types.GetTensorboardRequest): - The request object. Request message for - [TensorboardService.GetTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboard]. - name (str): - Required. The name of the Tensorboard resource. 
Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Tensorboard: - Tensorboard is a physical database - that stores users' training metrics. A - default Tensorboard is provided in each - region of a GCP project. If needed users - can also create extra Tensorboards in - their projects. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.GetTensorboardRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.GetTensorboardRequest): - request = tensorboard_service.GetTensorboardRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_tensorboard] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_tensorboard(self, - request: tensorboard_service.UpdateTensorboardRequest = None, - *, - tensorboard: gca_tensorboard.Tensorboard = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Updates a Tensorboard. - - Args: - request (google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRequest): - The request object. Request message for - [TensorboardService.UpdateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboard]. - tensorboard (google.cloud.aiplatform_v1beta1.types.Tensorboard): - Required. The Tensorboard's ``name`` field is used to - identify the Tensorboard to be updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - - This corresponds to the ``tensorboard`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. Field mask is used to specify the fields to be - overwritten in the Tensorboard resource by the update. - The fields specified in the update_mask are relative to - the resource, not the full request. A field will be - overwritten if it is in the mask. 
If the user does not - provide a mask then all fields will be overwritten if - new values are specified. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. - A default Tensorboard is provided in each region of a - GCP project. If needed users can also create extra - Tensorboards in their projects. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.UpdateTensorboardRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.UpdateTensorboardRequest): - request = tensorboard_service.UpdateTensorboardRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard is not None: - request.tensorboard = tensorboard - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_tensorboard] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard.name", request.tensorboard.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_tensorboard.Tensorboard, - metadata_type=tensorboard_service.UpdateTensorboardOperationMetadata, - ) - - # Done; return the response. - return response - - def list_tensorboards(self, - request: tensorboard_service.ListTensorboardsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTensorboardsPager: - r"""Lists Tensorboards in a Location. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest): - The request object. Request message for - [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. - parent (str): - Required. The resource name of the - Location to list Tensorboards. Format: - 'projects/{project}/locations/{location}' - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardsPager: - Response message for - [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.ListTensorboardsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.ListTensorboardsRequest): - request = tensorboard_service.ListTensorboardsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_tensorboards] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListTensorboardsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_tensorboard(self, - request: tensorboard_service.DeleteTensorboardRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a Tensorboard. - - Args: - request (google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRequest): - The request object. Request message for - [TensorboardService.DeleteTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboard]. - name (str): - Required. The name of the Tensorboard to be deleted. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
- - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.DeleteTensorboardRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.DeleteTensorboardRequest): - request = tensorboard_service.DeleteTensorboardRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_tensorboard] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def create_tensorboard_experiment(self, - request: tensorboard_service.CreateTensorboardExperimentRequest = None, - *, - parent: str = None, - tensorboard_experiment: gca_tensorboard_experiment.TensorboardExperiment = None, - tensorboard_experiment_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_experiment.TensorboardExperiment: - r"""Creates a TensorboardExperiment. - - Args: - request (google.cloud.aiplatform_v1beta1.types.CreateTensorboardExperimentRequest): - The request object. Request message for - [TensorboardService.CreateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardExperiment]. - parent (str): - Required. The resource name of the Tensorboard to create - the TensorboardExperiment in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_experiment (google.cloud.aiplatform_v1beta1.types.TensorboardExperiment): - The TensorboardExperiment to create. - This corresponds to the ``tensorboard_experiment`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_experiment_id (str): - Required. 
The ID to use for the Tensorboard experiment, - which will become the final component of the Tensorboard - experiment's resource name. - - This value should be 1-128 characters, and valid - characters are /[a-z][0-9]-/. - - This corresponds to the ``tensorboard_experiment_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: - A TensorboardExperiment is a group of - TensorboardRuns, that are typically the - results of a training job run, in a - Tensorboard. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, tensorboard_experiment, tensorboard_experiment_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.CreateTensorboardExperimentRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.CreateTensorboardExperimentRequest): - request = tensorboard_service.CreateTensorboardExperimentRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if tensorboard_experiment is not None: - request.tensorboard_experiment = tensorboard_experiment - if tensorboard_experiment_id is not None: - request.tensorboard_experiment_id = tensorboard_experiment_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_tensorboard_experiment] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_tensorboard_experiment(self, - request: tensorboard_service.GetTensorboardExperimentRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_experiment.TensorboardExperiment: - r"""Gets a TensorboardExperiment. - - Args: - request (google.cloud.aiplatform_v1beta1.types.GetTensorboardExperimentRequest): - The request object. Request message for - [TensorboardService.GetTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardExperiment]. - name (str): - Required. The name of the TensorboardExperiment - resource. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: - A TensorboardExperiment is a group of - TensorboardRuns, that are typically the - results of a training job run, in a - Tensorboard. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.GetTensorboardExperimentRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.GetTensorboardExperimentRequest): - request = tensorboard_service.GetTensorboardExperimentRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_tensorboard_experiment] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_tensorboard_experiment(self, - request: tensorboard_service.UpdateTensorboardExperimentRequest = None, - *, - tensorboard_experiment: gca_tensorboard_experiment.TensorboardExperiment = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_experiment.TensorboardExperiment: - r"""Updates a TensorboardExperiment. - - Args: - request (google.cloud.aiplatform_v1beta1.types.UpdateTensorboardExperimentRequest): - The request object. Request message for - [TensorboardService.UpdateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardExperiment]. - tensorboard_experiment (google.cloud.aiplatform_v1beta1.types.TensorboardExperiment): - Required. The TensorboardExperiment's ``name`` field is - used to identify the TensorboardExperiment to be - updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - - This corresponds to the ``tensorboard_experiment`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. Field mask is used to specify the fields to be - overwritten in the TensorboardExperiment resource by the - update. The fields specified in the update_mask are - relative to the resource, not the full request. A field - will be overwritten if it is in the mask. 
If the user - does not provide a mask then all fields will be - overwritten if new values are specified. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: - A TensorboardExperiment is a group of - TensorboardRuns, that are typically the - results of a training job run, in a - Tensorboard. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard_experiment, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.UpdateTensorboardExperimentRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.UpdateTensorboardExperimentRequest): - request = tensorboard_service.UpdateTensorboardExperimentRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_experiment is not None: - request.tensorboard_experiment = tensorboard_experiment - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_tensorboard_experiment] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_experiment.name", request.tensorboard_experiment.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_tensorboard_experiments(self, - request: tensorboard_service.ListTensorboardExperimentsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTensorboardExperimentsPager: - r"""Lists TensorboardExperiments in a Location. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest): - The request object. Request message for - [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]. - parent (str): - Required. The resource name of the - Tensorboard to list - TensorboardExperiments. Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}' - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardExperimentsPager: - Response message for - [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.ListTensorboardExperimentsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.ListTensorboardExperimentsRequest): - request = tensorboard_service.ListTensorboardExperimentsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_tensorboard_experiments] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListTensorboardExperimentsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_tensorboard_experiment(self, - request: tensorboard_service.DeleteTensorboardExperimentRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a TensorboardExperiment. - - Args: - request (google.cloud.aiplatform_v1beta1.types.DeleteTensorboardExperimentRequest): - The request object. Request message for - [TensorboardService.DeleteTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardExperiment]. - name (str): - Required. The name of the TensorboardExperiment to be - deleted. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
- - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.DeleteTensorboardExperimentRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.DeleteTensorboardExperimentRequest): - request = tensorboard_service.DeleteTensorboardExperimentRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_tensorboard_experiment] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def create_tensorboard_run(self, - request: tensorboard_service.CreateTensorboardRunRequest = None, - *, - parent: str = None, - tensorboard_run: gca_tensorboard_run.TensorboardRun = None, - tensorboard_run_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_run.TensorboardRun: - r"""Creates a TensorboardRun. - - Args: - request (google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest): - The request object. Request message for - [TensorboardService.CreateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun]. - parent (str): - Required. The resource name of the Tensorboard to create - the TensorboardRun in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_run (google.cloud.aiplatform_v1beta1.types.TensorboardRun): - Required. The TensorboardRun to - create. - - This corresponds to the ``tensorboard_run`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_run_id (str): - Required. 
The ID to use for the Tensorboard run, which - will become the final component of the Tensorboard run's - resource name. - - This value should be 1-128 characters, and valid - characters are /[a-z][0-9]-/. - - This corresponds to the ``tensorboard_run_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardRun: - TensorboardRun maps to a specific - execution of a training job with a given - set of hyperparameter values, model - definition, dataset, etc - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, tensorboard_run, tensorboard_run_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.CreateTensorboardRunRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.CreateTensorboardRunRequest): - request = tensorboard_service.CreateTensorboardRunRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if tensorboard_run is not None: - request.tensorboard_run = tensorboard_run - if tensorboard_run_id is not None: - request.tensorboard_run_id = tensorboard_run_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_tensorboard_run] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_tensorboard_run(self, - request: tensorboard_service.GetTensorboardRunRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_run.TensorboardRun: - r"""Gets a TensorboardRun. - - Args: - request (google.cloud.aiplatform_v1beta1.types.GetTensorboardRunRequest): - The request object. Request message for - [TensorboardService.GetTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardRun]. - name (str): - Required. The name of the TensorboardRun resource. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardRun: - TensorboardRun maps to a specific - execution of a training job with a given - set of hyperparameter values, model - definition, dataset, etc - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.GetTensorboardRunRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.GetTensorboardRunRequest): - request = tensorboard_service.GetTensorboardRunRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_tensorboard_run] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_tensorboard_run(self, - request: tensorboard_service.UpdateTensorboardRunRequest = None, - *, - tensorboard_run: gca_tensorboard_run.TensorboardRun = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_run.TensorboardRun: - r"""Updates a TensorboardRun. - - Args: - request (google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRunRequest): - The request object. Request message for - [TensorboardService.UpdateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardRun]. - tensorboard_run (google.cloud.aiplatform_v1beta1.types.TensorboardRun): - Required. The TensorboardRun's ``name`` field is used to - identify the TensorboardRun to be updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - - This corresponds to the ``tensorboard_run`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. Field mask is used to specify the fields to be - overwritten in the TensorboardRun resource by the - update. The fields specified in the update_mask are - relative to the resource, not the full request. A field - will be overwritten if it is in the mask. If the user - does not provide a mask then all fields will be - overwritten if new values are specified. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardRun: - TensorboardRun maps to a specific - execution of a training job with a given - set of hyperparameter values, model - definition, dataset, etc - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard_run, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.UpdateTensorboardRunRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.UpdateTensorboardRunRequest): - request = tensorboard_service.UpdateTensorboardRunRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_run is not None: - request.tensorboard_run = tensorboard_run - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_tensorboard_run] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_run.name", request.tensorboard_run.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_tensorboard_runs(self, - request: tensorboard_service.ListTensorboardRunsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTensorboardRunsPager: - r"""Lists TensorboardRuns in a Location. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest): - The request object. Request message for - [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]. - parent (str): - Required. The resource name of the - Tensorboard to list TensorboardRuns. - Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}' - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardRunsPager: - Response message for - [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. 
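# A minimal usage sketch for the paged list call documented above, assuming a
# configured TensorboardServiceClient from this package; the project, location,
# and resource IDs are placeholders. The returned pager fetches further pages
# lazily during iteration, so no explicit page-token handling is needed.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.TensorboardServiceClient()
parent = "projects/my-project/locations/us-central1/tensorboards/123/experiments/my-experiment"
for run in client.list_tensorboard_runs(parent=parent):
    print(run.name)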
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.ListTensorboardRunsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.ListTensorboardRunsRequest): - request = tensorboard_service.ListTensorboardRunsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_tensorboard_runs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListTensorboardRunsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_tensorboard_run(self, - request: tensorboard_service.DeleteTensorboardRunRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a TensorboardRun. - - Args: - request (google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRunRequest): - The request object. Request message for - [TensorboardService.DeleteTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardRun]. - name (str): - Required. The name of the TensorboardRun to be deleted. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
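# The deletion methods here return a long-running operation future whose result
# type is Empty, per the docstring above. A sketch of blocking on it; the
# resource name is a placeholder and the timeout value is illustrative.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.TensorboardServiceClient()
operation = client.delete_tensorboard_run(
    name="projects/my-project/locations/us-central1/tensorboards/123/experiments/my-experiment/runs/my-run",
)
operation.result(timeout=300)  # blocks until the server finishes; raises on failure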
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.DeleteTensorboardRunRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.DeleteTensorboardRunRequest): - request = tensorboard_service.DeleteTensorboardRunRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_tensorboard_run] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def create_tensorboard_time_series(self, - request: tensorboard_service.CreateTensorboardTimeSeriesRequest = None, - *, - parent: str = None, - tensorboard_time_series: gca_tensorboard_time_series.TensorboardTimeSeries = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_time_series.TensorboardTimeSeries: - r"""Creates a TensorboardTimeSeries. - - Args: - request (google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest): - The request object. Request message for - [TensorboardService.CreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardTimeSeries]. - parent (str): - Required. The resource name of the TensorboardRun to - create the TensorboardTimeSeries in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_time_series (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries): - Required. The TensorboardTimeSeries - to create. - - This corresponds to the ``tensorboard_time_series`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries: - TensorboardTimeSeries maps to time - series produced in training runs. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request.
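# A hedged sketch of creating a scalar time series with the flattened arguments
# documented above. The display_name field and the ValueType.SCALAR enum follow
# the v1beta1 types as I understand them and are worth verifying against the
# released library; resource names are placeholders.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.TensorboardServiceClient()
run_name = "projects/my-project/locations/us-central1/tensorboards/123/experiments/my-experiment/runs/my-run"
series = client.create_tensorboard_time_series(
    parent=run_name,
    tensorboard_time_series=aiplatform_v1beta1.TensorboardTimeSeries(
        display_name="train/loss",
        value_type=aiplatform_v1beta1.TensorboardTimeSeries.ValueType.SCALAR,
    ),
)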
- has_flattened_params = any([parent, tensorboard_time_series]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.CreateTensorboardTimeSeriesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.CreateTensorboardTimeSeriesRequest): - request = tensorboard_service.CreateTensorboardTimeSeriesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if tensorboard_time_series is not None: - request.tensorboard_time_series = tensorboard_time_series - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_tensorboard_time_series] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_tensorboard_time_series(self, - request: tensorboard_service.GetTensorboardTimeSeriesRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_time_series.TensorboardTimeSeries: - r"""Gets a TensorboardTimeSeries. - - Args: - request (google.cloud.aiplatform_v1beta1.types.GetTensorboardTimeSeriesRequest): - The request object. Request message for - [TensorboardService.GetTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardTimeSeries]. - name (str): - Required. The name of the TensorboardTimeSeries - resource. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries: - TensorboardTimeSeries maps to time - series produced in training runs. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.GetTensorboardTimeSeriesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields.
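# The sanity check above makes `request` and the flattened fields mutually
# exclusive. Both call styles below are equivalent; passing both at once raises
# ValueError. The resource name is a placeholder, and the client is assumed to
# be configured with default credentials.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.TensorboardServiceClient()
name = "projects/p/locations/us-central1/tensorboards/123/experiments/e/runs/r/timeSeries/s"
series = client.get_tensorboard_time_series(name=name)  # flattened field
series = client.get_tensorboard_time_series(
    request=aiplatform_v1beta1.GetTensorboardTimeSeriesRequest(name=name),  # request object
)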
- if not isinstance(request, tensorboard_service.GetTensorboardTimeSeriesRequest): - request = tensorboard_service.GetTensorboardTimeSeriesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_tensorboard_time_series] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_tensorboard_time_series(self, - request: tensorboard_service.UpdateTensorboardTimeSeriesRequest = None, - *, - tensorboard_time_series: gca_tensorboard_time_series.TensorboardTimeSeries = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_time_series.TensorboardTimeSeries: - r"""Updates a TensorboardTimeSeries. - - Args: - request (google.cloud.aiplatform_v1beta1.types.UpdateTensorboardTimeSeriesRequest): - The request object. Request message for - [TensorboardService.UpdateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardTimeSeries]. - tensorboard_time_series (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries): - Required. The TensorboardTimeSeries' ``name`` field is - used to identify the TensorboardTimeSeries to be - updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - - This corresponds to the ``tensorboard_time_series`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. Field mask is used to specify the fields to be - overwritten in the TensorboardTimeSeries resource by the - update. The fields specified in the update_mask are - relative to the resource, not the full request. A field - will be overwritten if it is in the mask. If the user - does not provide a mask then all fields will be - overwritten if new values are specified. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries: - TensorboardTimeSeries maps to time - series produced in training runs. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request.
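# A sketch of the field-mask update documented above: only the paths listed in
# the mask are overwritten. field_mask_pb2 is already imported by this module;
# the resource name and display_name values are placeholders.
from google.cloud import aiplatform_v1beta1
from google.protobuf import field_mask_pb2

client = aiplatform_v1beta1.TensorboardServiceClient()
updated = client.update_tensorboard_time_series(
    tensorboard_time_series=aiplatform_v1beta1.TensorboardTimeSeries(
        name="projects/p/locations/us-central1/tensorboards/123/experiments/e/runs/r/timeSeries/s",
        display_name="validation/loss",
    ),
    update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),  # only this field changes
)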
- has_flattened_params = any([tensorboard_time_series, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.UpdateTensorboardTimeSeriesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.UpdateTensorboardTimeSeriesRequest): - request = tensorboard_service.UpdateTensorboardTimeSeriesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_time_series is not None: - request.tensorboard_time_series = tensorboard_time_series - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_tensorboard_time_series] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_time_series.name", request.tensorboard_time_series.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_tensorboard_time_series(self, - request: tensorboard_service.ListTensorboardTimeSeriesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTensorboardTimeSeriesPager: - r"""Lists TensorboardTimeSeries in a Location. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest): - The request object. Request message for - [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries]. - parent (str): - Required. The resource name of the - TensorboardRun to list - TensorboardTimeSeries. Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}' - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardTimeSeriesPager: - Response message for - [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.ListTensorboardTimeSeriesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.ListTensorboardTimeSeriesRequest): - request = tensorboard_service.ListTensorboardTimeSeriesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_tensorboard_time_series] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListTensorboardTimeSeriesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_tensorboard_time_series(self, - request: tensorboard_service.DeleteTensorboardTimeSeriesRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a TensorboardTimeSeries. - - Args: - request (google.cloud.aiplatform_v1beta1.types.DeleteTensorboardTimeSeriesRequest): - The request object. Request message for - [TensorboardService.DeleteTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardTimeSeries]. - name (str): - Required. The name of the TensorboardTimeSeries to be - deleted. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.DeleteTensorboardTimeSeriesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.DeleteTensorboardTimeSeriesRequest): - request = tensorboard_service.DeleteTensorboardTimeSeriesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_tensorboard_time_series] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def read_tensorboard_time_series_data(self, - request: tensorboard_service.ReadTensorboardTimeSeriesDataRequest = None, - *, - tensorboard_time_series: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_service.ReadTensorboardTimeSeriesDataResponse: - r"""Reads a TensorboardTimeSeries' data. Data is returned in - paginated responses. By default, if the number of data points - stored is less than 1000, all data will be returned. Otherwise, - 1000 data points will be randomly selected from this time series - and returned. This value can be changed by changing - max_data_points. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataRequest): - The request object. Request message for - [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. - tensorboard_time_series (str): - Required. The resource name of the TensorboardTimeSeries - to read data from. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - - This corresponds to the ``tensorboard_time_series`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataResponse: - Response message for - [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. - - """ - # Create or coerce a protobuf request object. 
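# max_data_points has no flattened parameter, so it is set on the request
# object, as sketched below with placeholder names. The nested response fields
# (time_series_data.values, point.step, point.scalar.value) are my reading of
# the v1beta1 types and should be double-checked against the release.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.TensorboardServiceClient()
response = client.read_tensorboard_time_series_data(
    request=aiplatform_v1beta1.ReadTensorboardTimeSeriesDataRequest(
        tensorboard_time_series="projects/p/locations/us-central1/tensorboards/123/experiments/e/runs/r/timeSeries/s",
        max_data_points=100,  # overrides the default random sample of 1000 points
    ),
)
for point in response.time_series_data.values:
    print(point.step, point.scalar.value)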
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard_time_series]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.ReadTensorboardTimeSeriesDataRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.ReadTensorboardTimeSeriesDataRequest): - request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_time_series is not None: - request.tensorboard_time_series = tensorboard_time_series - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.read_tensorboard_time_series_data] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_time_series", request.tensorboard_time_series), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def read_tensorboard_blob_data(self, - request: tensorboard_service.ReadTensorboardBlobDataRequest = None, - *, - time_series: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> Iterable[tensorboard_service.ReadTensorboardBlobDataResponse]: - r"""Gets bytes of TensorboardBlobs. - This is to allow reading blob data stored in the consumer - project's Cloud Storage bucket without users having to - obtain Cloud Storage access permission. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataRequest): - The request object. Request message for - [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData]. - time_series (str): - Required. The resource name of the TensorboardTimeSeries - to list Blobs. Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}' - - This corresponds to the ``time_series`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - Iterable[google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataResponse]: - Response message for - [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request.
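# Unlike the unary calls above, this method is server-streaming: the return
# value is an iterable of responses. A sketch with a placeholder time series
# name; the per-response `blobs` field and its id/data attributes are
# assumptions based on the response type name.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.TensorboardServiceClient()
stream = client.read_tensorboard_blob_data(
    time_series="projects/p/locations/us-central1/tensorboards/123/experiments/e/runs/r/timeSeries/s",
)
for response in stream:
    for blob in response.blobs:
        print(blob.id, len(blob.data))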
- has_flattened_params = any([time_series]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.ReadTensorboardBlobDataRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.ReadTensorboardBlobDataRequest): - request = tensorboard_service.ReadTensorboardBlobDataRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if time_series is not None: - request.time_series = time_series - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.read_tensorboard_blob_data] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("time_series", request.time_series), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def write_tensorboard_run_data(self, - request: tensorboard_service.WriteTensorboardRunDataRequest = None, - *, - tensorboard_run: str = None, - time_series_data: Sequence[tensorboard_data.TimeSeriesData] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_service.WriteTensorboardRunDataResponse: - r"""Writes time series data points into multiple - TensorboardTimeSeries under a TensorboardRun. If any - data fail to be ingested, an error will be returned. - - Args: - request (google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest): - The request object. Request message for - [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData]. - tensorboard_run (str): - Required. The resource name of the TensorboardRun to - write data to. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - - This corresponds to the ``tensorboard_run`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - time_series_data (Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesData]): - Required. The TensorboardTimeSeries - data to write. Values within a time - series are indexed by their step value. - Repeated writes to the same step will - overwrite the existing value for that - step. - The upper limit of data points per write - request is 5000. - - This corresponds to the ``time_series_data`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataResponse: - Response message for - [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData]. - - """ - # Create or coerce a protobuf request object.
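# A hedged sketch of writing scalar points under the 5000-points-per-request
# cap noted above. TimeSeriesData, TimeSeriesDataPoint, and Scalar come from
# the tensorboard_data types this module imports; their exact field names here
# are my best reading of v1beta1, and the IDs and values are placeholders.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.TensorboardServiceClient()
data = aiplatform_v1beta1.TimeSeriesData(
    tensorboard_time_series_id="1234567890",  # placeholder series ID
    value_type=aiplatform_v1beta1.TensorboardTimeSeries.ValueType.SCALAR,
    values=[
        aiplatform_v1beta1.TimeSeriesDataPoint(
            step=step,
            scalar=aiplatform_v1beta1.Scalar(value=loss),
        )
        for step, loss in enumerate([0.9, 0.5, 0.3])
    ],
)
client.write_tensorboard_run_data(
    tensorboard_run="projects/p/locations/us-central1/tensorboards/123/experiments/e/runs/r",
    time_series_data=[data],  # writes to the same step overwrite earlier values
)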
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard_run, time_series_data]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.WriteTensorboardRunDataRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.WriteTensorboardRunDataRequest): - request = tensorboard_service.WriteTensorboardRunDataRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_run is not None: - request.tensorboard_run = tensorboard_run - if time_series_data is not None: - request.time_series_data = time_series_data - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.write_tensorboard_run_data] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_run", request.tensorboard_run), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def export_tensorboard_time_series_data(self, - request: tensorboard_service.ExportTensorboardTimeSeriesDataRequest = None, - *, - tensorboard_time_series: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ExportTensorboardTimeSeriesDataPager: - r"""Exports a TensorboardTimeSeries' data. Data is - returned in paginated responses. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest): - The request object. Request message for - [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. - tensorboard_time_series (str): - Required. The resource name of the TensorboardTimeSeries - to export data from. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - - This corresponds to the ``tensorboard_time_series`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ExportTensorboardTimeSeriesDataPager: - Response message for - [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
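# Every method above builds its routing metadata the same way: the request's
# resource-name field is URL-encoded into a single x-goog-request-params header
# so the backend can route the call. A small sketch of what to_grpc_metadata
# yields for a placeholder name.
from google.api_core import gapic_v1

header = gapic_v1.routing_header.to_grpc_metadata(
    (("tensorboard_time_series", "projects/p/locations/us-central1/tensorboards/123"),)
)
# -> ('x-goog-request-params', 'tensorboard_time_series=projects%2Fp%2F...')
print(header)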
- has_flattened_params = any([tensorboard_time_series]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.ExportTensorboardTimeSeriesDataRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.ExportTensorboardTimeSeriesDataRequest): - request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_time_series is not None: - request.tensorboard_time_series = tensorboard_time_series - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.export_tensorboard_time_series_data] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_time_series", request.tensorboard_time_series), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ExportTensorboardTimeSeriesDataPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "TensorboardServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/pagers.py deleted file mode 100644 index d0e307d0f7..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/pagers.py +++ /dev/null @@ -1,633 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
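For reviewers tracking what surface the deletion above removes: a minimal usage sketch of the flattened-argument calling convention and the export pager. The client, method, and type names come from the deleted code itself; the resource names, the time-series id, and the scalar value are hypothetical placeholders, and the sketch assumes application default credentials and pre-existing Tensorboard resources.

    from google.cloud.aiplatform_v1beta1.services.tensorboard_service import TensorboardServiceClient
    from google.cloud.aiplatform_v1beta1.types import tensorboard_data, tensorboard_time_series

    client = TensorboardServiceClient()  # uses application default credentials

    # Hypothetical TensorboardRun resource name.
    run = "projects/p/locations/us-central1/tensorboards/1/experiments/e/runs/r"

    # Flattened arguments and `request` are mutually exclusive; passing both
    # trips the has_flattened_params ValueError shown above.
    client.write_tensorboard_run_data(
        tensorboard_run=run,
        time_series_data=[
            tensorboard_data.TimeSeriesData(
                tensorboard_time_series_id="loss",  # hypothetical id
                value_type=tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
                values=[
                    tensorboard_data.TimeSeriesDataPoint(
                        step=1,
                        scalar=tensorboard_data.Scalar(value=0.42),
                    ),
                ],
            ),
        ],
    )

    # export_tensorboard_time_series_data returns a pager; iterating it
    # resolves additional pages automatically.
    for point in client.export_tensorboard_time_series_data(
            tensorboard_time_series=run + "/timeSeries/123"):
        print(point.step)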
-# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.cloud.aiplatform_v1beta1.types import tensorboard -from google.cloud.aiplatform_v1beta1.types import tensorboard_data -from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment -from google.cloud.aiplatform_v1beta1.types import tensorboard_run -from google.cloud.aiplatform_v1beta1.types import tensorboard_service -from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series - - -class ListTensorboardsPager: - """A pager for iterating through ``list_tensorboards`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``tensorboards`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListTensorboards`` requests and continue to iterate - through the ``tensorboards`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., tensorboard_service.ListTensorboardsResponse], - request: tensorboard_service.ListTensorboardsRequest, - response: tensorboard_service.ListTensorboardsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = tensorboard_service.ListTensorboardsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[tensorboard_service.ListTensorboardsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[tensorboard.Tensorboard]: - for page in self.pages: - yield from page.tensorboards - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTensorboardsAsyncPager: - """A pager for iterating through ``list_tensorboards`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``tensorboards`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListTensorboards`` requests and continue to iterate - through the ``tensorboards`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
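The ``pages`` property of the sync pager above captures the whole list-RPC pagination protocol: re-issue the request with the previous response's ``next_page_token`` until the token comes back empty. A distilled, standalone sketch of that loop (``iterate_pages`` is a hypothetical helper, not part of the generated client):

    def iterate_pages(method, request):
        """Yield every page of a paginated list RPC until the token runs out."""
        response = method(request)
        yield response
        while response.next_page_token:
            # Re-issue the same request, pointed at the next page.
            request.page_token = response.next_page_token
            response = method(request)
            yield response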
- """ - def __init__(self, - method: Callable[..., Awaitable[tensorboard_service.ListTensorboardsResponse]], - request: tensorboard_service.ListTensorboardsRequest, - response: tensorboard_service.ListTensorboardsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = tensorboard_service.ListTensorboardsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[tensorboard_service.ListTensorboardsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[tensorboard.Tensorboard]: - async def async_generator(): - async for page in self.pages: - for response in page.tensorboards: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTensorboardExperimentsPager: - """A pager for iterating through ``list_tensorboard_experiments`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``tensorboard_experiments`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListTensorboardExperiments`` requests and continue to iterate - through the ``tensorboard_experiments`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., tensorboard_service.ListTensorboardExperimentsResponse], - request: tensorboard_service.ListTensorboardExperimentsRequest, - response: tensorboard_service.ListTensorboardExperimentsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = tensorboard_service.ListTensorboardExperimentsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[tensorboard_service.ListTensorboardExperimentsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[tensorboard_experiment.TensorboardExperiment]: - for page in self.pages: - yield from page.tensorboard_experiments - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTensorboardExperimentsAsyncPager: - """A pager for iterating through ``list_tensorboard_experiments`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``tensorboard_experiments`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListTensorboardExperiments`` requests and continue to iterate - through the ``tensorboard_experiments`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[tensorboard_service.ListTensorboardExperimentsResponse]], - request: tensorboard_service.ListTensorboardExperimentsRequest, - response: tensorboard_service.ListTensorboardExperimentsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = tensorboard_service.ListTensorboardExperimentsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[tensorboard_service.ListTensorboardExperimentsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[tensorboard_experiment.TensorboardExperiment]: - async def async_generator(): - async for page in self.pages: - for response in page.tensorboard_experiments: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTensorboardRunsPager: - """A pager for iterating through ``list_tensorboard_runs`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``tensorboard_runs`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListTensorboardRuns`` requests and continue to iterate - through the ``tensorboard_runs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., tensorboard_service.ListTensorboardRunsResponse], - request: tensorboard_service.ListTensorboardRunsRequest, - response: tensorboard_service.ListTensorboardRunsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = tensorboard_service.ListTensorboardRunsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[tensorboard_service.ListTensorboardRunsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[tensorboard_run.TensorboardRun]: - for page in self.pages: - yield from page.tensorboard_runs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTensorboardRunsAsyncPager: - """A pager for iterating through ``list_tensorboard_runs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``tensorboard_runs`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListTensorboardRuns`` requests and continue to iterate - through the ``tensorboard_runs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[tensorboard_service.ListTensorboardRunsResponse]], - request: tensorboard_service.ListTensorboardRunsRequest, - response: tensorboard_service.ListTensorboardRunsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest): - The initial request object. 
- response (google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = tensorboard_service.ListTensorboardRunsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[tensorboard_service.ListTensorboardRunsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[tensorboard_run.TensorboardRun]: - async def async_generator(): - async for page in self.pages: - for response in page.tensorboard_runs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTensorboardTimeSeriesPager: - """A pager for iterating through ``list_tensorboard_time_series`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``tensorboard_time_series`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListTensorboardTimeSeries`` requests and continue to iterate - through the ``tensorboard_time_series`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., tensorboard_service.ListTensorboardTimeSeriesResponse], - request: tensorboard_service.ListTensorboardTimeSeriesRequest, - response: tensorboard_service.ListTensorboardTimeSeriesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = tensorboard_service.ListTensorboardTimeSeriesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[tensorboard_service.ListTensorboardTimeSeriesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[tensorboard_time_series.TensorboardTimeSeries]: - for page in self.pages: - yield from page.tensorboard_time_series - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTensorboardTimeSeriesAsyncPager: - """A pager for iterating through ``list_tensorboard_time_series`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``tensorboard_time_series`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListTensorboardTimeSeries`` requests and continue to iterate - through the ``tensorboard_time_series`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[tensorboard_service.ListTensorboardTimeSeriesResponse]], - request: tensorboard_service.ListTensorboardTimeSeriesRequest, - response: tensorboard_service.ListTensorboardTimeSeriesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = tensorboard_service.ListTensorboardTimeSeriesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[tensorboard_service.ListTensorboardTimeSeriesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[tensorboard_time_series.TensorboardTimeSeries]: - async def async_generator(): - async for page in self.pages: - for response in page.tensorboard_time_series: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ExportTensorboardTimeSeriesDataPager: - """A pager for iterating through ``export_tensorboard_time_series_data`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse` object, and - provides an ``__iter__`` method to iterate through its - ``time_series_data_points`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ExportTensorboardTimeSeriesData`` requests and continue to iterate - through the ``time_series_data_points`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., tensorboard_service.ExportTensorboardTimeSeriesDataResponse], - request: tensorboard_service.ExportTensorboardTimeSeriesDataRequest, - response: tensorboard_service.ExportTensorboardTimeSeriesDataResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[tensorboard_data.TimeSeriesDataPoint]: - for page in self.pages: - yield from page.time_series_data_points - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ExportTensorboardTimeSeriesDataAsyncPager: - """A pager for iterating through ``export_tensorboard_time_series_data`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``time_series_data_points`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ExportTensorboardTimeSeriesData`` requests and continue to iterate - through the ``time_series_data_points`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]], - request: tensorboard_service.ExportTensorboardTimeSeriesDataRequest, - response: tensorboard_service.ExportTensorboardTimeSeriesDataResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. 
- - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[tensorboard_data.TimeSeriesDataPoint]: - async def async_generator(): - async for page in self.pages: - for response in page.time_series_data_points: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/__init__.py deleted file mode 100644 index 9565b55932..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import TensorboardServiceTransport -from .grpc import TensorboardServiceGrpcTransport -from .grpc_asyncio import TensorboardServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. 
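The async pagers above are consumed with ``async for``, which drives the same page-token loop through the event loop. A minimal sketch, assuming application default credentials and a hypothetical TensorboardTimeSeries resource name:

    import asyncio

    from google.cloud.aiplatform_v1beta1.services.tensorboard_service import TensorboardServiceAsyncClient

    async def dump_points(name: str) -> None:
        client = TensorboardServiceAsyncClient()
        # Awaiting the method returns the AsyncPager; `async for` then
        # fetches additional pages transparently.
        pager = await client.export_tensorboard_time_series_data(
            tensorboard_time_series=name)
        async for point in pager:
            print(point.step)

    # asyncio.run(dump_points("projects/p/locations/us-central1/tensorboards/1"
    #                         "/experiments/e/runs/r/timeSeries/123"))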
-_transport_registry = OrderedDict() # type: Dict[str, Type[TensorboardServiceTransport]] -_transport_registry['grpc'] = TensorboardServiceGrpcTransport -_transport_registry['grpc_asyncio'] = TensorboardServiceGrpcAsyncIOTransport - -__all__ = ( - 'TensorboardServiceTransport', - 'TensorboardServiceGrpcTransport', - 'TensorboardServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py deleted file mode 100644 index 292d2c4990..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py +++ /dev/null @@ -1,504 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import tensorboard -from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment -from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment as gca_tensorboard_experiment -from google.cloud.aiplatform_v1beta1.types import tensorboard_run -from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run -from google.cloud.aiplatform_v1beta1.types import tensorboard_service -from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series -from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series as gca_tensorboard_time_series -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class TensorboardServiceTransport(abc.ABC): - """Abstract transport class for TensorboardService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - 
self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # TODO(busunkim): This method is in the base transport - # to avoid duplicating code across the transport classes. These functions - # should be deleted once the minimum required version of google-auth is increased. - - # TODO: Remove this function once google-auth >= 1.25.0 is required - @classmethod - def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: - """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" - - scopes_kwargs = {} - - if _GOOGLE_AUTH_VERSION and ( - packaging.version.parse(_GOOGLE_AUTH_VERSION) - >= packaging.version.parse("1.25.0") - ): - scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} - else: - scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} - - return scopes_kwargs - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods.
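Every entry in the ``_wrapped_methods`` dictionary built below comes from ``gapic_v1.method.wrap_method``, which layers default retry/timeout behavior and user-agent metadata onto the raw transport callable. A minimal standalone sketch of what that wrapping provides (``echo`` is a hypothetical stand-in for a transport stub, and the retry values are illustrative):

    from google.api_core import gapic_v1
    from google.api_core import retry as retries

    def echo(request, **kwargs):
        """Hypothetical stand-in for a transport stub; real stubs call the server."""
        return request

    wrapped = gapic_v1.method.wrap_method(
        echo,
        default_retry=retries.Retry(initial=0.1, maximum=5.0, multiplier=2.0),
        default_timeout=10.0,
        client_info=gapic_v1.client_info.ClientInfo(),
    )

    # Per-call overrides still work, just as in the client methods above.
    print(wrapped("hello", timeout=2.0))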
- self._wrapped_methods = { - self.create_tensorboard: gapic_v1.method.wrap_method( - self.create_tensorboard, - default_timeout=None, - client_info=client_info, - ), - self.get_tensorboard: gapic_v1.method.wrap_method( - self.get_tensorboard, - default_timeout=None, - client_info=client_info, - ), - self.update_tensorboard: gapic_v1.method.wrap_method( - self.update_tensorboard, - default_timeout=None, - client_info=client_info, - ), - self.list_tensorboards: gapic_v1.method.wrap_method( - self.list_tensorboards, - default_timeout=None, - client_info=client_info, - ), - self.delete_tensorboard: gapic_v1.method.wrap_method( - self.delete_tensorboard, - default_timeout=None, - client_info=client_info, - ), - self.create_tensorboard_experiment: gapic_v1.method.wrap_method( - self.create_tensorboard_experiment, - default_timeout=None, - client_info=client_info, - ), - self.get_tensorboard_experiment: gapic_v1.method.wrap_method( - self.get_tensorboard_experiment, - default_timeout=None, - client_info=client_info, - ), - self.update_tensorboard_experiment: gapic_v1.method.wrap_method( - self.update_tensorboard_experiment, - default_timeout=None, - client_info=client_info, - ), - self.list_tensorboard_experiments: gapic_v1.method.wrap_method( - self.list_tensorboard_experiments, - default_timeout=None, - client_info=client_info, - ), - self.delete_tensorboard_experiment: gapic_v1.method.wrap_method( - self.delete_tensorboard_experiment, - default_timeout=None, - client_info=client_info, - ), - self.create_tensorboard_run: gapic_v1.method.wrap_method( - self.create_tensorboard_run, - default_timeout=None, - client_info=client_info, - ), - self.get_tensorboard_run: gapic_v1.method.wrap_method( - self.get_tensorboard_run, - default_timeout=None, - client_info=client_info, - ), - self.update_tensorboard_run: gapic_v1.method.wrap_method( - self.update_tensorboard_run, - default_timeout=None, - client_info=client_info, - ), - self.list_tensorboard_runs: gapic_v1.method.wrap_method( - self.list_tensorboard_runs, - default_timeout=None, - client_info=client_info, - ), - self.delete_tensorboard_run: gapic_v1.method.wrap_method( - self.delete_tensorboard_run, - default_timeout=None, - client_info=client_info, - ), - self.create_tensorboard_time_series: gapic_v1.method.wrap_method( - self.create_tensorboard_time_series, - default_timeout=None, - client_info=client_info, - ), - self.get_tensorboard_time_series: gapic_v1.method.wrap_method( - self.get_tensorboard_time_series, - default_timeout=None, - client_info=client_info, - ), - self.update_tensorboard_time_series: gapic_v1.method.wrap_method( - self.update_tensorboard_time_series, - default_timeout=None, - client_info=client_info, - ), - self.list_tensorboard_time_series: gapic_v1.method.wrap_method( - self.list_tensorboard_time_series, - default_timeout=None, - client_info=client_info, - ), - self.delete_tensorboard_time_series: gapic_v1.method.wrap_method( - self.delete_tensorboard_time_series, - default_timeout=None, - client_info=client_info, - ), - self.read_tensorboard_time_series_data: gapic_v1.method.wrap_method( - self.read_tensorboard_time_series_data, - default_timeout=None, - client_info=client_info, - ), - self.read_tensorboard_blob_data: gapic_v1.method.wrap_method( - self.read_tensorboard_blob_data, - default_timeout=None, - client_info=client_info, - ), - self.write_tensorboard_run_data: gapic_v1.method.wrap_method( - self.write_tensorboard_run_data, - default_timeout=None, - client_info=client_info, - ), - 
self.export_tensorboard_time_series_data: gapic_v1.method.wrap_method( - self.export_tensorboard_time_series_data, - default_timeout=None, - client_info=client_info, - ), - } - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_tensorboard(self) -> Callable[ - [tensorboard_service.CreateTensorboardRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_tensorboard(self) -> Callable[ - [tensorboard_service.GetTensorboardRequest], - Union[ - tensorboard.Tensorboard, - Awaitable[tensorboard.Tensorboard] - ]]: - raise NotImplementedError() - - @property - def update_tensorboard(self) -> Callable[ - [tensorboard_service.UpdateTensorboardRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def list_tensorboards(self) -> Callable[ - [tensorboard_service.ListTensorboardsRequest], - Union[ - tensorboard_service.ListTensorboardsResponse, - Awaitable[tensorboard_service.ListTensorboardsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_tensorboard(self) -> Callable[ - [tensorboard_service.DeleteTensorboardRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def create_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.CreateTensorboardExperimentRequest], - Union[ - gca_tensorboard_experiment.TensorboardExperiment, - Awaitable[gca_tensorboard_experiment.TensorboardExperiment] - ]]: - raise NotImplementedError() - - @property - def get_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.GetTensorboardExperimentRequest], - Union[ - tensorboard_experiment.TensorboardExperiment, - Awaitable[tensorboard_experiment.TensorboardExperiment] - ]]: - raise NotImplementedError() - - @property - def update_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.UpdateTensorboardExperimentRequest], - Union[ - gca_tensorboard_experiment.TensorboardExperiment, - Awaitable[gca_tensorboard_experiment.TensorboardExperiment] - ]]: - raise NotImplementedError() - - @property - def list_tensorboard_experiments(self) -> Callable[ - [tensorboard_service.ListTensorboardExperimentsRequest], - Union[ - tensorboard_service.ListTensorboardExperimentsResponse, - Awaitable[tensorboard_service.ListTensorboardExperimentsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.DeleteTensorboardExperimentRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def create_tensorboard_run(self) -> Callable[ - [tensorboard_service.CreateTensorboardRunRequest], - Union[ - gca_tensorboard_run.TensorboardRun, - Awaitable[gca_tensorboard_run.TensorboardRun] - ]]: - raise NotImplementedError() - - @property - def get_tensorboard_run(self) -> Callable[ - [tensorboard_service.GetTensorboardRunRequest], - Union[ - tensorboard_run.TensorboardRun, - Awaitable[tensorboard_run.TensorboardRun] - ]]: - raise NotImplementedError() - - @property - def update_tensorboard_run(self) -> Callable[ - [tensorboard_service.UpdateTensorboardRunRequest], - Union[ - gca_tensorboard_run.TensorboardRun, - 
Awaitable[gca_tensorboard_run.TensorboardRun] - ]]: - raise NotImplementedError() - - @property - def list_tensorboard_runs(self) -> Callable[ - [tensorboard_service.ListTensorboardRunsRequest], - Union[ - tensorboard_service.ListTensorboardRunsResponse, - Awaitable[tensorboard_service.ListTensorboardRunsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_tensorboard_run(self) -> Callable[ - [tensorboard_service.DeleteTensorboardRunRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def create_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.CreateTensorboardTimeSeriesRequest], - Union[ - gca_tensorboard_time_series.TensorboardTimeSeries, - Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries] - ]]: - raise NotImplementedError() - - @property - def get_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.GetTensorboardTimeSeriesRequest], - Union[ - tensorboard_time_series.TensorboardTimeSeries, - Awaitable[tensorboard_time_series.TensorboardTimeSeries] - ]]: - raise NotImplementedError() - - @property - def update_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.UpdateTensorboardTimeSeriesRequest], - Union[ - gca_tensorboard_time_series.TensorboardTimeSeries, - Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries] - ]]: - raise NotImplementedError() - - @property - def list_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.ListTensorboardTimeSeriesRequest], - Union[ - tensorboard_service.ListTensorboardTimeSeriesResponse, - Awaitable[tensorboard_service.ListTensorboardTimeSeriesResponse] - ]]: - raise NotImplementedError() - - @property - def delete_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.DeleteTensorboardTimeSeriesRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def read_tensorboard_time_series_data(self) -> Callable[ - [tensorboard_service.ReadTensorboardTimeSeriesDataRequest], - Union[ - tensorboard_service.ReadTensorboardTimeSeriesDataResponse, - Awaitable[tensorboard_service.ReadTensorboardTimeSeriesDataResponse] - ]]: - raise NotImplementedError() - - @property - def read_tensorboard_blob_data(self) -> Callable[ - [tensorboard_service.ReadTensorboardBlobDataRequest], - Union[ - tensorboard_service.ReadTensorboardBlobDataResponse, - Awaitable[tensorboard_service.ReadTensorboardBlobDataResponse] - ]]: - raise NotImplementedError() - - @property - def write_tensorboard_run_data(self) -> Callable[ - [tensorboard_service.WriteTensorboardRunDataRequest], - Union[ - tensorboard_service.WriteTensorboardRunDataResponse, - Awaitable[tensorboard_service.WriteTensorboardRunDataResponse] - ]]: - raise NotImplementedError() - - @property - def export_tensorboard_time_series_data(self) -> Callable[ - [tensorboard_service.ExportTensorboardTimeSeriesDataRequest], - Union[ - tensorboard_service.ExportTensorboardTimeSeriesDataResponse, - Awaitable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'TensorboardServiceTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py deleted file mode 100644 index 1f8bd336b4..0000000000 --- 
a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py +++ /dev/null @@ -1,889 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.api_core import gapic_v1 # type: ignore -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1beta1.types import tensorboard -from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment -from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment as gca_tensorboard_experiment -from google.cloud.aiplatform_v1beta1.types import tensorboard_run -from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run -from google.cloud.aiplatform_v1beta1.types import tensorboard_service -from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series -from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series as gca_tensorboard_time_series -from google.longrunning import operations_pb2 # type: ignore -from .base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO - - -class TensorboardServiceGrpcTransport(TensorboardServiceTransport): - """gRPC backend transport for TensorboardService. - - TensorboardService - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials.
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): An optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache.
- return self._operations_client - - @property - def create_tensorboard(self) -> Callable[ - [tensorboard_service.CreateTensorboardRequest], - operations_pb2.Operation]: - r"""Return a callable for the create tensorboard method over gRPC. - - Creates a Tensorboard. - - Returns: - Callable[[~.CreateTensorboardRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_tensorboard' not in self._stubs: - self._stubs['create_tensorboard'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboard', - request_serializer=tensorboard_service.CreateTensorboardRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_tensorboard'] - - @property - def get_tensorboard(self) -> Callable[ - [tensorboard_service.GetTensorboardRequest], - tensorboard.Tensorboard]: - r"""Return a callable for the get tensorboard method over gRPC. - - Gets a Tensorboard. - - Returns: - Callable[[~.GetTensorboardRequest], - ~.Tensorboard]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_tensorboard' not in self._stubs: - self._stubs['get_tensorboard'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboard', - request_serializer=tensorboard_service.GetTensorboardRequest.serialize, - response_deserializer=tensorboard.Tensorboard.deserialize, - ) - return self._stubs['get_tensorboard'] - - @property - def update_tensorboard(self) -> Callable[ - [tensorboard_service.UpdateTensorboardRequest], - operations_pb2.Operation]: - r"""Return a callable for the update tensorboard method over gRPC. - - Updates a Tensorboard. - - Returns: - Callable[[~.UpdateTensorboardRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_tensorboard' not in self._stubs: - self._stubs['update_tensorboard'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboard', - request_serializer=tensorboard_service.UpdateTensorboardRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_tensorboard'] - - @property - def list_tensorboards(self) -> Callable[ - [tensorboard_service.ListTensorboardsRequest], - tensorboard_service.ListTensorboardsResponse]: - r"""Return a callable for the list tensorboards method over gRPC. - - Lists Tensorboards in a Location. - - Returns: - Callable[[~.ListTensorboardsRequest], - ~.ListTensorboardsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_tensorboards' not in self._stubs: - self._stubs['list_tensorboards'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboards', - request_serializer=tensorboard_service.ListTensorboardsRequest.serialize, - response_deserializer=tensorboard_service.ListTensorboardsResponse.deserialize, - ) - return self._stubs['list_tensorboards'] - - @property - def delete_tensorboard(self) -> Callable[ - [tensorboard_service.DeleteTensorboardRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete tensorboard method over gRPC. - - Deletes a Tensorboard. - - Returns: - Callable[[~.DeleteTensorboardRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_tensorboard' not in self._stubs: - self._stubs['delete_tensorboard'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboard', - request_serializer=tensorboard_service.DeleteTensorboardRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_tensorboard'] - - @property - def create_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.CreateTensorboardExperimentRequest], - gca_tensorboard_experiment.TensorboardExperiment]: - r"""Return a callable for the create tensorboard experiment method over gRPC. - - Creates a TensorboardExperiment. - - Returns: - Callable[[~.CreateTensorboardExperimentRequest], - ~.TensorboardExperiment]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_tensorboard_experiment' not in self._stubs: - self._stubs['create_tensorboard_experiment'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardExperiment', - request_serializer=tensorboard_service.CreateTensorboardExperimentRequest.serialize, - response_deserializer=gca_tensorboard_experiment.TensorboardExperiment.deserialize, - ) - return self._stubs['create_tensorboard_experiment'] - - @property - def get_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.GetTensorboardExperimentRequest], - tensorboard_experiment.TensorboardExperiment]: - r"""Return a callable for the get tensorboard experiment method over gRPC. - - Gets a TensorboardExperiment. - - Returns: - Callable[[~.GetTensorboardExperimentRequest], - ~.TensorboardExperiment]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
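Every stub property in this transport (including the one whose wiring continues just below) repeats one lazy-initialization idiom: build the `unary_unary` callable for a method path on first access, cache it in `self._stubs`, and return the cached callable afterwards. A generic sketch of the idiom, with illustrative names that are not part of the generated surface:

```python
class StubCache:
    """Wires up one gRPC callable per fully-qualified method path, lazily."""

    def __init__(self, channel):
        self._channel = channel
        self._stubs = {}

    def get(self, name, path, request_serializer, response_deserializer):
        if name not in self._stubs:
            # grpc.Channel.unary_unary returns a callable bound to `path`;
            # gRPC applies the two (de)serialization functions on each call.
            self._stubs[name] = self._channel.unary_unary(
                path,
                request_serializer=request_serializer,
                response_deserializer=response_deserializer,
            )
        return self._stubs[name]
```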
- if 'get_tensorboard_experiment' not in self._stubs: - self._stubs['get_tensorboard_experiment'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardExperiment', - request_serializer=tensorboard_service.GetTensorboardExperimentRequest.serialize, - response_deserializer=tensorboard_experiment.TensorboardExperiment.deserialize, - ) - return self._stubs['get_tensorboard_experiment'] - - @property - def update_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.UpdateTensorboardExperimentRequest], - gca_tensorboard_experiment.TensorboardExperiment]: - r"""Return a callable for the update tensorboard experiment method over gRPC. - - Updates a TensorboardExperiment. - - Returns: - Callable[[~.UpdateTensorboardExperimentRequest], - ~.TensorboardExperiment]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_tensorboard_experiment' not in self._stubs: - self._stubs['update_tensorboard_experiment'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardExperiment', - request_serializer=tensorboard_service.UpdateTensorboardExperimentRequest.serialize, - response_deserializer=gca_tensorboard_experiment.TensorboardExperiment.deserialize, - ) - return self._stubs['update_tensorboard_experiment'] - - @property - def list_tensorboard_experiments(self) -> Callable[ - [tensorboard_service.ListTensorboardExperimentsRequest], - tensorboard_service.ListTensorboardExperimentsResponse]: - r"""Return a callable for the list tensorboard experiments method over gRPC. - - Lists TensorboardExperiments in a Location. - - Returns: - Callable[[~.ListTensorboardExperimentsRequest], - ~.ListTensorboardExperimentsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_tensorboard_experiments' not in self._stubs: - self._stubs['list_tensorboard_experiments'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardExperiments', - request_serializer=tensorboard_service.ListTensorboardExperimentsRequest.serialize, - response_deserializer=tensorboard_service.ListTensorboardExperimentsResponse.deserialize, - ) - return self._stubs['list_tensorboard_experiments'] - - @property - def delete_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.DeleteTensorboardExperimentRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete tensorboard experiment method over gRPC. - - Deletes a TensorboardExperiment. - - Returns: - Callable[[~.DeleteTensorboardExperimentRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_tensorboard_experiment' not in self._stubs: - self._stubs['delete_tensorboard_experiment'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardExperiment', - request_serializer=tensorboard_service.DeleteTensorboardExperimentRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_tensorboard_experiment'] - - @property - def create_tensorboard_run(self) -> Callable[ - [tensorboard_service.CreateTensorboardRunRequest], - gca_tensorboard_run.TensorboardRun]: - r"""Return a callable for the create tensorboard run method over gRPC. - - Creates a TensorboardRun. - - Returns: - Callable[[~.CreateTensorboardRunRequest], - ~.TensorboardRun]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_tensorboard_run' not in self._stubs: - self._stubs['create_tensorboard_run'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardRun', - request_serializer=tensorboard_service.CreateTensorboardRunRequest.serialize, - response_deserializer=gca_tensorboard_run.TensorboardRun.deserialize, - ) - return self._stubs['create_tensorboard_run'] - - @property - def get_tensorboard_run(self) -> Callable[ - [tensorboard_service.GetTensorboardRunRequest], - tensorboard_run.TensorboardRun]: - r"""Return a callable for the get tensorboard run method over gRPC. - - Gets a TensorboardRun. - - Returns: - Callable[[~.GetTensorboardRunRequest], - ~.TensorboardRun]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_tensorboard_run' not in self._stubs: - self._stubs['get_tensorboard_run'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardRun', - request_serializer=tensorboard_service.GetTensorboardRunRequest.serialize, - response_deserializer=tensorboard_run.TensorboardRun.deserialize, - ) - return self._stubs['get_tensorboard_run'] - - @property - def update_tensorboard_run(self) -> Callable[ - [tensorboard_service.UpdateTensorboardRunRequest], - gca_tensorboard_run.TensorboardRun]: - r"""Return a callable for the update tensorboard run method over gRPC. - - Updates a TensorboardRun. - - Returns: - Callable[[~.UpdateTensorboardRunRequest], - ~.TensorboardRun]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_tensorboard_run' not in self._stubs: - self._stubs['update_tensorboard_run'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardRun', - request_serializer=tensorboard_service.UpdateTensorboardRunRequest.serialize, - response_deserializer=gca_tensorboard_run.TensorboardRun.deserialize, - ) - return self._stubs['update_tensorboard_run'] - - @property - def list_tensorboard_runs(self) -> Callable[ - [tensorboard_service.ListTensorboardRunsRequest], - tensorboard_service.ListTensorboardRunsResponse]: - r"""Return a callable for the list tensorboard runs method over gRPC. - - Lists TensorboardRuns in a Location. - - Returns: - Callable[[~.ListTensorboardRunsRequest], - ~.ListTensorboardRunsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_tensorboard_runs' not in self._stubs: - self._stubs['list_tensorboard_runs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardRuns', - request_serializer=tensorboard_service.ListTensorboardRunsRequest.serialize, - response_deserializer=tensorboard_service.ListTensorboardRunsResponse.deserialize, - ) - return self._stubs['list_tensorboard_runs'] - - @property - def delete_tensorboard_run(self) -> Callable[ - [tensorboard_service.DeleteTensorboardRunRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete tensorboard run method over gRPC. - - Deletes a TensorboardRun. - - Returns: - Callable[[~.DeleteTensorboardRunRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_tensorboard_run' not in self._stubs: - self._stubs['delete_tensorboard_run'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardRun', - request_serializer=tensorboard_service.DeleteTensorboardRunRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_tensorboard_run'] - - @property - def create_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.CreateTensorboardTimeSeriesRequest], - gca_tensorboard_time_series.TensorboardTimeSeries]: - r"""Return a callable for the create tensorboard time series method over gRPC. - - Creates a TensorboardTimeSeries. - - Returns: - Callable[[~.CreateTensorboardTimeSeriesRequest], - ~.TensorboardTimeSeries]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_tensorboard_time_series' not in self._stubs: - self._stubs['create_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardTimeSeries', - request_serializer=tensorboard_service.CreateTensorboardTimeSeriesRequest.serialize, - response_deserializer=gca_tensorboard_time_series.TensorboardTimeSeries.deserialize, - ) - return self._stubs['create_tensorboard_time_series'] - - @property - def get_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.GetTensorboardTimeSeriesRequest], - tensorboard_time_series.TensorboardTimeSeries]: - r"""Return a callable for the get tensorboard time series method over gRPC. - - Gets a TensorboardTimeSeries. - - Returns: - Callable[[~.GetTensorboardTimeSeriesRequest], - ~.TensorboardTimeSeries]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_tensorboard_time_series' not in self._stubs: - self._stubs['get_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardTimeSeries', - request_serializer=tensorboard_service.GetTensorboardTimeSeriesRequest.serialize, - response_deserializer=tensorboard_time_series.TensorboardTimeSeries.deserialize, - ) - return self._stubs['get_tensorboard_time_series'] - - @property - def update_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.UpdateTensorboardTimeSeriesRequest], - gca_tensorboard_time_series.TensorboardTimeSeries]: - r"""Return a callable for the update tensorboard time series method over gRPC. - - Updates a TensorboardTimeSeries. - - Returns: - Callable[[~.UpdateTensorboardTimeSeriesRequest], - ~.TensorboardTimeSeries]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_tensorboard_time_series' not in self._stubs: - self._stubs['update_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardTimeSeries', - request_serializer=tensorboard_service.UpdateTensorboardTimeSeriesRequest.serialize, - response_deserializer=gca_tensorboard_time_series.TensorboardTimeSeries.deserialize, - ) - return self._stubs['update_tensorboard_time_series'] - - @property - def list_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.ListTensorboardTimeSeriesRequest], - tensorboard_service.ListTensorboardTimeSeriesResponse]: - r"""Return a callable for the list tensorboard time series method over gRPC. - - Lists TensorboardTimeSeries in a Location. - - Returns: - Callable[[~.ListTensorboardTimeSeriesRequest], - ~.ListTensorboardTimeSeriesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_tensorboard_time_series' not in self._stubs: - self._stubs['list_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardTimeSeries', - request_serializer=tensorboard_service.ListTensorboardTimeSeriesRequest.serialize, - response_deserializer=tensorboard_service.ListTensorboardTimeSeriesResponse.deserialize, - ) - return self._stubs['list_tensorboard_time_series'] - - @property - def delete_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.DeleteTensorboardTimeSeriesRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete tensorboard time series method over gRPC. - - Deletes a TensorboardTimeSeries. - - Returns: - Callable[[~.DeleteTensorboardTimeSeriesRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_tensorboard_time_series' not in self._stubs: - self._stubs['delete_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardTimeSeries', - request_serializer=tensorboard_service.DeleteTensorboardTimeSeriesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_tensorboard_time_series'] - - @property - def read_tensorboard_time_series_data(self) -> Callable[ - [tensorboard_service.ReadTensorboardTimeSeriesDataRequest], - tensorboard_service.ReadTensorboardTimeSeriesDataResponse]: - r"""Return a callable for the read tensorboard time series - data method over gRPC. - - Reads a TensorboardTimeSeries' data. Data is returned in - paginated responses. By default, if the number of data points - stored is less than 1000, all data will be returned. Otherwise, - 1000 data points will be randomly selected from this time series - and returned. This value can be changed by changing - max_data_points. - - Returns: - Callable[[~.ReadTensorboardTimeSeriesDataRequest], - ~.ReadTensorboardTimeSeriesDataResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'read_tensorboard_time_series_data' not in self._stubs: - self._stubs['read_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/ReadTensorboardTimeSeriesData', - request_serializer=tensorboard_service.ReadTensorboardTimeSeriesDataRequest.serialize, - response_deserializer=tensorboard_service.ReadTensorboardTimeSeriesDataResponse.deserialize, - ) - return self._stubs['read_tensorboard_time_series_data'] - - @property - def read_tensorboard_blob_data(self) -> Callable[ - [tensorboard_service.ReadTensorboardBlobDataRequest], - tensorboard_service.ReadTensorboardBlobDataResponse]: - r"""Return a callable for the read tensorboard blob data method over gRPC. - - Gets bytes of TensorboardBlobs. - This is to allow reading blob data stored in consumer - project's Cloud Storage bucket without users having to - obtain Cloud Storage access permission. 
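The `read_tensorboard_time_series_data` docstring above spells out the sampling rule: at most 1000 points come back unless the request's `max_data_points` overrides it. An illustrative sketch of such a request (the resource name is a placeholder):

```python
from google.cloud.aiplatform_v1beta1.types import tensorboard_service

request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest(
    tensorboard_time_series=(
        "projects/my-project/locations/us-central1/tensorboards/123"
        "/experiments/my-experiment/runs/my-run/timeSeries/456"
    ),
    max_data_points=100,  # sample at most 100 points instead of the default 1000
)
```

The `read_tensorboard_blob_data` docstring resumes below.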
- - Returns: - Callable[[~.ReadTensorboardBlobDataRequest], - ~.ReadTensorboardBlobDataResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'read_tensorboard_blob_data' not in self._stubs: - self._stubs['read_tensorboard_blob_data'] = self.grpc_channel.unary_stream( - '/google.cloud.aiplatform.v1beta1.TensorboardService/ReadTensorboardBlobData', - request_serializer=tensorboard_service.ReadTensorboardBlobDataRequest.serialize, - response_deserializer=tensorboard_service.ReadTensorboardBlobDataResponse.deserialize, - ) - return self._stubs['read_tensorboard_blob_data'] - - @property - def write_tensorboard_run_data(self) -> Callable[ - [tensorboard_service.WriteTensorboardRunDataRequest], - tensorboard_service.WriteTensorboardRunDataResponse]: - r"""Return a callable for the write tensorboard run data method over gRPC. - - Write time series data points into multiple - TensorboardTimeSeries under a TensorboardRun. If any - data fail to be ingested, an error will be returned. - - Returns: - Callable[[~.WriteTensorboardRunDataRequest], - ~.WriteTensorboardRunDataResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'write_tensorboard_run_data' not in self._stubs: - self._stubs['write_tensorboard_run_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/WriteTensorboardRunData', - request_serializer=tensorboard_service.WriteTensorboardRunDataRequest.serialize, - response_deserializer=tensorboard_service.WriteTensorboardRunDataResponse.deserialize, - ) - return self._stubs['write_tensorboard_run_data'] - - @property - def export_tensorboard_time_series_data(self) -> Callable[ - [tensorboard_service.ExportTensorboardTimeSeriesDataRequest], - tensorboard_service.ExportTensorboardTimeSeriesDataResponse]: - r"""Return a callable for the export tensorboard time series - data method over gRPC. - - Exports a TensorboardTimeSeries' data. Data is - returned in paginated responses. - - Returns: - Callable[[~.ExportTensorboardTimeSeriesDataRequest], - ~.ExportTensorboardTimeSeriesDataResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
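Note that `read_tensorboard_blob_data` above is wired with `unary_stream` rather than `unary_unary`: the returned callable yields a stream of responses. A sketch of draining such a stream, assuming the v1beta1 request/response fields `time_series`, `blob_ids`, and `blobs` (`blob_stub` stands for the callable the property returns):

```python
from google.cloud.aiplatform_v1beta1.types import tensorboard_service

def iter_blobs(blob_stub, time_series_name, blob_ids):
    """Yield (id, bytes) pairs from the server-streaming blob RPC."""
    request = tensorboard_service.ReadTensorboardBlobDataRequest(
        time_series=time_series_name,
        blob_ids=blob_ids,
    )
    for response in blob_stub(request):  # one iteration per streamed message
        for blob in response.blobs:      # each message batches several blobs
            yield blob.id, blob.data
```

The `export_tensorboard_time_series_data` wiring continues below.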
- if 'export_tensorboard_time_series_data' not in self._stubs: - self._stubs['export_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/ExportTensorboardTimeSeriesData', - request_serializer=tensorboard_service.ExportTensorboardTimeSeriesDataRequest.serialize, - response_deserializer=tensorboard_service.ExportTensorboardTimeSeriesDataResponse.deserialize, - ) - return self._stubs['export_tensorboard_time_series_data'] - - -__all__ = ( - 'TensorboardServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py deleted file mode 100644 index 3acc82eb36..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,893 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1beta1.types import tensorboard -from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment -from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment as gca_tensorboard_experiment -from google.cloud.aiplatform_v1beta1.types import tensorboard_run -from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run -from google.cloud.aiplatform_v1beta1.types import tensorboard_service -from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series -from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series as gca_tensorboard_time_series -from google.longrunning import operations_pb2 # type: ignore -from .base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import TensorboardServiceGrpcTransport - - -class TensorboardServiceGrpcAsyncIOTransport(TensorboardServiceTransport): - """gRPC AsyncIO backend transport for TensorboardService. - - TensorboardService - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. 
A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. 
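Both channel-option values in the constructor above are `-1`, which in gRPC disables the message-length checks entirely (the default receive limit is roughly 4 MiB), a sensible choice for potentially large tensor payloads. A minimal standalone sketch with a placeholder target:

```python
import grpc

channel = grpc.insecure_channel(
    "localhost:50051",  # placeholder target
    options=[
        ("grpc.max_send_message_length", -1),     # -1 removes the send cap
        ("grpc.max_receive_message_length", -1),  # receive default is ~4 MiB
    ],
)
```

The cached-channel return of the `grpc_channel` property follows below.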
- return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_tensorboard(self) -> Callable[ - [tensorboard_service.CreateTensorboardRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create tensorboard method over gRPC. - - Creates a Tensorboard. - - Returns: - Callable[[~.CreateTensorboardRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_tensorboard' not in self._stubs: - self._stubs['create_tensorboard'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboard', - request_serializer=tensorboard_service.CreateTensorboardRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_tensorboard'] - - @property - def get_tensorboard(self) -> Callable[ - [tensorboard_service.GetTensorboardRequest], - Awaitable[tensorboard.Tensorboard]]: - r"""Return a callable for the get tensorboard method over gRPC. - - Gets a Tensorboard. - - Returns: - Callable[[~.GetTensorboardRequest], - Awaitable[~.Tensorboard]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_tensorboard' not in self._stubs: - self._stubs['get_tensorboard'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboard', - request_serializer=tensorboard_service.GetTensorboardRequest.serialize, - response_deserializer=tensorboard.Tensorboard.deserialize, - ) - return self._stubs['get_tensorboard'] - - @property - def update_tensorboard(self) -> Callable[ - [tensorboard_service.UpdateTensorboardRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the update tensorboard method over gRPC. - - Updates a Tensorboard. - - Returns: - Callable[[~.UpdateTensorboardRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
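Each async callable above returns an `Awaitable`, so the transport is consumed from a coroutine. A sketch one layer up, at the public async client, assuming Application Default Credentials are configured (the resource name is a placeholder):

```python
import asyncio

from google.cloud.aiplatform_v1beta1 import TensorboardServiceAsyncClient

async def main():
    client = TensorboardServiceAsyncClient()
    # The call is awaited; under the hood it goes through the stub above.
    tb = await client.get_tensorboard(
        name="projects/my-project/locations/us-central1/tensorboards/123"
    )
    print(tb.display_name)

asyncio.run(main())
```

The `update_tensorboard` wiring continues below.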
- if 'update_tensorboard' not in self._stubs: - self._stubs['update_tensorboard'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboard', - request_serializer=tensorboard_service.UpdateTensorboardRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_tensorboard'] - - @property - def list_tensorboards(self) -> Callable[ - [tensorboard_service.ListTensorboardsRequest], - Awaitable[tensorboard_service.ListTensorboardsResponse]]: - r"""Return a callable for the list tensorboards method over gRPC. - - Lists Tensorboards in a Location. - - Returns: - Callable[[~.ListTensorboardsRequest], - Awaitable[~.ListTensorboardsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_tensorboards' not in self._stubs: - self._stubs['list_tensorboards'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboards', - request_serializer=tensorboard_service.ListTensorboardsRequest.serialize, - response_deserializer=tensorboard_service.ListTensorboardsResponse.deserialize, - ) - return self._stubs['list_tensorboards'] - - @property - def delete_tensorboard(self) -> Callable[ - [tensorboard_service.DeleteTensorboardRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete tensorboard method over gRPC. - - Deletes a Tensorboard. - - Returns: - Callable[[~.DeleteTensorboardRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_tensorboard' not in self._stubs: - self._stubs['delete_tensorboard'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboard', - request_serializer=tensorboard_service.DeleteTensorboardRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_tensorboard'] - - @property - def create_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.CreateTensorboardExperimentRequest], - Awaitable[gca_tensorboard_experiment.TensorboardExperiment]]: - r"""Return a callable for the create tensorboard experiment method over gRPC. - - Creates a TensorboardExperiment. - - Returns: - Callable[[~.CreateTensorboardExperimentRequest], - Awaitable[~.TensorboardExperiment]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_tensorboard_experiment' not in self._stubs: - self._stubs['create_tensorboard_experiment'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardExperiment', - request_serializer=tensorboard_service.CreateTensorboardExperimentRequest.serialize, - response_deserializer=gca_tensorboard_experiment.TensorboardExperiment.deserialize, - ) - return self._stubs['create_tensorboard_experiment'] - - @property - def get_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.GetTensorboardExperimentRequest], - Awaitable[tensorboard_experiment.TensorboardExperiment]]: - r"""Return a callable for the get tensorboard experiment method over gRPC. - - Gets a TensorboardExperiment. - - Returns: - Callable[[~.GetTensorboardExperimentRequest], - Awaitable[~.TensorboardExperiment]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_tensorboard_experiment' not in self._stubs: - self._stubs['get_tensorboard_experiment'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardExperiment', - request_serializer=tensorboard_service.GetTensorboardExperimentRequest.serialize, - response_deserializer=tensorboard_experiment.TensorboardExperiment.deserialize, - ) - return self._stubs['get_tensorboard_experiment'] - - @property - def update_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.UpdateTensorboardExperimentRequest], - Awaitable[gca_tensorboard_experiment.TensorboardExperiment]]: - r"""Return a callable for the update tensorboard experiment method over gRPC. - - Updates a TensorboardExperiment. - - Returns: - Callable[[~.UpdateTensorboardExperimentRequest], - Awaitable[~.TensorboardExperiment]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_tensorboard_experiment' not in self._stubs: - self._stubs['update_tensorboard_experiment'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardExperiment', - request_serializer=tensorboard_service.UpdateTensorboardExperimentRequest.serialize, - response_deserializer=gca_tensorboard_experiment.TensorboardExperiment.deserialize, - ) - return self._stubs['update_tensorboard_experiment'] - - @property - def list_tensorboard_experiments(self) -> Callable[ - [tensorboard_service.ListTensorboardExperimentsRequest], - Awaitable[tensorboard_service.ListTensorboardExperimentsResponse]]: - r"""Return a callable for the list tensorboard experiments method over gRPC. - - Lists TensorboardExperiments in a Location. - - Returns: - Callable[[~.ListTensorboardExperimentsRequest], - Awaitable[~.ListTensorboardExperimentsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_tensorboard_experiments' not in self._stubs: - self._stubs['list_tensorboard_experiments'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardExperiments', - request_serializer=tensorboard_service.ListTensorboardExperimentsRequest.serialize, - response_deserializer=tensorboard_service.ListTensorboardExperimentsResponse.deserialize, - ) - return self._stubs['list_tensorboard_experiments'] - - @property - def delete_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.DeleteTensorboardExperimentRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete tensorboard experiment method over gRPC. - - Deletes a TensorboardExperiment. - - Returns: - Callable[[~.DeleteTensorboardExperimentRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_tensorboard_experiment' not in self._stubs: - self._stubs['delete_tensorboard_experiment'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardExperiment', - request_serializer=tensorboard_service.DeleteTensorboardExperimentRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_tensorboard_experiment'] - - @property - def create_tensorboard_run(self) -> Callable[ - [tensorboard_service.CreateTensorboardRunRequest], - Awaitable[gca_tensorboard_run.TensorboardRun]]: - r"""Return a callable for the create tensorboard run method over gRPC. - - Creates a TensorboardRun. - - Returns: - Callable[[~.CreateTensorboardRunRequest], - Awaitable[~.TensorboardRun]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_tensorboard_run' not in self._stubs: - self._stubs['create_tensorboard_run'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardRun', - request_serializer=tensorboard_service.CreateTensorboardRunRequest.serialize, - response_deserializer=gca_tensorboard_run.TensorboardRun.deserialize, - ) - return self._stubs['create_tensorboard_run'] - - @property - def get_tensorboard_run(self) -> Callable[ - [tensorboard_service.GetTensorboardRunRequest], - Awaitable[tensorboard_run.TensorboardRun]]: - r"""Return a callable for the get tensorboard run method over gRPC. - - Gets a TensorboardRun. - - Returns: - Callable[[~.GetTensorboardRunRequest], - Awaitable[~.TensorboardRun]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_tensorboard_run' not in self._stubs: - self._stubs['get_tensorboard_run'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardRun', - request_serializer=tensorboard_service.GetTensorboardRunRequest.serialize, - response_deserializer=tensorboard_run.TensorboardRun.deserialize, - ) - return self._stubs['get_tensorboard_run'] - - @property - def update_tensorboard_run(self) -> Callable[ - [tensorboard_service.UpdateTensorboardRunRequest], - Awaitable[gca_tensorboard_run.TensorboardRun]]: - r"""Return a callable for the update tensorboard run method over gRPC. - - Updates a TensorboardRun. - - Returns: - Callable[[~.UpdateTensorboardRunRequest], - Awaitable[~.TensorboardRun]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_tensorboard_run' not in self._stubs: - self._stubs['update_tensorboard_run'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardRun', - request_serializer=tensorboard_service.UpdateTensorboardRunRequest.serialize, - response_deserializer=gca_tensorboard_run.TensorboardRun.deserialize, - ) - return self._stubs['update_tensorboard_run'] - - @property - def list_tensorboard_runs(self) -> Callable[ - [tensorboard_service.ListTensorboardRunsRequest], - Awaitable[tensorboard_service.ListTensorboardRunsResponse]]: - r"""Return a callable for the list tensorboard runs method over gRPC. - - Lists TensorboardRuns in a Location. - - Returns: - Callable[[~.ListTensorboardRunsRequest], - Awaitable[~.ListTensorboardRunsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_tensorboard_runs' not in self._stubs: - self._stubs['list_tensorboard_runs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardRuns', - request_serializer=tensorboard_service.ListTensorboardRunsRequest.serialize, - response_deserializer=tensorboard_service.ListTensorboardRunsResponse.deserialize, - ) - return self._stubs['list_tensorboard_runs'] - - @property - def delete_tensorboard_run(self) -> Callable[ - [tensorboard_service.DeleteTensorboardRunRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete tensorboard run method over gRPC. - - Deletes a TensorboardRun. - - Returns: - Callable[[~.DeleteTensorboardRunRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_tensorboard_run' not in self._stubs: - self._stubs['delete_tensorboard_run'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardRun', - request_serializer=tensorboard_service.DeleteTensorboardRunRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_tensorboard_run'] - - @property - def create_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.CreateTensorboardTimeSeriesRequest], - Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries]]: - r"""Return a callable for the create tensorboard time series method over gRPC. - - Creates a TensorboardTimeSeries. - - Returns: - Callable[[~.CreateTensorboardTimeSeriesRequest], - Awaitable[~.TensorboardTimeSeries]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_tensorboard_time_series' not in self._stubs: - self._stubs['create_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardTimeSeries', - request_serializer=tensorboard_service.CreateTensorboardTimeSeriesRequest.serialize, - response_deserializer=gca_tensorboard_time_series.TensorboardTimeSeries.deserialize, - ) - return self._stubs['create_tensorboard_time_series'] - - @property - def get_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.GetTensorboardTimeSeriesRequest], - Awaitable[tensorboard_time_series.TensorboardTimeSeries]]: - r"""Return a callable for the get tensorboard time series method over gRPC. - - Gets a TensorboardTimeSeries. - - Returns: - Callable[[~.GetTensorboardTimeSeriesRequest], - Awaitable[~.TensorboardTimeSeries]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_tensorboard_time_series' not in self._stubs: - self._stubs['get_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardTimeSeries', - request_serializer=tensorboard_service.GetTensorboardTimeSeriesRequest.serialize, - response_deserializer=tensorboard_time_series.TensorboardTimeSeries.deserialize, - ) - return self._stubs['get_tensorboard_time_series'] - - @property - def update_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.UpdateTensorboardTimeSeriesRequest], - Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries]]: - r"""Return a callable for the update tensorboard time series method over gRPC. - - Updates a TensorboardTimeSeries. - - Returns: - Callable[[~.UpdateTensorboardTimeSeriesRequest], - Awaitable[~.TensorboardTimeSeries]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_tensorboard_time_series' not in self._stubs: - self._stubs['update_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardTimeSeries', - request_serializer=tensorboard_service.UpdateTensorboardTimeSeriesRequest.serialize, - response_deserializer=gca_tensorboard_time_series.TensorboardTimeSeries.deserialize, - ) - return self._stubs['update_tensorboard_time_series'] - - @property - def list_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.ListTensorboardTimeSeriesRequest], - Awaitable[tensorboard_service.ListTensorboardTimeSeriesResponse]]: - r"""Return a callable for the list tensorboard time series method over gRPC. - - Lists TensorboardTimeSeries in a Location. - - Returns: - Callable[[~.ListTensorboardTimeSeriesRequest], - Awaitable[~.ListTensorboardTimeSeriesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_tensorboard_time_series' not in self._stubs: - self._stubs['list_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardTimeSeries', - request_serializer=tensorboard_service.ListTensorboardTimeSeriesRequest.serialize, - response_deserializer=tensorboard_service.ListTensorboardTimeSeriesResponse.deserialize, - ) - return self._stubs['list_tensorboard_time_series'] - - @property - def delete_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.DeleteTensorboardTimeSeriesRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete tensorboard time series method over gRPC. - - Deletes a TensorboardTimeSeries. - - Returns: - Callable[[~.DeleteTensorboardTimeSeriesRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_tensorboard_time_series' not in self._stubs: - self._stubs['delete_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardTimeSeries', - request_serializer=tensorboard_service.DeleteTensorboardTimeSeriesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_tensorboard_time_series'] - - @property - def read_tensorboard_time_series_data(self) -> Callable[ - [tensorboard_service.ReadTensorboardTimeSeriesDataRequest], - Awaitable[tensorboard_service.ReadTensorboardTimeSeriesDataResponse]]: - r"""Return a callable for the read tensorboard time series - data method over gRPC. - - Reads a TensorboardTimeSeries' data. Data is returned in - paginated responses. By default, if the number of data points - stored is less than 1000, all data will be returned. Otherwise, - 1000 data points will be randomly selected from this time series - and returned. This value can be changed by changing - max_data_points. - - Returns: - Callable[[~.ReadTensorboardTimeSeriesDataRequest], - Awaitable[~.ReadTensorboardTimeSeriesDataResponse]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'read_tensorboard_time_series_data' not in self._stubs: - self._stubs['read_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/ReadTensorboardTimeSeriesData', - request_serializer=tensorboard_service.ReadTensorboardTimeSeriesDataRequest.serialize, - response_deserializer=tensorboard_service.ReadTensorboardTimeSeriesDataResponse.deserialize, - ) - return self._stubs['read_tensorboard_time_series_data'] - - @property - def read_tensorboard_blob_data(self) -> Callable[ - [tensorboard_service.ReadTensorboardBlobDataRequest], - Awaitable[tensorboard_service.ReadTensorboardBlobDataResponse]]: - r"""Return a callable for the read tensorboard blob data method over gRPC. - - Gets bytes of TensorboardBlobs. - This is to allow reading blob data stored in consumer - project's Cloud Storage bucket without users having to - obtain Cloud Storage access permission. - - Returns: - Callable[[~.ReadTensorboardBlobDataRequest], - Awaitable[~.ReadTensorboardBlobDataResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'read_tensorboard_blob_data' not in self._stubs: - self._stubs['read_tensorboard_blob_data'] = self.grpc_channel.unary_stream( - '/google.cloud.aiplatform.v1beta1.TensorboardService/ReadTensorboardBlobData', - request_serializer=tensorboard_service.ReadTensorboardBlobDataRequest.serialize, - response_deserializer=tensorboard_service.ReadTensorboardBlobDataResponse.deserialize, - ) - return self._stubs['read_tensorboard_blob_data'] - - @property - def write_tensorboard_run_data(self) -> Callable[ - [tensorboard_service.WriteTensorboardRunDataRequest], - Awaitable[tensorboard_service.WriteTensorboardRunDataResponse]]: - r"""Return a callable for the write tensorboard run data method over gRPC. - - Write time series data points into multiple - TensorboardTimeSeries under a TensorboardRun. If any - data fail to be ingested, an error will be returned. - - Returns: - Callable[[~.WriteTensorboardRunDataRequest], - Awaitable[~.WriteTensorboardRunDataResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'write_tensorboard_run_data' not in self._stubs: - self._stubs['write_tensorboard_run_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/WriteTensorboardRunData', - request_serializer=tensorboard_service.WriteTensorboardRunDataRequest.serialize, - response_deserializer=tensorboard_service.WriteTensorboardRunDataResponse.deserialize, - ) - return self._stubs['write_tensorboard_run_data'] - - @property - def export_tensorboard_time_series_data(self) -> Callable[ - [tensorboard_service.ExportTensorboardTimeSeriesDataRequest], - Awaitable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]]: - r"""Return a callable for the export tensorboard time series - data method over gRPC. - - Exports a TensorboardTimeSeries' data. 
Data is - returned in paginated responses. - - Returns: - Callable[[~.ExportTensorboardTimeSeriesDataRequest], - Awaitable[~.ExportTensorboardTimeSeriesDataResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'export_tensorboard_time_series_data' not in self._stubs: - self._stubs['export_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/ExportTensorboardTimeSeriesData', - request_serializer=tensorboard_service.ExportTensorboardTimeSeriesDataRequest.serialize, - response_deserializer=tensorboard_service.ExportTensorboardTimeSeriesDataResponse.deserialize, - ) - return self._stubs['export_tensorboard_time_series_data'] - - -__all__ = ( - 'TensorboardServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/__init__.py deleted file mode 100644 index d629499098..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import VizierServiceClient -from .async_client import VizierServiceAsyncClient - -__all__ = ( - 'VizierServiceClient', - 'VizierServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py deleted file mode 100644 index 0846b805df..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py +++ /dev/null @@ -1,1279 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.vizier_service import pagers -from google.cloud.aiplatform_v1beta1.types import study -from google.cloud.aiplatform_v1beta1.types import study as gca_study -from google.cloud.aiplatform_v1beta1.types import vizier_service -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import VizierServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import VizierServiceGrpcAsyncIOTransport -from .client import VizierServiceClient - - -class VizierServiceAsyncClient: - """Vertex Vizier API. - Vizier service is a GCP service to solve blackbox optimization - problems, such as tuning machine learning hyperparameters and - searching over deep learning architectures. - """ - - _client: VizierServiceClient - - DEFAULT_ENDPOINT = VizierServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = VizierServiceClient.DEFAULT_MTLS_ENDPOINT - - custom_job_path = staticmethod(VizierServiceClient.custom_job_path) - parse_custom_job_path = staticmethod(VizierServiceClient.parse_custom_job_path) - study_path = staticmethod(VizierServiceClient.study_path) - parse_study_path = staticmethod(VizierServiceClient.parse_study_path) - trial_path = staticmethod(VizierServiceClient.trial_path) - parse_trial_path = staticmethod(VizierServiceClient.parse_trial_path) - common_billing_account_path = staticmethod(VizierServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(VizierServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(VizierServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(VizierServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(VizierServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(VizierServiceClient.parse_common_organization_path) - common_project_path = staticmethod(VizierServiceClient.common_project_path) - parse_common_project_path = staticmethod(VizierServiceClient.parse_common_project_path) - common_location_path = staticmethod(VizierServiceClient.common_location_path) - parse_common_location_path = staticmethod(VizierServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - VizierServiceAsyncClient: The constructed client. 
- """ - return VizierServiceClient.from_service_account_info.__func__(VizierServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - VizierServiceAsyncClient: The constructed client. - """ - return VizierServiceClient.from_service_account_file.__func__(VizierServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> VizierServiceTransport: - """Returns the transport used by the client instance. - - Returns: - VizierServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(VizierServiceClient).get_transport_class, type(VizierServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, VizierServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the vizier service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.VizierServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = VizierServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_study(self, - request: vizier_service.CreateStudyRequest = None, - *, - parent: str = None, - study: gca_study.Study = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_study.Study: - r"""Creates a Study. A resource name will be generated - after creation of the Study. 
- - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.CreateStudyRequest`): - The request object. Request message for - [VizierService.CreateStudy][google.cloud.aiplatform.v1beta1.VizierService.CreateStudy]. - parent (:class:`str`): - Required. The resource name of the Location to create - the CustomJob in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - study (:class:`google.cloud.aiplatform_v1beta1.types.Study`): - Required. The Study configuration - used to create the Study. - - This corresponds to the ``study`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Study: - A message representing a Study. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, study]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.CreateStudyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if study is not None: - request.study = study - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_study, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_study(self, - request: vizier_service.GetStudyRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Study: - r"""Gets a Study by name. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetStudyRequest`): - The request object. Request message for - [VizierService.GetStudy][google.cloud.aiplatform.v1beta1.VizierService.GetStudy]. - name (:class:`str`): - Required. The name of the Study resource. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Study: - A message representing a Study. - """ - # Create or coerce a protobuf request object. 
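For reference, the flattened surface documented above can be exercised roughly as follows. This is an illustrative sketch, not part of the generated file; the project, location, metric, and parameter names are placeholders:

    from google.cloud import aiplatform_v1beta1

    async def make_study() -> aiplatform_v1beta1.Study:
        client = aiplatform_v1beta1.VizierServiceAsyncClient()
        study = aiplatform_v1beta1.Study(
            display_name="lr-sweep",  # illustrative name
            study_spec=aiplatform_v1beta1.StudySpec(
                metrics=[aiplatform_v1beta1.StudySpec.MetricSpec(
                    metric_id="accuracy",
                    goal=aiplatform_v1beta1.StudySpec.MetricSpec.GoalType.MAXIMIZE)],
                parameters=[aiplatform_v1beta1.StudySpec.ParameterSpec(
                    parameter_id="lr",
                    double_value_spec=aiplatform_v1beta1.StudySpec.ParameterSpec.DoubleValueSpec(
                        min_value=1e-4, max_value=1e-1))],
            ),
        )
        # `parent`/`study` may be passed instead of a CreateStudyRequest,
        # but not alongside one (see the sanity check below).
        return await client.create_study(
            parent="projects/my-project/locations/us-central1", study=study)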
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.GetStudyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_study, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_studies(self, - request: vizier_service.ListStudiesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListStudiesAsyncPager: - r"""Lists all the studies in a region for an associated - project. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListStudiesRequest`): - The request object. Request message for - [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies]. - parent (:class:`str`): - Required. The resource name of the Location to list the - Study from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.vizier_service.pagers.ListStudiesAsyncPager: - Response message for - [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.ListStudiesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_studies, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
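The metadata tuple assembled below carries the standard GAPIC routing header. Roughly, `to_grpc_metadata` produces a single `x-goog-request-params` entry whose value is percent-encoded; a sketch with placeholder resource names:

    from google.api_core.gapic_v1 import routing_header

    # Routing entry telling the Vertex AI frontend which resource the
    # request targets; the value is URL-encoded.
    md = routing_header.to_grpc_metadata(
        (("name", "projects/p/locations/l/studies/s"),))
    # md == ("x-goog-request-params",
    #        "name=projects%2Fp%2Flocations%2Fl%2Fstudies%2Fs")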
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListStudiesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_study(self, - request: vizier_service.DeleteStudyRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes a Study. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteStudyRequest`): - The request object. Request message for - [VizierService.DeleteStudy][google.cloud.aiplatform.v1beta1.VizierService.DeleteStudy]. - name (:class:`str`): - Required. The name of the Study resource to be deleted. - Format: - ``projects/{project}/locations/{location}/studies/{study}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.DeleteStudyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_study, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def lookup_study(self, - request: vizier_service.LookupStudyRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Study: - r"""Looks a study up using the user-defined display_name field - instead of the fully qualified resource name. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.LookupStudyRequest`): - The request object. Request message for - [VizierService.LookupStudy][google.cloud.aiplatform.v1beta1.VizierService.LookupStudy]. - parent (:class:`str`): - Required. The resource name of the Location to get the - Study from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Study: - A message representing a Study. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.LookupStudyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.lookup_study, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def suggest_trials(self, - request: vizier_service.SuggestTrialsRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Adds one or more Trials to a Study, with parameter values - suggested by Vertex Vizier. Returns a long-running operation - associated with the generation of Trial suggestions. When this - long-running operation succeeds, it will contain a - [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.SuggestTrialsRequest`): - The request object. Request message for - [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.SuggestTrialsResponse` - Response message for - [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials]. - - """ - # Create or coerce a protobuf request object. - request = vizier_service.SuggestTrialsRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.suggest_trials, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - vizier_service.SuggestTrialsResponse, - metadata_type=vizier_service.SuggestTrialsMetadata, - ) - - # Done; return the response. - return response - - async def create_trial(self, - request: vizier_service.CreateTrialRequest = None, - *, - parent: str = None, - trial: study.Trial = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Trial: - r"""Adds a user provided Trial to a Study. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.CreateTrialRequest`): - The request object. Request message for - [VizierService.CreateTrial][google.cloud.aiplatform.v1beta1.VizierService.CreateTrial]. - parent (:class:`str`): - Required. The resource name of the Study to create the - Trial in. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - trial (:class:`google.cloud.aiplatform_v1beta1.types.Trial`): - Required. The Trial to create. - This corresponds to the ``trial`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Trial: - A message representing a Trial. A - Trial contains a unique set of - Parameters that has been or will be - evaluated, along with the objective - metrics got by running the Trial. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, trial]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.CreateTrialRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if trial is not None: - request.trial = trial - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_trial, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_trial(self, - request: vizier_service.GetTrialRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Trial: - r"""Gets a Trial. 
- - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.GetTrialRequest`): - The request object. Request message for - [VizierService.GetTrial][google.cloud.aiplatform.v1beta1.VizierService.GetTrial]. - name (:class:`str`): - Required. The name of the Trial resource. Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Trial: - A message representing a Trial. A - Trial contains a unique set of - Parameters that has been or will be - evaluated, along with the objective - metrics got by running the Trial. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.GetTrialRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_trial, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_trials(self, - request: vizier_service.ListTrialsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTrialsAsyncPager: - r"""Lists the Trials associated with a Study. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListTrialsRequest`): - The request object. Request message for - [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials]. - parent (:class:`str`): - Required. The resource name of the Study to list the - Trial from. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.vizier_service.pagers.ListTrialsAsyncPager: - Response message for - [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials]. 
- - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.ListTrialsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_trials, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListTrialsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def add_trial_measurement(self, - request: vizier_service.AddTrialMeasurementRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Trial: - r"""Adds a measurement of the objective metrics to a - Trial. This measurement is assumed to have been taken - before the Trial is complete. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.AddTrialMeasurementRequest`): - The request object. Request message for - [VizierService.AddTrialMeasurement][google.cloud.aiplatform.v1beta1.VizierService.AddTrialMeasurement]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Trial: - A message representing a Trial. A - Trial contains a unique set of - Parameters that has been or will be - evaluated, along with the objective - metrics got by running the Trial. - - """ - # Create or coerce a protobuf request object. - request = vizier_service.AddTrialMeasurementRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.add_trial_measurement, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("trial_name", request.trial_name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
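The `ListTrialsAsyncPager` returned by `list_trials` above resolves additional pages transparently during iteration; a minimal sketch, assuming an existing Study name:

    from google.cloud import aiplatform_v1beta1

    async def print_trials(study_name: str) -> None:
        client = aiplatform_v1beta1.VizierServiceAsyncClient()
        pager = await client.list_trials(parent=study_name)
        async for trial in pager:  # pages are fetched lazily
            print(trial.name, trial.state)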
- return response - - async def complete_trial(self, - request: vizier_service.CompleteTrialRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Trial: - r"""Marks a Trial as complete. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.CompleteTrialRequest`): - The request object. Request message for - [VizierService.CompleteTrial][google.cloud.aiplatform.v1beta1.VizierService.CompleteTrial]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Trial: - A message representing a Trial. A - Trial contains a unique set of - Parameters that has been or will be - evaluated, along with the objective - metrics got by running the Trial. - - """ - # Create or coerce a protobuf request object. - request = vizier_service.CompleteTrialRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.complete_trial, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_trial(self, - request: vizier_service.DeleteTrialRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes a Trial. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.DeleteTrialRequest`): - The request object. Request message for - [VizierService.DeleteTrial][google.cloud.aiplatform.v1beta1.VizierService.DeleteTrial]. - name (:class:`str`): - Required. The Trial's name. Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.DeleteTrialRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
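Each method here is wrapped with a default timeout of 5.0 seconds; both the timeout and the retry policy can be overridden per call. A sketch (the retry predicate is illustrative, not the library's default for this RPC):

    from google.api_core import exceptions as core_exceptions
    from google.api_core import retry as retries
    from google.cloud import aiplatform_v1beta1

    async def delete_with_backoff(trial_name: str) -> None:
        client = aiplatform_v1beta1.VizierServiceAsyncClient()
        # Overrides the wrapped default_timeout=5.0 for this call only.
        await client.delete_trial(
            name=trial_name,
            retry=retries.Retry(predicate=retries.if_exception_type(
                core_exceptions.ServiceUnavailable)),
            timeout=30.0,
        )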
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_trial, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def check_trial_early_stopping_state(self, - request: vizier_service.CheckTrialEarlyStoppingStateRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Checks whether a Trial should stop or not. Returns a - long-running operation. When the operation is successful, it - will contain a - [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse]. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.CheckTrialEarlyStoppingStateRequest`): - The request object. Request message for - [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.CheckTrialEarlyStoppingStateResponse` - Response message for - [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState]. - - """ - # Create or coerce a protobuf request object. - request = vizier_service.CheckTrialEarlyStoppingStateRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.check_trial_early_stopping_state, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("trial_name", request.trial_name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - vizier_service.CheckTrialEarlyStoppingStateResponse, - metadata_type=vizier_service.CheckTrialEarlyStoppingStateMetatdata, - ) - - # Done; return the response. - return response - - async def stop_trial(self, - request: vizier_service.StopTrialRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Trial: - r"""Stops a Trial. - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.StopTrialRequest`): - The request object. Request message for - [VizierService.StopTrial][google.cloud.aiplatform.v1beta1.VizierService.StopTrial]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Trial: - A message representing a Trial. A - Trial contains a unique set of - Parameters that has been or will be - evaluated, along with the objective - metrics got by running the Trial. - - """ - # Create or coerce a protobuf request object. - request = vizier_service.StopTrialRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.stop_trial, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_optimal_trials(self, - request: vizier_service.ListOptimalTrialsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> vizier_service.ListOptimalTrialsResponse: - r"""Lists the pareto-optimal Trials for multi-objective Study or the - optimal Trials for single-objective Study. The definition of - pareto-optimal can be checked in wiki page. - https://en.wikipedia.org/wiki/Pareto_efficiency - - Args: - request (:class:`google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsRequest`): - The request object. Request message for - [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials]. - parent (:class:`str`): - Required. The name of the Study that - the optimal Trial belongs to. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsResponse: - Response message for - [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.ListOptimalTrialsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_optimal_trials, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
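Reading the result of `list_optimal_trials` is straightforward; a sketch, assuming a Study with completed Trials. For a single-objective Study the list holds the single best Trial; for a multi-objective Study it holds the Pareto frontier:

    from google.cloud import aiplatform_v1beta1

    async def best_trials(study_name: str) -> None:
        client = aiplatform_v1beta1.VizierServiceAsyncClient()
        resp = await client.list_optimal_trials(parent=study_name)
        for trial in resp.optimal_trials:
            print(trial.name, [(m.metric_id, m.value)
                               for m in trial.final_measurement.metrics])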
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "VizierServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py deleted file mode 100644 index c41dc2463c..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py +++ /dev/null @@ -1,1489 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from distutils import util -import os -import re -from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.vizier_service import pagers -from google.cloud.aiplatform_v1beta1.types import study -from google.cloud.aiplatform_v1beta1.types import study as gca_study -from google.cloud.aiplatform_v1beta1.types import vizier_service -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import VizierServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import VizierServiceGrpcTransport -from .transports.grpc_asyncio import VizierServiceGrpcAsyncIOTransport - - -class VizierServiceClientMeta(type): - """Metaclass for the VizierService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. 
- """ - _transport_registry = OrderedDict() # type: Dict[str, Type[VizierServiceTransport]] - _transport_registry["grpc"] = VizierServiceGrpcTransport - _transport_registry["grpc_asyncio"] = VizierServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[VizierServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class VizierServiceClient(metaclass=VizierServiceClientMeta): - """Vertex Vizier API. - Vizier service is a GCP service to solve blackbox optimization - problems, such as tuning machine learning hyperparameters and - searching over deep learning architectures. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - VizierServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - VizierServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> VizierServiceTransport: - """Returns the transport used by the client instance. - - Returns: - VizierServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def custom_job_path(project: str,location: str,custom_job: str,) -> str: - """Returns a fully-qualified custom_job string.""" - return "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) - - @staticmethod - def parse_custom_job_path(path: str) -> Dict[str,str]: - """Parses a custom_job path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/customJobs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def study_path(project: str,location: str,study: str,) -> str: - """Returns a fully-qualified study string.""" - return "projects/{project}/locations/{location}/studies/{study}".format(project=project, location=location, study=study, ) - - @staticmethod - def parse_study_path(path: str) -> Dict[str,str]: - """Parses a study path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/studies/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def trial_path(project: str,location: str,study: str,trial: str,) -> str: - """Returns a fully-qualified trial string.""" - return "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, ) - - @staticmethod - def parse_trial_path(path: str) -> Dict[str,str]: - """Parses a trial path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/studies/(?P.+?)/trials/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - 
def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, VizierServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the vizier service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, VizierServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. 
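An explicit `api_endpoint` always takes precedence over the GOOGLE_API_USE_MTLS_ENDPOINT resolution implemented below; for example, pointing the client at a regional endpoint (a sketch, assuming default credentials are available in the environment):

    from google.api_core.client_options import ClientOptions
    from google.cloud.aiplatform_v1beta1 import VizierServiceClient

    client = VizierServiceClient(client_options=ClientOptions(
        api_endpoint="us-central1-aiplatform.googleapis.com"))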
-        if client_options.api_endpoint is not None:
-            api_endpoint = client_options.api_endpoint
-        else:
-            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
-            if use_mtls_env == "never":
-                api_endpoint = self.DEFAULT_ENDPOINT
-            elif use_mtls_env == "always":
-                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
-            elif use_mtls_env == "auto":
-                if is_mtls:
-                    api_endpoint = self.DEFAULT_MTLS_ENDPOINT
-                else:
-                    api_endpoint = self.DEFAULT_ENDPOINT
-            else:
-                raise MutualTLSChannelError(
-                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
-                    "values: never, auto, always"
-                )
-
-        # Save or instantiate the transport.
-        # Ordinarily, we provide the transport, but allowing a custom transport
-        # instance provides an extensibility point for unusual situations.
-        if isinstance(transport, VizierServiceTransport):
-            # transport is a VizierServiceTransport instance.
-            if credentials or client_options.credentials_file:
-                raise ValueError("When providing a transport instance, "
-                                 "provide its credentials directly.")
-            if client_options.scopes:
-                raise ValueError(
-                    "When providing a transport instance, provide its scopes "
-                    "directly."
-                )
-            self._transport = transport
-        else:
-            Transport = type(self).get_transport_class(transport)
-            self._transport = Transport(
-                credentials=credentials,
-                credentials_file=client_options.credentials_file,
-                host=api_endpoint,
-                scopes=client_options.scopes,
-                client_cert_source_for_mtls=client_cert_source_func,
-                quota_project_id=client_options.quota_project_id,
-                client_info=client_info,
-            )
-
-    def create_study(self,
-            request: vizier_service.CreateStudyRequest = None,
-            *,
-            parent: str = None,
-            study: gca_study.Study = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> gca_study.Study:
-        r"""Creates a Study. A resource name will be generated
-        after creation of the Study.
-
-        Args:
-            request (google.cloud.aiplatform_v1beta1.types.CreateStudyRequest):
-                The request object. Request message for
-                [VizierService.CreateStudy][google.cloud.aiplatform.v1beta1.VizierService.CreateStudy].
-            parent (str):
-                Required. The resource name of the Location to create
-                the Study in. Format:
-                ``projects/{project}/locations/{location}``
-
-                This corresponds to the ``parent`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            study (google.cloud.aiplatform_v1beta1.types.Study):
-                Required. The Study configuration
-                used to create the Study.
-
-                This corresponds to the ``study`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.cloud.aiplatform_v1beta1.types.Study:
-                A message representing a Study.
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([parent, study])
-        if request is not None and has_flattened_params:
-            raise ValueError('If the `request` argument is set, then none of '
-                             'the individual field arguments should be set.')
-
-        # Minor optimization to avoid making a copy if the user passes
-        # in a vizier_service.CreateStudyRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, vizier_service.CreateStudyRequest):
-            request = vizier_service.CreateStudyRequest(request)
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if parent is not None:
-            request.parent = parent
-        if study is not None:
-            request.study = study
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.create_study]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("parent", request.parent),
-            )),
-        )
-
-        # Send the request.
-        response = rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Done; return the response.
-        return response
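As an illustration of the flattened-argument calling convention above, a minimal sketch: the project, region, and StudySpec contents are hypothetical placeholders, and the client picks up application-default credentials from the environment.

    from google.cloud import aiplatform_v1beta1
    from google.cloud.aiplatform_v1beta1.types import study as gca_study

    client = aiplatform_v1beta1.VizierServiceClient()

    # An arbitrary single-metric, single-parameter spec for illustration.
    spec = gca_study.StudySpec(
        metrics=[gca_study.StudySpec.MetricSpec(
            metric_id="accuracy",
            goal=gca_study.StudySpec.MetricSpec.GoalType.MAXIMIZE,
        )],
        parameters=[gca_study.StudySpec.ParameterSpec(
            parameter_id="learning_rate",
            double_value_spec=gca_study.StudySpec.ParameterSpec.DoubleValueSpec(
                min_value=1e-4, max_value=1e-1,
            ),
        )],
    )
    created = client.create_study(
        parent="projects/my-project/locations/us-central1",
        study=gca_study.Study(display_name="example-study", study_spec=spec),
    )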
-    def get_study(self,
-            request: vizier_service.GetStudyRequest = None,
-            *,
-            name: str = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> study.Study:
-        r"""Gets a Study by name.
-
-        Args:
-            request (google.cloud.aiplatform_v1beta1.types.GetStudyRequest):
-                The request object. Request message for
-                [VizierService.GetStudy][google.cloud.aiplatform.v1beta1.VizierService.GetStudy].
-            name (str):
-                Required. The name of the Study resource. Format:
-                ``projects/{project}/locations/{location}/studies/{study}``
-
-                This corresponds to the ``name`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.cloud.aiplatform_v1beta1.types.Study:
-                A message representing a Study.
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([name])
-        if request is not None and has_flattened_params:
-            raise ValueError('If the `request` argument is set, then none of '
-                             'the individual field arguments should be set.')
-
-        # Minor optimization to avoid making a copy if the user passes
-        # in a vizier_service.GetStudyRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, vizier_service.GetStudyRequest):
-            request = vizier_service.GetStudyRequest(request)
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if name is not None:
-            request.name = name
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.get_study]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("name", request.name),
-            )),
-        )
-
-        # Send the request.
-        response = rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Done; return the response.
- return response - - def list_studies(self, - request: vizier_service.ListStudiesRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListStudiesPager: - r"""Lists all the studies in a region for an associated - project. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListStudiesRequest): - The request object. Request message for - [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies]. - parent (str): - Required. The resource name of the Location to list the - Study from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.vizier_service.pagers.ListStudiesPager: - Response message for - [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.ListStudiesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.ListStudiesRequest): - request = vizier_service.ListStudiesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_studies] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListStudiesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_study(self, - request: vizier_service.DeleteStudyRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes a Study. - - Args: - request (google.cloud.aiplatform_v1beta1.types.DeleteStudyRequest): - The request object. Request message for - [VizierService.DeleteStudy][google.cloud.aiplatform.v1beta1.VizierService.DeleteStudy]. - name (str): - Required. 
The name of the Study resource to be deleted. - Format: - ``projects/{project}/locations/{location}/studies/{study}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.DeleteStudyRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.DeleteStudyRequest): - request = vizier_service.DeleteStudyRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_study] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def lookup_study(self, - request: vizier_service.LookupStudyRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Study: - r"""Looks a study up using the user-defined display_name field - instead of the fully qualified resource name. - - Args: - request (google.cloud.aiplatform_v1beta1.types.LookupStudyRequest): - The request object. Request message for - [VizierService.LookupStudy][google.cloud.aiplatform.v1beta1.VizierService.LookupStudy]. - parent (str): - Required. The resource name of the Location to get the - Study from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Study: - A message representing a Study. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.LookupStudyRequest. 
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, vizier_service.LookupStudyRequest):
-            request = vizier_service.LookupStudyRequest(request)
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if parent is not None:
-            request.parent = parent
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.lookup_study]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("parent", request.parent),
-            )),
-        )
-
-        # Send the request.
-        response = rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Done; return the response.
-        return response
-
-    def suggest_trials(self,
-            request: vizier_service.SuggestTrialsRequest = None,
-            *,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> operation.Operation:
-        r"""Adds one or more Trials to a Study, with parameter values
-        suggested by Vertex Vizier. Returns a long-running operation
-        associated with the generation of Trial suggestions. When this
-        long-running operation succeeds, it will contain a
-        [SuggestTrialsResponse][google.cloud.aiplatform.v1beta1.SuggestTrialsResponse].
-
-        Args:
-            request (google.cloud.aiplatform_v1beta1.types.SuggestTrialsRequest):
-                The request object. Request message for
-                [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials].
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.api_core.operation.Operation:
-                An object representing a long-running operation.
-
-                The result type for the operation will be
-                :class:`google.cloud.aiplatform_v1beta1.types.SuggestTrialsResponse`
-                Response message for
-                [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials].
-
-        """
-        # Create or coerce a protobuf request object.
-        # Minor optimization to avoid making a copy if the user passes
-        # in a vizier_service.SuggestTrialsRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, vizier_service.SuggestTrialsRequest):
-            request = vizier_service.SuggestTrialsRequest(request)
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.suggest_trials]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("parent", request.parent),
-            )),
-        )
-
-        # Send the request.
-        response = rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Wrap the response in an operation future.
-        response = operation.from_gapic(
-            response,
-            self._transport.operations_client,
-            vizier_service.SuggestTrialsResponse,
-            metadata_type=vizier_service.SuggestTrialsMetadata,
-        )
-
-        # Done; return the response.
-        return response
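A short sketch of consuming the long-running operation returned above; `client` and `created` continue the earlier sketch, and the field values are hypothetical placeholders:

    from google.cloud.aiplatform_v1beta1.types import vizier_service

    op = client.suggest_trials(vizier_service.SuggestTrialsRequest(
        parent=created.name,     # the Study created in the earlier sketch
        suggestion_count=2,      # ask Vizier for two Trial suggestions
        client_id="worker-0",    # identifies this suggestion consumer
    ))
    # Block until the operation resolves to a SuggestTrialsResponse.
    trials = op.result().trials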
-    def create_trial(self,
-            request: vizier_service.CreateTrialRequest = None,
-            *,
-            parent: str = None,
-            trial: study.Trial = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> study.Trial:
-        r"""Adds a user-provided Trial to a Study.
-
-        Args:
-            request (google.cloud.aiplatform_v1beta1.types.CreateTrialRequest):
-                The request object. Request message for
-                [VizierService.CreateTrial][google.cloud.aiplatform.v1beta1.VizierService.CreateTrial].
-            parent (str):
-                Required. The resource name of the Study to create the
-                Trial in. Format:
-                ``projects/{project}/locations/{location}/studies/{study}``
-
-                This corresponds to the ``parent`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            trial (google.cloud.aiplatform_v1beta1.types.Trial):
-                Required. The Trial to create.
-                This corresponds to the ``trial`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.cloud.aiplatform_v1beta1.types.Trial:
-                A message representing a Trial. A
-                Trial contains a unique set of
-                Parameters that has been or will be
-                evaluated, along with the objective
-                metrics got by running the Trial.
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([parent, trial])
-        if request is not None and has_flattened_params:
-            raise ValueError('If the `request` argument is set, then none of '
-                             'the individual field arguments should be set.')
-
-        # Minor optimization to avoid making a copy if the user passes
-        # in a vizier_service.CreateTrialRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, vizier_service.CreateTrialRequest):
-            request = vizier_service.CreateTrialRequest(request)
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if parent is not None:
-            request.parent = parent
-        if trial is not None:
-            request.trial = trial
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.create_trial]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("parent", request.parent),
-            )),
-        )
-
-        # Send the request.
-        response = rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Done; return the response.
-        return response
-
-    def get_trial(self,
-            request: vizier_service.GetTrialRequest = None,
-            *,
-            name: str = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> study.Trial:
-        r"""Gets a Trial.
-
-        Args:
-            request (google.cloud.aiplatform_v1beta1.types.GetTrialRequest):
-                The request object. Request message for
-                [VizierService.GetTrial][google.cloud.aiplatform.v1beta1.VizierService.GetTrial].
-            name (str):
-                Required. The name of the Trial resource.
Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Trial: - A message representing a Trial. A - Trial contains a unique set of - Parameters that has been or will be - evaluated, along with the objective - metrics got by running the Trial. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.GetTrialRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.GetTrialRequest): - request = vizier_service.GetTrialRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_trial] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_trials(self, - request: vizier_service.ListTrialsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTrialsPager: - r"""Lists the Trials associated with a Study. - - Args: - request (google.cloud.aiplatform_v1beta1.types.ListTrialsRequest): - The request object. Request message for - [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials]. - parent (str): - Required. The resource name of the Study to list the - Trial from. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.vizier_service.pagers.ListTrialsPager: - Response message for - [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.ListTrialsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.ListTrialsRequest): - request = vizier_service.ListTrialsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_trials] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListTrialsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def add_trial_measurement(self, - request: vizier_service.AddTrialMeasurementRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Trial: - r"""Adds a measurement of the objective metrics to a - Trial. This measurement is assumed to have been taken - before the Trial is complete. - - Args: - request (google.cloud.aiplatform_v1beta1.types.AddTrialMeasurementRequest): - The request object. Request message for - [VizierService.AddTrialMeasurement][google.cloud.aiplatform.v1beta1.VizierService.AddTrialMeasurement]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Trial: - A message representing a Trial. A - Trial contains a unique set of - Parameters that has been or will be - evaluated, along with the objective - metrics got by running the Trial. - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.AddTrialMeasurementRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.AddTrialMeasurementRequest): - request = vizier_service.AddTrialMeasurementRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.add_trial_measurement] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("trial_name", request.trial_name), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def complete_trial(self, - request: vizier_service.CompleteTrialRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Trial: - r"""Marks a Trial as complete. - - Args: - request (google.cloud.aiplatform_v1beta1.types.CompleteTrialRequest): - The request object. Request message for - [VizierService.CompleteTrial][google.cloud.aiplatform.v1beta1.VizierService.CompleteTrial]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Trial: - A message representing a Trial. A - Trial contains a unique set of - Parameters that has been or will be - evaluated, along with the objective - metrics got by running the Trial. - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.CompleteTrialRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.CompleteTrialRequest): - request = vizier_service.CompleteTrialRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.complete_trial] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_trial(self, - request: vizier_service.DeleteTrialRequest = None, - *, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes a Trial. - - Args: - request (google.cloud.aiplatform_v1beta1.types.DeleteTrialRequest): - The request object. Request message for - [VizierService.DeleteTrial][google.cloud.aiplatform.v1beta1.VizierService.DeleteTrial]. - name (str): - Required. The Trial's name. Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.DeleteTrialRequest. 
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, vizier_service.DeleteTrialRequest):
-            request = vizier_service.DeleteTrialRequest(request)
-        # If we have keyword arguments corresponding to fields on the
-        # request, apply these.
-        if name is not None:
-            request.name = name
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.delete_trial]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("name", request.name),
-            )),
-        )
-
-        # Send the request.
-        rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-    def check_trial_early_stopping_state(self,
-            request: vizier_service.CheckTrialEarlyStoppingStateRequest = None,
-            *,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> operation.Operation:
-        r"""Checks whether a Trial should stop or not. Returns a
-        long-running operation. When the operation is successful, it
-        will contain a
-        [CheckTrialEarlyStoppingStateResponse][google.cloud.aiplatform.v1beta1.CheckTrialEarlyStoppingStateResponse].
-
-        Args:
-            request (google.cloud.aiplatform_v1beta1.types.CheckTrialEarlyStoppingStateRequest):
-                The request object. Request message for
-                [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState].
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.api_core.operation.Operation:
-                An object representing a long-running operation.
-
-                The result type for the operation will be
-                :class:`google.cloud.aiplatform_v1beta1.types.CheckTrialEarlyStoppingStateResponse`
-                Response message for
-                [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState].
-
-        """
-        # Create or coerce a protobuf request object.
-        # Minor optimization to avoid making a copy if the user passes
-        # in a vizier_service.CheckTrialEarlyStoppingStateRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, vizier_service.CheckTrialEarlyStoppingStateRequest):
-            request = vizier_service.CheckTrialEarlyStoppingStateRequest(request)
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.check_trial_early_stopping_state]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("trial_name", request.trial_name),
-            )),
-        )
-
-        # Send the request.
-        response = rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Wrap the response in an operation future.
-        response = operation.from_gapic(
-            response,
-            self._transport.operations_client,
-            vizier_service.CheckTrialEarlyStoppingStateResponse,
-            metadata_type=vizier_service.CheckTrialEarlyStoppingStateMetatdata,
-        )
-
-        # Done; return the response.
-        return response
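Continuing the same hypothetical sketch, the operation's `should_stop` result can drive `stop_trial`; the trial reference is a placeholder:

    op = client.check_trial_early_stopping_state(
        vizier_service.CheckTrialEarlyStoppingStateRequest(
            trial_name=trials[0].name)
    )
    # Block on the LRO; the result is a CheckTrialEarlyStoppingStateResponse.
    if op.result().should_stop:
        # Vizier recommends stopping; mark the Trial as stopped.
        client.stop_trial(vizier_service.StopTrialRequest(name=trials[0].name))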
-    def stop_trial(self,
-            request: vizier_service.StopTrialRequest = None,
-            *,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> study.Trial:
-        r"""Stops a Trial.
-
-        Args:
-            request (google.cloud.aiplatform_v1beta1.types.StopTrialRequest):
-                The request object. Request message for
-                [VizierService.StopTrial][google.cloud.aiplatform.v1beta1.VizierService.StopTrial].
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.cloud.aiplatform_v1beta1.types.Trial:
-                A message representing a Trial. A
-                Trial contains a unique set of
-                Parameters that has been or will be
-                evaluated, along with the objective
-                metrics got by running the Trial.
-
-        """
-        # Create or coerce a protobuf request object.
-        # Minor optimization to avoid making a copy if the user passes
-        # in a vizier_service.StopTrialRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, vizier_service.StopTrialRequest):
-            request = vizier_service.StopTrialRequest(request)
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.stop_trial]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("name", request.name),
-            )),
-        )
-
-        # Send the request.
-        response = rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Done; return the response.
-        return response
-
-    def list_optimal_trials(self,
-            request: vizier_service.ListOptimalTrialsRequest = None,
-            *,
-            parent: str = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> vizier_service.ListOptimalTrialsResponse:
-        r"""Lists the Pareto-optimal Trials for a multi-objective Study, or
-        the optimal Trials for a single-objective Study. For the
-        definition of Pareto optimality, see
-        https://en.wikipedia.org/wiki/Pareto_efficiency
-
-        Args:
-            request (google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsRequest):
-                The request object. Request message for
-                [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials].
-            parent (str):
-                Required. The name of the Study that
-                the optimal Trial belongs to.
-
-                This corresponds to the ``parent`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsResponse:
-                Response message for
-                [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials].
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.ListOptimalTrialsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.ListOptimalTrialsRequest): - request = vizier_service.ListOptimalTrialsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_optimal_trials] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "VizierServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/pagers.py deleted file mode 100644 index caaf517a27..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/pagers.py +++ /dev/null @@ -1,263 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.cloud.aiplatform_v1beta1.types import study -from google.cloud.aiplatform_v1beta1.types import vizier_service - - -class ListStudiesPager: - """A pager for iterating through ``list_studies`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListStudiesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``studies`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListStudies`` requests and continue to iterate - through the ``studies`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListStudiesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., vizier_service.ListStudiesResponse], - request: vizier_service.ListStudiesRequest, - response: vizier_service.ListStudiesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListStudiesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListStudiesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = vizier_service.ListStudiesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[vizier_service.ListStudiesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[study.Study]: - for page in self.pages: - yield from page.studies - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListStudiesAsyncPager: - """A pager for iterating through ``list_studies`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListStudiesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``studies`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListStudies`` requests and continue to iterate - through the ``studies`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListStudiesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[vizier_service.ListStudiesResponse]], - request: vizier_service.ListStudiesRequest, - response: vizier_service.ListStudiesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListStudiesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListStudiesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = vizier_service.ListStudiesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[vizier_service.ListStudiesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[study.Study]: - async def async_generator(): - async for page in self.pages: - for response in page.studies: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTrialsPager: - """A pager for iterating through ``list_trials`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListTrialsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``trials`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListTrials`` requests and continue to iterate - through the ``trials`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTrialsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., vizier_service.ListTrialsResponse], - request: vizier_service.ListTrialsRequest, - response: vizier_service.ListTrialsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListTrialsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListTrialsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = vizier_service.ListTrialsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[vizier_service.ListTrialsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[study.Trial]: - for page in self.pages: - yield from page.trials - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTrialsAsyncPager: - """A pager for iterating through ``list_trials`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListTrialsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``trials`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListTrials`` requests and continue to iterate - through the ``trials`` field on the - corresponding responses. 
- - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTrialsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[vizier_service.ListTrialsResponse]], - request: vizier_service.ListTrialsRequest, - response: vizier_service.ListTrialsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListTrialsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListTrialsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = vizier_service.ListTrialsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[vizier_service.ListTrialsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[study.Trial]: - async def async_generator(): - async for page in self.pages: - for response in page.trials: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/__init__.py deleted file mode 100644 index afc70ea68e..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import VizierServiceTransport -from .grpc import VizierServiceGrpcTransport -from .grpc_asyncio import VizierServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. 
-_transport_registry = OrderedDict() # type: Dict[str, Type[VizierServiceTransport]] -_transport_registry['grpc'] = VizierServiceGrpcTransport -_transport_registry['grpc_asyncio'] = VizierServiceGrpcAsyncIOTransport - -__all__ = ( - 'VizierServiceTransport', - 'VizierServiceGrpcTransport', - 'VizierServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py deleted file mode 100644 index bd9a9d9c50..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py +++ /dev/null @@ -1,374 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import study -from google.cloud.aiplatform_v1beta1.types import study as gca_study -from google.cloud.aiplatform_v1beta1.types import vizier_service -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class VizierServiceTransport(abc.ABC): - """Abstract transport class for VizierService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. 
- credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials is service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # TODO(busunkim): This method is in the base transport - # to avoid duplicating code across the transport classes. These functions - # should be deleted once the minimum required versions of google-auth is increased. - - # TODO: Remove this function once google-auth >= 1.25.0 is required - @classmethod - def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: - """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" - - scopes_kwargs = {} - - if _GOOGLE_AUTH_VERSION and ( - packaging.version.parse(_GOOGLE_AUTH_VERSION) - >= packaging.version.parse("1.25.0") - ): - scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} - else: - scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} - - return scopes_kwargs - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
- self._wrapped_methods = { - self.create_study: gapic_v1.method.wrap_method( - self.create_study, - default_timeout=5.0, - client_info=client_info, - ), - self.get_study: gapic_v1.method.wrap_method( - self.get_study, - default_timeout=5.0, - client_info=client_info, - ), - self.list_studies: gapic_v1.method.wrap_method( - self.list_studies, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_study: gapic_v1.method.wrap_method( - self.delete_study, - default_timeout=5.0, - client_info=client_info, - ), - self.lookup_study: gapic_v1.method.wrap_method( - self.lookup_study, - default_timeout=5.0, - client_info=client_info, - ), - self.suggest_trials: gapic_v1.method.wrap_method( - self.suggest_trials, - default_timeout=5.0, - client_info=client_info, - ), - self.create_trial: gapic_v1.method.wrap_method( - self.create_trial, - default_timeout=5.0, - client_info=client_info, - ), - self.get_trial: gapic_v1.method.wrap_method( - self.get_trial, - default_timeout=5.0, - client_info=client_info, - ), - self.list_trials: gapic_v1.method.wrap_method( - self.list_trials, - default_timeout=5.0, - client_info=client_info, - ), - self.add_trial_measurement: gapic_v1.method.wrap_method( - self.add_trial_measurement, - default_timeout=5.0, - client_info=client_info, - ), - self.complete_trial: gapic_v1.method.wrap_method( - self.complete_trial, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_trial: gapic_v1.method.wrap_method( - self.delete_trial, - default_timeout=5.0, - client_info=client_info, - ), - self.check_trial_early_stopping_state: gapic_v1.method.wrap_method( - self.check_trial_early_stopping_state, - default_timeout=5.0, - client_info=client_info, - ), - self.stop_trial: gapic_v1.method.wrap_method( - self.stop_trial, - default_timeout=5.0, - client_info=client_info, - ), - self.list_optimal_trials: gapic_v1.method.wrap_method( - self.list_optimal_trials, - default_timeout=5.0, - client_info=client_info, - ), - } - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_study(self) -> Callable[ - [vizier_service.CreateStudyRequest], - Union[ - gca_study.Study, - Awaitable[gca_study.Study] - ]]: - raise NotImplementedError() - - @property - def get_study(self) -> Callable[ - [vizier_service.GetStudyRequest], - Union[ - study.Study, - Awaitable[study.Study] - ]]: - raise NotImplementedError() - - @property - def list_studies(self) -> Callable[ - [vizier_service.ListStudiesRequest], - Union[ - vizier_service.ListStudiesResponse, - Awaitable[vizier_service.ListStudiesResponse] - ]]: - raise NotImplementedError() - - @property - def delete_study(self) -> Callable[ - [vizier_service.DeleteStudyRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def lookup_study(self) -> Callable[ - [vizier_service.LookupStudyRequest], - Union[ - study.Study, - Awaitable[study.Study] - ]]: - raise NotImplementedError() - - @property - def suggest_trials(self) -> Callable[ - [vizier_service.SuggestTrialsRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def create_trial(self) -> Callable[ - [vizier_service.CreateTrialRequest], - Union[ - study.Trial, - Awaitable[study.Trial] - ]]: - raise NotImplementedError() - - @property - def get_trial(self) -> Callable[ - 
[vizier_service.GetTrialRequest], - Union[ - study.Trial, - Awaitable[study.Trial] - ]]: - raise NotImplementedError() - - @property - def list_trials(self) -> Callable[ - [vizier_service.ListTrialsRequest], - Union[ - vizier_service.ListTrialsResponse, - Awaitable[vizier_service.ListTrialsResponse] - ]]: - raise NotImplementedError() - - @property - def add_trial_measurement(self) -> Callable[ - [vizier_service.AddTrialMeasurementRequest], - Union[ - study.Trial, - Awaitable[study.Trial] - ]]: - raise NotImplementedError() - - @property - def complete_trial(self) -> Callable[ - [vizier_service.CompleteTrialRequest], - Union[ - study.Trial, - Awaitable[study.Trial] - ]]: - raise NotImplementedError() - - @property - def delete_trial(self) -> Callable[ - [vizier_service.DeleteTrialRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def check_trial_early_stopping_state(self) -> Callable[ - [vizier_service.CheckTrialEarlyStoppingStateRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def stop_trial(self) -> Callable[ - [vizier_service.StopTrialRequest], - Union[ - study.Trial, - Awaitable[study.Trial] - ]]: - raise NotImplementedError() - - @property - def list_optimal_trials(self) -> Callable[ - [vizier_service.ListOptimalTrialsRequest], - Union[ - vizier_service.ListOptimalTrialsResponse, - Awaitable[vizier_service.ListOptimalTrialsResponse] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'VizierServiceTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py deleted file mode 100644 index 7212be0372..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py +++ /dev/null @@ -1,657 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
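[Editor's sketch] The base transport that ends just above declares every RPC as a property returning a callable and raises NotImplementedError, so a concrete transport (or a test double) only has to supply those callables. A minimal in-memory double, assuming AnonymousCredentials to skip the ADC lookup; the class and its canned behavior are illustrative, not part of the library:

from google.auth import credentials as ga_credentials
from google.cloud.aiplatform_v1beta1.types import study

class FakeVizierTransport(VizierServiceTransport):
    def __init__(self):
        # Anonymous credentials avoid the google.auth.default() lookup.
        super().__init__(credentials=ga_credentials.AnonymousCredentials())

    @property
    def get_study(self):
        # Return a canned Study instead of issuing an RPC.
        return lambda request, **kwargs: study.Study(display_name='fake-study')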
-# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.api_core import gapic_v1 # type: ignore -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1beta1.types import study -from google.cloud.aiplatform_v1beta1.types import study as gca_study -from google.cloud.aiplatform_v1beta1.types import vizier_service -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import VizierServiceTransport, DEFAULT_CLIENT_INFO - - -class VizierServiceGrpcTransport(VizierServiceTransport): - """gRPC backend transport for VizierService. - - Vertex Vizier API. - Vizier service is a GCP service to solve blackbox optimization - problems, such as tuning machine learning hyperparameters and - searching over deep learning architectures. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. 
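[Editor's note] The arguments documented here resolve in a strict order: an explicit ``channel`` wins and all credential arguments are ignored; otherwise the deprecated ``api_mtls_endpoint``/``client_cert_source`` pair applies, then ``ssl_channel_credentials`` or ``client_cert_source_for_mtls``, and finally application default credentials. Two common construction paths, as a sketch:

# (a) Everything defaulted: ADC credentials, TLS channel to DEFAULT_HOST.
transport = VizierServiceGrpcTransport()

# (b) Caller-supplied channel: the transport uses it as-is and skips the
#     credential and mTLS logic entirely.
channel = VizierServiceGrpcTransport.create_channel('aiplatform.googleapis.com')
transport = VizierServiceGrpcTransport(channel=channel)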
- client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. 
If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_study(self) -> Callable[ - [vizier_service.CreateStudyRequest], - gca_study.Study]: - r"""Return a callable for the create study method over gRPC. - - Creates a Study. A resource name will be generated - after creation of the Study. - - Returns: - Callable[[~.CreateStudyRequest], - ~.Study]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_study' not in self._stubs: - self._stubs['create_study'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/CreateStudy', - request_serializer=vizier_service.CreateStudyRequest.serialize, - response_deserializer=gca_study.Study.deserialize, - ) - return self._stubs['create_study'] - - @property - def get_study(self) -> Callable[ - [vizier_service.GetStudyRequest], - study.Study]: - r"""Return a callable for the get study method over gRPC. - - Gets a Study by name. - - Returns: - Callable[[~.GetStudyRequest], - ~.Study]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_study' not in self._stubs: - self._stubs['get_study'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/GetStudy', - request_serializer=vizier_service.GetStudyRequest.serialize, - response_deserializer=study.Study.deserialize, - ) - return self._stubs['get_study'] - - @property - def list_studies(self) -> Callable[ - [vizier_service.ListStudiesRequest], - vizier_service.ListStudiesResponse]: - r"""Return a callable for the list studies method over gRPC. - - Lists all the studies in a region for an associated - project. - - Returns: - Callable[[~.ListStudiesRequest], - ~.ListStudiesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_studies' not in self._stubs: - self._stubs['list_studies'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/ListStudies', - request_serializer=vizier_service.ListStudiesRequest.serialize, - response_deserializer=vizier_service.ListStudiesResponse.deserialize, - ) - return self._stubs['list_studies'] - - @property - def delete_study(self) -> Callable[ - [vizier_service.DeleteStudyRequest], - empty_pb2.Empty]: - r"""Return a callable for the delete study method over gRPC. - - Deletes a Study. - - Returns: - Callable[[~.DeleteStudyRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_study' not in self._stubs: - self._stubs['delete_study'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/DeleteStudy', - request_serializer=vizier_service.DeleteStudyRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_study'] - - @property - def lookup_study(self) -> Callable[ - [vizier_service.LookupStudyRequest], - study.Study]: - r"""Return a callable for the lookup study method over gRPC. - - Looks a study up using the user-defined display_name field - instead of the fully qualified resource name. - - Returns: - Callable[[~.LookupStudyRequest], - ~.Study]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'lookup_study' not in self._stubs: - self._stubs['lookup_study'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/LookupStudy', - request_serializer=vizier_service.LookupStudyRequest.serialize, - response_deserializer=study.Study.deserialize, - ) - return self._stubs['lookup_study'] - - @property - def suggest_trials(self) -> Callable[ - [vizier_service.SuggestTrialsRequest], - operations_pb2.Operation]: - r"""Return a callable for the suggest trials method over gRPC. - - Adds one or more Trials to a Study, with parameter values - suggested by Vertex Vizier. Returns a long-running operation - associated with the generation of Trial suggestions. 
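[Editor's sketch] At the client level this surfaces as a long-running operation whose result is a SuggestTrialsResponse; ``client`` here is assumed to be a VizierServiceClient built on this transport:

op = client.suggest_trials(request=vizier_service.SuggestTrialsRequest(
    parent='projects/my-project/locations/us-central1/studies/my-study',
    suggestion_count=3,
    client_id='worker-0',
))
response = op.result()  # blocks; suggested Trials arrive in response.trials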
When this - long-running operation succeeds, it will contain a - [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. - - Returns: - Callable[[~.SuggestTrialsRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'suggest_trials' not in self._stubs: - self._stubs['suggest_trials'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/SuggestTrials', - request_serializer=vizier_service.SuggestTrialsRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['suggest_trials'] - - @property - def create_trial(self) -> Callable[ - [vizier_service.CreateTrialRequest], - study.Trial]: - r"""Return a callable for the create trial method over gRPC. - - Adds a user provided Trial to a Study. - - Returns: - Callable[[~.CreateTrialRequest], - ~.Trial]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_trial' not in self._stubs: - self._stubs['create_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/CreateTrial', - request_serializer=vizier_service.CreateTrialRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['create_trial'] - - @property - def get_trial(self) -> Callable[ - [vizier_service.GetTrialRequest], - study.Trial]: - r"""Return a callable for the get trial method over gRPC. - - Gets a Trial. - - Returns: - Callable[[~.GetTrialRequest], - ~.Trial]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_trial' not in self._stubs: - self._stubs['get_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/GetTrial', - request_serializer=vizier_service.GetTrialRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['get_trial'] - - @property - def list_trials(self) -> Callable[ - [vizier_service.ListTrialsRequest], - vizier_service.ListTrialsResponse]: - r"""Return a callable for the list trials method over gRPC. - - Lists the Trials associated with a Study. - - Returns: - Callable[[~.ListTrialsRequest], - ~.ListTrialsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_trials' not in self._stubs: - self._stubs['list_trials'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/ListTrials', - request_serializer=vizier_service.ListTrialsRequest.serialize, - response_deserializer=vizier_service.ListTrialsResponse.deserialize, - ) - return self._stubs['list_trials'] - - @property - def add_trial_measurement(self) -> Callable[ - [vizier_service.AddTrialMeasurementRequest], - study.Trial]: - r"""Return a callable for the add trial measurement method over gRPC. - - Adds a measurement of the objective metrics to a - Trial. This measurement is assumed to have been taken - before the Trial is complete. - - Returns: - Callable[[~.AddTrialMeasurementRequest], - ~.Trial]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'add_trial_measurement' not in self._stubs: - self._stubs['add_trial_measurement'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/AddTrialMeasurement', - request_serializer=vizier_service.AddTrialMeasurementRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['add_trial_measurement'] - - @property - def complete_trial(self) -> Callable[ - [vizier_service.CompleteTrialRequest], - study.Trial]: - r"""Return a callable for the complete trial method over gRPC. - - Marks a Trial as complete. - - Returns: - Callable[[~.CompleteTrialRequest], - ~.Trial]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'complete_trial' not in self._stubs: - self._stubs['complete_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/CompleteTrial', - request_serializer=vizier_service.CompleteTrialRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['complete_trial'] - - @property - def delete_trial(self) -> Callable[ - [vizier_service.DeleteTrialRequest], - empty_pb2.Empty]: - r"""Return a callable for the delete trial method over gRPC. - - Deletes a Trial. - - Returns: - Callable[[~.DeleteTrialRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_trial' not in self._stubs: - self._stubs['delete_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/DeleteTrial', - request_serializer=vizier_service.DeleteTrialRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_trial'] - - @property - def check_trial_early_stopping_state(self) -> Callable[ - [vizier_service.CheckTrialEarlyStoppingStateRequest], - operations_pb2.Operation]: - r"""Return a callable for the check trial early stopping - state method over gRPC. - - Checks whether a Trial should stop or not. Returns a - long-running operation. 
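[Editor's sketch] A typical tuning loop pairs this RPC with AddTrialMeasurement and StopTrial; ``client`` and ``trial`` are assumed from earlier calls, and field values are illustrative:

client.add_trial_measurement(request=vizier_service.AddTrialMeasurementRequest(
    trial_name=trial.name,
    measurement=study.Measurement(step_count=10),
))
state = client.check_trial_early_stopping_state(
    request=vizier_service.CheckTrialEarlyStoppingStateRequest(trial_name=trial.name),
).result()
if state.should_stop:
    client.stop_trial(request=vizier_service.StopTrialRequest(name=trial.name))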
When the operation is successful, it - will contain a - [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse]. - - Returns: - Callable[[~.CheckTrialEarlyStoppingStateRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'check_trial_early_stopping_state' not in self._stubs: - self._stubs['check_trial_early_stopping_state'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/CheckTrialEarlyStoppingState', - request_serializer=vizier_service.CheckTrialEarlyStoppingStateRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['check_trial_early_stopping_state'] - - @property - def stop_trial(self) -> Callable[ - [vizier_service.StopTrialRequest], - study.Trial]: - r"""Return a callable for the stop trial method over gRPC. - - Stops a Trial. - - Returns: - Callable[[~.StopTrialRequest], - ~.Trial]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'stop_trial' not in self._stubs: - self._stubs['stop_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/StopTrial', - request_serializer=vizier_service.StopTrialRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['stop_trial'] - - @property - def list_optimal_trials(self) -> Callable[ - [vizier_service.ListOptimalTrialsRequest], - vizier_service.ListOptimalTrialsResponse]: - r"""Return a callable for the list optimal trials method over gRPC. - - Lists the pareto-optimal Trials for multi-objective Study or the - optimal Trials for single-objective Study. The definition of - pareto-optimal can be checked in wiki page. - https://en.wikipedia.org/wiki/Pareto_efficiency - - Returns: - Callable[[~.ListOptimalTrialsRequest], - ~.ListOptimalTrialsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_optimal_trials' not in self._stubs: - self._stubs['list_optimal_trials'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/ListOptimalTrials', - request_serializer=vizier_service.ListOptimalTrialsRequest.serialize, - response_deserializer=vizier_service.ListOptimalTrialsResponse.deserialize, - ) - return self._stubs['list_optimal_trials'] - - -__all__ = ( - 'VizierServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py deleted file mode 100644 index bb230b8767..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,661 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1beta1.types import study -from google.cloud.aiplatform_v1beta1.types import study as gca_study -from google.cloud.aiplatform_v1beta1.types import vizier_service -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import VizierServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import VizierServiceGrpcTransport - - -class VizierServiceGrpcAsyncIOTransport(VizierServiceTransport): - """gRPC AsyncIO backend transport for VizierService. - - Vertex Vizier API. - Vizier service is a GCP service to solve blackbox optimization - problems, such as tuning machine learning hyperparameters and - searching over deep learning architectures. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. 
- credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. 
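[Editor's sketch] Backed by this AsyncIO transport, the same surface is awaited rather than called synchronously; VizierServiceAsyncClient is assumed from the generated package:

async def fetch_study(name: str) -> study.Study:
    client = VizierServiceAsyncClient()
    return await client.get_study(
        request=vizier_service.GetStudyRequest(name=name),
    )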
It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. 
- return self._operations_client - - @property - def create_study(self) -> Callable[ - [vizier_service.CreateStudyRequest], - Awaitable[gca_study.Study]]: - r"""Return a callable for the create study method over gRPC. - - Creates a Study. A resource name will be generated - after creation of the Study. - - Returns: - Callable[[~.CreateStudyRequest], - Awaitable[~.Study]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_study' not in self._stubs: - self._stubs['create_study'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/CreateStudy', - request_serializer=vizier_service.CreateStudyRequest.serialize, - response_deserializer=gca_study.Study.deserialize, - ) - return self._stubs['create_study'] - - @property - def get_study(self) -> Callable[ - [vizier_service.GetStudyRequest], - Awaitable[study.Study]]: - r"""Return a callable for the get study method over gRPC. - - Gets a Study by name. - - Returns: - Callable[[~.GetStudyRequest], - Awaitable[~.Study]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_study' not in self._stubs: - self._stubs['get_study'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/GetStudy', - request_serializer=vizier_service.GetStudyRequest.serialize, - response_deserializer=study.Study.deserialize, - ) - return self._stubs['get_study'] - - @property - def list_studies(self) -> Callable[ - [vizier_service.ListStudiesRequest], - Awaitable[vizier_service.ListStudiesResponse]]: - r"""Return a callable for the list studies method over gRPC. - - Lists all the studies in a region for an associated - project. - - Returns: - Callable[[~.ListStudiesRequest], - Awaitable[~.ListStudiesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_studies' not in self._stubs: - self._stubs['list_studies'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/ListStudies', - request_serializer=vizier_service.ListStudiesRequest.serialize, - response_deserializer=vizier_service.ListStudiesResponse.deserialize, - ) - return self._stubs['list_studies'] - - @property - def delete_study(self) -> Callable[ - [vizier_service.DeleteStudyRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the delete study method over gRPC. - - Deletes a Study. - - Returns: - Callable[[~.DeleteStudyRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
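# [Editor's note] This lazy pattern repeats for every RPC in the transport:
# the first property access registers a unary-unary stub on the shared
# channel under the method's name in self._stubs, and subsequent accesses
# return the cached callable, so each gRPC method is created at most once
# per transport instance.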
- if 'delete_study' not in self._stubs: - self._stubs['delete_study'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/DeleteStudy', - request_serializer=vizier_service.DeleteStudyRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_study'] - - @property - def lookup_study(self) -> Callable[ - [vizier_service.LookupStudyRequest], - Awaitable[study.Study]]: - r"""Return a callable for the lookup study method over gRPC. - - Looks a study up using the user-defined display_name field - instead of the fully qualified resource name. - - Returns: - Callable[[~.LookupStudyRequest], - Awaitable[~.Study]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'lookup_study' not in self._stubs: - self._stubs['lookup_study'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/LookupStudy', - request_serializer=vizier_service.LookupStudyRequest.serialize, - response_deserializer=study.Study.deserialize, - ) - return self._stubs['lookup_study'] - - @property - def suggest_trials(self) -> Callable[ - [vizier_service.SuggestTrialsRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the suggest trials method over gRPC. - - Adds one or more Trials to a Study, with parameter values - suggested by Vertex Vizier. Returns a long-running operation - associated with the generation of Trial suggestions. When this - long-running operation succeeds, it will contain a - [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. - - Returns: - Callable[[~.SuggestTrialsRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'suggest_trials' not in self._stubs: - self._stubs['suggest_trials'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/SuggestTrials', - request_serializer=vizier_service.SuggestTrialsRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['suggest_trials'] - - @property - def create_trial(self) -> Callable[ - [vizier_service.CreateTrialRequest], - Awaitable[study.Trial]]: - r"""Return a callable for the create trial method over gRPC. - - Adds a user provided Trial to a Study. - - Returns: - Callable[[~.CreateTrialRequest], - Awaitable[~.Trial]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_trial' not in self._stubs: - self._stubs['create_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/CreateTrial', - request_serializer=vizier_service.CreateTrialRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['create_trial'] - - @property - def get_trial(self) -> Callable[ - [vizier_service.GetTrialRequest], - Awaitable[study.Trial]]: - r"""Return a callable for the get trial method over gRPC. - - Gets a Trial. - - Returns: - Callable[[~.GetTrialRequest], - Awaitable[~.Trial]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_trial' not in self._stubs: - self._stubs['get_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/GetTrial', - request_serializer=vizier_service.GetTrialRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['get_trial'] - - @property - def list_trials(self) -> Callable[ - [vizier_service.ListTrialsRequest], - Awaitable[vizier_service.ListTrialsResponse]]: - r"""Return a callable for the list trials method over gRPC. - - Lists the Trials associated with a Study. - - Returns: - Callable[[~.ListTrialsRequest], - Awaitable[~.ListTrialsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_trials' not in self._stubs: - self._stubs['list_trials'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/ListTrials', - request_serializer=vizier_service.ListTrialsRequest.serialize, - response_deserializer=vizier_service.ListTrialsResponse.deserialize, - ) - return self._stubs['list_trials'] - - @property - def add_trial_measurement(self) -> Callable[ - [vizier_service.AddTrialMeasurementRequest], - Awaitable[study.Trial]]: - r"""Return a callable for the add trial measurement method over gRPC. - - Adds a measurement of the objective metrics to a - Trial. This measurement is assumed to have been taken - before the Trial is complete. - - Returns: - Callable[[~.AddTrialMeasurementRequest], - Awaitable[~.Trial]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'add_trial_measurement' not in self._stubs: - self._stubs['add_trial_measurement'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/AddTrialMeasurement', - request_serializer=vizier_service.AddTrialMeasurementRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['add_trial_measurement'] - - @property - def complete_trial(self) -> Callable[ - [vizier_service.CompleteTrialRequest], - Awaitable[study.Trial]]: - r"""Return a callable for the complete trial method over gRPC. - - Marks a Trial as complete. - - Returns: - Callable[[~.CompleteTrialRequest], - Awaitable[~.Trial]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'complete_trial' not in self._stubs: - self._stubs['complete_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/CompleteTrial', - request_serializer=vizier_service.CompleteTrialRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['complete_trial'] - - @property - def delete_trial(self) -> Callable[ - [vizier_service.DeleteTrialRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the delete trial method over gRPC. - - Deletes a Trial. - - Returns: - Callable[[~.DeleteTrialRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_trial' not in self._stubs: - self._stubs['delete_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/DeleteTrial', - request_serializer=vizier_service.DeleteTrialRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_trial'] - - @property - def check_trial_early_stopping_state(self) -> Callable[ - [vizier_service.CheckTrialEarlyStoppingStateRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the check trial early stopping - state method over gRPC. - - Checks whether a Trial should stop or not. Returns a - long-running operation. When the operation is successful, it - will contain a - [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse]. - - Returns: - Callable[[~.CheckTrialEarlyStoppingStateRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'check_trial_early_stopping_state' not in self._stubs: - self._stubs['check_trial_early_stopping_state'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/CheckTrialEarlyStoppingState', - request_serializer=vizier_service.CheckTrialEarlyStoppingStateRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['check_trial_early_stopping_state'] - - @property - def stop_trial(self) -> Callable[ - [vizier_service.StopTrialRequest], - Awaitable[study.Trial]]: - r"""Return a callable for the stop trial method over gRPC. - - Stops a Trial. - - Returns: - Callable[[~.StopTrialRequest], - Awaitable[~.Trial]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'stop_trial' not in self._stubs: - self._stubs['stop_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/StopTrial', - request_serializer=vizier_service.StopTrialRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['stop_trial'] - - @property - def list_optimal_trials(self) -> Callable[ - [vizier_service.ListOptimalTrialsRequest], - Awaitable[vizier_service.ListOptimalTrialsResponse]]: - r"""Return a callable for the list optimal trials method over gRPC. - - Lists the pareto-optimal Trials for multi-objective Study or the - optimal Trials for single-objective Study. The definition of - pareto-optimal can be checked in wiki page. - https://en.wikipedia.org/wiki/Pareto_efficiency - - Returns: - Callable[[~.ListOptimalTrialsRequest], - Awaitable[~.ListOptimalTrialsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_optimal_trials' not in self._stubs: - self._stubs['list_optimal_trials'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/ListOptimalTrials', - request_serializer=vizier_service.ListOptimalTrialsRequest.serialize, - response_deserializer=vizier_service.ListOptimalTrialsResponse.deserialize, - ) - return self._stubs['list_optimal_trials'] - - -__all__ = ( - 'VizierServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/__init__.py deleted file mode 100644 index d50ced74ac..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/__init__.py +++ /dev/null @@ -1,947 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
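[Editor's sketch] The module deleted below re-exported every proto-plus message, letting user code import the whole v1beta1 surface from one namespace; a brief usage sketch with illustrative values:

from google.cloud.aiplatform_v1beta1 import types

request = types.CreateStudyRequest(
    parent='projects/my-project/locations/us-central1',
    study=types.Study(display_name='tuning-study'),
)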
-# -from .annotation import ( - Annotation, -) -from .annotation_spec import ( - AnnotationSpec, -) -from .artifact import ( - Artifact, -) -from .batch_prediction_job import ( - BatchPredictionJob, -) -from .completion_stats import ( - CompletionStats, -) -from .context import ( - Context, -) -from .custom_job import ( - ContainerSpec, - CustomJob, - CustomJobSpec, - PythonPackageSpec, - Scheduling, - WorkerPoolSpec, -) -from .data_item import ( - DataItem, -) -from .data_labeling_job import ( - ActiveLearningConfig, - DataLabelingJob, - SampleConfig, - TrainingConfig, -) -from .dataset import ( - Dataset, - ExportDataConfig, - ImportDataConfig, -) -from .dataset_service import ( - CreateDatasetOperationMetadata, - CreateDatasetRequest, - DeleteDatasetRequest, - ExportDataOperationMetadata, - ExportDataRequest, - ExportDataResponse, - GetAnnotationSpecRequest, - GetDatasetRequest, - ImportDataOperationMetadata, - ImportDataRequest, - ImportDataResponse, - ListAnnotationsRequest, - ListAnnotationsResponse, - ListDataItemsRequest, - ListDataItemsResponse, - ListDatasetsRequest, - ListDatasetsResponse, - UpdateDatasetRequest, -) -from .deployed_index_ref import ( - DeployedIndexRef, -) -from .deployed_model_ref import ( - DeployedModelRef, -) -from .encryption_spec import ( - EncryptionSpec, -) -from .endpoint import ( - DeployedModel, - Endpoint, -) -from .endpoint_service import ( - CreateEndpointOperationMetadata, - CreateEndpointRequest, - DeleteEndpointRequest, - DeployModelOperationMetadata, - DeployModelRequest, - DeployModelResponse, - GetEndpointRequest, - ListEndpointsRequest, - ListEndpointsResponse, - UndeployModelOperationMetadata, - UndeployModelRequest, - UndeployModelResponse, - UpdateEndpointRequest, -) -from .entity_type import ( - EntityType, -) -from .env_var import ( - EnvVar, -) -from .event import ( - Event, -) -from .execution import ( - Execution, -) -from .explanation import ( - Attribution, - Explanation, - ExplanationMetadataOverride, - ExplanationParameters, - ExplanationSpec, - ExplanationSpecOverride, - FeatureNoiseSigma, - IntegratedGradientsAttribution, - ModelExplanation, - SampledShapleyAttribution, - SmoothGradConfig, - XraiAttribution, -) -from .explanation_metadata import ( - ExplanationMetadata, -) -from .feature import ( - Feature, -) -from .feature_monitoring_stats import ( - FeatureStatsAnomaly, -) -from .feature_selector import ( - FeatureSelector, - IdMatcher, -) -from .featurestore import ( - Featurestore, -) -from .featurestore_monitoring import ( - FeaturestoreMonitoringConfig, -) -from .featurestore_online_service import ( - FeatureValue, - FeatureValueList, - ReadFeatureValuesRequest, - ReadFeatureValuesResponse, - StreamingReadFeatureValuesRequest, -) -from .featurestore_service import ( - BatchCreateFeaturesOperationMetadata, - BatchCreateFeaturesRequest, - BatchCreateFeaturesResponse, - BatchReadFeatureValuesOperationMetadata, - BatchReadFeatureValuesRequest, - BatchReadFeatureValuesResponse, - CreateEntityTypeOperationMetadata, - CreateEntityTypeRequest, - CreateFeatureOperationMetadata, - CreateFeatureRequest, - CreateFeaturestoreOperationMetadata, - CreateFeaturestoreRequest, - DeleteEntityTypeRequest, - DeleteFeatureRequest, - DeleteFeaturestoreRequest, - DestinationFeatureSetting, - ExportFeatureValuesOperationMetadata, - ExportFeatureValuesRequest, - ExportFeatureValuesResponse, - FeatureValueDestination, - GetEntityTypeRequest, - GetFeatureRequest, - GetFeaturestoreRequest, - ImportFeatureValuesOperationMetadata, - 
ImportFeatureValuesRequest, - ImportFeatureValuesResponse, - ListEntityTypesRequest, - ListEntityTypesResponse, - ListFeaturesRequest, - ListFeaturesResponse, - ListFeaturestoresRequest, - ListFeaturestoresResponse, - SearchFeaturesRequest, - SearchFeaturesResponse, - UpdateEntityTypeRequest, - UpdateFeatureRequest, - UpdateFeaturestoreOperationMetadata, - UpdateFeaturestoreRequest, -) -from .hyperparameter_tuning_job import ( - HyperparameterTuningJob, -) -from .index import ( - Index, -) -from .index_endpoint import ( - DeployedIndex, - DeployedIndexAuthConfig, - IndexEndpoint, - IndexPrivateEndpoints, -) -from .index_endpoint_service import ( - CreateIndexEndpointOperationMetadata, - CreateIndexEndpointRequest, - DeleteIndexEndpointRequest, - DeployIndexOperationMetadata, - DeployIndexRequest, - DeployIndexResponse, - GetIndexEndpointRequest, - ListIndexEndpointsRequest, - ListIndexEndpointsResponse, - UndeployIndexOperationMetadata, - UndeployIndexRequest, - UndeployIndexResponse, - UpdateIndexEndpointRequest, -) -from .index_service import ( - CreateIndexOperationMetadata, - CreateIndexRequest, - DeleteIndexRequest, - GetIndexRequest, - ListIndexesRequest, - ListIndexesResponse, - NearestNeighborSearchOperationMetadata, - UpdateIndexOperationMetadata, - UpdateIndexRequest, -) -from .io import ( - AvroSource, - BigQueryDestination, - BigQuerySource, - ContainerRegistryDestination, - CsvDestination, - CsvSource, - GcsDestination, - GcsSource, - TFRecordDestination, -) -from .job_service import ( - CancelBatchPredictionJobRequest, - CancelCustomJobRequest, - CancelDataLabelingJobRequest, - CancelHyperparameterTuningJobRequest, - CreateBatchPredictionJobRequest, - CreateCustomJobRequest, - CreateDataLabelingJobRequest, - CreateHyperparameterTuningJobRequest, - CreateModelDeploymentMonitoringJobRequest, - DeleteBatchPredictionJobRequest, - DeleteCustomJobRequest, - DeleteDataLabelingJobRequest, - DeleteHyperparameterTuningJobRequest, - DeleteModelDeploymentMonitoringJobRequest, - GetBatchPredictionJobRequest, - GetCustomJobRequest, - GetDataLabelingJobRequest, - GetHyperparameterTuningJobRequest, - GetModelDeploymentMonitoringJobRequest, - ListBatchPredictionJobsRequest, - ListBatchPredictionJobsResponse, - ListCustomJobsRequest, - ListCustomJobsResponse, - ListDataLabelingJobsRequest, - ListDataLabelingJobsResponse, - ListHyperparameterTuningJobsRequest, - ListHyperparameterTuningJobsResponse, - ListModelDeploymentMonitoringJobsRequest, - ListModelDeploymentMonitoringJobsResponse, - PauseModelDeploymentMonitoringJobRequest, - ResumeModelDeploymentMonitoringJobRequest, - SearchModelDeploymentMonitoringStatsAnomaliesRequest, - SearchModelDeploymentMonitoringStatsAnomaliesResponse, - UpdateModelDeploymentMonitoringJobOperationMetadata, - UpdateModelDeploymentMonitoringJobRequest, -) -from .lineage_subgraph import ( - LineageSubgraph, -) -from .machine_resources import ( - AutomaticResources, - AutoscalingMetricSpec, - BatchDedicatedResources, - DedicatedResources, - DiskSpec, - MachineSpec, - ResourcesConsumed, -) -from .manual_batch_tuning_parameters import ( - ManualBatchTuningParameters, -) -from .metadata_schema import ( - MetadataSchema, -) -from .metadata_service import ( - AddContextArtifactsAndExecutionsRequest, - AddContextArtifactsAndExecutionsResponse, - AddContextChildrenRequest, - AddContextChildrenResponse, - AddExecutionEventsRequest, - AddExecutionEventsResponse, - CreateArtifactRequest, - CreateContextRequest, - CreateExecutionRequest, - CreateMetadataSchemaRequest, - 
CreateMetadataStoreOperationMetadata, - CreateMetadataStoreRequest, - DeleteContextRequest, - DeleteMetadataStoreOperationMetadata, - DeleteMetadataStoreRequest, - GetArtifactRequest, - GetContextRequest, - GetExecutionRequest, - GetMetadataSchemaRequest, - GetMetadataStoreRequest, - ListArtifactsRequest, - ListArtifactsResponse, - ListContextsRequest, - ListContextsResponse, - ListExecutionsRequest, - ListExecutionsResponse, - ListMetadataSchemasRequest, - ListMetadataSchemasResponse, - ListMetadataStoresRequest, - ListMetadataStoresResponse, - QueryArtifactLineageSubgraphRequest, - QueryContextLineageSubgraphRequest, - QueryExecutionInputsAndOutputsRequest, - UpdateArtifactRequest, - UpdateContextRequest, - UpdateExecutionRequest, -) -from .metadata_store import ( - MetadataStore, -) -from .migratable_resource import ( - MigratableResource, -) -from .migration_service import ( - BatchMigrateResourcesOperationMetadata, - BatchMigrateResourcesRequest, - BatchMigrateResourcesResponse, - MigrateResourceRequest, - MigrateResourceResponse, - SearchMigratableResourcesRequest, - SearchMigratableResourcesResponse, -) -from .model import ( - Model, - ModelContainerSpec, - Port, - PredictSchemata, -) -from .model_deployment_monitoring_job import ( - ModelDeploymentMonitoringBigQueryTable, - ModelDeploymentMonitoringJob, - ModelDeploymentMonitoringObjectiveConfig, - ModelDeploymentMonitoringScheduleConfig, - ModelMonitoringStatsAnomalies, - ModelDeploymentMonitoringObjectiveType, -) -from .model_evaluation import ( - ModelEvaluation, -) -from .model_evaluation_slice import ( - ModelEvaluationSlice, -) -from .model_monitoring import ( - ModelMonitoringAlertConfig, - ModelMonitoringObjectiveConfig, - SamplingStrategy, - ThresholdConfig, -) -from .model_service import ( - DeleteModelRequest, - ExportModelOperationMetadata, - ExportModelRequest, - ExportModelResponse, - GetModelEvaluationRequest, - GetModelEvaluationSliceRequest, - GetModelRequest, - ListModelEvaluationSlicesRequest, - ListModelEvaluationSlicesResponse, - ListModelEvaluationsRequest, - ListModelEvaluationsResponse, - ListModelsRequest, - ListModelsResponse, - UpdateModelRequest, - UploadModelOperationMetadata, - UploadModelRequest, - UploadModelResponse, -) -from .operation import ( - DeleteOperationMetadata, - GenericOperationMetadata, -) -from .pipeline_job import ( - PipelineJob, - PipelineJobDetail, - PipelineTaskDetail, - PipelineTaskExecutorDetail, -) -from .pipeline_service import ( - CancelPipelineJobRequest, - CancelTrainingPipelineRequest, - CreatePipelineJobRequest, - CreateTrainingPipelineRequest, - DeletePipelineJobRequest, - DeleteTrainingPipelineRequest, - GetPipelineJobRequest, - GetTrainingPipelineRequest, - ListPipelineJobsRequest, - ListPipelineJobsResponse, - ListTrainingPipelinesRequest, - ListTrainingPipelinesResponse, -) -from .prediction_service import ( - ExplainRequest, - ExplainResponse, - PredictRequest, - PredictResponse, -) -from .specialist_pool import ( - SpecialistPool, -) -from .specialist_pool_service import ( - CreateSpecialistPoolOperationMetadata, - CreateSpecialistPoolRequest, - DeleteSpecialistPoolRequest, - GetSpecialistPoolRequest, - ListSpecialistPoolsRequest, - ListSpecialistPoolsResponse, - UpdateSpecialistPoolOperationMetadata, - UpdateSpecialistPoolRequest, -) -from .study import ( - Measurement, - Study, - StudySpec, - Trial, -) -from .tensorboard import ( - Tensorboard, -) -from .tensorboard_data import ( - Scalar, - TensorboardBlob, - TensorboardBlobSequence, - TensorboardTensor, - 
TimeSeriesData, - TimeSeriesDataPoint, -) -from .tensorboard_experiment import ( - TensorboardExperiment, -) -from .tensorboard_run import ( - TensorboardRun, -) -from .tensorboard_service import ( - CreateTensorboardExperimentRequest, - CreateTensorboardOperationMetadata, - CreateTensorboardRequest, - CreateTensorboardRunRequest, - CreateTensorboardTimeSeriesRequest, - DeleteTensorboardExperimentRequest, - DeleteTensorboardRequest, - DeleteTensorboardRunRequest, - DeleteTensorboardTimeSeriesRequest, - ExportTensorboardTimeSeriesDataRequest, - ExportTensorboardTimeSeriesDataResponse, - GetTensorboardExperimentRequest, - GetTensorboardRequest, - GetTensorboardRunRequest, - GetTensorboardTimeSeriesRequest, - ListTensorboardExperimentsRequest, - ListTensorboardExperimentsResponse, - ListTensorboardRunsRequest, - ListTensorboardRunsResponse, - ListTensorboardsRequest, - ListTensorboardsResponse, - ListTensorboardTimeSeriesRequest, - ListTensorboardTimeSeriesResponse, - ReadTensorboardBlobDataRequest, - ReadTensorboardBlobDataResponse, - ReadTensorboardTimeSeriesDataRequest, - ReadTensorboardTimeSeriesDataResponse, - UpdateTensorboardExperimentRequest, - UpdateTensorboardOperationMetadata, - UpdateTensorboardRequest, - UpdateTensorboardRunRequest, - UpdateTensorboardTimeSeriesRequest, - WriteTensorboardRunDataRequest, - WriteTensorboardRunDataResponse, -) -from .tensorboard_time_series import ( - TensorboardTimeSeries, -) -from .training_pipeline import ( - FilterSplit, - FractionSplit, - InputDataConfig, - PredefinedSplit, - TimestampSplit, - TrainingPipeline, -) -from .types import ( - BoolArray, - DoubleArray, - Int64Array, - StringArray, -) -from .user_action_reference import ( - UserActionReference, -) -from .value import ( - Value, -) -from .vizier_service import ( - AddTrialMeasurementRequest, - CheckTrialEarlyStoppingStateMetatdata, - CheckTrialEarlyStoppingStateRequest, - CheckTrialEarlyStoppingStateResponse, - CompleteTrialRequest, - CreateStudyRequest, - CreateTrialRequest, - DeleteStudyRequest, - DeleteTrialRequest, - GetStudyRequest, - GetTrialRequest, - ListOptimalTrialsRequest, - ListOptimalTrialsResponse, - ListStudiesRequest, - ListStudiesResponse, - ListTrialsRequest, - ListTrialsResponse, - LookupStudyRequest, - StopTrialRequest, - SuggestTrialsMetadata, - SuggestTrialsRequest, - SuggestTrialsResponse, -) - -__all__ = ( - 'AcceleratorType', - 'Annotation', - 'AnnotationSpec', - 'Artifact', - 'BatchPredictionJob', - 'CompletionStats', - 'Context', - 'ContainerSpec', - 'CustomJob', - 'CustomJobSpec', - 'PythonPackageSpec', - 'Scheduling', - 'WorkerPoolSpec', - 'DataItem', - 'ActiveLearningConfig', - 'DataLabelingJob', - 'SampleConfig', - 'TrainingConfig', - 'Dataset', - 'ExportDataConfig', - 'ImportDataConfig', - 'CreateDatasetOperationMetadata', - 'CreateDatasetRequest', - 'DeleteDatasetRequest', - 'ExportDataOperationMetadata', - 'ExportDataRequest', - 'ExportDataResponse', - 'GetAnnotationSpecRequest', - 'GetDatasetRequest', - 'ImportDataOperationMetadata', - 'ImportDataRequest', - 'ImportDataResponse', - 'ListAnnotationsRequest', - 'ListAnnotationsResponse', - 'ListDataItemsRequest', - 'ListDataItemsResponse', - 'ListDatasetsRequest', - 'ListDatasetsResponse', - 'UpdateDatasetRequest', - 'DeployedIndexRef', - 'DeployedModelRef', - 'EncryptionSpec', - 'DeployedModel', - 'Endpoint', - 'CreateEndpointOperationMetadata', - 'CreateEndpointRequest', - 'DeleteEndpointRequest', - 'DeployModelOperationMetadata', - 'DeployModelRequest', - 'DeployModelResponse', - 
'GetEndpointRequest', - 'ListEndpointsRequest', - 'ListEndpointsResponse', - 'UndeployModelOperationMetadata', - 'UndeployModelRequest', - 'UndeployModelResponse', - 'UpdateEndpointRequest', - 'EntityType', - 'EnvVar', - 'Event', - 'Execution', - 'Attribution', - 'Explanation', - 'ExplanationMetadataOverride', - 'ExplanationParameters', - 'ExplanationSpec', - 'ExplanationSpecOverride', - 'FeatureNoiseSigma', - 'IntegratedGradientsAttribution', - 'ModelExplanation', - 'SampledShapleyAttribution', - 'SmoothGradConfig', - 'XraiAttribution', - 'ExplanationMetadata', - 'Feature', - 'FeatureStatsAnomaly', - 'FeatureSelector', - 'IdMatcher', - 'Featurestore', - 'FeaturestoreMonitoringConfig', - 'FeatureValue', - 'FeatureValueList', - 'ReadFeatureValuesRequest', - 'ReadFeatureValuesResponse', - 'StreamingReadFeatureValuesRequest', - 'BatchCreateFeaturesOperationMetadata', - 'BatchCreateFeaturesRequest', - 'BatchCreateFeaturesResponse', - 'BatchReadFeatureValuesOperationMetadata', - 'BatchReadFeatureValuesRequest', - 'BatchReadFeatureValuesResponse', - 'CreateEntityTypeOperationMetadata', - 'CreateEntityTypeRequest', - 'CreateFeatureOperationMetadata', - 'CreateFeatureRequest', - 'CreateFeaturestoreOperationMetadata', - 'CreateFeaturestoreRequest', - 'DeleteEntityTypeRequest', - 'DeleteFeatureRequest', - 'DeleteFeaturestoreRequest', - 'DestinationFeatureSetting', - 'ExportFeatureValuesOperationMetadata', - 'ExportFeatureValuesRequest', - 'ExportFeatureValuesResponse', - 'FeatureValueDestination', - 'GetEntityTypeRequest', - 'GetFeatureRequest', - 'GetFeaturestoreRequest', - 'ImportFeatureValuesOperationMetadata', - 'ImportFeatureValuesRequest', - 'ImportFeatureValuesResponse', - 'ListEntityTypesRequest', - 'ListEntityTypesResponse', - 'ListFeaturesRequest', - 'ListFeaturesResponse', - 'ListFeaturestoresRequest', - 'ListFeaturestoresResponse', - 'SearchFeaturesRequest', - 'SearchFeaturesResponse', - 'UpdateEntityTypeRequest', - 'UpdateFeatureRequest', - 'UpdateFeaturestoreOperationMetadata', - 'UpdateFeaturestoreRequest', - 'HyperparameterTuningJob', - 'Index', - 'DeployedIndex', - 'DeployedIndexAuthConfig', - 'IndexEndpoint', - 'IndexPrivateEndpoints', - 'CreateIndexEndpointOperationMetadata', - 'CreateIndexEndpointRequest', - 'DeleteIndexEndpointRequest', - 'DeployIndexOperationMetadata', - 'DeployIndexRequest', - 'DeployIndexResponse', - 'GetIndexEndpointRequest', - 'ListIndexEndpointsRequest', - 'ListIndexEndpointsResponse', - 'UndeployIndexOperationMetadata', - 'UndeployIndexRequest', - 'UndeployIndexResponse', - 'UpdateIndexEndpointRequest', - 'CreateIndexOperationMetadata', - 'CreateIndexRequest', - 'DeleteIndexRequest', - 'GetIndexRequest', - 'ListIndexesRequest', - 'ListIndexesResponse', - 'NearestNeighborSearchOperationMetadata', - 'UpdateIndexOperationMetadata', - 'UpdateIndexRequest', - 'AvroSource', - 'BigQueryDestination', - 'BigQuerySource', - 'ContainerRegistryDestination', - 'CsvDestination', - 'CsvSource', - 'GcsDestination', - 'GcsSource', - 'TFRecordDestination', - 'CancelBatchPredictionJobRequest', - 'CancelCustomJobRequest', - 'CancelDataLabelingJobRequest', - 'CancelHyperparameterTuningJobRequest', - 'CreateBatchPredictionJobRequest', - 'CreateCustomJobRequest', - 'CreateDataLabelingJobRequest', - 'CreateHyperparameterTuningJobRequest', - 'CreateModelDeploymentMonitoringJobRequest', - 'DeleteBatchPredictionJobRequest', - 'DeleteCustomJobRequest', - 'DeleteDataLabelingJobRequest', - 'DeleteHyperparameterTuningJobRequest', - 'DeleteModelDeploymentMonitoringJobRequest', - 
'GetBatchPredictionJobRequest', - 'GetCustomJobRequest', - 'GetDataLabelingJobRequest', - 'GetHyperparameterTuningJobRequest', - 'GetModelDeploymentMonitoringJobRequest', - 'ListBatchPredictionJobsRequest', - 'ListBatchPredictionJobsResponse', - 'ListCustomJobsRequest', - 'ListCustomJobsResponse', - 'ListDataLabelingJobsRequest', - 'ListDataLabelingJobsResponse', - 'ListHyperparameterTuningJobsRequest', - 'ListHyperparameterTuningJobsResponse', - 'ListModelDeploymentMonitoringJobsRequest', - 'ListModelDeploymentMonitoringJobsResponse', - 'PauseModelDeploymentMonitoringJobRequest', - 'ResumeModelDeploymentMonitoringJobRequest', - 'SearchModelDeploymentMonitoringStatsAnomaliesRequest', - 'SearchModelDeploymentMonitoringStatsAnomaliesResponse', - 'UpdateModelDeploymentMonitoringJobOperationMetadata', - 'UpdateModelDeploymentMonitoringJobRequest', - 'JobState', - 'LineageSubgraph', - 'AutomaticResources', - 'AutoscalingMetricSpec', - 'BatchDedicatedResources', - 'DedicatedResources', - 'DiskSpec', - 'MachineSpec', - 'ResourcesConsumed', - 'ManualBatchTuningParameters', - 'MetadataSchema', - 'AddContextArtifactsAndExecutionsRequest', - 'AddContextArtifactsAndExecutionsResponse', - 'AddContextChildrenRequest', - 'AddContextChildrenResponse', - 'AddExecutionEventsRequest', - 'AddExecutionEventsResponse', - 'CreateArtifactRequest', - 'CreateContextRequest', - 'CreateExecutionRequest', - 'CreateMetadataSchemaRequest', - 'CreateMetadataStoreOperationMetadata', - 'CreateMetadataStoreRequest', - 'DeleteContextRequest', - 'DeleteMetadataStoreOperationMetadata', - 'DeleteMetadataStoreRequest', - 'GetArtifactRequest', - 'GetContextRequest', - 'GetExecutionRequest', - 'GetMetadataSchemaRequest', - 'GetMetadataStoreRequest', - 'ListArtifactsRequest', - 'ListArtifactsResponse', - 'ListContextsRequest', - 'ListContextsResponse', - 'ListExecutionsRequest', - 'ListExecutionsResponse', - 'ListMetadataSchemasRequest', - 'ListMetadataSchemasResponse', - 'ListMetadataStoresRequest', - 'ListMetadataStoresResponse', - 'QueryArtifactLineageSubgraphRequest', - 'QueryContextLineageSubgraphRequest', - 'QueryExecutionInputsAndOutputsRequest', - 'UpdateArtifactRequest', - 'UpdateContextRequest', - 'UpdateExecutionRequest', - 'MetadataStore', - 'MigratableResource', - 'BatchMigrateResourcesOperationMetadata', - 'BatchMigrateResourcesRequest', - 'BatchMigrateResourcesResponse', - 'MigrateResourceRequest', - 'MigrateResourceResponse', - 'SearchMigratableResourcesRequest', - 'SearchMigratableResourcesResponse', - 'Model', - 'ModelContainerSpec', - 'Port', - 'PredictSchemata', - 'ModelDeploymentMonitoringBigQueryTable', - 'ModelDeploymentMonitoringJob', - 'ModelDeploymentMonitoringObjectiveConfig', - 'ModelDeploymentMonitoringScheduleConfig', - 'ModelMonitoringStatsAnomalies', - 'ModelDeploymentMonitoringObjectiveType', - 'ModelEvaluation', - 'ModelEvaluationSlice', - 'ModelMonitoringAlertConfig', - 'ModelMonitoringObjectiveConfig', - 'SamplingStrategy', - 'ThresholdConfig', - 'DeleteModelRequest', - 'ExportModelOperationMetadata', - 'ExportModelRequest', - 'ExportModelResponse', - 'GetModelEvaluationRequest', - 'GetModelEvaluationSliceRequest', - 'GetModelRequest', - 'ListModelEvaluationSlicesRequest', - 'ListModelEvaluationSlicesResponse', - 'ListModelEvaluationsRequest', - 'ListModelEvaluationsResponse', - 'ListModelsRequest', - 'ListModelsResponse', - 'UpdateModelRequest', - 'UploadModelOperationMetadata', - 'UploadModelRequest', - 'UploadModelResponse', - 'DeleteOperationMetadata', - 'GenericOperationMetadata', - 
'PipelineJob', - 'PipelineJobDetail', - 'PipelineTaskDetail', - 'PipelineTaskExecutorDetail', - 'CancelPipelineJobRequest', - 'CancelTrainingPipelineRequest', - 'CreatePipelineJobRequest', - 'CreateTrainingPipelineRequest', - 'DeletePipelineJobRequest', - 'DeleteTrainingPipelineRequest', - 'GetPipelineJobRequest', - 'GetTrainingPipelineRequest', - 'ListPipelineJobsRequest', - 'ListPipelineJobsResponse', - 'ListTrainingPipelinesRequest', - 'ListTrainingPipelinesResponse', - 'PipelineState', - 'ExplainRequest', - 'ExplainResponse', - 'PredictRequest', - 'PredictResponse', - 'SpecialistPool', - 'CreateSpecialistPoolOperationMetadata', - 'CreateSpecialistPoolRequest', - 'DeleteSpecialistPoolRequest', - 'GetSpecialistPoolRequest', - 'ListSpecialistPoolsRequest', - 'ListSpecialistPoolsResponse', - 'UpdateSpecialistPoolOperationMetadata', - 'UpdateSpecialistPoolRequest', - 'Measurement', - 'Study', - 'StudySpec', - 'Trial', - 'Tensorboard', - 'Scalar', - 'TensorboardBlob', - 'TensorboardBlobSequence', - 'TensorboardTensor', - 'TimeSeriesData', - 'TimeSeriesDataPoint', - 'TensorboardExperiment', - 'TensorboardRun', - 'CreateTensorboardExperimentRequest', - 'CreateTensorboardOperationMetadata', - 'CreateTensorboardRequest', - 'CreateTensorboardRunRequest', - 'CreateTensorboardTimeSeriesRequest', - 'DeleteTensorboardExperimentRequest', - 'DeleteTensorboardRequest', - 'DeleteTensorboardRunRequest', - 'DeleteTensorboardTimeSeriesRequest', - 'ExportTensorboardTimeSeriesDataRequest', - 'ExportTensorboardTimeSeriesDataResponse', - 'GetTensorboardExperimentRequest', - 'GetTensorboardRequest', - 'GetTensorboardRunRequest', - 'GetTensorboardTimeSeriesRequest', - 'ListTensorboardExperimentsRequest', - 'ListTensorboardExperimentsResponse', - 'ListTensorboardRunsRequest', - 'ListTensorboardRunsResponse', - 'ListTensorboardsRequest', - 'ListTensorboardsResponse', - 'ListTensorboardTimeSeriesRequest', - 'ListTensorboardTimeSeriesResponse', - 'ReadTensorboardBlobDataRequest', - 'ReadTensorboardBlobDataResponse', - 'ReadTensorboardTimeSeriesDataRequest', - 'ReadTensorboardTimeSeriesDataResponse', - 'UpdateTensorboardExperimentRequest', - 'UpdateTensorboardOperationMetadata', - 'UpdateTensorboardRequest', - 'UpdateTensorboardRunRequest', - 'UpdateTensorboardTimeSeriesRequest', - 'WriteTensorboardRunDataRequest', - 'WriteTensorboardRunDataResponse', - 'TensorboardTimeSeries', - 'FilterSplit', - 'FractionSplit', - 'InputDataConfig', - 'PredefinedSplit', - 'TimestampSplit', - 'TrainingPipeline', - 'BoolArray', - 'DoubleArray', - 'Int64Array', - 'StringArray', - 'UserActionReference', - 'Value', - 'AddTrialMeasurementRequest', - 'CheckTrialEarlyStoppingStateMetatdata', - 'CheckTrialEarlyStoppingStateRequest', - 'CheckTrialEarlyStoppingStateResponse', - 'CompleteTrialRequest', - 'CreateStudyRequest', - 'CreateTrialRequest', - 'DeleteStudyRequest', - 'DeleteTrialRequest', - 'GetStudyRequest', - 'GetTrialRequest', - 'ListOptimalTrialsRequest', - 'ListOptimalTrialsResponse', - 'ListStudiesRequest', - 'ListStudiesResponse', - 'ListTrialsRequest', - 'ListTrialsResponse', - 'LookupStudyRequest', - 'StopTrialRequest', - 'SuggestTrialsMetadata', - 'SuggestTrialsRequest', - 'SuggestTrialsResponse', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/accelerator_type.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/accelerator_type.py deleted file mode 100644 index 3e2b8a46f4..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/accelerator_type.py +++ 
/dev/null @@ -1,38 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'AcceleratorType', - }, -) - - -class AcceleratorType(proto.Enum): - r"""Represents a hardware accelerator type.""" - ACCELERATOR_TYPE_UNSPECIFIED = 0 - NVIDIA_TESLA_K80 = 1 - NVIDIA_TESLA_P100 = 2 - NVIDIA_TESLA_V100 = 3 - NVIDIA_TESLA_P4 = 4 - NVIDIA_TESLA_T4 = 5 - NVIDIA_TESLA_A100 = 8 - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation.py deleted file mode 100644 index 526bcad4d2..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation.py +++ /dev/null @@ -1,129 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import user_action_reference -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'Annotation', - }, -) - - -class Annotation(proto.Message): - r"""Used to assign specific AnnotationSpec to a particular area - of a DataItem or the whole part of the DataItem. - - Attributes: - name (str): - Output only. Resource name of the Annotation. - payload_schema_uri (str): - Required. Google Cloud Storage URI points to a YAML file - describing - [payload][google.cloud.aiplatform.v1beta1.Annotation.payload]. - The schema is defined as an `OpenAPI 3.0.2 Schema - Object `__. - The schema files that can be used here are found in - gs://google-cloud-aiplatform/schema/dataset/annotation/, - note that the chosen schema must be consistent with the - parent Dataset's - [metadata][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri]. - payload (google.protobuf.struct_pb2.Value): - Required. The schema of the payload can be found in - [payload_schema][google.cloud.aiplatform.v1beta1.Annotation.payload_schema_uri]. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Annotation - was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Annotation - was last updated. 
-        etag (str):
-            Optional. Used to perform consistent read-modify-write
-            updates. If not set, a blind "overwrite" update happens.
-        annotation_source (google.cloud.aiplatform_v1beta1.types.UserActionReference):
-            Output only. The source of the Annotation.
-        labels (Sequence[google.cloud.aiplatform_v1beta1.types.Annotation.LabelsEntry]):
-            Optional. The labels with user-defined metadata to organize
-            your Annotations.
-
-            Label keys and values can be no longer than 64 characters
-            (Unicode codepoints), can only contain lowercase letters,
-            numeric characters, underscores and dashes. International
-            characters are allowed. No more than 64 user labels can be
-            associated with one Annotation (System labels are excluded).
-
-            See https://goo.gl/xmQnxf for more information and examples
-            of labels. System reserved label keys are prefixed with
-            "aiplatform.googleapis.com/" and are immutable. Following
-            system labels exist for each Annotation:
-
-            -  "aiplatform.googleapis.com/annotation_set_name":
-               optional, name of the UI's annotation set this Annotation
-               belongs to. If not set, the Annotation is not visible in
-               the UI.
-
-            -  "aiplatform.googleapis.com/payload_schema": output only,
-               its value is the
-               [payload_schema's][google.cloud.aiplatform.v1beta1.Annotation.payload_schema_uri]
-               title.
-    """
-
-    name = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-    payload_schema_uri = proto.Field(
-        proto.STRING,
-        number=2,
-    )
-    payload = proto.Field(
-        proto.MESSAGE,
-        number=3,
-        message=struct_pb2.Value,
-    )
-    create_time = proto.Field(
-        proto.MESSAGE,
-        number=4,
-        message=timestamp_pb2.Timestamp,
-    )
-    update_time = proto.Field(
-        proto.MESSAGE,
-        number=7,
-        message=timestamp_pb2.Timestamp,
-    )
-    etag = proto.Field(
-        proto.STRING,
-        number=8,
-    )
-    annotation_source = proto.Field(
-        proto.MESSAGE,
-        number=5,
-        message=user_action_reference.UserActionReference,
-    )
-    labels = proto.MapField(
-        proto.STRING,
-        proto.STRING,
-        number=6,
-    )
-
-
-__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation_spec.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation_spec.py
deleted file mode 100644
index a254682a5c..0000000000
--- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation_spec.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import proto  # type: ignore
-
-from google.protobuf import timestamp_pb2  # type: ignore
-
-
-__protobuf__ = proto.module(
    package='google.cloud.aiplatform.v1beta1',
-    manifest={
-        'AnnotationSpec',
-    },
-)
-
-
-class AnnotationSpec(proto.Message):
-    r"""Identifies a concept with which DataItems may be
-    annotated.
-
-    Attributes:
-        name (str):
-            Output only. Resource name of the
-            AnnotationSpec.
-        display_name (str):
-            Required. The user-defined name of the
-            AnnotationSpec. The name can be up to 128
-            characters long and can consist of any UTF-8
-            characters.
-        create_time (google.protobuf.timestamp_pb2.Timestamp):
-            Output only. Timestamp when this
-            AnnotationSpec was created.
-        update_time (google.protobuf.timestamp_pb2.Timestamp):
-            Output only. Timestamp when AnnotationSpec
-            was last updated.
-        etag (str):
-            Optional. Used to perform consistent read-modify-write
-            updates. If not set, a blind "overwrite" update happens.
-    """
-
-    name = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-    display_name = proto.Field(
-        proto.STRING,
-        number=2,
-    )
-    create_time = proto.Field(
-        proto.MESSAGE,
-        number=3,
-        message=timestamp_pb2.Timestamp,
-    )
-    update_time = proto.Field(
-        proto.MESSAGE,
-        number=4,
-        message=timestamp_pb2.Timestamp,
-    )
-    etag = proto.Field(
-        proto.STRING,
-        number=5,
-    )
-
-
-__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/artifact.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/artifact.py
deleted file mode 100644
index a7275436e3..0000000000
--- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/artifact.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import proto  # type: ignore
-
-from google.protobuf import struct_pb2  # type: ignore
-from google.protobuf import timestamp_pb2  # type: ignore
-
-
-__protobuf__ = proto.module(
-    package='google.cloud.aiplatform.v1beta1',
-    manifest={
-        'Artifact',
-    },
-)
-
-
-class Artifact(proto.Message):
-    r"""Instance of a general artifact.
-    Attributes:
-        name (str):
-            Output only. The resource name of the
-            Artifact.
-        display_name (str):
-            User provided display name of the Artifact.
-            May be up to 128 Unicode characters.
-        uri (str):
-            The uniform resource identifier of the
-            artifact file. May be empty if there is no
-            actual artifact file.
-        etag (str):
-            An eTag used to perform consistent read-modify-write
-            updates. If not set, a blind "overwrite" update happens.
-        labels (Sequence[google.cloud.aiplatform_v1beta1.types.Artifact.LabelsEntry]):
-            The labels with user-defined metadata to
-            organize your Artifacts.
-            Label keys and values can be no longer than 64
-            characters (Unicode codepoints), can only
-            contain lowercase letters, numeric characters,
-            underscores and dashes. International characters
-            are allowed. No more than 64 user labels can be
-            associated with one Artifact (System labels are
-            excluded).
-        create_time (google.protobuf.timestamp_pb2.Timestamp):
-            Output only. Timestamp when this Artifact was
-            created.
-        update_time (google.protobuf.timestamp_pb2.Timestamp):
-            Output only. Timestamp when this Artifact was
-            last updated.
-        state (google.cloud.aiplatform_v1beta1.types.Artifact.State):
-            The state of this Artifact. This is a
-            property of the Artifact, and does not imply or
-            capture any ongoing process.
This property is - managed by clients (such as Vertex Pipelines), - and the system does not prescribe or check the - validity of state transitions. - schema_title (str): - The title of the schema describing the - metadata. - Schema title and version is expected to be - registered in earlier Create Schema calls. And - both are used together as unique identifiers to - identify schemas within the local metadata - store. - schema_version (str): - The version of the schema in schema_name to use. - - Schema title and version is expected to be registered in - earlier Create Schema calls. And both are used together as - unique identifiers to identify schemas within the local - metadata store. - metadata (google.protobuf.struct_pb2.Struct): - Properties of the Artifact. - description (str): - Description of the Artifact - """ - class State(proto.Enum): - r"""Describes the state of the Artifact.""" - STATE_UNSPECIFIED = 0 - PENDING = 1 - LIVE = 2 - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - uri = proto.Field( - proto.STRING, - number=6, - ) - etag = proto.Field( - proto.STRING, - number=9, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=10, - ) - create_time = proto.Field( - proto.MESSAGE, - number=11, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=12, - message=timestamp_pb2.Timestamp, - ) - state = proto.Field( - proto.ENUM, - number=13, - enum=State, - ) - schema_title = proto.Field( - proto.STRING, - number=14, - ) - schema_version = proto.Field( - proto.STRING, - number=15, - ) - metadata = proto.Field( - proto.MESSAGE, - number=16, - message=struct_pb2.Struct, - ) - description = proto.Field( - proto.STRING, - number=17, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py deleted file mode 100644 index a03181e64d..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py +++ /dev/null @@ -1,449 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
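The deleted type modules all share the proto-plus declaration style seen in artifact.py above: a module-level proto.module(...) manifest plus proto.Field / proto.MapField class attributes keyed by protobuf field number. A rough, self-contained sketch of how such a declaration behaves (the class and field set here are illustrative stand-ins, not the real Artifact):

import proto  # proto-plus, the same dependency the generated files use


class ArtifactSketch(proto.Message):
    """Cut-down stand-in for the Artifact message deleted above."""

    name = proto.Field(proto.STRING, number=1)
    display_name = proto.Field(proto.STRING, number=2)
    # MapField backs the labels map shown in the docstring above.
    labels = proto.MapField(proto.STRING, proto.STRING, number=10)


sketch = ArtifactSketch(name='artifacts/123', labels={'team': 'ml'})
print(sketch.name)           # 'artifacts/123'
print(dict(sketch.labels))   # {'team': 'ml'}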
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import completion_stats as gca_completion_stats -from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1beta1.types import explanation -from google.cloud.aiplatform_v1beta1.types import io -from google.cloud.aiplatform_v1beta1.types import job_state -from google.cloud.aiplatform_v1beta1.types import machine_resources -from google.cloud.aiplatform_v1beta1.types import manual_batch_tuning_parameters as gca_manual_batch_tuning_parameters -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'BatchPredictionJob', - }, -) - - -class BatchPredictionJob(proto.Message): - r"""A job that uses a - [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to - produce predictions on multiple [input - instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. - If predictions for significant portion of the instances fail, the - job may finish without attempting predictions for all remaining - instances. - - Attributes: - name (str): - Output only. Resource name of the - BatchPredictionJob. - display_name (str): - Required. The user-defined name of this - BatchPredictionJob. - model (str): - Required. The name of the Model that produces - the predictions via this job, must share the - same ancestor Location. Starting this job has no - impact on any existing deployments of the Model - and their resources. - input_config (google.cloud.aiplatform_v1beta1.types.BatchPredictionJob.InputConfig): - Required. Input configuration of the instances on which - predictions are performed. The schema of any single instance - may be specified via the - [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. - model_parameters (google.protobuf.struct_pb2.Value): - The parameters that govern the predictions. The schema of - the parameters may be specified via the - [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. - output_config (google.cloud.aiplatform_v1beta1.types.BatchPredictionJob.OutputConfig): - Required. The Configuration specifying where output - predictions should be written. The schema of any single - prediction may be specified as a concatenation of - [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] - and - [prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri]. - dedicated_resources (google.cloud.aiplatform_v1beta1.types.BatchDedicatedResources): - The config of resources used by the Model during the batch - prediction. 
If the Model - [supports][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types] - DEDICATED_RESOURCES this config may be provided (and the job - will use these resources), if the Model doesn't support - AUTOMATIC_RESOURCES, this config must be provided. - manual_batch_tuning_parameters (google.cloud.aiplatform_v1beta1.types.ManualBatchTuningParameters): - Immutable. Parameters configuring the batch behavior. - Currently only applicable when - [dedicated_resources][google.cloud.aiplatform.v1beta1.BatchPredictionJob.dedicated_resources] - are used (in other cases Vertex AI does the tuning itself). - generate_explanation (bool): - Generate explanation with the batch prediction results. - - When set to ``true``, the batch prediction output changes - based on the ``predictions_format`` field of the - [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config] - object: - - - ``bigquery``: output includes a column named - ``explanation``. The value is a struct that conforms to - the - [Explanation][google.cloud.aiplatform.v1beta1.Explanation] - object. - - ``jsonl``: The JSON objects on each line include an - additional entry keyed ``explanation``. The value of the - entry is a JSON object that conforms to the - [Explanation][google.cloud.aiplatform.v1beta1.Explanation] - object. - - ``csv``: Generating explanations for CSV format is not - supported. - - If this field is set to true, either the - [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] - or - [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] - must be populated. - explanation_spec (google.cloud.aiplatform_v1beta1.types.ExplanationSpec): - Explanation configuration for this BatchPredictionJob. Can - be specified only if - [generate_explanation][google.cloud.aiplatform.v1beta1.BatchPredictionJob.generate_explanation] - is set to ``true``. - - This value overrides the value of - [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec]. - All fields of - [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] - are optional in the request. If a field of the - [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] - object is not populated, the corresponding field of the - [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] - object is inherited. - output_info (google.cloud.aiplatform_v1beta1.types.BatchPredictionJob.OutputInfo): - Output only. Information further describing - the output of this job. - state (google.cloud.aiplatform_v1beta1.types.JobState): - Output only. The detailed state of the job. - error (google.rpc.status_pb2.Status): - Output only. Only populated when the job's state is - JOB_STATE_FAILED or JOB_STATE_CANCELLED. - partial_failures (Sequence[google.rpc.status_pb2.Status]): - Output only. Partial failures encountered. - For example, single files that can't be read. - This field never exceeds 20 entries. - Status details fields contain standard GCP error - details. - resources_consumed (google.cloud.aiplatform_v1beta1.types.ResourcesConsumed): - Output only. Information about resources that - had been consumed by this job. Provided in real - time at best effort basis, as well as a final - value once the job completes. - - Note: This field currently may be not populated - for batch predictions that use AutoML Models. 
- completion_stats (google.cloud.aiplatform_v1beta1.types.CompletionStats): - Output only. Statistics on completed and - failed prediction instances. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the BatchPredictionJob - was created. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the BatchPredictionJob for the first - time entered the ``JOB_STATE_RUNNING`` state. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the BatchPredictionJob entered any of - the following states: ``JOB_STATE_SUCCEEDED``, - ``JOB_STATE_FAILED``, ``JOB_STATE_CANCELLED``. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the BatchPredictionJob - was most recently updated. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.BatchPredictionJob.LabelsEntry]): - The labels with user-defined metadata to - organize BatchPredictionJobs. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): - Customer-managed encryption key options for a - BatchPredictionJob. If this is set, then all - resources created by the BatchPredictionJob will - be encrypted with the provided encryption key. - """ - - class InputConfig(proto.Message): - r"""Configures the input to - [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. - See - [Model.supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats] - for Model's supported input formats, and how instances should be - expressed via any of them. - - Attributes: - gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): - The Cloud Storage location for the input - instances. - bigquery_source (google.cloud.aiplatform_v1beta1.types.BigQuerySource): - The BigQuery location of the input table. - The schema of the table should be in the format - described by the given context OpenAPI Schema, - if one is provided. The table may contain - additional columns that are not described by the - schema, and they will be ignored. - instances_format (str): - Required. The format in which instances are given, must be - one of the - [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] - [supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats]. - """ - - gcs_source = proto.Field( - proto.MESSAGE, - number=2, - oneof='source', - message=io.GcsSource, - ) - bigquery_source = proto.Field( - proto.MESSAGE, - number=3, - oneof='source', - message=io.BigQuerySource, - ) - instances_format = proto.Field( - proto.STRING, - number=1, - ) - - class OutputConfig(proto.Message): - r"""Configures the output of - [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. - See - [Model.supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats] - for supported output formats, and how predictions are expressed via - any of them. - - Attributes: - gcs_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination): - The Cloud Storage location of the directory where the output - is to be written to. In the given directory a new directory - is created. 
Its name is - ``prediction--``, where - timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. - Inside of it files ``predictions_0001.``, - ``predictions_0002.``, ..., - ``predictions_N.`` are created where - ```` depends on chosen - [predictions_format][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.predictions_format], - and N may equal 0001 and depends on the total number of - successfully predicted instances. If the Model has both - [instance][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] - and - [prediction][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri] - schemata defined then each such file contains predictions as - per the - [predictions_format][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.predictions_format]. - If prediction for any instance failed (partially or - completely), then an additional ``errors_0001.``, - ``errors_0002.``,..., ``errors_N.`` - files are created (N depends on total number of failed - predictions). These files contain the failed instances, as - per their schema, followed by an additional ``error`` field - which as value has ```google.rpc.Status`` `__ - containing only ``code`` and ``message`` fields. - bigquery_destination (google.cloud.aiplatform_v1beta1.types.BigQueryDestination): - The BigQuery project or dataset location where the output is - to be written to. If project is provided, a new dataset is - created with name - ``prediction__`` where - is made BigQuery-dataset-name compatible (for example, most - special characters become underscores), and timestamp is in - YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the - dataset two tables will be created, ``predictions``, and - ``errors``. If the Model has both - [instance][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] - and - [prediction][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri] - schemata defined then the tables have columns as follows: - The ``predictions`` table contains instances for which the - prediction succeeded, it has columns as per a concatenation - of the Model's instance and prediction schemata. The - ``errors`` table contains rows for which the prediction has - failed, it has instance columns, as per the instance schema, - followed by a single "errors" column, which as values has - ```google.rpc.Status`` `__ represented as a STRUCT, - and containing only ``code`` and ``message``. - predictions_format (str): - Required. The format in which Vertex AI gives the - predictions, must be one of the - [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] - [supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats]. - """ - - gcs_destination = proto.Field( - proto.MESSAGE, - number=2, - oneof='destination', - message=io.GcsDestination, - ) - bigquery_destination = proto.Field( - proto.MESSAGE, - number=3, - oneof='destination', - message=io.BigQueryDestination, - ) - predictions_format = proto.Field( - proto.STRING, - number=1, - ) - - class OutputInfo(proto.Message): - r"""Further describes this job's output. Supplements - [output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config]. - - Attributes: - gcs_output_directory (str): - Output only. The full path of the Cloud - Storage directory created, into which the - prediction output is written. - bigquery_output_dataset (str): - Output only. 
The path of the BigQuery dataset created, in - ``bq://projectId.bqDatasetId`` format, into which the - prediction output is written. - """ - - gcs_output_directory = proto.Field( - proto.STRING, - number=1, - oneof='output_location', - ) - bigquery_output_dataset = proto.Field( - proto.STRING, - number=2, - oneof='output_location', - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - model = proto.Field( - proto.STRING, - number=3, - ) - input_config = proto.Field( - proto.MESSAGE, - number=4, - message=InputConfig, - ) - model_parameters = proto.Field( - proto.MESSAGE, - number=5, - message=struct_pb2.Value, - ) - output_config = proto.Field( - proto.MESSAGE, - number=6, - message=OutputConfig, - ) - dedicated_resources = proto.Field( - proto.MESSAGE, - number=7, - message=machine_resources.BatchDedicatedResources, - ) - manual_batch_tuning_parameters = proto.Field( - proto.MESSAGE, - number=8, - message=gca_manual_batch_tuning_parameters.ManualBatchTuningParameters, - ) - generate_explanation = proto.Field( - proto.BOOL, - number=23, - ) - explanation_spec = proto.Field( - proto.MESSAGE, - number=25, - message=explanation.ExplanationSpec, - ) - output_info = proto.Field( - proto.MESSAGE, - number=9, - message=OutputInfo, - ) - state = proto.Field( - proto.ENUM, - number=10, - enum=job_state.JobState, - ) - error = proto.Field( - proto.MESSAGE, - number=11, - message=status_pb2.Status, - ) - partial_failures = proto.RepeatedField( - proto.MESSAGE, - number=12, - message=status_pb2.Status, - ) - resources_consumed = proto.Field( - proto.MESSAGE, - number=13, - message=machine_resources.ResourcesConsumed, - ) - completion_stats = proto.Field( - proto.MESSAGE, - number=14, - message=gca_completion_stats.CompletionStats, - ) - create_time = proto.Field( - proto.MESSAGE, - number=15, - message=timestamp_pb2.Timestamp, - ) - start_time = proto.Field( - proto.MESSAGE, - number=16, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=17, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=18, - message=timestamp_pb2.Timestamp, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=19, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=24, - message=gca_encryption_spec.EncryptionSpec, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/completion_stats.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/completion_stats.py deleted file mode 100644 index 3d8055f95a..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/completion_stats.py +++ /dev/null @@ -1,63 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
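InputConfig and OutputConfig in the BatchPredictionJob message above each place their storage fields in a oneof ('source' and 'destination' respectively), so assigning one member clears the other. A small proto-plus sketch of that behavior, using simplified stand-in messages rather than the real types:

import proto


class GcsSrc(proto.Message):
    uris = proto.RepeatedField(proto.STRING, number=1)


class BqSrc(proto.Message):
    input_uri = proto.Field(proto.STRING, number=1)


class InputConfigSketch(proto.Message):
    # Same shape as BatchPredictionJob.InputConfig: both source fields
    # share the 'source' oneof, and instances_format is field number 1.
    gcs_source = proto.Field(proto.MESSAGE, number=2, oneof='source',
                             message=GcsSrc)
    bigquery_source = proto.Field(proto.MESSAGE, number=3, oneof='source',
                                  message=BqSrc)
    instances_format = proto.Field(proto.STRING, number=1)


cfg = InputConfigSketch(
    instances_format='jsonl',
    gcs_source=GcsSrc(uris=['gs://bucket/instances.jsonl']),
)
cfg.bigquery_source = BqSrc(input_uri='bq://project.dataset.table')
assert not cfg.gcs_source.uris  # setting the other oneof member cleared it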
-#
-import proto  # type: ignore
-
-
-__protobuf__ = proto.module(
-    package='google.cloud.aiplatform.v1beta1',
-    manifest={
-        'CompletionStats',
-    },
-)
-
-
-class CompletionStats(proto.Message):
-    r"""Success and error statistics of processing multiple entities
-    (for example, DataItems or structured data rows) in batch.
-
-    Attributes:
-        successful_count (int):
-            Output only. The number of entities that had
-            been processed successfully.
-        failed_count (int):
-            Output only. The number of entities for which
-            any error was encountered.
-        incomplete_count (int):
-            Output only. In cases when enough errors are
-            encountered, a job, pipeline, or operation may
-            fail as a whole. Below is the number of
-            entities for which the processing had not been
-            finished (either in successful or failed state).
-            Set to -1 if the number is unknown (for example,
-            the operation failed before the total entity
-            number could be collected).
-    """
-
-    successful_count = proto.Field(
-        proto.INT64,
-        number=1,
-    )
-    failed_count = proto.Field(
-        proto.INT64,
-        number=2,
-    )
-    incomplete_count = proto.Field(
-        proto.INT64,
-        number=3,
-    )
-
-
-__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/context.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/context.py
deleted file mode 100644
index 607b44cee0..0000000000
--- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/context.py
+++ /dev/null
@@ -1,134 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import proto  # type: ignore
-
-from google.protobuf import struct_pb2  # type: ignore
-from google.protobuf import timestamp_pb2  # type: ignore
-
-
-__protobuf__ = proto.module(
-    package='google.cloud.aiplatform.v1beta1',
-    manifest={
-        'Context',
-    },
-)
-
-
-class Context(proto.Message):
-    r"""Instance of a general context.
-    Attributes:
-        name (str):
-            Output only. The resource name of the
-            Context.
-        display_name (str):
-            User provided display name of the Context.
-            May be up to 128 Unicode characters.
-        etag (str):
-            An eTag used to perform consistent read-modify-write
-            updates. If not set, a blind "overwrite" update happens.
-        labels (Sequence[google.cloud.aiplatform_v1beta1.types.Context.LabelsEntry]):
-            The labels with user-defined metadata to
-            organize your Contexts.
-            Label keys and values can be no longer than 64
-            characters (Unicode codepoints), can only
-            contain lowercase letters, numeric characters,
-            underscores and dashes. International characters
-            are allowed. No more than 64 user labels can be
-            associated with one Context (System labels are
-            excluded).
-        create_time (google.protobuf.timestamp_pb2.Timestamp):
-            Output only. Timestamp when this Context was
-            created.
-        update_time (google.protobuf.timestamp_pb2.Timestamp):
-            Output only. Timestamp when this Context was
-            last updated.
-        parent_contexts (Sequence[str]):
-            Output only.
A list of resource names of Contexts that are - parents of this Context. A Context may have at most 10 - parent_contexts. - schema_title (str): - The title of the schema describing the - metadata. - Schema title and version is expected to be - registered in earlier Create Schema calls. And - both are used together as unique identifiers to - identify schemas within the local metadata - store. - schema_version (str): - The version of the schema in schema_name to use. - - Schema title and version is expected to be registered in - earlier Create Schema calls. And both are used together as - unique identifiers to identify schemas within the local - metadata store. - metadata (google.protobuf.struct_pb2.Struct): - Properties of the Context. - description (str): - Description of the Context - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - etag = proto.Field( - proto.STRING, - number=8, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=9, - ) - create_time = proto.Field( - proto.MESSAGE, - number=10, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=11, - message=timestamp_pb2.Timestamp, - ) - parent_contexts = proto.RepeatedField( - proto.STRING, - number=12, - ) - schema_title = proto.Field( - proto.STRING, - number=13, - ) - schema_version = proto.Field( - proto.STRING, - number=14, - ) - metadata = proto.Field( - proto.MESSAGE, - number=15, - message=struct_pb2.Struct, - ) - description = proto.Field( - proto.STRING, - number=16, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/custom_job.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/custom_job.py deleted file mode 100644 index 1d7aef0b77..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/custom_job.py +++ /dev/null @@ -1,384 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1beta1.types import io -from google.cloud.aiplatform_v1beta1.types import job_state -from google.cloud.aiplatform_v1beta1.types import machine_resources -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'CustomJob', - 'CustomJobSpec', - 'WorkerPoolSpec', - 'ContainerSpec', - 'PythonPackageSpec', - 'Scheduling', - }, -) - - -class CustomJob(proto.Message): - r"""Represents a job that runs custom workloads such as a Docker - container or a Python package. A CustomJob can have multiple - worker pools and each worker pool can have its own machine and - input spec. 
A CustomJob will be cleaned up once the job enters - terminal state (failed or succeeded). - - Attributes: - name (str): - Output only. Resource name of a CustomJob. - display_name (str): - Required. The display name of the CustomJob. - The name can be up to 128 characters long and - can be consist of any UTF-8 characters. - job_spec (google.cloud.aiplatform_v1beta1.types.CustomJobSpec): - Required. Job spec. - state (google.cloud.aiplatform_v1beta1.types.JobState): - Output only. The detailed state of the job. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the CustomJob was - created. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the CustomJob for the first time - entered the ``JOB_STATE_RUNNING`` state. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the CustomJob entered any of the - following states: ``JOB_STATE_SUCCEEDED``, - ``JOB_STATE_FAILED``, ``JOB_STATE_CANCELLED``. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the CustomJob was most - recently updated. - error (google.rpc.status_pb2.Status): - Output only. Only populated when job's state is - ``JOB_STATE_FAILED`` or ``JOB_STATE_CANCELLED``. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.CustomJob.LabelsEntry]): - The labels with user-defined metadata to - organize CustomJobs. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): - Customer-managed encryption key options for a - CustomJob. If this is set, then all resources - created by the CustomJob will be encrypted with - the provided encryption key. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - job_spec = proto.Field( - proto.MESSAGE, - number=4, - message='CustomJobSpec', - ) - state = proto.Field( - proto.ENUM, - number=5, - enum=job_state.JobState, - ) - create_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - start_time = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=8, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=9, - message=timestamp_pb2.Timestamp, - ) - error = proto.Field( - proto.MESSAGE, - number=10, - message=status_pb2.Status, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=11, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=12, - message=gca_encryption_spec.EncryptionSpec, - ) - - -class CustomJobSpec(proto.Message): - r"""Represents the spec of a CustomJob. - Attributes: - worker_pool_specs (Sequence[google.cloud.aiplatform_v1beta1.types.WorkerPoolSpec]): - Required. The spec of the worker pools - including machine type and Docker image. All - worker pools except the first one are optional - and can be skipped by providing an empty value. - scheduling (google.cloud.aiplatform_v1beta1.types.Scheduling): - Scheduling options for a CustomJob. - service_account (str): - Specifies the service account for workload run-as account. - Users submitting jobs must have act-as permission on this - run-as account. 
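A hedged sketch (not from the deleted sources) of a CustomJob with one container-based worker pool; the project, image, and machine type are placeholders:

    from google.cloud.aiplatform_v1beta1 import types

    pool = types.WorkerPoolSpec(
        machine_spec=types.MachineSpec(machine_type="n1-standard-4"),  # placeholder
        replica_count=1,
        container_spec=types.ContainerSpec(
            image_uri="gcr.io/my-project/trainer:latest",  # placeholder
        ),
    )
    job = types.CustomJob(
        display_name="train-resnet",  # up to 128 UTF-8 characters
        job_spec=types.CustomJobSpec(worker_pool_specs=[pool]),
    )
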
If unspecified, the `AI Platform Custom Code - Service - Agent `__ - for the CustomJob's project is used. - network (str): - The full name of the Compute Engine - `network `__ - to which the Job should be peered. For example, - ``projects/12345/global/networks/myVPC``. - `Format `__ - is of the form - ``projects/{project}/global/networks/{network}``. Where - {project} is a project number, as in ``12345``, and - {network} is a network name. - - Private services access must already be configured for the - network. If left unspecified, the job is not peered with any - network. - base_output_directory (google.cloud.aiplatform_v1beta1.types.GcsDestination): - The Cloud Storage location to store the output of this - CustomJob or HyperparameterTuningJob. For - HyperparameterTuningJob, the baseOutputDirectory of each - child CustomJob backing a Trial is set to a subdirectory of - name [id][google.cloud.aiplatform.v1beta1.Trial.id] under - its parent HyperparameterTuningJob's baseOutputDirectory. - - The following Vertex AI environment variables will be passed - to containers or python modules when this field is set: - - For CustomJob: - - - AIP_MODEL_DIR = ``/model/`` - - AIP_CHECKPOINT_DIR = - ``/checkpoints/`` - - AIP_TENSORBOARD_LOG_DIR = - ``/logs/`` - - For CustomJob backing a Trial of HyperparameterTuningJob: - - - AIP_MODEL_DIR = - ``//model/`` - - AIP_CHECKPOINT_DIR = - ``//checkpoints/`` - - AIP_TENSORBOARD_LOG_DIR = - ``//logs/`` - tensorboard (str): - Optional. The name of a Vertex AI - [Tensorboard][google.cloud.aiplatform.v1beta1.Tensorboard] - resource to which this CustomJob will upload Tensorboard - logs. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - """ - - worker_pool_specs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='WorkerPoolSpec', - ) - scheduling = proto.Field( - proto.MESSAGE, - number=3, - message='Scheduling', - ) - service_account = proto.Field( - proto.STRING, - number=4, - ) - network = proto.Field( - proto.STRING, - number=5, - ) - base_output_directory = proto.Field( - proto.MESSAGE, - number=6, - message=io.GcsDestination, - ) - tensorboard = proto.Field( - proto.STRING, - number=7, - ) - - -class WorkerPoolSpec(proto.Message): - r"""Represents the spec of a worker pool in a job. - Attributes: - container_spec (google.cloud.aiplatform_v1beta1.types.ContainerSpec): - The custom container task. - python_package_spec (google.cloud.aiplatform_v1beta1.types.PythonPackageSpec): - The Python packaged task. - machine_spec (google.cloud.aiplatform_v1beta1.types.MachineSpec): - Optional. Immutable. The specification of a - single machine. - replica_count (int): - Optional. The number of worker replicas to - use for this worker pool. - disk_spec (google.cloud.aiplatform_v1beta1.types.DiskSpec): - Disk spec. - """ - - container_spec = proto.Field( - proto.MESSAGE, - number=6, - oneof='task', - message='ContainerSpec', - ) - python_package_spec = proto.Field( - proto.MESSAGE, - number=7, - oneof='task', - message='PythonPackageSpec', - ) - machine_spec = proto.Field( - proto.MESSAGE, - number=1, - message=machine_resources.MachineSpec, - ) - replica_count = proto.Field( - proto.INT64, - number=2, - ) - disk_spec = proto.Field( - proto.MESSAGE, - number=5, - message=machine_resources.DiskSpec, - ) - - -class ContainerSpec(proto.Message): - r"""The spec of a Container. - Attributes: - image_uri (str): - Required. The URI of a container image in the - Container Registry that is to be run on each - worker replica. 
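For orientation, the environment variables described above can be read inside the training container as below; the <base>/... comments stand in for the placeholders stripped from the docstring and are an assumption:

    import os

    # Populated by Vertex AI when base_output_directory is set on the CustomJobSpec.
    model_dir = os.environ.get("AIP_MODEL_DIR")                  # <base>/model/
    checkpoint_dir = os.environ.get("AIP_CHECKPOINT_DIR")        # <base>/checkpoints/
    tensorboard_dir = os.environ.get("AIP_TENSORBOARD_LOG_DIR")  # <base>/logs/
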
- command (Sequence[str]): - The command to be invoked when the container - is started. It overrides the entrypoint - instruction in Dockerfile when provided. - args (Sequence[str]): - The arguments to be passed when starting the - container. - """ - - image_uri = proto.Field( - proto.STRING, - number=1, - ) - command = proto.RepeatedField( - proto.STRING, - number=2, - ) - args = proto.RepeatedField( - proto.STRING, - number=3, - ) - - -class PythonPackageSpec(proto.Message): - r"""The spec of a Python packaged code. - Attributes: - executor_image_uri (str): - Required. The URI of a container image in Artifact Registry - that will run the provided Python package. Vertex AI - provides a wide range of executor images with pre-installed - packages to meet users' various use cases. See the list of - `pre-built containers for - training `__. - You must use an image from this list. - package_uris (Sequence[str]): - Required. The Google Cloud Storage location - of the Python package files which are the - training program and its dependent packages. The - maximum number of package URIs is 100. - python_module (str): - Required. The Python module name to run after - installing the packages. - args (Sequence[str]): - Command line arguments to be passed to the - Python task. - """ - - executor_image_uri = proto.Field( - proto.STRING, - number=1, - ) - package_uris = proto.RepeatedField( - proto.STRING, - number=2, - ) - python_module = proto.Field( - proto.STRING, - number=3, - ) - args = proto.RepeatedField( - proto.STRING, - number=4, - ) - - -class Scheduling(proto.Message): - r"""All parameters related to queuing and scheduling of custom - jobs. - - Attributes: - timeout (google.protobuf.duration_pb2.Duration): - The maximum job running time. The default is - 7 days. - restart_job_on_worker_restart (bool): - Restarts the entire CustomJob if a worker - gets restarted. This feature can be used by - distributed training jobs that are not resilient - to workers leaving and joining a job. - """ - - timeout = proto.Field( - proto.MESSAGE, - number=1, - message=duration_pb2.Duration, - ) - restart_job_on_worker_restart = proto.Field( - proto.BOOL, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_item.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_item.py deleted file mode 100644 index c638c0e00d..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_item.py +++ /dev/null @@ -1,101 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'DataItem', - }, -) - - -class DataItem(proto.Message): - r"""A piece of data in a Dataset. 
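A hedged sketch combining PythonPackageSpec and Scheduling as defined above; the executor image and bucket paths are illustrative only:

    from google.cloud.aiplatform_v1beta1 import types
    from google.protobuf import duration_pb2

    pkg = types.PythonPackageSpec(
        executor_image_uri="us-docker.pkg.dev/vertex-ai/training/tf-cpu.2-4:latest",  # illustrative pre-built image
        package_uris=["gs://my-bucket/trainer-0.1.tar.gz"],  # at most 100 URIs
        python_module="trainer.task",
    )
    scheduling = types.Scheduling(
        timeout=duration_pb2.Duration(seconds=4 * 3600),  # default is 7 days if unset
        restart_job_on_worker_restart=True,
    )
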
Could be an image, a video, a - document or plain text. - - Attributes: - name (str): - Output only. The resource name of the - DataItem. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this DataItem was - created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this DataItem was - last updated. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.DataItem.LabelsEntry]): - Optional. The labels with user-defined - metadata to organize your DataItems. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. No more than 64 user labels can be - associated with one DataItem(System labels are - excluded). - - See https://goo.gl/xmQnxf for more information - and examples of labels. System reserved label - keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. - payload (google.protobuf.struct_pb2.Value): - Required. The data that the DataItem represents (for - example, an image or a text snippet). The schema of the - payload is stored in the parent Dataset's [metadata - schema's][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri] - dataItemSchemaUri field. - etag (str): - Optional. Used to perform consistent read-modify-write updates. If not set, a blind - "overwrite" update happens. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - create_time = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=3, - ) - payload = proto.Field( - proto.MESSAGE, - number=4, - message=struct_pb2.Value, - ) - etag = proto.Field( - proto.STRING, - number=7, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py deleted file mode 100644 index 27f661c920..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py +++ /dev/null @@ -1,332 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
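A minimal illustrative DataItem, with an invented payload; in practice the payload must conform to the parent Dataset's dataItemSchemaUri:

    from google.cloud.aiplatform_v1beta1 import types
    from google.protobuf import struct_pb2

    item = types.DataItem(
        labels={"split": "train"},  # at most 64 user labels
        payload=struct_pb2.Value(string_value="A text snippet to be labeled."),
    )
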
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1beta1.types import job_state -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from google.type import money_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'DataLabelingJob', - 'ActiveLearningConfig', - 'SampleConfig', - 'TrainingConfig', - }, -) - - -class DataLabelingJob(proto.Message): - r"""DataLabelingJob is used to trigger a human labeling job on - unlabeled data from the following Dataset: - - Attributes: - name (str): - Output only. Resource name of the - DataLabelingJob. - display_name (str): - Required. The user-defined name of the - DataLabelingJob. The name can be up to 128 - characters long and can be consist of any UTF-8 - characters. - Display name of a DataLabelingJob. - datasets (Sequence[str]): - Required. Dataset resource names. Right now we only support - labeling from a single Dataset. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - annotation_labels (Sequence[google.cloud.aiplatform_v1beta1.types.DataLabelingJob.AnnotationLabelsEntry]): - Labels to assign to annotations generated by - this DataLabelingJob. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. See https://goo.gl/xmQnxf for more - information and examples of labels. System - reserved label keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. - labeler_count (int): - Required. Number of labelers to work on each - DataItem. - instruction_uri (str): - Required. The Google Cloud Storage location - of the instruction pdf. This pdf is shared with - labelers, and provides detailed description on - how to label DataItems in Datasets. - inputs_schema_uri (str): - Required. Points to a YAML file stored on - Google Cloud Storage describing the config for a - specific type of DataLabelingJob. The schema - files that can be used here are found in the - https://storage.googleapis.com/google-cloud- - aiplatform bucket in the - /schema/datalabelingjob/inputs/ folder. - inputs (google.protobuf.struct_pb2.Value): - Required. Input config parameters for the - DataLabelingJob. - state (google.cloud.aiplatform_v1beta1.types.JobState): - Output only. The detailed state of the job. - labeling_progress (int): - Output only. Current labeling job progress percentage scaled - in interval [0, 100], indicating the percentage of DataItems - that has been finished. - current_spend (google.type.money_pb2.Money): - Output only. Estimated cost(in US dollars) - that the DataLabelingJob has incurred to date. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - DataLabelingJob was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - DataLabelingJob was updated most recently. - error (google.rpc.status_pb2.Status): - Output only. DataLabelingJob errors. It is only populated - when job's state is ``JOB_STATE_FAILED`` or - ``JOB_STATE_CANCELLED``. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.DataLabelingJob.LabelsEntry]): - The labels with user-defined metadata to organize your - DataLabelingJobs. 
- - Label keys and values can be no longer than 64 characters - (Unicode codepoints), can only contain lowercase letters, - numeric characters, underscores and dashes. International - characters are allowed. - - See https://goo.gl/xmQnxf for more information and examples - of labels. System reserved label keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. Following - system labels exist for each DataLabelingJob: - - - "aiplatform.googleapis.com/schema": output only, its - value is the - [inputs_schema][google.cloud.aiplatform.v1beta1.DataLabelingJob.inputs_schema_uri]'s - title. - specialist_pools (Sequence[str]): - The SpecialistPools' resource names - associated with this job. - encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): - Customer-managed encryption key spec for a - DataLabelingJob. If set, this DataLabelingJob - will be secured by this key. - Note: Annotations created in the DataLabelingJob - are associated with the EncryptionSpec of the - Dataset they are exported to. - active_learning_config (google.cloud.aiplatform_v1beta1.types.ActiveLearningConfig): - Parameters that configure the active learning - pipeline. Active learning will label the data - incrementally via several iterations. For every - iteration, it will select a batch of data based - on the sampling strategy. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - datasets = proto.RepeatedField( - proto.STRING, - number=3, - ) - annotation_labels = proto.MapField( - proto.STRING, - proto.STRING, - number=12, - ) - labeler_count = proto.Field( - proto.INT32, - number=4, - ) - instruction_uri = proto.Field( - proto.STRING, - number=5, - ) - inputs_schema_uri = proto.Field( - proto.STRING, - number=6, - ) - inputs = proto.Field( - proto.MESSAGE, - number=7, - message=struct_pb2.Value, - ) - state = proto.Field( - proto.ENUM, - number=8, - enum=job_state.JobState, - ) - labeling_progress = proto.Field( - proto.INT32, - number=13, - ) - current_spend = proto.Field( - proto.MESSAGE, - number=14, - message=money_pb2.Money, - ) - create_time = proto.Field( - proto.MESSAGE, - number=9, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=10, - message=timestamp_pb2.Timestamp, - ) - error = proto.Field( - proto.MESSAGE, - number=22, - message=status_pb2.Status, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=11, - ) - specialist_pools = proto.RepeatedField( - proto.STRING, - number=16, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=20, - message=gca_encryption_spec.EncryptionSpec, - ) - active_learning_config = proto.Field( - proto.MESSAGE, - number=21, - message='ActiveLearningConfig', - ) - - -class ActiveLearningConfig(proto.Message): - r"""Parameters that configure the active learning pipeline. - Active learning will label the data incrementally by several - iterations. For every iteration, it will select a batch of data - based on the sampling strategy. - - Attributes: - max_data_item_count (int): - Max number of human labeled DataItems. - max_data_item_percentage (int): - Max percent of total DataItems for human - labeling. - sample_config (google.cloud.aiplatform_v1beta1.types.SampleConfig): - Active learning data sampling config. For - every active learning labeling iteration, it - will select a batch of data based on the - sampling strategy. 
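A hedged sketch of the DataLabelingJob fields above; every resource name and URI is a placeholder, and the required inputs value (which must match inputs_schema_uri) is elided:

    from google.cloud.aiplatform_v1beta1 import types

    labeling_job = types.DataLabelingJob(
        display_name="label-flowers",
        datasets=["projects/my-project/locations/us-central1/datasets/123"],  # exactly one Dataset is supported today
        labeler_count=3,
        instruction_uri="gs://my-bucket/instructions.pdf",  # placeholder
        inputs_schema_uri="gs://google-cloud-aiplatform/schema/datalabelingjob/inputs/...",  # placeholder file name
        # inputs=<struct_pb2.Value matching the schema> is also required.
    )
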
- training_config (google.cloud.aiplatform_v1beta1.types.TrainingConfig): - CMLE training config. For every active - learning labeling iteration, system will train a - machine learning model on CMLE. The trained - model will be used by data sampling algorithm to - select DataItems. - """ - - max_data_item_count = proto.Field( - proto.INT64, - number=1, - oneof='human_labeling_budget', - ) - max_data_item_percentage = proto.Field( - proto.INT32, - number=2, - oneof='human_labeling_budget', - ) - sample_config = proto.Field( - proto.MESSAGE, - number=3, - message='SampleConfig', - ) - training_config = proto.Field( - proto.MESSAGE, - number=4, - message='TrainingConfig', - ) - - -class SampleConfig(proto.Message): - r"""Active learning data sampling config. For every active - learning labeling iteration, it will select a batch of data - based on the sampling strategy. - - Attributes: - initial_batch_sample_percentage (int): - The percentage of data needed to be labeled - in the first batch. - following_batch_sample_percentage (int): - The percentage of data needed to be labeled - in each following batch (except the first - batch). - sample_strategy (google.cloud.aiplatform_v1beta1.types.SampleConfig.SampleStrategy): - Field to choose sampling strategy. Sampling - strategy will decide which data should be - selected for human labeling in every batch. - """ - class SampleStrategy(proto.Enum): - r"""Sample strategy decides which subset of DataItems should be - selected for human labeling in every batch. - """ - SAMPLE_STRATEGY_UNSPECIFIED = 0 - UNCERTAINTY = 1 - - initial_batch_sample_percentage = proto.Field( - proto.INT32, - number=1, - oneof='initial_batch_sample_size', - ) - following_batch_sample_percentage = proto.Field( - proto.INT32, - number=3, - oneof='following_batch_sample_size', - ) - sample_strategy = proto.Field( - proto.ENUM, - number=5, - enum=SampleStrategy, - ) - - -class TrainingConfig(proto.Message): - r"""CMLE training config. For every active learning labeling - iteration, system will train a machine learning model on CMLE. - The trained model will be used by data sampling algorithm to - select DataItems. - - Attributes: - timeout_training_milli_hours (int): - The timeout hours for the CMLE training job, - expressed in milli hours i.e. 1,000 value in - this field means 1 hour. - """ - - timeout_training_milli_hours = proto.Field( - proto.INT64, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset.py deleted file mode 100644 index a731843354..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset.py +++ /dev/null @@ -1,220 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
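An illustrative sketch of the active-learning messages above, showing the human_labeling_budget oneof and the milli-hours unit; all numbers are invented:

    from google.cloud.aiplatform_v1beta1 import types

    al_config = types.ActiveLearningConfig(
        max_data_item_count=10000,  # oneof human_labeling_budget: count OR percentage
        sample_config=types.SampleConfig(
            initial_batch_sample_percentage=10,
            following_batch_sample_percentage=5,
            sample_strategy=types.SampleConfig.SampleStrategy.UNCERTAINTY,
        ),
        training_config=types.TrainingConfig(
            timeout_training_milli_hours=2000,  # 2,000 milli-hours == 2 hours
        ),
    )
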
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1beta1.types import io -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'Dataset', - 'ImportDataConfig', - 'ExportDataConfig', - }, -) - - -class Dataset(proto.Message): - r"""A collection of DataItems and Annotations on them. - Attributes: - name (str): - Output only. The resource name of the - Dataset. - display_name (str): - Required. The user-defined name of the - Dataset. The name can be up to 128 characters - long and can be consist of any UTF-8 characters. - metadata_schema_uri (str): - Required. Points to a YAML file stored on - Google Cloud Storage describing additional - information about the Dataset. The schema is - defined as an OpenAPI 3.0.2 Schema Object. The - schema files that can be used here are found in - gs://google-cloud- - aiplatform/schema/dataset/metadata/. - metadata (google.protobuf.struct_pb2.Value): - Required. Additional information about the - Dataset. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Dataset was - created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Dataset was - last updated. - etag (str): - Used to perform consistent read-modify-write - updates. If not set, a blind "overwrite" update - happens. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Dataset.LabelsEntry]): - The labels with user-defined metadata to organize your - Datasets. - - Label keys and values can be no longer than 64 characters - (Unicode codepoints), can only contain lowercase letters, - numeric characters, underscores and dashes. International - characters are allowed. No more than 64 user labels can be - associated with one Dataset (System labels are excluded). - - See https://goo.gl/xmQnxf for more information and examples - of labels. System reserved label keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. Following - system labels exist for each Dataset: - - - "aiplatform.googleapis.com/dataset_metadata_schema": - output only, its value is the - [metadata_schema's][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri] - title. - encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): - Customer-managed encryption key spec for a - Dataset. If set, this Dataset and all sub- - resources of this Dataset will be secured by - this key. 
- """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - metadata_schema_uri = proto.Field( - proto.STRING, - number=3, - ) - metadata = proto.Field( - proto.MESSAGE, - number=8, - message=struct_pb2.Value, - ) - create_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - etag = proto.Field( - proto.STRING, - number=6, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=7, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=11, - message=gca_encryption_spec.EncryptionSpec, - ) - - -class ImportDataConfig(proto.Message): - r"""Describes the location from where we import data into a - Dataset, together with the labels that will be applied to the - DataItems and the Annotations. - - Attributes: - gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): - The Google Cloud Storage location for the - input content. - data_item_labels (Sequence[google.cloud.aiplatform_v1beta1.types.ImportDataConfig.DataItemLabelsEntry]): - Labels that will be applied to newly imported DataItems. If - an identical DataItem as one being imported already exists - in the Dataset, then these labels will be appended to these - of the already existing one, and if labels with identical - key is imported before, the old label value will be - overwritten. If two DataItems are identical in the same - import data operation, the labels will be combined and if - key collision happens in this case, one of the values will - be picked randomly. Two DataItems are considered identical - if their content bytes are identical (e.g. image bytes or - pdf bytes). These labels will be overridden by Annotation - labels specified inside index file referenced by - [import_schema_uri][google.cloud.aiplatform.v1beta1.ImportDataConfig.import_schema_uri], - e.g. jsonl file. - import_schema_uri (str): - Required. Points to a YAML file stored on Google Cloud - Storage describing the import format. Validation will be - done against the schema. The schema is defined as an - `OpenAPI 3.0.2 Schema - Object `__. - """ - - gcs_source = proto.Field( - proto.MESSAGE, - number=1, - oneof='source', - message=io.GcsSource, - ) - data_item_labels = proto.MapField( - proto.STRING, - proto.STRING, - number=2, - ) - import_schema_uri = proto.Field( - proto.STRING, - number=4, - ) - - -class ExportDataConfig(proto.Message): - r"""Describes what part of the Dataset is to be exported, the - destination of the export and how to export. - - Attributes: - gcs_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination): - The Google Cloud Storage location where the output is to be - written to. In the given directory a new directory will be - created with name: - ``export-data--`` - where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 - format. All export output will be written into that - directory. Inside that directory, annotations with the same - schema will be grouped into sub directories which are named - with the corresponding annotations' schema title. Inside - these sub directories, a schema.yaml will be created to - describe the output format. - annotations_filter (str): - A filter on Annotations of the Dataset. Only Annotations on - to-be-exported DataItems(specified by [data_items_filter][]) - that match this filter will be exported. 
The filter syntax - is the same as in - [ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. - """ - - gcs_destination = proto.Field( - proto.MESSAGE, - number=1, - oneof='destination', - message=io.GcsDestination, - ) - annotations_filter = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset_service.py deleted file mode 100644 index fa0a4f59c2..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset_service.py +++ /dev/null @@ -1,542 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import annotation -from google.cloud.aiplatform_v1beta1.types import data_item -from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset -from google.cloud.aiplatform_v1beta1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'CreateDatasetRequest', - 'CreateDatasetOperationMetadata', - 'GetDatasetRequest', - 'UpdateDatasetRequest', - 'ListDatasetsRequest', - 'ListDatasetsResponse', - 'DeleteDatasetRequest', - 'ImportDataRequest', - 'ImportDataResponse', - 'ImportDataOperationMetadata', - 'ExportDataRequest', - 'ExportDataResponse', - 'ExportDataOperationMetadata', - 'ListDataItemsRequest', - 'ListDataItemsResponse', - 'GetAnnotationSpecRequest', - 'ListAnnotationsRequest', - 'ListAnnotationsResponse', - }, -) - - -class CreateDatasetRequest(proto.Message): - r"""Request message for - [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - Dataset in. Format: - ``projects/{project}/locations/{location}`` - dataset (google.cloud.aiplatform_v1beta1.types.Dataset): - Required. The Dataset to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - dataset = proto.Field( - proto.MESSAGE, - number=2, - message=gca_dataset.Dataset, - ) - - -class CreateDatasetOperationMetadata(proto.Message): - r"""Runtime operation information for - [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The operation generic information. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class GetDatasetRequest(proto.Message): - r"""Request message for - [DatasetService.GetDataset][google.cloud.aiplatform.v1beta1.DatasetService.GetDataset]. - - Attributes: - name (str): - Required. The name of the Dataset resource. 
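These request messages are normally sent through the generated client; a hedged sketch assuming default credentials and placeholder names, with the long-running CreateDataset operation awaited:

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.DatasetServiceClient()
    operation = client.create_dataset(
        parent="projects/my-project/locations/us-central1",  # placeholder
        dataset=aiplatform_v1beta1.Dataset(display_name="flowers"),  # schema fields omitted for brevity
    )
    created = operation.result()  # blocks until the LRO completes
    print(created.name)
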
- read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class UpdateDatasetRequest(proto.Message): - r"""Request message for - [DatasetService.UpdateDataset][google.cloud.aiplatform.v1beta1.DatasetService.UpdateDataset]. - - Attributes: - dataset (google.cloud.aiplatform_v1beta1.types.Dataset): - Required. The Dataset which replaces the - resource on the server. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the resource. For the - ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - Updatable fields: - - - ``display_name`` - - ``description`` - - ``labels`` - """ - - dataset = proto.Field( - proto.MESSAGE, - number=1, - message=gca_dataset.Dataset, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class ListDatasetsRequest(proto.Message): - r"""Request message for - [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. - - Attributes: - parent (str): - Required. The name of the Dataset's parent resource. Format: - ``projects/{project}/locations/{location}`` - filter (str): - An expression for filtering the results of the request. For - field names both snake_case and camelCase are supported. - - - ``display_name``: supports = and != - - ``metadata_schema_uri``: supports = and != - - ``labels`` supports general map functions that is: - - - ``labels.key=value`` - key:value equality - - \`labels.key:\* or labels:key - key existence - - A key including a space must be quoted. - ``labels."a key"``. - - Some examples: - - - ``displayName="myDisplayName"`` - - ``labels.myKey="myValue"`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - order_by (str): - A comma-separated list of fields to order by, sorted in - ascending order. Use "desc" after a field name for - descending. Supported fields: - - - ``display_name`` - - ``create_time`` - - ``update_time`` - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - order_by = proto.Field( - proto.STRING, - number=6, - ) - - -class ListDatasetsResponse(proto.Message): - r"""Response message for - [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. - - Attributes: - datasets (Sequence[google.cloud.aiplatform_v1beta1.types.Dataset]): - A list of Datasets that matches the specified - filter in the request. - next_page_token (str): - The standard List next-page token. - """ - - @property - def raw_page(self): - return self - - datasets = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_dataset.Dataset, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteDatasetRequest(proto.Message): - r"""Request message for - [DatasetService.DeleteDataset][google.cloud.aiplatform.v1beta1.DatasetService.DeleteDataset]. - - Attributes: - name (str): - Required. 
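An illustrative use of the ListDatasets filter and ordering grammar documented above; the generated pager consumes page tokens automatically, and names are placeholders:

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.DatasetServiceClient()
    request = aiplatform_v1beta1.ListDatasetsRequest(
        parent="projects/my-project/locations/us-central1",  # placeholder
        filter='labels.myKey="myValue"',  # example drawn from the docstring
        order_by="create_time desc",
        page_size=50,
    )
    for ds in client.list_datasets(request=request):
        print(ds.display_name)
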
The resource name of the Dataset to delete. - Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ImportDataRequest(proto.Message): - r"""Request message for - [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. - - Attributes: - name (str): - Required. The name of the Dataset resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - import_configs (Sequence[google.cloud.aiplatform_v1beta1.types.ImportDataConfig]): - Required. The desired input locations. The - contents of all input locations will be imported - in one batch. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - import_configs = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=gca_dataset.ImportDataConfig, - ) - - -class ImportDataResponse(proto.Message): - r"""Response message for - [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. - """ - - -class ImportDataOperationMetadata(proto.Message): - r"""Runtime operation information for - [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The common part of the operation metadata. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class ExportDataRequest(proto.Message): - r"""Request message for - [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. - - Attributes: - name (str): - Required. The name of the Dataset resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - export_config (google.cloud.aiplatform_v1beta1.types.ExportDataConfig): - Required. The desired output location. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - export_config = proto.Field( - proto.MESSAGE, - number=2, - message=gca_dataset.ExportDataConfig, - ) - - -class ExportDataResponse(proto.Message): - r"""Response message for - [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. - - Attributes: - exported_files (Sequence[str]): - All of the files that are exported in this - export operation. - """ - - exported_files = proto.RepeatedField( - proto.STRING, - number=1, - ) - - -class ExportDataOperationMetadata(proto.Message): - r"""Runtime operation information for - [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The common part of the operation metadata. - gcs_output_directory (str): - A Google Cloud Storage directory which path - ends with '/'. The exported data is stored in - the directory. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - gcs_output_directory = proto.Field( - proto.STRING, - number=2, - ) - - -class ListDataItemsRequest(proto.Message): - r"""Request message for - [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. - - Attributes: - parent (str): - Required. The resource name of the Dataset to list DataItems - from. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - filter (str): - The standard list filter. - page_size (int): - The standard list page size. 
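A hedged ImportData sketch; as noted above, all configs are imported in one batch, and since ImportDataResponse is empty, completion of the operation is itself the signal:

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.DatasetServiceClient()
    operation = client.import_data(
        name="projects/my-project/locations/us-central1/datasets/123",  # placeholder
        import_configs=[
            aiplatform_v1beta1.ImportDataConfig(
                gcs_source=aiplatform_v1beta1.GcsSource(uris=["gs://my-bucket/import.jsonl"]),  # placeholder
                import_schema_uri="gs://google-cloud-aiplatform/schema/dataset/ioformat/...",  # assumed
            ),
        ],
    )
    operation.result()
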
- page_token (str): - The standard list page token. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - order_by (str): - A comma-separated list of fields to order by, - sorted in ascending order. Use "desc" after a - field name for descending. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - order_by = proto.Field( - proto.STRING, - number=6, - ) - - -class ListDataItemsResponse(proto.Message): - r"""Response message for - [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. - - Attributes: - data_items (Sequence[google.cloud.aiplatform_v1beta1.types.DataItem]): - A list of DataItems that matches the - specified filter in the request. - next_page_token (str): - The standard List next-page token. - """ - - @property - def raw_page(self): - return self - - data_items = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=data_item.DataItem, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class GetAnnotationSpecRequest(proto.Message): - r"""Request message for - [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1beta1.DatasetService.GetAnnotationSpec]. - - Attributes: - name (str): - Required. The name of the AnnotationSpec resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}`` - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class ListAnnotationsRequest(proto.Message): - r"""Request message for - [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. - - Attributes: - parent (str): - Required. The resource name of the DataItem to list - Annotations from. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}`` - filter (str): - The standard list filter. - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - order_by (str): - A comma-separated list of fields to order by, - sorted in ascending order. Use "desc" after a - field name for descending. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - order_by = proto.Field( - proto.STRING, - number=6, - ) - - -class ListAnnotationsResponse(proto.Message): - r"""Response message for - [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. - - Attributes: - annotations (Sequence[google.cloud.aiplatform_v1beta1.types.Annotation]): - A list of Annotations that matches the - specified filter in the request. - next_page_token (str): - The standard List next-page token. 
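The List* responses above all carry next_page_token and a raw_page property; the generated pagers hide both, as in this placeholder-named sketch:

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.DatasetServiceClient()
    pager = client.list_annotations(
        parent="projects/my-project/locations/us-central1/datasets/123/dataItems/456",  # placeholder
        order_by="create_time desc",
    )
    for annotation_msg in pager:  # next_page_token is handled by the pager
        print(annotation_msg.name)
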
- """ - - @property - def raw_page(self): - return self - - annotations = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=annotation.Annotation, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_index_ref.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_index_ref.py deleted file mode 100644 index 234bfc9b59..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_index_ref.py +++ /dev/null @@ -1,48 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'DeployedIndexRef', - }, -) - - -class DeployedIndexRef(proto.Message): - r"""Points to a DeployedIndex. - Attributes: - index_endpoint (str): - Immutable. A resource name of the - IndexEndpoint. - deployed_index_id (str): - Immutable. The ID of the DeployedIndex in the - above IndexEndpoint. - """ - - index_endpoint = proto.Field( - proto.STRING, - number=1, - ) - deployed_index_id = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_model_ref.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_model_ref.py deleted file mode 100644 index 2fb07a25bf..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_model_ref.py +++ /dev/null @@ -1,47 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'DeployedModelRef', - }, -) - - -class DeployedModelRef(proto.Message): - r"""Points to a DeployedModel. - Attributes: - endpoint (str): - Immutable. A resource name of an Endpoint. - deployed_model_id (str): - Immutable. An ID of a DeployedModel in the - above Endpoint. 
- """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - deployed_model_id = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/encryption_spec.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/encryption_spec.py deleted file mode 100644 index ad7e6df830..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/encryption_spec.py +++ /dev/null @@ -1,47 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'EncryptionSpec', - }, -) - - -class EncryptionSpec(proto.Message): - r"""Represents a customer-managed encryption key spec that can be - applied to a top-level resource. - - Attributes: - kms_key_name (str): - Required. The Cloud KMS resource identifier of the customer - managed encryption key used to protect a resource. Has the - form: - ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. - The key needs to be in the same region as where the compute - resource is created. - """ - - kms_key_name = proto.Field( - proto.STRING, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint.py deleted file mode 100644 index c555d5e8e4..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint.py +++ /dev/null @@ -1,259 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1beta1.types import explanation -from google.cloud.aiplatform_v1beta1.types import machine_resources -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'Endpoint', - 'DeployedModel', - }, -) - - -class Endpoint(proto.Message): - r"""Models are deployed into it, and afterwards Endpoint is - called to obtain predictions and explanations. - - Attributes: - name (str): - Output only. The resource name of the - Endpoint. - display_name (str): - Required. 
The display name of the Endpoint. - The name can be up to 128 characters long and - can be consist of any UTF-8 characters. - description (str): - The description of the Endpoint. - deployed_models (Sequence[google.cloud.aiplatform_v1beta1.types.DeployedModel]): - Output only. The models deployed in this Endpoint. To add or - remove DeployedModels use - [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel] - and - [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel] - respectively. - traffic_split (Sequence[google.cloud.aiplatform_v1beta1.types.Endpoint.TrafficSplitEntry]): - A map from a DeployedModel's ID to the - percentage of this Endpoint's traffic that - should be forwarded to that DeployedModel. - If a DeployedModel's ID is not listed in this - map, then it receives no traffic. - - The traffic percentage values must add up to - 100, or map must be empty if the Endpoint is to - not accept any traffic at a moment. - etag (str): - Used to perform consistent read-modify-write - updates. If not set, a blind "overwrite" update - happens. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Endpoint.LabelsEntry]): - The labels with user-defined metadata to - organize your Endpoints. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Endpoint was - created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Endpoint was - last updated. - encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): - Customer-managed encryption key spec for an - Endpoint. If set, this Endpoint and all sub- - resources of this Endpoint will be secured by - this key. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - description = proto.Field( - proto.STRING, - number=3, - ) - deployed_models = proto.RepeatedField( - proto.MESSAGE, - number=4, - message='DeployedModel', - ) - traffic_split = proto.MapField( - proto.STRING, - proto.INT32, - number=5, - ) - etag = proto.Field( - proto.STRING, - number=6, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=7, - ) - create_time = proto.Field( - proto.MESSAGE, - number=8, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=9, - message=timestamp_pb2.Timestamp, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=10, - message=gca_encryption_spec.EncryptionSpec, - ) - - -class DeployedModel(proto.Message): - r"""A deployment of a Model. Endpoints contain one or more - DeployedModels. - - Attributes: - dedicated_resources (google.cloud.aiplatform_v1beta1.types.DedicatedResources): - A description of resources that are dedicated - to the DeployedModel, and that need a higher - degree of manual configuration. - automatic_resources (google.cloud.aiplatform_v1beta1.types.AutomaticResources): - A description of resources that to large - degree are decided by AI Platform, and require - only a modest additional configuration. - id (str): - Output only. The ID of the DeployedModel. - model (str): - Required. The name of the Model that this is - the deployment of. 
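A minimal Endpoint sketch; traffic_split maps an existing DeployedModel ID (invented here) to a percentage, and the values must sum to 100:

    from google.cloud.aiplatform_v1beta1 import types

    endpoint = types.Endpoint(
        display_name="flowers-endpoint",
        traffic_split={"1234567890": 100},  # DeployedModel ID -> percent, summing to 100
        labels={"env": "dev"},
    )
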
Note that the Model may be in - a different location than the DeployedModel's - Endpoint. - display_name (str): - The display name of the DeployedModel. If not provided upon - creation, the Model's display_name is used. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when the DeployedModel - was created. - explanation_spec (google.cloud.aiplatform_v1beta1.types.ExplanationSpec): - Explanation configuration for this DeployedModel. - - When deploying a Model using - [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel], - this value overrides the value of - [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec]. - All fields of - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] - are optional in the request. If a field of - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] - is not populated, the value of the same field of - [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] - is inherited. If the corresponding - [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] - is not populated, all fields of the - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] - will be used for the explanation configuration. - service_account (str): - The service account that the DeployedModel's container runs - as. Specify the email address of the service account. If - this service account is not specified, the container runs as - a service account that doesn't have access to the resource - project. - - Users deploying the Model must have the - ``iam.serviceAccounts.actAs`` permission on this service - account. - enable_container_logging (bool): - If true, the container of the DeployedModel instances will - send ``stderr`` and ``stdout`` streams to Stackdriver - Logging. - - Only supported for custom-trained Models and AutoML Tabular - Models. - enable_access_logging (bool): - These logs are like standard server access - logs, containing information like timestamp and - latency for each prediction request. - Note that Stackdriver logs may incur a cost, - especially if your project receives prediction - requests at a high queries per second rate - (QPS). Estimate your costs before enabling this - option. 
- """ - - dedicated_resources = proto.Field( - proto.MESSAGE, - number=7, - oneof='prediction_resources', - message=machine_resources.DedicatedResources, - ) - automatic_resources = proto.Field( - proto.MESSAGE, - number=8, - oneof='prediction_resources', - message=machine_resources.AutomaticResources, - ) - id = proto.Field( - proto.STRING, - number=1, - ) - model = proto.Field( - proto.STRING, - number=2, - ) - display_name = proto.Field( - proto.STRING, - number=3, - ) - create_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - explanation_spec = proto.Field( - proto.MESSAGE, - number=9, - message=explanation.ExplanationSpec, - ) - service_account = proto.Field( - proto.STRING, - number=11, - ) - enable_container_logging = proto.Field( - proto.BOOL, - number=12, - ) - enable_access_logging = proto.Field( - proto.BOOL, - number=13, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint_service.py deleted file mode 100644 index 674662697d..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint_service.py +++ /dev/null @@ -1,380 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint -from google.cloud.aiplatform_v1beta1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'CreateEndpointRequest', - 'CreateEndpointOperationMetadata', - 'GetEndpointRequest', - 'ListEndpointsRequest', - 'ListEndpointsResponse', - 'UpdateEndpointRequest', - 'DeleteEndpointRequest', - 'DeployModelRequest', - 'DeployModelResponse', - 'DeployModelOperationMetadata', - 'UndeployModelRequest', - 'UndeployModelResponse', - 'UndeployModelOperationMetadata', - }, -) - - -class CreateEndpointRequest(proto.Message): - r"""Request message for - [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - Endpoint in. Format: - ``projects/{project}/locations/{location}`` - endpoint (google.cloud.aiplatform_v1beta1.types.Endpoint): - Required. The Endpoint to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - endpoint = proto.Field( - proto.MESSAGE, - number=2, - message=gca_endpoint.Endpoint, - ) - - -class CreateEndpointOperationMetadata(proto.Message): - r"""Runtime operation information for - [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The operation generic information. 
- """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class GetEndpointRequest(proto.Message): - r"""Request message for - [EndpointService.GetEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.GetEndpoint] - - Attributes: - name (str): - Required. The name of the Endpoint resource. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListEndpointsRequest(proto.Message): - r"""Request message for - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. - - Attributes: - parent (str): - Required. The resource name of the Location from which to - list the Endpoints. Format: - ``projects/{project}/locations/{location}`` - filter (str): - Optional. An expression for filtering the results of the - request. For field names both snake_case and camelCase are - supported. - - - ``endpoint`` supports = and !=. ``endpoint`` represents - the Endpoint ID, i.e. the last segment of the Endpoint's - [resource - name][google.cloud.aiplatform.v1beta1.Endpoint.name]. - - ``display_name`` supports = and, != - - ``labels`` supports general map functions that is: - - - ``labels.key=value`` - key:value equality - - \`labels.key:\* or labels:key - key existence - - A key including a space must be quoted. - ``labels."a key"``. - - Some examples: - - - ``endpoint=1`` - - ``displayName="myDisplayName"`` - - ``labels.myKey="myValue"`` - page_size (int): - Optional. The standard list page size. - page_token (str): - Optional. The standard list page token. Typically obtained - via - [ListEndpointsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListEndpointsResponse.next_page_token] - of the previous - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Optional. Mask specifying which fields to - read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListEndpointsResponse(proto.Message): - r"""Response message for - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. - - Attributes: - endpoints (Sequence[google.cloud.aiplatform_v1beta1.types.Endpoint]): - List of Endpoints in the requested page. - next_page_token (str): - A token to retrieve the next page of results. Pass to - [ListEndpointsRequest.page_token][google.cloud.aiplatform.v1beta1.ListEndpointsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - endpoints = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_endpoint.Endpoint, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateEndpointRequest(proto.Message): - r"""Request message for - [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. - - Attributes: - endpoint (google.cloud.aiplatform_v1beta1.types.Endpoint): - Required. The Endpoint which replaces the - resource on the server. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the resource. 
See - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - """ - - endpoint = proto.Field( - proto.MESSAGE, - number=1, - message=gca_endpoint.Endpoint, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class DeleteEndpointRequest(proto.Message): - r"""Request message for - [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeleteEndpoint]. - - Attributes: - name (str): - Required. The name of the Endpoint resource to be deleted. - Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class DeployModelRequest(proto.Message): - r"""Request message for - [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. - - Attributes: - endpoint (str): - Required. The name of the Endpoint resource into which to - deploy a Model. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - deployed_model (google.cloud.aiplatform_v1beta1.types.DeployedModel): - Required. The DeployedModel to be created within the - Endpoint. Note that - [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] - must be updated for the DeployedModel to start receiving - traffic, either as part of this call, or via - [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. - traffic_split (Sequence[google.cloud.aiplatform_v1beta1.types.DeployModelRequest.TrafficSplitEntry]): - A map from a DeployedModel's ID to the percentage of this - Endpoint's traffic that should be forwarded to that - DeployedModel. - - If this field is non-empty, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] - will be overwritten with it. To refer to the ID of the Model - being deployed, a "0" should be used; the actual - ID of the new DeployedModel will be filled in its place by - this method. The traffic percentage values must add up to - 100. - - If this field is empty, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] - is not updated. - """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - deployed_model = proto.Field( - proto.MESSAGE, - number=2, - message=gca_endpoint.DeployedModel, - ) - traffic_split = proto.MapField( - proto.STRING, - proto.INT32, - number=3, - ) - - -class DeployModelResponse(proto.Message): - r"""Response message for - [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. - - Attributes: - deployed_model (google.cloud.aiplatform_v1beta1.types.DeployedModel): - The DeployedModel that had been deployed in - the Endpoint. - """ - - deployed_model = proto.Field( - proto.MESSAGE, - number=1, - message=gca_endpoint.DeployedModel, - ) - - -class DeployModelOperationMetadata(proto.Message): - r"""Runtime operation information for - [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The operation generic information. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class UndeployModelRequest(proto.Message): - r"""Request message for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. - - Attributes: - endpoint (str): - Required.
The name of the Endpoint resource from which to - undeploy a Model. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - deployed_model_id (str): - Required. The ID of the DeployedModel to be - undeployed from the Endpoint. - traffic_split (Sequence[google.cloud.aiplatform_v1beta1.types.UndeployModelRequest.TrafficSplitEntry]): - If this field is provided, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] - will be overwritten with it. If the last DeployedModel is being - undeployed from the Endpoint, the [Endpoint.traffic_split] - will always end up empty when this call returns. A - DeployedModel will be successfully undeployed only if it - doesn't have any traffic assigned to it when this method - executes, or if this field unassigns any traffic from it. - """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - deployed_model_id = proto.Field( - proto.STRING, - number=2, - ) - traffic_split = proto.MapField( - proto.STRING, - proto.INT32, - number=3, - ) - - -class UndeployModelResponse(proto.Message): - r"""Response message for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. - """ - - -class UndeployModelOperationMetadata(proto.Message): - r"""Runtime operation information for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The operation generic information. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/entity_type.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/entity_type.py deleted file mode 100644 index 550365e621..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/entity_type.py +++ /dev/null @@ -1,116 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'EntityType', - }, -) - - -class EntityType(proto.Message): - r"""An entity type is a type of object in a system that needs to - be modeled and about which information needs to be stored. For - example, driver is an entity type, and driver0 is an instance of - the entity type driver. - - Attributes: - name (str): - Immutable. Name of the EntityType. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - The last part entity_type is assigned by the client.
The - entity_type can be up to 64 characters long and can consist - only of ASCII Latin letters A-Z and a-z, underscores (_), - and ASCII digits 0-9, and must start with a letter. The value will - be unique given a featurestore. - description (str): - Optional. Description of the EntityType. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this EntityType - was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this EntityType - was most recently updated. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.EntityType.LabelsEntry]): - Optional. The labels with user-defined - metadata to organize your EntityTypes. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - on and examples of labels. No more than 64 user - labels can be associated with one EntityType - (System labels are excluded). - System reserved label keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. - etag (str): - Optional. Used to perform consistent - read-modify-write updates. If not set, a blind - "overwrite" update happens. - monitoring_config (google.cloud.aiplatform_v1beta1.types.FeaturestoreMonitoringConfig): - Optional. The default monitoring configuration for all - Features under this EntityType. - - If this is populated with - [FeaturestoreMonitoringConfig.monitoring_interval] - specified, snapshot analysis monitoring is enabled. - Otherwise, snapshot analysis monitoring is disabled. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - description = proto.Field( - proto.STRING, - number=2, - ) - create_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=6, - ) - etag = proto.Field( - proto.STRING, - number=7, - ) - monitoring_config = proto.Field( - proto.MESSAGE, - number=8, - message=featurestore_monitoring.FeaturestoreMonitoringConfig, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/env_var.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/env_var.py deleted file mode 100644 index 2775473b9e..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/env_var.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'EnvVar', - }, -) - - -class EnvVar(proto.Message): - r"""Represents an environment variable present in a Container or - Python Module.
- - Attributes: - name (str): - Required. Name of the environment variable. - Must be a valid C identifier. - value (str): - Required. Variables that reference a $(VAR_NAME) are - expanded using the previously defined environment variables in - the container and any service environment variables. If a - variable cannot be resolved, the reference in the input - string will be unchanged. The $(VAR_NAME) syntax can be - escaped with a double $$, i.e. $$(VAR_NAME). Escaped - references will never be expanded, regardless of whether the - variable exists or not. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - value = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/event.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/event.py deleted file mode 100644 index ac1f78d44a..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/event.py +++ /dev/null @@ -1,93 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'Event', - }, -) - - -class Event(proto.Message): - r"""An edge describing the relationship between an Artifact and - an Execution in a lineage graph. - - Attributes: - artifact (str): - Required. The relative resource name of the - Artifact in the Event. - execution (str): - Output only. The relative resource name of - the Execution in the Event. - event_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time the Event occurred. - type_ (google.cloud.aiplatform_v1beta1.types.Event.Type): - Required. The type of the Event. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Event.LabelsEntry]): - The labels with user-defined metadata to - annotate Events. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. No more than 64 user labels can be - associated with one Event (System labels are - excluded). - - See https://goo.gl/xmQnxf for more information - and examples of labels. System reserved label - keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. - """ - class Type(proto.Enum): - r"""Describes whether an Event's Artifact is the Execution's - input or output.
- """ - TYPE_UNSPECIFIED = 0 - INPUT = 1 - OUTPUT = 2 - - artifact = proto.Field( - proto.STRING, - number=1, - ) - execution = proto.Field( - proto.STRING, - number=2, - ) - event_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - type_ = proto.Field( - proto.ENUM, - number=4, - enum=Type, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=5, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/execution.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/execution.py deleted file mode 100644 index 2b2e531a85..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/execution.py +++ /dev/null @@ -1,145 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'Execution', - }, -) - - -class Execution(proto.Message): - r"""Instance of a general execution. - Attributes: - name (str): - Output only. The resource name of the - Execution. - display_name (str): - User provided display name of the Execution. - May be up to 128 Unicode characters. - state (google.cloud.aiplatform_v1beta1.types.Execution.State): - The state of this Execution. This is a - property of the Execution, and does not imply or - capture any ongoing process. This property is - managed by clients (such as Vertex Pipelines) - and the system does not prescribe or check the - validity of state transitions. - etag (str): - An eTag used to perform consistent read- - odify-write updates. If not set, a blind - "overwrite" update happens. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Execution.LabelsEntry]): - The labels with user-defined metadata to - organize your Executions. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. No more than 64 user labels can be - associated with one Execution (System labels are - excluded). - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Execution - was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Execution - was last updated. - schema_title (str): - The title of the schema describing the - metadata. - Schema title and version is expected to be - registered in earlier Create Schema calls. And - both are used together as unique identifiers to - identify schemas within the local metadata - store. - schema_version (str): - The version of the schema in ``schema_title`` to use. - - Schema title and version is expected to be registered in - earlier Create Schema calls. 
and both are used together as - unique identifiers to identify schemas within the local - metadata store. - metadata (google.protobuf.struct_pb2.Struct): - Properties of the Execution. - description (str): - Description of the Execution. - """ - class State(proto.Enum): - r"""Describes the state of the Execution.""" - STATE_UNSPECIFIED = 0 - NEW = 1 - RUNNING = 2 - COMPLETE = 3 - FAILED = 4 - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - state = proto.Field( - proto.ENUM, - number=6, - enum=State, - ) - etag = proto.Field( - proto.STRING, - number=9, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=10, - ) - create_time = proto.Field( - proto.MESSAGE, - number=11, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=12, - message=timestamp_pb2.Timestamp, - ) - schema_title = proto.Field( - proto.STRING, - number=13, - ) - schema_version = proto.Field( - proto.STRING, - number=14, - ) - metadata = proto.Field( - proto.MESSAGE, - number=15, - message=struct_pb2.Struct, - ) - description = proto.Field( - proto.STRING, - number=16, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation.py deleted file mode 100644 index 115897a2f9..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation.py +++ /dev/null @@ -1,644 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import explanation_metadata -from google.protobuf import struct_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'Explanation', - 'ModelExplanation', - 'Attribution', - 'ExplanationSpec', - 'ExplanationParameters', - 'SampledShapleyAttribution', - 'IntegratedGradientsAttribution', - 'XraiAttribution', - 'SmoothGradConfig', - 'FeatureNoiseSigma', - 'ExplanationSpecOverride', - 'ExplanationMetadataOverride', - }, -) - - -class Explanation(proto.Message): - r"""Explanation of a prediction (provided in - [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions]) - produced by the Model on a given - [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]. - - Attributes: - attributions (Sequence[google.cloud.aiplatform_v1beta1.types.Attribution]): - Output only. Feature attributions grouped by predicted - outputs. - - For Models that predict only one output, such as regression - Models that predict only one score, there is only one - attribution that explains the predicted output. For Models - that predict multiple outputs, such as multiclass Models - that predict multiple classes, each element explains one - specific item.
- [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] - can be used to identify which output this attribution is - explaining. - - If users set - [ExplanationParameters.top_k][google.cloud.aiplatform.v1beta1.ExplanationParameters.top_k], - the attributions are sorted by - [instance_output_value][Attributions.instance_output_value] - in descending order. If - [ExplanationParameters.output_indices][google.cloud.aiplatform.v1beta1.ExplanationParameters.output_indices] - is specified, the attributions are stored by - [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] - in the same order as they appear in the output_indices. - """ - - attributions = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='Attribution', - ) - - -class ModelExplanation(proto.Message): - r"""Aggregated explanation metrics for a Model over a set of - instances. - - Attributes: - mean_attributions (Sequence[google.cloud.aiplatform_v1beta1.types.Attribution]): - Output only. Aggregated attributions explaining the Model's - prediction outputs over the set of instances. The - attributions are grouped by outputs. - - For Models that predict only one output, such as regression - Models that predict only one score, there is only one - attribution that explains the predicted output. For Models - that predict multiple outputs, such as multiclass Models - that predict multiple classes, each element explains one - specific item. - [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] - can be used to identify which output this attribution is - explaining. - - The - [baselineOutputValue][google.cloud.aiplatform.v1beta1.Attribution.baseline_output_value], - [instanceOutputValue][google.cloud.aiplatform.v1beta1.Attribution.instance_output_value] - and - [featureAttributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions] - fields are averaged over the test data. - - NOTE: Currently AutoML tabular classification Models produce - only one attribution, which averages attributions over all - the classes it predicts. - [Attribution.approximation_error][google.cloud.aiplatform.v1beta1.Attribution.approximation_error] - is not populated. - """ - - mean_attributions = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='Attribution', - ) - - -class Attribution(proto.Message): - r"""Attribution that explains a particular prediction output. - Attributes: - baseline_output_value (float): - Output only. Model predicted output if the input instance is - constructed from the baselines of all the features defined - in - [ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs]. - The field name of the output is determined by the key in - [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs]. - - If the Model's predicted output has multiple dimensions - (rank > 1), this is the value in the output located by - [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]. - - If there are multiple baselines, their output values are - averaged. - instance_output_value (float): - Output only. Model predicted output on the corresponding - [explanation instance][ExplainRequest.instances]. The field - name of the output is determined by the key in - [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs].
- - If the Model predicted output has multiple dimensions, this - is the value in the output located by - [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]. - feature_attributions (google.protobuf.struct_pb2.Value): - Output only. Attributions of each explained feature. - Features are extracted from the [prediction - instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances] - according to [explanation metadata for - inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs]. - - The value is a struct, whose keys are the name of the - feature. The values are how much the feature in the - [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances] - contributed to the predicted result. - - The format of the value is determined by the feature's input - format: - - - If the feature is a scalar value, the attribution value - is a [floating - number][google.protobuf.Value.number_value]. - - - If the feature is an array of scalar values, the - attribution value is an - [array][google.protobuf.Value.list_value]. - - - If the feature is a struct, the attribution value is a - [struct][google.protobuf.Value.struct_value]. The keys in - the attribution value struct are the same as the keys in - the feature struct. The formats of the values in the - attribution struct are determined by the formats of the - values in the feature struct. - - The - [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1beta1.ExplanationMetadata.feature_attributions_schema_uri] - field, pointed to by the - [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec] - field of the - [Endpoint.deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] - object, points to the schema file that describes the - features and their attribution values (if it is populated). - output_index (Sequence[int]): - Output only. The index that locates the explained prediction - output. - - If the prediction output is a scalar value, output_index is - not populated. If the prediction output has multiple - dimensions, the length of the output_index list is the same - as the number of dimensions of the output. The i-th element - in output_index is the element index of the i-th dimension - of the output vector. Indices start from 0. - output_display_name (str): - Output only. The display name of the output identified by - [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]. - For example, the predicted class name by a - multi-classification Model. - - This field is populated only if the Model predicts display - names as a separate field along with the explained output. - The predicted display name must have the same shape as the - explained output, and can be located using output_index. - approximation_error (float): - Output only. Error of - [feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions] - caused by approximation used in the explanation method. - Lower value means more precise attributions. - - - For Sampled Shapley - [attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.sampled_shapley_attribution], - increasing - [path_count][google.cloud.aiplatform.v1beta1.SampledShapleyAttribution.path_count] - might reduce the error.
- - For Integrated Gradients - [attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution], - increasing - [step_count][google.cloud.aiplatform.v1beta1.IntegratedGradientsAttribution.step_count] - might reduce the error. - - For [XRAI - attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution], - increasing - [step_count][google.cloud.aiplatform.v1beta1.XraiAttribution.step_count] - might reduce the error. - - See `this - introduction `__ - for more information. - output_name (str): - Output only. Name of the explain output. Specified as the - key in - [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs]. - """ - - baseline_output_value = proto.Field( - proto.DOUBLE, - number=1, - ) - instance_output_value = proto.Field( - proto.DOUBLE, - number=2, - ) - feature_attributions = proto.Field( - proto.MESSAGE, - number=3, - message=struct_pb2.Value, - ) - output_index = proto.RepeatedField( - proto.INT32, - number=4, - ) - output_display_name = proto.Field( - proto.STRING, - number=5, - ) - approximation_error = proto.Field( - proto.DOUBLE, - number=6, - ) - output_name = proto.Field( - proto.STRING, - number=7, - ) - - -class ExplanationSpec(proto.Message): - r"""Specification of Model explanation. - Attributes: - parameters (google.cloud.aiplatform_v1beta1.types.ExplanationParameters): - Required. Parameters that configure - explaining of the Model's predictions. - metadata (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata): - Required. Metadata describing the Model's - input and output for explanation. - """ - - parameters = proto.Field( - proto.MESSAGE, - number=1, - message='ExplanationParameters', - ) - metadata = proto.Field( - proto.MESSAGE, - number=2, - message=explanation_metadata.ExplanationMetadata, - ) - - -class ExplanationParameters(proto.Message): - r"""Parameters to configure explaining for Model's predictions. - Attributes: - sampled_shapley_attribution (google.cloud.aiplatform_v1beta1.types.SampledShapleyAttribution): - An attribution method that approximates - Shapley values for features that contribute to - the label being predicted. A sampling strategy - is used to approximate the value rather than - considering all subsets of features. Refer to - this paper for more details: - https://arxiv.org/abs/1306.4265. - integrated_gradients_attribution (google.cloud.aiplatform_v1beta1.types.IntegratedGradientsAttribution): - An attribution method that computes - Aumann-Shapley values taking advantage of the model's - fully differentiable structure. Refer to this - paper for more details: - https://arxiv.org/abs/1703.01365 - xrai_attribution (google.cloud.aiplatform_v1beta1.types.XraiAttribution): - An attribution method that redistributes - Integrated Gradients attribution to segmented - regions, taking advantage of the model's fully - differentiable structure. Refer to this paper - for more details: - https://arxiv.org/abs/1906.02825 - XRAI currently performs better on natural - images, like a picture of a house or an animal. - If the images are taken in artificial - environments, like a lab or manufacturing line, - or from diagnostic equipment, like x-rays or - quality-control cameras, use Integrated - Gradients instead. - top_k (int): - If populated, returns attributions for top K - indices of outputs (defaults to 1). Only applies - to Models that predict more than one output - (e.g., multi-class Models). When set to -1, - returns explanations for all outputs.
- output_indices (google.protobuf.struct_pb2.ListValue): - If populated, only returns attributions that have - [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] - contained in output_indices. It must be an ndarray of - integers, with the same shape as the output it's explaining. - - If not populated, returns attributions for - [top_k][google.cloud.aiplatform.v1beta1.ExplanationParameters.top_k] - indices of outputs. If neither top_k nor output_indices is - populated, returns the argmax index of the outputs. - - Only applicable to Models that predict multiple outputs - (e.g., multi-class Models that predict multiple classes). - """ - - sampled_shapley_attribution = proto.Field( - proto.MESSAGE, - number=1, - oneof='method', - message='SampledShapleyAttribution', - ) - integrated_gradients_attribution = proto.Field( - proto.MESSAGE, - number=2, - oneof='method', - message='IntegratedGradientsAttribution', - ) - xrai_attribution = proto.Field( - proto.MESSAGE, - number=3, - oneof='method', - message='XraiAttribution', - ) - top_k = proto.Field( - proto.INT32, - number=4, - ) - output_indices = proto.Field( - proto.MESSAGE, - number=5, - message=struct_pb2.ListValue, - ) - - -class SampledShapleyAttribution(proto.Message): - r"""An attribution method that approximates Shapley values for - features that contribute to the label being predicted. A - sampling strategy is used to approximate the value rather than - considering all subsets of features. - - Attributes: - path_count (int): - Required. The number of feature permutations to consider - when approximating the Shapley values. - - Valid range of its value is [1, 50], inclusively. - """ - - path_count = proto.Field( - proto.INT32, - number=1, - ) - - -class IntegratedGradientsAttribution(proto.Message): - r"""An attribution method that computes the Aumann-Shapley value - taking advantage of the model's fully differentiable structure. - Refer to this paper for more details: - https://arxiv.org/abs/1703.01365 - - Attributes: - step_count (int): - Required. The number of steps for approximating the path - integral. A good starting value is 50; gradually increase it - until the sum-to-diff property is within the desired error - range. - - Valid range of its value is [1, 100], inclusively. - smooth_grad_config (google.cloud.aiplatform_v1beta1.types.SmoothGradConfig): - Config for SmoothGrad approximation of - gradients. - When enabled, the gradients are approximated by - averaging the gradients from noisy samples in - the vicinity of the inputs. Adding noise can - help improve the computed gradients. Refer to - this paper for more details: - https://arxiv.org/pdf/1706.03825.pdf - """ - - step_count = proto.Field( - proto.INT32, - number=1, - ) - smooth_grad_config = proto.Field( - proto.MESSAGE, - number=2, - message='SmoothGradConfig', - ) - - -class XraiAttribution(proto.Message): - r"""An explanation method that redistributes Integrated Gradients - attributions to segmented regions, taking advantage of the - model's fully differentiable structure. Refer to this paper for - more details: https://arxiv.org/abs/1906.02825 - - Supported only by image Models. - - Attributes: - step_count (int): - Required. The number of steps for approximating the path - integral. A good starting value is 50; gradually increase it - until the sum-to-diff property is met within the desired - error range. - - Valid range of its value is [1, 100], inclusively.
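
For orientation while reading these deleted definitions, here is a minimal sketch of how the ``method`` oneof and ``top_k`` combine, assuming the generated google-cloud-aiplatform v1beta1 package is installed; the field names come from the definitions above, everything else (import path, values) is illustrative:

from google.cloud.aiplatform_v1beta1 import types

# Exactly one of the three attribution methods may be set; they share
# the 'method' oneof declared on ExplanationParameters above.
params = types.ExplanationParameters(
    sampled_shapley_attribution=types.SampledShapleyAttribution(
        path_count=10,  # per the docstring above, valid range is [1, 50]
    ),
    top_k=3,  # return attributions for the top 3 outputs of a multi-class Model
)
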
- smooth_grad_config (google.cloud.aiplatform_v1beta1.types.SmoothGradConfig): - Config for SmoothGrad approximation of - gradients. - When enabled, the gradients are approximated by - averaging the gradients from noisy samples in - the vicinity of the inputs. Adding noise can - help improve the computed gradients. Refer to - this paper for more details: - https://arxiv.org/pdf/1706.03825.pdf - """ - - step_count = proto.Field( - proto.INT32, - number=1, - ) - smooth_grad_config = proto.Field( - proto.MESSAGE, - number=2, - message='SmoothGradConfig', - ) - - -class SmoothGradConfig(proto.Message): - r"""Config for SmoothGrad approximation of gradients. - When enabled, the gradients are approximated by averaging the - gradients from noisy samples in the vicinity of the inputs. - Adding noise can help improve the computed gradients. Refer to - this paper for more details: - https://arxiv.org/pdf/1706.03825.pdf - - Attributes: - noise_sigma (float): - This is a single float value and will be used to add noise - to all the features. Use this field when all features are - normalized to have the same distribution: scale to range [0, - 1], [-1, 1] or z-scoring, where features are normalized to - have 0-mean and 1-variance. Learn more about - `normalization `__. - - For best results the recommended value is about 10% - 20% of - the standard deviation of the input feature. Refer to - section 3.2 of the SmoothGrad paper: - https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. - - If the distribution is different per feature, set - [feature_noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.feature_noise_sigma] - instead for each feature. - feature_noise_sigma (google.cloud.aiplatform_v1beta1.types.FeatureNoiseSigma): - This is similar to - [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma], - but provides additional flexibility. A separate noise sigma - can be provided for each feature, which is useful if their - distributions are different. No noise is added to features - that are not set. If this field is unset, - [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma] - will be used for all features. - noisy_sample_count (int): - The number of gradient samples to use for approximation. The - higher this number, the more accurate the gradient is, but - the runtime complexity increases by this factor as well. - Valid range of its value is [1, 50]. Defaults to 3. - """ - - noise_sigma = proto.Field( - proto.FLOAT, - number=1, - oneof='GradientNoiseSigma', - ) - feature_noise_sigma = proto.Field( - proto.MESSAGE, - number=2, - oneof='GradientNoiseSigma', - message='FeatureNoiseSigma', - ) - noisy_sample_count = proto.Field( - proto.INT32, - number=3, - ) - - -class FeatureNoiseSigma(proto.Message): - r"""Noise sigma by features. Noise sigma represents the standard - deviation of the gaussian kernel that will be used to add noise - to interpolated inputs prior to computing gradients. - - Attributes: - noise_sigma (Sequence[google.cloud.aiplatform_v1beta1.types.FeatureNoiseSigma.NoiseSigmaForFeature]): - Noise sigma per feature. No noise is added to - features that are not set. - """ - - class NoiseSigmaForFeature(proto.Message): - r"""Noise sigma for a single feature. - Attributes: - name (str): - The name of the input feature for which noise sigma is - provided. The features are defined in [explanation metadata - inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs]. 
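
Similarly, a hedged sketch of pairing SmoothGrad with Integrated Gradients per the docstrings above; ``noise_sigma`` and ``feature_noise_sigma`` share the ``GradientNoiseSigma`` oneof, so only one of them may be set, and the values here are illustrative rather than recommendations beyond what the docstrings state:

from google.cloud.aiplatform_v1beta1 import types

smooth_grad = types.SmoothGradConfig(
    noise_sigma=0.1,        # documented default; ~10%-20% of feature stddev suggested
    noisy_sample_count=25,  # more samples: more accurate but slower; range [1, 50]
)
ig = types.IntegratedGradientsAttribution(
    step_count=50,  # the docstring above suggests starting at 50; range [1, 100]
    smooth_grad_config=smooth_grad,
)
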
- sigma (float): - This represents the standard deviation of the Gaussian - kernel that will be used to add noise to the feature prior - to computing gradients. Similar to - [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma] - but represents the noise added to the current feature. - Defaults to 0.1. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - sigma = proto.Field( - proto.FLOAT, - number=2, - ) - - noise_sigma = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=NoiseSigmaForFeature, - ) - - -class ExplanationSpecOverride(proto.Message): - r"""The - [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec] - entries that can be overridden at [online - explanation][PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain] - time. - - Attributes: - parameters (google.cloud.aiplatform_v1beta1.types.ExplanationParameters): - The parameters to be overridden. Note that the - [method][google.cloud.aiplatform.v1beta1.ExplanationParameters.method] - cannot be changed. If not specified, no parameter is - overridden. - metadata (google.cloud.aiplatform_v1beta1.types.ExplanationMetadataOverride): - The metadata to be overridden. If not - specified, no metadata is overridden. - """ - - parameters = proto.Field( - proto.MESSAGE, - number=1, - message='ExplanationParameters', - ) - metadata = proto.Field( - proto.MESSAGE, - number=2, - message='ExplanationMetadataOverride', - ) - - -class ExplanationMetadataOverride(proto.Message): - r"""The - [ExplanationMetadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata] - entries that can be overridden at [online - explanation][google.cloud.aiplatform.v1beta1.PredictionService.Explain] - time. - - Attributes: - inputs (Sequence[google.cloud.aiplatform_v1beta1.types.ExplanationMetadataOverride.InputsEntry]): - Required. Overrides the [input - metadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs] - of the features. The key is the name of the feature to be - overridden. The keys specified here must exist in the input - metadata to be overridden. If a feature is not specified - here, the corresponding feature's input metadata is not - overridden. - """ - - class InputMetadataOverride(proto.Message): - r"""The [input - metadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata] - entries to be overridden. - - Attributes: - input_baselines (Sequence[google.protobuf.struct_pb2.Value]): - Baseline inputs for this feature. - - This overrides the ``input_baseline`` field of the - [ExplanationMetadata.InputMetadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata] - object of the corresponding feature's input metadata. If - it's not specified, the original baselines are not - overridden. 
- """ - - input_baselines = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=struct_pb2.Value, - ) - - inputs = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=1, - message=InputMetadataOverride, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py deleted file mode 100644 index c2f807c34b..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py +++ /dev/null @@ -1,449 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import struct_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'ExplanationMetadata', - }, -) - - -class ExplanationMetadata(proto.Message): - r"""Metadata describing the Model's input and output for - explanation. - - Attributes: - inputs (Sequence[google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputsEntry]): - Required. Map from feature names to feature input metadata. - Keys are the name of the features. Values are the - specification of the feature. - - An empty InputMetadata is valid. It describes a text feature - which has the name specified as the key in - [ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs]. - The baseline of the empty feature is chosen by Vertex AI. - - For Vertex AI-provided Tensorflow images, the key can be any - friendly name of the feature. Once specified, - [featureAttributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions] - are keyed by this key (if not grouped with another feature). - - For custom images, the key must match with the key in - [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]. - outputs (Sequence[google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.OutputsEntry]): - Required. Map from output names to output - metadata. - For Vertex AI-provided Tensorflow images, keys - can be any user defined string that consists of - any UTF-8 characters. - For custom images, keys are the name of the - output field in the prediction to be explained. - - Currently only one key is allowed. - feature_attributions_schema_uri (str): - Points to a YAML file stored on Google Cloud Storage - describing the format of the [feature - attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions]. - The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. - AutoML tabular Models always have this field populated by - Vertex AI. Note: The URI given on output may be different, - including the URI scheme, than the one given on input. The - output URI will point to a location where the user only has - a read access. - """ - - class InputMetadata(proto.Message): - r"""Metadata of the input of a feature. 
- - Fields other than - [InputMetadata.input_baselines][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.input_baselines] - are applicable only for Models that are using Vertex AI-provided - images for Tensorflow. - - Attributes: - input_baselines (Sequence[google.protobuf.struct_pb2.Value]): - Baseline inputs for this feature. - - If no baseline is specified, Vertex AI chooses the baseline - for this feature. If multiple baselines are specified, - Vertex AI returns the average attributions across them in - [Attributions.baseline_attribution][]. - - For Vertex AI-provided Tensorflow images (both 1.x and 2.x), - the shape of each baseline must match the shape of the input - tensor. If a scalar is provided, we broadcast to the same - shape as the input tensor. - - For custom images, the element of the baselines must be in - the same format as the feature's input in the - [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances][]. - The schema of any single instance may be specified via - Endpoint's DeployedModels' - [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. - input_tensor_name (str): - Name of the input tensor for this feature. - Required and is only applicable to Vertex AI- - provided images for Tensorflow. - encoding (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Encoding): - Defines how the feature is encoded into the - input tensor. Defaults to IDENTITY. - modality (str): - Modality of the feature. Valid values are: - numeric, image. Defaults to numeric. - feature_value_domain (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.FeatureValueDomain): - The domain details of the input feature - value. Like min/max, original mean or standard - deviation if normalized. - indices_tensor_name (str): - Specifies the index of the values of the input tensor. - Required when the input tensor is a sparse representation. - Refer to Tensorflow documentation for more details: - https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor. - dense_shape_tensor_name (str): - Specifies the shape of the values of the input if the input - is a sparse representation. Refer to Tensorflow - documentation for more details: - https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor. - index_feature_mapping (Sequence[str]): - A list of feature names for each index in the input tensor. - Required when the input - [InputMetadata.encoding][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.encoding] - is BAG_OF_FEATURES, BAG_OF_FEATURES_SPARSE, INDICATOR. - encoded_tensor_name (str): - Encoded tensor is a transformation of the input tensor. Must - be provided if choosing [Integrated Gradients - attribution][ExplanationParameters.integrated_gradients_attribution] - or [XRAI - attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution] - and the input tensor is not differentiable. - - An encoded tensor is generated if the input tensor is - encoded by a lookup table. - encoded_baselines (Sequence[google.protobuf.struct_pb2.Value]): - A list of baselines for the encoded tensor. - The shape of each baseline should match the - shape of the encoded tensor. If a scalar is - provided, Vertex AI broadcasts to the same shape - as the encoded tensor. 
- visualization (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization): - Visualization configurations for image - explanation. - group_name (str): - Name of the group that the input belongs to. Features with - the same group name will be treated as one feature when - computing attributions. Features grouped together can have - different shapes in value. If provided, there will be one - single attribution generated in [ - featureAttributions][Attribution.feature_attributions], - keyed by the group name. - """ - class Encoding(proto.Enum): - r"""Defines how the feature is encoded to [encoded_tensor][]. Defaults - to IDENTITY. - """ - ENCODING_UNSPECIFIED = 0 - IDENTITY = 1 - BAG_OF_FEATURES = 2 - BAG_OF_FEATURES_SPARSE = 3 - INDICATOR = 4 - COMBINED_EMBEDDING = 5 - CONCAT_EMBEDDING = 6 - - class FeatureValueDomain(proto.Message): - r"""Domain details of the input feature value. Provides numeric - information about the feature, such as its range (min, max). If the - feature has been pre-processed, for example with z-scoring, then it - provides information about how to recover the original feature. For - example, if the input feature is an image and it has been - pre-processed to obtain 0-mean and stddev = 1 values, then - original_mean, and original_stddev refer to the mean and stddev of - the original feature (e.g. image tensor) from which input feature - (with mean = 0 and stddev = 1) was obtained. - - Attributes: - min_value (float): - The minimum permissible value for this - feature. - max_value (float): - The maximum permissible value for this - feature. - original_mean (float): - If this input feature has been normalized to a mean value of - 0, the original_mean specifies the mean value of the domain - prior to normalization. - original_stddev (float): - If this input feature has been normalized to a standard - deviation of 1.0, the original_stddev specifies the standard - deviation of the domain prior to normalization. - """ - - min_value = proto.Field( - proto.FLOAT, - number=1, - ) - max_value = proto.Field( - proto.FLOAT, - number=2, - ) - original_mean = proto.Field( - proto.FLOAT, - number=3, - ) - original_stddev = proto.Field( - proto.FLOAT, - number=4, - ) - - class Visualization(proto.Message): - r"""Visualization configurations for image explanation. - Attributes: - type_ (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization.Type): - Type of the image visualization. Only applicable to - [Integrated Gradients attribution] - [ExplanationParameters.integrated_gradients_attribution]. - OUTLINES shows regions of attribution, while PIXELS shows - per-pixel attribution. Defaults to OUTLINES. - polarity (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization.Polarity): - Whether to only highlight pixels with - positive contributions, negative or both. - Defaults to POSITIVE. - color_map (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization.ColorMap): - The color scheme used for the highlighted areas. - - Defaults to PINK_GREEN for [Integrated Gradients - attribution][ExplanationParameters.integrated_gradients_attribution], - which shows positive attributions in green and negative in - pink. - - Defaults to VIRIDIS for [XRAI - attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution], - which highlights the most influential regions in yellow and - the least influential in blue. 
- clip_percent_upperbound (float): - Excludes attributions above the specified percentile from - the highlighted areas. Using the clip_percent_upperbound and - clip_percent_lowerbound together can be useful for filtering - out noise and making it easier to see areas of strong - attribution. Defaults to 99.9. - clip_percent_lowerbound (float): - Excludes attributions below the specified - percentile, from the highlighted areas. Defaults - to 62. - overlay_type (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization.OverlayType): - How the original image is displayed in the - visualization. Adjusting the overlay can help - increase visual clarity if the original image - makes it difficult to view the visualization. - Defaults to NONE. - """ - class Type(proto.Enum): - r"""Type of the image visualization. Only applicable to [Integrated - Gradients attribution] - [ExplanationParameters.integrated_gradients_attribution]. - """ - TYPE_UNSPECIFIED = 0 - PIXELS = 1 - OUTLINES = 2 - - class Polarity(proto.Enum): - r"""Whether to only highlight pixels with positive contributions, - negative or both. Defaults to POSITIVE. - """ - POLARITY_UNSPECIFIED = 0 - POSITIVE = 1 - NEGATIVE = 2 - BOTH = 3 - - class ColorMap(proto.Enum): - r"""The color scheme used for highlighting areas.""" - COLOR_MAP_UNSPECIFIED = 0 - PINK_GREEN = 1 - VIRIDIS = 2 - RED = 3 - GREEN = 4 - RED_GREEN = 6 - PINK_WHITE_GREEN = 5 - - class OverlayType(proto.Enum): - r"""How the original image is displayed in the visualization.""" - OVERLAY_TYPE_UNSPECIFIED = 0 - NONE = 1 - ORIGINAL = 2 - GRAYSCALE = 3 - MASK_BLACK = 4 - - type_ = proto.Field( - proto.ENUM, - number=1, - enum='ExplanationMetadata.InputMetadata.Visualization.Type', - ) - polarity = proto.Field( - proto.ENUM, - number=2, - enum='ExplanationMetadata.InputMetadata.Visualization.Polarity', - ) - color_map = proto.Field( - proto.ENUM, - number=3, - enum='ExplanationMetadata.InputMetadata.Visualization.ColorMap', - ) - clip_percent_upperbound = proto.Field( - proto.FLOAT, - number=4, - ) - clip_percent_lowerbound = proto.Field( - proto.FLOAT, - number=5, - ) - overlay_type = proto.Field( - proto.ENUM, - number=6, - enum='ExplanationMetadata.InputMetadata.Visualization.OverlayType', - ) - - input_baselines = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=struct_pb2.Value, - ) - input_tensor_name = proto.Field( - proto.STRING, - number=2, - ) - encoding = proto.Field( - proto.ENUM, - number=3, - enum='ExplanationMetadata.InputMetadata.Encoding', - ) - modality = proto.Field( - proto.STRING, - number=4, - ) - feature_value_domain = proto.Field( - proto.MESSAGE, - number=5, - message='ExplanationMetadata.InputMetadata.FeatureValueDomain', - ) - indices_tensor_name = proto.Field( - proto.STRING, - number=6, - ) - dense_shape_tensor_name = proto.Field( - proto.STRING, - number=7, - ) - index_feature_mapping = proto.RepeatedField( - proto.STRING, - number=8, - ) - encoded_tensor_name = proto.Field( - proto.STRING, - number=9, - ) - encoded_baselines = proto.RepeatedField( - proto.MESSAGE, - number=10, - message=struct_pb2.Value, - ) - visualization = proto.Field( - proto.MESSAGE, - number=11, - message='ExplanationMetadata.InputMetadata.Visualization', - ) - group_name = proto.Field( - proto.STRING, - number=12, - ) - - class OutputMetadata(proto.Message): - r"""Metadata of the prediction output to be explained. 
- Attributes: - index_display_name_mapping (google.protobuf.struct_pb2.Value): - Static mapping between the index and display name. - - Use this if the outputs are a deterministic n-dimensional - array, e.g. a list of scores of all the classes in a - pre-defined order for a multi-classification Model. It's not - feasible if the outputs are non-deterministic, e.g. the - Model produces top-k classes or sorts the outputs by their - values. - - The shape of the value must be an n-dimensional array of - strings. The number of dimensions must match that of the - outputs to be explained. The - [Attribution.output_display_name][google.cloud.aiplatform.v1beta1.Attribution.output_display_name] - is populated by locating the entry in the mapping with - [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]. - display_name_mapping_key (str): - Specify a field name in the prediction to look for the - display name. - - Use this if the prediction contains the display names for - the outputs. - - The display names in the prediction must have the same shape - as the outputs, so that they can be located by - [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] - for a specific output. - output_tensor_name (str): - Name of the output tensor. Required, and - only applicable to AI Platform provided images - for TensorFlow. - """ - - index_display_name_mapping = proto.Field( - proto.MESSAGE, - number=1, - oneof='display_name_mapping', - message=struct_pb2.Value, - ) - display_name_mapping_key = proto.Field( - proto.STRING, - number=2, - oneof='display_name_mapping', - ) - output_tensor_name = proto.Field( - proto.STRING, - number=3, - ) - - inputs = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=1, - message=InputMetadata, - ) - outputs = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=2, - message=OutputMetadata, - ) - feature_attributions_schema_uri = proto.Field( - proto.STRING, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature.py deleted file mode 100644 index 1897e74798..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature.py +++ /dev/null @@ -1,150 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats -from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'Feature', - }, -) - - -class Feature(proto.Message): - r"""Feature Metadata information that describes an attribute of - an entity type. For example, apple is an entity type, and color - is a feature that describes apple.
- - Attributes: - name (str): - Immutable. Name of the Feature. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` - - The last part, feature, is assigned by the client. The feature - can be up to 64 characters long and can consist only of - ASCII Latin letters A-Z and a-z, underscore (_), and ASCII - digits 0-9, starting with a letter. The value will be unique - given an entity type. - description (str): - Description of the Feature. - value_type (google.cloud.aiplatform_v1beta1.types.Feature.ValueType): - Required. Immutable. Type of Feature value. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Feature - was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Feature - was most recently updated. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Feature.LabelsEntry]): - Optional. The labels with user-defined - metadata to organize your Features. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - on and examples of labels. No more than 64 user - labels can be associated with one Feature - (system labels are excluded). - System reserved label keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. - etag (str): - Used to perform consistent read-modify-write - updates. If not set, a blind "overwrite" - update happens. - monitoring_config (google.cloud.aiplatform_v1beta1.types.FeaturestoreMonitoringConfig): - Optional. The custom monitoring configuration for this - Feature. If not set, the monitoring_config defined for - the EntityType this Feature belongs to is used. - - If this is populated with - [FeaturestoreMonitoringConfig.disabled][] = true, snapshot - analysis monitoring is disabled; if - [FeaturestoreMonitoringConfig.monitoring_interval][] is - specified, snapshot analysis monitoring is enabled. - Otherwise, the snapshot analysis monitoring config is the - same as that of the EntityType this Feature belongs to. - monitoring_stats (Sequence[google.cloud.aiplatform_v1beta1.types.FeatureStatsAnomaly]): - Output only. A list of historical [Snapshot - Analysis][google.cloud.aiplatform.master.FeaturestoreMonitoringConfig.SnapshotAnalysis] - stats requested by the user, sorted by - [FeatureStatsAnomaly.start_time][google.cloud.aiplatform.v1beta1.FeatureStatsAnomaly.start_time] - descending.
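A minimal sketch of constructing the Feature message documented above, assuming the google-cloud-aiplatform v1beta1 types package; the description, value type, and label values are illustrative, not from the source::

    from google.cloud.aiplatform_v1beta1 import types

    # Sketch only: field values are made up for illustration.
    color = types.Feature(
        description="Color of the apple.",
        value_type=types.Feature.ValueType.STRING,
        labels={"env": "prod"},
    )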
- """ - class ValueType(proto.Enum): - r"""An enum representing the value type of a feature.""" - VALUE_TYPE_UNSPECIFIED = 0 - BOOL = 1 - BOOL_ARRAY = 2 - DOUBLE = 3 - DOUBLE_ARRAY = 4 - INT64 = 9 - INT64_ARRAY = 10 - STRING = 11 - STRING_ARRAY = 12 - BYTES = 13 - - name = proto.Field( - proto.STRING, - number=1, - ) - description = proto.Field( - proto.STRING, - number=2, - ) - value_type = proto.Field( - proto.ENUM, - number=3, - enum=ValueType, - ) - create_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=6, - ) - etag = proto.Field( - proto.STRING, - number=7, - ) - monitoring_config = proto.Field( - proto.MESSAGE, - number=9, - message=featurestore_monitoring.FeaturestoreMonitoringConfig, - ) - monitoring_stats = proto.RepeatedField( - proto.MESSAGE, - number=10, - message=feature_monitoring_stats.FeatureStatsAnomaly, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_monitoring_stats.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_monitoring_stats.py deleted file mode 100644 index 48dd2b3b84..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_monitoring_stats.py +++ /dev/null @@ -1,124 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'FeatureStatsAnomaly', - }, -) - - -class FeatureStatsAnomaly(proto.Message): - r"""Stats and Anomaly generated at specific timestamp for specific - Feature. The start_time and end_time are used to define the time - range of the dataset that current stats belongs to, e.g. prediction - traffic is bucketed into prediction datasets by time window. If the - Dataset is not defined by time window, start_time = end_time. - Timestamp of the stats and anomalies always refers to end_time. Raw - stats and anomalies are stored in stats_uri or anomaly_uri in the - tensorflow defined protos. Field data_stats contains almost - identical information with the raw stats in Vertex AI defined proto, - for UI to display. - - Attributes: - score (float): - Feature importance score, only populated when cross-feature - monitoring is enabled. For now only used to represent - feature attribution score within range [0, 1] for - [ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_SKEW][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_SKEW] - and - [ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_DRIFT][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_DRIFT]. 
- stats_uri (str): - Path of the stats file for current feature values in Cloud - Storage bucket. Format: - gs://<bucket_name>/<feature_name>/stats. Example: - gs://monitoring_bucket/feature_name/stats. Stats are stored - in binary format with the Protobuf message - `tensorflow.metadata.v0.FeatureNameStatistics <https://github.com/tensorflow/metadata/blob/master/tensorflow_metadata/proto/v0/statistics.proto>`__. - anomaly_uri (str): - Path of the anomaly file for current feature values in Cloud - Storage bucket. Format: - gs://<bucket_name>/<feature_name>/anomalies. Example: - gs://monitoring_bucket/feature_name/anomalies. Anomalies are - stored in binary format with the Protobuf message - [tensorflow.metadata.v0.AnomalyInfo] - (https://github.com/tensorflow/metadata/blob/master/tensorflow_metadata/proto/v0/anomalies.proto). - distribution_deviation (float): - Deviation from the current stats to baseline - stats. 1. For a categorical feature, the - distribution distance is calculated by the - L-infinity norm. - 2. For a numerical feature, the distribution - distance is calculated by the Jensen–Shannon - divergence. - anomaly_detection_threshold (float): - This is the threshold used when detecting anomalies. The - threshold can be changed by the user, so this one might be - different from - [ThresholdConfig.value][google.cloud.aiplatform.v1beta1.ThresholdConfig.value]. - start_time (google.protobuf.timestamp_pb2.Timestamp): - The start timestamp of window where stats were generated. - For objectives where time window doesn't make sense (e.g. - Featurestore Snapshot Monitoring), start_time is only used - to indicate the monitoring intervals, so it always equals to - (end_time - monitoring_interval). - end_time (google.protobuf.timestamp_pb2.Timestamp): - The end timestamp of window where stats were generated. For - objectives where time window doesn't make sense (e.g. - Featurestore Snapshot Monitoring), end_time indicates the - timestamp of the data used to generate stats (e.g. timestamp - we take snapshots for feature values). - """ - - score = proto.Field( - proto.DOUBLE, - number=1, - ) - stats_uri = proto.Field( - proto.STRING, - number=3, - ) - anomaly_uri = proto.Field( - proto.STRING, - number=4, - ) - distribution_deviation = proto.Field( - proto.DOUBLE, - number=5, - ) - anomaly_detection_threshold = proto.Field( - proto.DOUBLE, - number=9, - ) - start_time = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=8, - message=timestamp_pb2.Timestamp, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_selector.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_selector.py deleted file mode 100644 index 3921a7c769..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_selector.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
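As a hedged illustration of how the FeatureStatsAnomaly fields above relate, drift or skew can be flagged by comparing distribution_deviation against anomaly_detection_threshold; the helper name below is hypothetical::

    # Hypothetical helper, not part of the library: flags an anomaly when
    # the observed deviation exceeds the threshold recorded on the message.
    def is_anomalous(stats) -> bool:
        return stats.distribution_deviation > stats.anomaly_detection_threshold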
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'IdMatcher', - 'FeatureSelector', - }, -) - - -class IdMatcher(proto.Message): - r"""Matcher for Features of an EntityType by Feature ID. - Attributes: - ids (Sequence[str]): - Required. The following are accepted as ``ids``: - - - A single-element list containing only ``*``, which - selects all Features in the target EntityType, or - - A list containing only Feature IDs, which selects only - Features with those IDs in the target EntityType. - """ - - ids = proto.RepeatedField( - proto.STRING, - number=1, - ) - - -class FeatureSelector(proto.Message): - r"""Selector for Features of an EntityType. - Attributes: - id_matcher (google.cloud.aiplatform_v1beta1.types.IdMatcher): - Required. Matches Features based on ID. - """ - - id_matcher = proto.Field( - proto.MESSAGE, - number=1, - message='IdMatcher', - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore.py deleted file mode 100644 index 6d51c0c35b..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore.py +++ /dev/null @@ -1,125 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'Featurestore', - }, -) - - -class Featurestore(proto.Message): - r"""Featurestore configuration information on how the - Featurestore is configured. - - Attributes: - name (str): - Output only. Name of the Featurestore. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Featurestore - was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Featurestore - was last updated. - etag (str): - Optional. Used to perform consistent - read-modify-write updates. If not set, a blind - "overwrite" update happens. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Featurestore.LabelsEntry]): - Optional. The labels with user-defined - metadata to organize your Featurestore. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - on and examples of labels. No more than 64 user - labels can be associated with one - Featurestore (system labels are excluded). - System reserved label keys are prefixed with - "aiplatform.googleapis.com/" and are immutable.
- online_serving_config (google.cloud.aiplatform_v1beta1.types.Featurestore.OnlineServingConfig): - Required. Config for online serving - resources. - state (google.cloud.aiplatform_v1beta1.types.Featurestore.State): - Output only. State of the featurestore. - """ - class State(proto.Enum): - r"""Possible states a Featurestore can have.""" - STATE_UNSPECIFIED = 0 - STABLE = 1 - UPDATING = 2 - - class OnlineServingConfig(proto.Message): - r"""OnlineServingConfig specifies the details for provisioning - online serving resources. - - Attributes: - fixed_node_count (int): - Required. The number of nodes for each - cluster. The number of nodes will not scale - automatically but can be scaled manually by - providing different values when updating. - """ - - fixed_node_count = proto.Field( - proto.INT32, - number=2, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - create_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - etag = proto.Field( - proto.STRING, - number=5, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=6, - ) - online_serving_config = proto.Field( - proto.MESSAGE, - number=7, - message=OnlineServingConfig, - ) - state = proto.Field( - proto.ENUM, - number=8, - enum=State, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_monitoring.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_monitoring.py deleted file mode 100644 index ba63973bc8..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_monitoring.py +++ /dev/null @@ -1,79 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import duration_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'FeaturestoreMonitoringConfig', - }, -) - - -class FeaturestoreMonitoringConfig(proto.Message): - r"""Configuration of how features in Featurestore are monitored. - Attributes: - snapshot_analysis (google.cloud.aiplatform_v1beta1.types.FeaturestoreMonitoringConfig.SnapshotAnalysis): - The config for Snapshot Analysis Based - Feature Monitoring. - """ - - class SnapshotAnalysis(proto.Message): - r"""Configuration of the Featurestore's Snapshot Analysis Based - Monitoring. This type of analysis generates statistics for each - Feature based on a snapshot of the latest feature value of each - entity every monitoring_interval. - - Attributes: - disabled (bool): - The monitoring schedule for snapshot analysis. For an - EntityType-level config: unset or disabled = true indicates - monitoring is disabled by default for Features under it; otherwise - snapshot analysis monitoring is enabled by default, with - monitoring_interval, for Features under it.
For a Feature-level - config: disabled = true indicates monitoring is disabled regardless of the - EntityType-level config; an unset monitoring_interval indicates - going with the EntityType-level config; otherwise snapshot - analysis monitoring runs with monitoring_interval regardless of - the EntityType-level config. Setting disabled = true explicitly - disables the snapshot analysis based monitoring. - monitoring_interval (google.protobuf.duration_pb2.Duration): - Configuration of the snapshot analysis based - monitoring pipeline running interval. The value - is rolled up to a full day. - """ - - disabled = proto.Field( - proto.BOOL, - number=1, - ) - monitoring_interval = proto.Field( - proto.MESSAGE, - number=2, - message=duration_pb2.Duration, - ) - - snapshot_analysis = proto.Field( - proto.MESSAGE, - number=1, - message=SnapshotAnalysis, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py deleted file mode 100644 index aeb8c1cd53..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py +++ /dev/null @@ -1,337 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import feature_selector as gca_feature_selector -from google.cloud.aiplatform_v1beta1.types import types -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'ReadFeatureValuesRequest', - 'ReadFeatureValuesResponse', - 'StreamingReadFeatureValuesRequest', - 'FeatureValue', - 'FeatureValueList', - }, -) - - -class ReadFeatureValuesRequest(proto.Message): - r"""Request message for - [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues]. - - Attributes: - entity_type (str): - Required. The resource name of the EntityType for the entity - being read. Value format: - ``projects/{project}/locations/{location}/featurestores/ {featurestore}/entityTypes/{entityType}``. - For example, for a machine learning model predicting user - clicks on a website, an EntityType ID could be "user". - entity_id (str): - Required. ID for a specific entity. For example, for a - machine learning model predicting user clicks on a website, - an entity ID could be "user_123". - feature_selector (google.cloud.aiplatform_v1beta1.types.FeatureSelector): - Required. Selector choosing Features of the - target EntityType.
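A minimal sketch of the request documented above, assuming the v1beta1 types package; the project, featurestore, and feature IDs are placeholders::

    from google.cloud.aiplatform_v1beta1 import types

    # Sketch only: resource names and feature IDs are illustrative.
    request = types.ReadFeatureValuesRequest(
        entity_type=(
            "projects/my-project/locations/us-central1/featurestores/"
            "my_featurestore/entityTypes/user"
        ),
        entity_id="user_123",
        feature_selector=types.FeatureSelector(
            id_matcher=types.IdMatcher(ids=["age", "gender"]),
        ),
    )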
- """ - - entity_type = proto.Field( - proto.STRING, - number=1, - ) - entity_id = proto.Field( - proto.STRING, - number=2, - ) - feature_selector = proto.Field( - proto.MESSAGE, - number=3, - message=gca_feature_selector.FeatureSelector, - ) - - -class ReadFeatureValuesResponse(proto.Message): - r"""Response message for - [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues]. - - Attributes: - header (google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse.Header): - Response header. - entity_view (google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse.EntityView): - Entity view with Feature values. This may be - the entity in the Featurestore if values for all - Features were requested, or a projection of the - entity in the Featurestore if values for only - some Features were requested. - """ - - class FeatureDescriptor(proto.Message): - r"""Metadata for requested Features. - Attributes: - id (str): - Feature ID. - """ - - id = proto.Field( - proto.STRING, - number=1, - ) - - class Header(proto.Message): - r"""Response header with metadata for the requested - [ReadFeatureValuesRequest.entity_type][google.cloud.aiplatform.v1beta1.ReadFeatureValuesRequest.entity_type] - and Features. - - Attributes: - entity_type (str): - The resource name of the EntityType from the - [ReadFeatureValuesRequest][google.cloud.aiplatform.v1beta1.ReadFeatureValuesRequest]. - Value format: - ``projects/{project}/locations/{location}/featurestores/ {featurestore}/entityTypes/{entityType}``. - feature_descriptors (Sequence[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse.FeatureDescriptor]): - List of Feature metadata corresponding to each piece of - [ReadFeatureValuesResponse.data][]. - """ - - entity_type = proto.Field( - proto.STRING, - number=1, - ) - feature_descriptors = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='ReadFeatureValuesResponse.FeatureDescriptor', - ) - - class EntityView(proto.Message): - r"""Entity view with Feature values. - Attributes: - entity_id (str): - ID of the requested entity. - data (Sequence[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse.EntityView.Data]): - Each piece of data holds the k requested values for one - requested Feature. If no values for the requested Feature - exist, the corresponding cell will be empty. This has the - same size and is in the same order as the features from the - header - [ReadFeatureValuesResponse.header][google.cloud.aiplatform.v1beta1.ReadFeatureValuesResponse.header]. - """ - - class Data(proto.Message): - r"""Container to hold value(s), successive in time, for one - Feature from the request. - - Attributes: - value (google.cloud.aiplatform_v1beta1.types.FeatureValue): - Feature value if a single value is requested. - values (google.cloud.aiplatform_v1beta1.types.FeatureValueList): - Feature values list if values, successive in - time, are requested. If the requested number of - values is greater than the number of existing - Feature values, nonexistent values are omitted - instead of being returned as empty. 
- """ - - value = proto.Field( - proto.MESSAGE, - number=1, - oneof='data', - message='FeatureValue', - ) - values = proto.Field( - proto.MESSAGE, - number=2, - oneof='data', - message='FeatureValueList', - ) - - entity_id = proto.Field( - proto.STRING, - number=1, - ) - data = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='ReadFeatureValuesResponse.EntityView.Data', - ) - - header = proto.Field( - proto.MESSAGE, - number=1, - message=Header, - ) - entity_view = proto.Field( - proto.MESSAGE, - number=2, - message=EntityView, - ) - - -class StreamingReadFeatureValuesRequest(proto.Message): - r"""Request message for - [FeaturestoreOnlineServingService.StreamingFeatureValuesRead][]. - - Attributes: - entity_type (str): - Required. The resource name of the entities' type. Value - format: - ``projects/{project}/locations/{location}/featurestores/ {featurestore}/entityTypes/{entityType}``. - For example, for a machine learning model predicting user - clicks on a website, an EntityType ID could be "user". - entity_ids (Sequence[str]): - Required. IDs of entities to read Feature values of. For - example, for a machine learning model predicting user clicks - on a website, an entity ID could be "user_123". - feature_selector (google.cloud.aiplatform_v1beta1.types.FeatureSelector): - Required. Selector choosing Features of the - target EntityType. - """ - - entity_type = proto.Field( - proto.STRING, - number=1, - ) - entity_ids = proto.RepeatedField( - proto.STRING, - number=2, - ) - feature_selector = proto.Field( - proto.MESSAGE, - number=3, - message=gca_feature_selector.FeatureSelector, - ) - - -class FeatureValue(proto.Message): - r"""Value for a feature. - NEXT ID: 15 - - Attributes: - bool_value (bool): - Bool type feature value. - double_value (float): - Double type feature value. - int64_value (int): - Int64 feature value. - string_value (str): - String feature value. - bool_array_value (google.cloud.aiplatform_v1beta1.types.BoolArray): - A list of bool type feature value. - double_array_value (google.cloud.aiplatform_v1beta1.types.DoubleArray): - A list of double type feature value. - int64_array_value (google.cloud.aiplatform_v1beta1.types.Int64Array): - A list of int64 type feature value. - string_array_value (google.cloud.aiplatform_v1beta1.types.StringArray): - A list of string type feature value. - bytes_value (bytes): - Bytes feature value. - metadata (google.cloud.aiplatform_v1beta1.types.FeatureValue.Metadata): - Output only. Metadata of feature value. - """ - - class Metadata(proto.Message): - r"""Metadata of feature value. - Attributes: - generate_time (google.protobuf.timestamp_pb2.Timestamp): - Feature generation timestamp. Typically, it - is provided by user at feature ingestion time. - If not, feature store will use the system - timestamp when the data is ingested into feature - store. 
- """ - - generate_time = proto.Field( - proto.MESSAGE, - number=1, - message=timestamp_pb2.Timestamp, - ) - - bool_value = proto.Field( - proto.BOOL, - number=1, - oneof='value', - ) - double_value = proto.Field( - proto.DOUBLE, - number=2, - oneof='value', - ) - int64_value = proto.Field( - proto.INT64, - number=5, - oneof='value', - ) - string_value = proto.Field( - proto.STRING, - number=6, - oneof='value', - ) - bool_array_value = proto.Field( - proto.MESSAGE, - number=7, - oneof='value', - message=types.BoolArray, - ) - double_array_value = proto.Field( - proto.MESSAGE, - number=8, - oneof='value', - message=types.DoubleArray, - ) - int64_array_value = proto.Field( - proto.MESSAGE, - number=11, - oneof='value', - message=types.Int64Array, - ) - string_array_value = proto.Field( - proto.MESSAGE, - number=12, - oneof='value', - message=types.StringArray, - ) - bytes_value = proto.Field( - proto.BYTES, - number=13, - oneof='value', - ) - metadata = proto.Field( - proto.MESSAGE, - number=14, - message=Metadata, - ) - - -class FeatureValueList(proto.Message): - r"""Container for list of values. - Attributes: - values (Sequence[google.cloud.aiplatform_v1beta1.types.FeatureValue]): - A list of feature values. All of them should - be the same data type. - """ - - values = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='FeatureValue', - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_service.py deleted file mode 100644 index 09c8e10fdc..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_service.py +++ /dev/null @@ -1,1500 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type -from google.cloud.aiplatform_v1beta1.types import feature as gca_feature -from google.cloud.aiplatform_v1beta1.types import feature_selector as gca_feature_selector -from google.cloud.aiplatform_v1beta1.types import featurestore as gca_featurestore -from google.cloud.aiplatform_v1beta1.types import io -from google.cloud.aiplatform_v1beta1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'CreateFeaturestoreRequest', - 'GetFeaturestoreRequest', - 'ListFeaturestoresRequest', - 'ListFeaturestoresResponse', - 'UpdateFeaturestoreRequest', - 'DeleteFeaturestoreRequest', - 'ImportFeatureValuesRequest', - 'ImportFeatureValuesResponse', - 'BatchReadFeatureValuesRequest', - 'ExportFeatureValuesRequest', - 'DestinationFeatureSetting', - 'FeatureValueDestination', - 'ExportFeatureValuesResponse', - 'BatchReadFeatureValuesResponse', - 'CreateEntityTypeRequest', - 'GetEntityTypeRequest', - 'ListEntityTypesRequest', - 'ListEntityTypesResponse', - 'UpdateEntityTypeRequest', - 'DeleteEntityTypeRequest', - 'CreateFeatureRequest', - 'BatchCreateFeaturesRequest', - 'BatchCreateFeaturesResponse', - 'GetFeatureRequest', - 'ListFeaturesRequest', - 'ListFeaturesResponse', - 'SearchFeaturesRequest', - 'SearchFeaturesResponse', - 'UpdateFeatureRequest', - 'DeleteFeatureRequest', - 'CreateFeaturestoreOperationMetadata', - 'UpdateFeaturestoreOperationMetadata', - 'ImportFeatureValuesOperationMetadata', - 'ExportFeatureValuesOperationMetadata', - 'BatchReadFeatureValuesOperationMetadata', - 'CreateEntityTypeOperationMetadata', - 'CreateFeatureOperationMetadata', - 'BatchCreateFeaturesOperationMetadata', - }, -) - - -class CreateFeaturestoreRequest(proto.Message): - r"""Request message for - [FeaturestoreService.CreateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeaturestore]. - - Attributes: - parent (str): - Required. The resource name of the Location to create - Featurestores. Format: - ``projects/{project}/locations/{location}`` - featurestore (google.cloud.aiplatform_v1beta1.types.Featurestore): - Required. The Featurestore to create. - featurestore_id (str): - Required. The ID to use for this Featurestore, which will - become the final component of the Featurestore's resource - name. - - This value may be up to 60 characters, and valid characters - are ``[a-z0-9_]``. The first character cannot be a number. - - The value must be unique within the project and location. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - featurestore = proto.Field( - proto.MESSAGE, - number=2, - message=gca_featurestore.Featurestore, - ) - featurestore_id = proto.Field( - proto.STRING, - number=3, - ) - - -class GetFeaturestoreRequest(proto.Message): - r"""Request message for - [FeaturestoreService.GetFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeaturestore]. - - Attributes: - name (str): - Required. The name of the Featurestore - resource. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListFeaturestoresRequest(proto.Message): - r"""Request message for - [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]. - - Attributes: - parent (str): - Required.
The resource name of the Location to list - Featurestores. Format: - ``projects/{project}/locations/{location}`` - filter (str): - Lists the featurestores that match the filter expression. - The following fields are supported: - - - ``create_time``: Supports =, !=, <, >, <=, and >= - comparisons. Values must be in RFC 3339 format. - - ``update_time``: Supports =, !=, <, >, <=, and >= - comparisons. Values must be in RFC 3339 format. - - ``online_serving_config.fixed_node_count``: Supports =, - !=, <, >, <=, and >= comparisons. - - ``labels``: Supports key-value equality and key presence. - - Examples: - - - ``create_time > "2020-01-01" OR update_time > "2020-01-01"`` - Featurestores created or updated after 2020-01-01. - - ``labels.env = "prod"`` Featurestores with label "env" - set to "prod". - page_size (int): - The maximum number of Featurestores to - return. The service may return fewer than this - value. If unspecified, at most 100 Featurestores - will be returned. The maximum value is 100; any - value greater than 100 will be coerced to 100. - page_token (str): - A page token, received from a previous - [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores] - call. Provide this to retrieve the subsequent page. - - When paginating, all other parameters provided to - [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores] - must match the call that provided the page token. - order_by (str): - A comma-separated list of fields to order by, sorted in - ascending order. Use "desc" after a field name for - descending. Supported Fields: - - - ``create_time`` - - ``update_time`` - - ``online_serving_config.fixed_node_count`` - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - order_by = proto.Field( - proto.STRING, - number=5, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=6, - message=field_mask_pb2.FieldMask, - ) - - -class ListFeaturestoresResponse(proto.Message): - r"""Response message for - [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]. - - Attributes: - featurestores (Sequence[google.cloud.aiplatform_v1beta1.types.Featurestore]): - The Featurestores matching the request. - next_page_token (str): - A token, which can be sent as - [ListFeaturestoresRequest.page_token][google.cloud.aiplatform.v1beta1.ListFeaturestoresRequest.page_token] - to retrieve the next page. If this field is omitted, there - are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - featurestores = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_featurestore.Featurestore, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateFeaturestoreRequest(proto.Message): - r"""Request message for - [FeaturestoreService.UpdateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeaturestore]. - - Attributes: - featurestore (google.cloud.aiplatform_v1beta1.types.Featurestore): - Required. The Featurestore's ``name`` field is used to - identify the Featurestore to be updated. 
Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Field mask is used to specify the fields to be overwritten - in the Featurestore resource by the update. The fields - specified in the update_mask are relative to the resource, - not the full request. A field will be overwritten if it is - in the mask. If the user does not provide a mask then only - the non-empty fields present in the request will be - overwritten. Set the update_mask to ``*`` to override all - fields. - - Updatable fields: - - - ``labels`` - - ``online_serving_config.fixed_node_count`` - """ - - featurestore = proto.Field( - proto.MESSAGE, - number=1, - message=gca_featurestore.Featurestore, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class DeleteFeaturestoreRequest(proto.Message): - r"""Request message for - [FeaturestoreService.DeleteFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeaturestore]. - - Attributes: - name (str): - Required. The name of the Featurestore to be deleted. - Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - force (bool): - If set to true, any EntityTypes and Features - for this Featurestore will also be deleted. - (Otherwise, the request will only work if the - Featurestore has no EntityTypes.) - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - force = proto.Field( - proto.BOOL, - number=2, - ) - - -class ImportFeatureValuesRequest(proto.Message): - r"""Request message for - [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. - - Attributes: - avro_source (google.cloud.aiplatform_v1beta1.types.AvroSource): - - bigquery_source (google.cloud.aiplatform_v1beta1.types.BigQuerySource): - - csv_source (google.cloud.aiplatform_v1beta1.types.CsvSource): - - feature_time_field (str): - Source column that holds the Feature - timestamp for all Feature values in each entity. - feature_time (google.protobuf.timestamp_pb2.Timestamp): - Single Feature timestamp for all entities - being imported. The timestamp must not have - higher than millisecond precision. - entity_type (str): - Required. The resource name of the EntityType grouping the - Features for which values are being imported. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`` - entity_id_field (str): - Source column that holds entity IDs. If not provided, entity - IDs are extracted from the column named ``entity_id``. - feature_specs (Sequence[google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesRequest.FeatureSpec]): - Required. Specifications defining which Feature values to - import from the entity. The request fails if no - feature_specs are provided, and having multiple - feature_specs for one Feature is not allowed. - disable_online_serving (bool): - If set, data will not be imported for online - serving. This is typically used for backfilling, - where Feature generation timestamps are not in - the timestamp range needed for online serving. - worker_count (int): - Specifies the number of workers that are used - to write data to the Featurestore. Consider the - online serving capacity that you require to - achieve the desired import throughput without - interfering with online serving. The value must - be positive, and less than or equal to 100. If - not set, defaults to using 1 worker. 
The low - count ensures minimal impact on online serving - performance. - """ - - class FeatureSpec(proto.Message): - r"""Defines the Feature value(s) to import. - Attributes: - id (str): - Required. ID of the Feature to import values - of. This Feature must exist in the target - EntityType, or the request will fail. - source_field (str): - Source column to get the Feature values from. - If not set, uses the column with the same name - as the Feature ID. - """ - - id = proto.Field( - proto.STRING, - number=1, - ) - source_field = proto.Field( - proto.STRING, - number=2, - ) - - avro_source = proto.Field( - proto.MESSAGE, - number=2, - oneof='source', - message=io.AvroSource, - ) - bigquery_source = proto.Field( - proto.MESSAGE, - number=3, - oneof='source', - message=io.BigQuerySource, - ) - csv_source = proto.Field( - proto.MESSAGE, - number=4, - oneof='source', - message=io.CsvSource, - ) - feature_time_field = proto.Field( - proto.STRING, - number=6, - oneof='feature_time_source', - ) - feature_time = proto.Field( - proto.MESSAGE, - number=7, - oneof='feature_time_source', - message=timestamp_pb2.Timestamp, - ) - entity_type = proto.Field( - proto.STRING, - number=1, - ) - entity_id_field = proto.Field( - proto.STRING, - number=5, - ) - feature_specs = proto.RepeatedField( - proto.MESSAGE, - number=8, - message=FeatureSpec, - ) - disable_online_serving = proto.Field( - proto.BOOL, - number=9, - ) - worker_count = proto.Field( - proto.INT32, - number=11, - ) - - -class ImportFeatureValuesResponse(proto.Message): - r"""Response message for - [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. - - Attributes: - imported_entity_count (int): - Number of entities that have been imported by - the operation. - imported_feature_value_count (int): - Number of Feature values that have been - imported by the operation. - invalid_row_count (int): - The number of rows in input source that weren't imported due - to either - - - Not having any featureValues. - - Having a null entityId. - - Having a null timestamp. - - Not being parsable (applicable for CSV sources). - """ - - imported_entity_count = proto.Field( - proto.INT64, - number=1, - ) - imported_feature_value_count = proto.Field( - proto.INT64, - number=2, - ) - invalid_row_count = proto.Field( - proto.INT64, - number=6, - ) - - -class BatchReadFeatureValuesRequest(proto.Message): - r"""Request message for - [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. - (- Next Id: 6 -) - - Attributes: - csv_read_instances (google.cloud.aiplatform_v1beta1.types.CsvSource): - Each read instance consists of exactly one read timestamp - and one or more entity IDs identifying entities of the - corresponding EntityTypes whose Features are requested. - - Each output instance contains Feature values of requested - entities concatenated together as of the read time. - - An example read instance may be - ``foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z``. - - An example output instance may be - ``foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z, foo_entity_feature1_value, bar_entity_feature2_value``. - - Timestamp in each read instance must be millisecond-aligned. - - ``csv_read_instances`` are read instances stored in a - plain-text CSV file. The header should be: - [ENTITY_TYPE_ID1], [ENTITY_TYPE_ID2], ..., timestamp - - The columns can be in any order. 
- - Values in the timestamp column must use the RFC 3339 format, - e.g. ``2012-07-30T10:43:17.123Z``. - featurestore (str): - Required. The resource name of the Featurestore from which - to query Feature values. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - destination (google.cloud.aiplatform_v1beta1.types.FeatureValueDestination): - Required. Specifies output location and - format. - entity_type_specs (Sequence[google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest.EntityTypeSpec]): - Required. Specifies the EntityTypes whose Features to read - values of, and the read settings. Each EntityType referenced in - [BatchReadFeatureValuesRequest.entity_type_specs] must have - a column specifying entity IDs in that EntityType in - [BatchReadFeatureValuesRequest.request][]. - """ - - class EntityTypeSpec(proto.Message): - r"""Selects Features of an EntityType to read values of and - specifies read settings. - - Attributes: - entity_type_id (str): - Required. ID of the EntityType to select Features. The - EntityType id is the - [entity_type_id][google.cloud.aiplatform.v1beta1.CreateEntityTypeRequest.entity_type_id] - specified during EntityType creation. - feature_selector (google.cloud.aiplatform_v1beta1.types.FeatureSelector): - Required. Selectors choosing which Feature - values to read from the EntityType. - settings (Sequence[google.cloud.aiplatform_v1beta1.types.DestinationFeatureSetting]): - Per-Feature settings for the batch read. - """ - - entity_type_id = proto.Field( - proto.STRING, - number=1, - ) - feature_selector = proto.Field( - proto.MESSAGE, - number=2, - message=gca_feature_selector.FeatureSelector, - ) - settings = proto.RepeatedField( - proto.MESSAGE, - number=3, - message='DestinationFeatureSetting', - ) - - csv_read_instances = proto.Field( - proto.MESSAGE, - number=3, - oneof='read_option', - message=io.CsvSource, - ) - featurestore = proto.Field( - proto.STRING, - number=1, - ) - destination = proto.Field( - proto.MESSAGE, - number=4, - message='FeatureValueDestination', - ) - entity_type_specs = proto.RepeatedField( - proto.MESSAGE, - number=7, - message=EntityTypeSpec, - ) - - -class ExportFeatureValuesRequest(proto.Message): - r"""Request message for - [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. - - Attributes: - snapshot_export (google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest.SnapshotExport): - Exports Feature values of all entities of the - EntityType as of a snapshot time. - entity_type (str): - Required. The resource name of the EntityType from which to - export Feature values. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - destination (google.cloud.aiplatform_v1beta1.types.FeatureValueDestination): - Required. Specifies destination location and - format. - feature_selector (google.cloud.aiplatform_v1beta1.types.FeatureSelector): - Required. Selects Features to export values - of. - settings (Sequence[google.cloud.aiplatform_v1beta1.types.DestinationFeatureSetting]): - Per-Feature export settings. - """ - - class SnapshotExport(proto.Message): - r"""Describes exporting Feature values as of the snapshot - timestamp. - - Attributes: - snapshot_time (google.protobuf.timestamp_pb2.Timestamp): - Exports Feature values as of this timestamp. - If not set, values are retrieved as of now. - Timestamp, if present, must not have higher than - millisecond precision.
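A minimal sketch of a snapshot export using the messages documented above, assuming the v1beta1 types package; the BigQuery table and resource names are illustrative::

    from google.cloud.aiplatform_v1beta1 import types

    # Sketch only: leaving snapshot_time unset exports values as of now,
    # per the docstring; all names below are placeholders.
    request = types.ExportFeatureValuesRequest(
        entity_type=(
            "projects/my-project/locations/us-central1/featurestores/"
            "my_featurestore/entityTypes/user"
        ),
        snapshot_export=types.ExportFeatureValuesRequest.SnapshotExport(),
        destination=types.FeatureValueDestination(
            bigquery_destination=types.BigQueryDestination(
                output_uri="bq://my-project.my_dataset.user_features",
            ),
        ),
        feature_selector=types.FeatureSelector(
            id_matcher=types.IdMatcher(ids=["*"]),
        ),
    )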
- """ - - snapshot_time = proto.Field( - proto.MESSAGE, - number=1, - message=timestamp_pb2.Timestamp, - ) - - snapshot_export = proto.Field( - proto.MESSAGE, - number=3, - oneof='mode', - message=SnapshotExport, - ) - entity_type = proto.Field( - proto.STRING, - number=1, - ) - destination = proto.Field( - proto.MESSAGE, - number=4, - message='FeatureValueDestination', - ) - feature_selector = proto.Field( - proto.MESSAGE, - number=5, - message=gca_feature_selector.FeatureSelector, - ) - settings = proto.RepeatedField( - proto.MESSAGE, - number=6, - message='DestinationFeatureSetting', - ) - - -class DestinationFeatureSetting(proto.Message): - r""" - Attributes: - feature_id (str): - Required. The ID of the Feature to apply the - setting to. - destination_field (str): - Specify the field name in the export - destination. If not specified, Feature ID is - used. - """ - - feature_id = proto.Field( - proto.STRING, - number=1, - ) - destination_field = proto.Field( - proto.STRING, - number=2, - ) - - -class FeatureValueDestination(proto.Message): - r"""A destination location for Feature values and format. - Attributes: - bigquery_destination (google.cloud.aiplatform_v1beta1.types.BigQueryDestination): - Output in BigQuery format. - [BigQueryDestination.output_uri][google.cloud.aiplatform.v1beta1.BigQueryDestination.output_uri] - in - [FeatureValueDestination.bigquery_destination][google.cloud.aiplatform.v1beta1.FeatureValueDestination.bigquery_destination] - must refer to a table. - tfrecord_destination (google.cloud.aiplatform_v1beta1.types.TFRecordDestination): - Output in TFRecord format. - - Below are the mapping from Feature value type in - Featurestore to Feature value type in TFRecord: - - :: - - Value type in Featurestore | Value type in TFRecord - DOUBLE, DOUBLE_ARRAY | FLOAT_LIST - INT64, INT64_ARRAY | INT64_LIST - STRING, STRING_ARRAY, BYTES | BYTES_LIST - true -> byte_string("true"), false -> byte_string("false") - BOOL, BOOL_ARRAY (true, false) | BYTES_LIST - csv_destination (google.cloud.aiplatform_v1beta1.types.CsvDestination): - Output in CSV format. Array Feature value - types are not allowed in CSV format. - """ - - bigquery_destination = proto.Field( - proto.MESSAGE, - number=1, - oneof='destination', - message=io.BigQueryDestination, - ) - tfrecord_destination = proto.Field( - proto.MESSAGE, - number=2, - oneof='destination', - message=io.TFRecordDestination, - ) - csv_destination = proto.Field( - proto.MESSAGE, - number=3, - oneof='destination', - message=io.CsvDestination, - ) - - -class ExportFeatureValuesResponse(proto.Message): - r"""Response message for - [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. - """ - - -class BatchReadFeatureValuesResponse(proto.Message): - r"""Response message for - [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. - """ - - -class CreateEntityTypeRequest(proto.Message): - r"""Request message for - [FeaturestoreService.CreateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateEntityType]. - - Attributes: - parent (str): - Required. The resource name of the Featurestore to create - EntityTypes. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - entity_type (google.cloud.aiplatform_v1beta1.types.EntityType): - The EntityType to create. - entity_type_id (str): - Required. 
The ID to use for the EntityType, which will - become the final component of the EntityType's resource - name. - - This value may be up to 60 characters, and valid characters - are ``[a-z0-9_]``. The first character cannot be a number. - - The value must be unique within a featurestore. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - entity_type = proto.Field( - proto.MESSAGE, - number=2, - message=gca_entity_type.EntityType, - ) - entity_type_id = proto.Field( - proto.STRING, - number=3, - ) - - -class GetEntityTypeRequest(proto.Message): - r"""Request message for - [FeaturestoreService.GetEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetEntityType]. - - Attributes: - name (str): - Required. The name of the EntityType resource. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListEntityTypesRequest(proto.Message): - r"""Request message for - [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. - - Attributes: - parent (str): - Required. The resource name of the Featurestore to list - EntityTypes. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - filter (str): - Lists the EntityTypes that match the filter expression. The - following filters are supported: - - - ``create_time``: Supports =, !=, <, >, >=, and <= - comparisons. Values must be in RFC 3339 format. - - ``update_time``: Supports =, !=, <, >, >=, and <= - comparisons. Values must be in RFC 3339 format. - - ``labels``: Supports key-value equality as well as key - presence. - - Examples: - - - ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"`` - --> EntityTypes created or updated after - 2020-01-31T15:30:00.000000Z. - - ``labels.active = yes AND labels.env = prod`` --> - EntityTypes having both (active: yes) and (env: prod) - labels. - - ``labels.env: *`` --> Any EntityType which has a label - with 'env' as the key. - page_size (int): - The maximum number of EntityTypes to return. - The service may return fewer than this value. If - unspecified, at most 1000 EntityTypes will be - returned. The maximum value is 1000; any value - greater than 1000 will be coerced to 1000. - page_token (str): - A page token, received from a previous - [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes] - call. Provide this to retrieve the subsequent page. - - When paginating, all other parameters provided to - [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes] - must match the call that provided the page token. - order_by (str): - A comma-separated list of fields to order by, sorted in - ascending order. Use "desc" after a field name for - descending. - - Supported fields: - - - ``entity_type_id`` - - ``create_time`` - - ``update_time`` - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. 
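A brief sketch of the list request documented above, with an illustrative filter and ordering matching the docstring's examples; all resource names are placeholders::

    from google.cloud.aiplatform_v1beta1 import types

    # Sketch only: parent and filter values are illustrative.
    request = types.ListEntityTypesRequest(
        parent=(
            "projects/my-project/locations/us-central1/"
            "featurestores/my_featurestore"
        ),
        filter="labels.env = prod",
        order_by="create_time desc",
        page_size=100,
    )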
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - order_by = proto.Field( - proto.STRING, - number=5, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=6, - message=field_mask_pb2.FieldMask, - ) - - -class ListEntityTypesResponse(proto.Message): - r"""Response message for - [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. - - Attributes: - entity_types (Sequence[google.cloud.aiplatform_v1beta1.types.EntityType]): - The EntityTypes matching the request. - next_page_token (str): - A token, which can be sent as - [ListEntityTypesRequest.page_token][google.cloud.aiplatform.v1beta1.ListEntityTypesRequest.page_token] - to retrieve the next page. If this field is omitted, there - are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - entity_types = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_entity_type.EntityType, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateEntityTypeRequest(proto.Message): - r"""Request message for - [FeaturestoreService.UpdateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateEntityType]. - - Attributes: - entity_type (google.cloud.aiplatform_v1beta1.types.EntityType): - Required. The EntityType's ``name`` field is used to - identify the EntityType to be updated. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Field mask is used to specify the fields to be overwritten - in the EntityType resource by the update. The fields - specified in the update_mask are relative to the resource, - not the full request. A field will be overwritten if it is - in the mask. If the user does not provide a mask then only - the non-empty fields present in the request will be - overwritten. Set the update_mask to ``*`` to override all - fields. - - Updatable fields: - - - ``description`` - - ``labels`` - - ``monitoring_config.snapshot_analysis.disabled`` - - ``monitoring_config.snapshot_analysis.monitoring_interval`` - """ - - entity_type = proto.Field( - proto.MESSAGE, - number=1, - message=gca_entity_type.EntityType, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class DeleteEntityTypeRequest(proto.Message): - r"""Request message for [FeaturestoreService.DeleteEntityTypes][]. - Attributes: - name (str): - Required. The name of the EntityType to be deleted. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - force (bool): - If set to true, any Features for this - EntityType will also be deleted. (Otherwise, the - request will only work if the EntityType has no - Features.) - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - force = proto.Field( - proto.BOOL, - number=2, - ) - - -class CreateFeatureRequest(proto.Message): - r"""Request message for - [FeaturestoreService.CreateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeature]. - - Attributes: - parent (str): - Required. The resource name of the EntityType to create a - Feature. 
Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - feature (google.cloud.aiplatform_v1beta1.types.Feature): - Required. The Feature to create. - feature_id (str): - Required. The ID to use for the Feature, which will become - the final component of the Feature's resource name. - - This value may be up to 60 characters, and valid characters - are ``[a-z0-9_]``. The first character cannot be a number. - - The value must be unique within an EntityType. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - feature = proto.Field( - proto.MESSAGE, - number=2, - message=gca_feature.Feature, - ) - feature_id = proto.Field( - proto.STRING, - number=3, - ) - - -class BatchCreateFeaturesRequest(proto.Message): - r"""Request message for - [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures]. - - Attributes: - parent (str): - Required. The resource name of the EntityType to create the - batch of Features under. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - requests (Sequence[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest]): - Required. The request message specifying the Features to - create. All Features must be created under the same parent - EntityType. The ``parent`` field in each child request - message can be omitted. If ``parent`` is set in a child - request, then the value must match the ``parent`` value in - this request message. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - requests = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='CreateFeatureRequest', - ) - - -class BatchCreateFeaturesResponse(proto.Message): - r"""Response message for - [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures]. - - Attributes: - features (Sequence[google.cloud.aiplatform_v1beta1.types.Feature]): - The Features created. - """ - - features = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_feature.Feature, - ) - - -class GetFeatureRequest(proto.Message): - r"""Request message for - [FeaturestoreService.GetFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeature]. - - Attributes: - name (str): - Required. The name of the Feature resource. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListFeaturesRequest(proto.Message): - r"""Request message for - [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]. - - Attributes: - parent (str): - Required. The resource name of the EntityType to list - Features. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - filter (str): - Lists the Features that match the filter expression. The - following filters are supported: - - - ``value_type``: Supports = and != comparisons. - - ``create_time``: Supports =, !=, <, >, >=, and <= - comparisons. Values must be in RFC 3339 format. - - ``update_time``: Supports =, !=, <, >, >=, and <= - comparisons. Values must be in RFC 3339 format. - - ``labels``: Supports key-value equality as well as key - presence. - - Examples: - - - ``value_type = DOUBLE`` --> Features whose type is - DOUBLE.
- - ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"`` - --> Features created or updated after - 2020-01-31T15:30:00.000000Z. - - ``labels.active = yes AND labels.env = prod`` --> - Features having both (active: yes) and (env: prod) - labels. - - ``labels.env: *`` --> Any Feature which has a label with - 'env' as the key. - page_size (int): - The maximum number of Features to return. The - service may return fewer than this value. If - unspecified, at most 1000 Features will be - returned. The maximum value is 1000; any value - greater than 1000 will be coerced to 1000. - page_token (str): - A page token, received from a previous - [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures] - call. Provide this to retrieve the subsequent page. - - When paginating, all other parameters provided to - [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures] - must match the call that provided the page token. - order_by (str): - A comma-separated list of fields to order by, sorted in - ascending order. Use "desc" after a field name for - descending. Supported fields: - - - ``feature_id`` - - ``value_type`` - - ``create_time`` - - ``update_time`` - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - latest_stats_count (int): - If set, return the most recent - [ListFeaturesRequest.latest_stats_count][google.cloud.aiplatform.v1beta1.ListFeaturesRequest.latest_stats_count] - of stats for each Feature in response. Valid value is [0, - 10]. If the number of existing stats is less than - [ListFeaturesRequest.latest_stats_count][google.cloud.aiplatform.v1beta1.ListFeaturesRequest.latest_stats_count], - all existing stats are returned. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - order_by = proto.Field( - proto.STRING, - number=5, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=6, - message=field_mask_pb2.FieldMask, - ) - latest_stats_count = proto.Field( - proto.INT32, - number=7, - ) - - -class ListFeaturesResponse(proto.Message): - r"""Response message for - [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]. - - Attributes: - features (Sequence[google.cloud.aiplatform_v1beta1.types.Feature]): - The Features matching the request. - next_page_token (str): - A token, which can be sent as - [ListFeaturesRequest.page_token][google.cloud.aiplatform.v1beta1.ListFeaturesRequest.page_token] - to retrieve the next page. If this field is omitted, there - are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - features = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_feature.Feature, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class SearchFeaturesRequest(proto.Message): - r"""Request message for - [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. - - Attributes: - location (str): - Required. The resource name of the Location to search - Features. Format: - ``projects/{project}/locations/{location}`` - query (str): - Query string that is a conjunction of field-restricted - queries and/or field-restricted filters.
Field-restricted - queries and filters can be combined using ``AND`` to form a - conjunction. - - A field query is in the form FIELD:QUERY. This implicitly - checks if QUERY exists as a substring within the Feature's - FIELD. The QUERY and the FIELD are converted to a sequence - of words (i.e. tokens) for comparison. This is done by: - - - Removing leading/trailing whitespace and tokenizing the - search value. Characters that are not one of alphanumeric - [a-zA-Z0-9], underscore [_], or asterisk [*] are treated - as delimiters for tokens. (*) is treated as a wildcard - that matches characters within a token. - - Ignoring case. - - Prepending an asterisk to the first and appending an - asterisk to the last token in QUERY. - - A QUERY must be either a singular token or a phrase. A - phrase is one or multiple words enclosed in double quotation - marks ("). With phrases, the order of the words is - important. Words in the phrase must match in order and - consecutively. - - Supported FIELDs for field-restricted queries: - - - ``feature_id`` - - ``description`` - - ``entity_type_id`` - - Examples: - - - ``feature_id: foo`` --> Matches a Feature with ID - containing the substring ``foo`` (e.g. ``foo``, - ``foofeature``, ``barfoo``). - - ``feature_id: foo*feature`` --> Matches a Feature with ID - containing the substring ``foo*feature`` (e.g. - ``foobarfeature``). - - ``feature_id: foo AND description: bar`` --> Matches a - Feature with ID containing the substring ``foo`` and - description containing the substring ``bar``. - - Besides field queries, the following exact-match filters are - supported. The exact-match filters do not support wildcards. - Unlike field-restricted queries, exact-match filters are - case-sensitive. - - - ``feature_id``: Supports = comparisons. - - ``description``: Supports = comparisons. Multi-token - filters should be enclosed in quotes. - - ``entity_type_id``: Supports = comparisons. - - ``value_type``: Supports = and != comparisons. - - ``labels``: Supports key-value equality as well as key - presence. - - ``featurestore_id``: Supports = comparisons. - - Examples: - - - ``description = "foo bar"`` --> Any Feature with - description exactly equal to ``foo bar``. - - ``value_type = DOUBLE`` --> Features whose type is - DOUBLE. - - ``labels.active = yes AND labels.env = prod`` --> - Features having both (active: yes) and (env: prod) - labels. - - ``labels.env: *`` --> Any Feature which has a label with - ``env`` as the key. - page_size (int): - The maximum number of Features to return. The - service may return fewer than this value. If - unspecified, at most 100 Features will be - returned. The maximum value is 100; any value - greater than 100 will be coerced to 100. - page_token (str): - A page token, received from a previous - [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures] - call. Provide this to retrieve the subsequent page. - - When paginating, all other parameters provided to - [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures], - except ``page_size``, must match the call that provided the - page token.
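A minimal sketch of a SearchFeatures call using the query grammar above, combining a field-restricted query with an exact-match filter; the project and location names are hypothetical:

    from google.cloud import aiplatform_v1beta1

    request = aiplatform_v1beta1.SearchFeaturesRequest(
        location="projects/my-project/locations/us-central1",
        # Substring match on feature_id, narrowed by an exact-match filter.
        query="feature_id: foo AND value_type = DOUBLE",
        page_size=50,
    )
    # client = aiplatform_v1beta1.FeaturestoreServiceClient()
    # for feature in client.search_features(request=request):
    #     print(feature.name)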
- """ - - location = proto.Field( - proto.STRING, - number=1, - ) - query = proto.Field( - proto.STRING, - number=3, - ) - page_size = proto.Field( - proto.INT32, - number=4, - ) - page_token = proto.Field( - proto.STRING, - number=5, - ) - - -class SearchFeaturesResponse(proto.Message): - r"""Response message for - [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. - - Attributes: - features (Sequence[google.cloud.aiplatform_v1beta1.types.Feature]): - The Features matching the request. - - Fields returned: - - - ``name`` - - ``description`` - - ``labels`` - - ``create_time`` - - ``update_time`` - next_page_token (str): - A token, which can be sent as - [SearchFeaturesRequest.page_token][google.cloud.aiplatform.v1beta1.SearchFeaturesRequest.page_token] - to retrieve the next page. If this field is omitted, there - are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - features = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_feature.Feature, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateFeatureRequest(proto.Message): - r"""Request message for - [FeaturestoreService.UpdateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeature]. - - Attributes: - feature (google.cloud.aiplatform_v1beta1.types.Feature): - Required. The Feature's ``name`` field is used to identify - the Feature to be updated. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Field mask is used to specify the fields to be overwritten - in the Features resource by the update. The fields specified - in the update_mask are relative to the resource, not the - full request. A field will be overwritten if it is in the - mask. If the user does not provide a mask then only the - non-empty fields present in the request will be overwritten. - Set the update_mask to ``*`` to override all fields. - - Updatable fields: - - - ``description`` - - ``labels`` - - ``monitoring_config.snapshot_analysis.disabled`` - - ``monitoring_config.snapshot_analysis.monitoring_interval`` - """ - - feature = proto.Field( - proto.MESSAGE, - number=1, - message=gca_feature.Feature, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class DeleteFeatureRequest(proto.Message): - r"""Request message for - [FeaturestoreService.DeleteFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeature]. - - Attributes: - name (str): - Required. The name of the Features to be deleted. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateFeaturestoreOperationMetadata(proto.Message): - r"""Details of operations that perform create Featurestore. - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for Featurestore. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class UpdateFeaturestoreOperationMetadata(proto.Message): - r"""Details of operations that perform update Featurestore. - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for Featurestore. 
- """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class ImportFeatureValuesOperationMetadata(proto.Message): - r"""Details of operations that perform import feature values. - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for Featurestore import - feature values. - imported_entity_count (int): - Number of entities that have been imported by - the operation. - imported_feature_value_count (int): - Number of feature values that have been - imported by the operation. - invalid_row_count (int): - The number of rows in input source that weren't imported due - to either - - - Not having any featureValues. - - Having a null entityId. - - Having a null timestamp. - - Not being parsable (applicable for CSV sources). - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - imported_entity_count = proto.Field( - proto.INT64, - number=2, - ) - imported_feature_value_count = proto.Field( - proto.INT64, - number=3, - ) - invalid_row_count = proto.Field( - proto.INT64, - number=6, - ) - - -class ExportFeatureValuesOperationMetadata(proto.Message): - r"""Details of operations that exports Features values. - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for Featurestore export - Feature values. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class BatchReadFeatureValuesOperationMetadata(proto.Message): - r"""Details of operations that batch reads Feature values. - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for Featurestore batch - read Features values. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class CreateEntityTypeOperationMetadata(proto.Message): - r"""Details of operations that perform create EntityType. - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for EntityType. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class CreateFeatureOperationMetadata(proto.Message): - r"""Details of operations that perform create Feature. - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for Feature. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class BatchCreateFeaturesOperationMetadata(proto.Message): - r"""Details of operations that perform batch create Features. - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for Feature. 
- """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py deleted file mode 100644 index 138d0c58e4..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py +++ /dev/null @@ -1,182 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import custom_job -from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1beta1.types import job_state -from google.cloud.aiplatform_v1beta1.types import study -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'HyperparameterTuningJob', - }, -) - - -class HyperparameterTuningJob(proto.Message): - r"""Represents a HyperparameterTuningJob. A - HyperparameterTuningJob has a Study specification and multiple - CustomJobs with identical CustomJob specification. - - Attributes: - name (str): - Output only. Resource name of the - HyperparameterTuningJob. - display_name (str): - Required. The display name of the - HyperparameterTuningJob. The name can be up to - 128 characters long and can be consist of any - UTF-8 characters. - study_spec (google.cloud.aiplatform_v1beta1.types.StudySpec): - Required. Study configuration of the - HyperparameterTuningJob. - max_trial_count (int): - Required. The desired total number of Trials. - parallel_trial_count (int): - Required. The desired number of Trials to run - in parallel. - max_failed_trial_count (int): - The number of failed Trials that need to be - seen before failing the HyperparameterTuningJob. - If set to 0, Vertex AI decides how many Trials - must fail before the whole job fails. - trial_job_spec (google.cloud.aiplatform_v1beta1.types.CustomJobSpec): - Required. The spec of a trial job. The same - spec applies to the CustomJobs created in all - the trials. - trials (Sequence[google.cloud.aiplatform_v1beta1.types.Trial]): - Output only. Trials of the - HyperparameterTuningJob. - state (google.cloud.aiplatform_v1beta1.types.JobState): - Output only. The detailed state of the job. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the - HyperparameterTuningJob was created. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the HyperparameterTuningJob for the - first time entered the ``JOB_STATE_RUNNING`` state. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. 
Time when the HyperparameterTuningJob entered - any of the following states: ``JOB_STATE_SUCCEEDED``, - ``JOB_STATE_FAILED``, ``JOB_STATE_CANCELLED``. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the - HyperparameterTuningJob was most recently - updated. - error (google.rpc.status_pb2.Status): - Output only. Only populated when job's state is - JOB_STATE_FAILED or JOB_STATE_CANCELLED. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob.LabelsEntry]): - The labels with user-defined metadata to - organize HyperparameterTuningJobs. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): - Customer-managed encryption key options for a - HyperparameterTuningJob. If this is set, then - all resources created by the - HyperparameterTuningJob will be encrypted with - the provided encryption key. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - study_spec = proto.Field( - proto.MESSAGE, - number=4, - message=study.StudySpec, - ) - max_trial_count = proto.Field( - proto.INT32, - number=5, - ) - parallel_trial_count = proto.Field( - proto.INT32, - number=6, - ) - max_failed_trial_count = proto.Field( - proto.INT32, - number=7, - ) - trial_job_spec = proto.Field( - proto.MESSAGE, - number=8, - message=custom_job.CustomJobSpec, - ) - trials = proto.RepeatedField( - proto.MESSAGE, - number=9, - message=study.Trial, - ) - state = proto.Field( - proto.ENUM, - number=10, - enum=job_state.JobState, - ) - create_time = proto.Field( - proto.MESSAGE, - number=11, - message=timestamp_pb2.Timestamp, - ) - start_time = proto.Field( - proto.MESSAGE, - number=12, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=13, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=14, - message=timestamp_pb2.Timestamp, - ) - error = proto.Field( - proto.MESSAGE, - number=15, - message=status_pb2.Status, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=16, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=17, - message=gca_encryption_spec.EncryptionSpec, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index.py deleted file mode 100644 index fef443aa5e..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index.py +++ /dev/null @@ -1,142 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
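A sketch of assembling the HyperparameterTuningJob message deleted above; the study metric, parameter, and trial counts are hypothetical, and ``trial_job_spec`` is left as a bare ``CustomJobSpec`` (the service additionally requires worker pool specs):

    from google.cloud import aiplatform_v1beta1

    job = aiplatform_v1beta1.HyperparameterTuningJob(
        display_name="my-hp-tuning-job",
        study_spec=aiplatform_v1beta1.StudySpec(
            metrics=[
                aiplatform_v1beta1.StudySpec.MetricSpec(
                    metric_id="accuracy",
                    goal=aiplatform_v1beta1.StudySpec.MetricSpec.GoalType.MAXIMIZE,
                )
            ],
            parameters=[
                aiplatform_v1beta1.StudySpec.ParameterSpec(
                    parameter_id="learning_rate",
                    double_value_spec=aiplatform_v1beta1.StudySpec.ParameterSpec.DoubleValueSpec(
                        min_value=1e-4, max_value=1e-1
                    ),
                    scale_type=aiplatform_v1beta1.StudySpec.ParameterSpec.ScaleType.UNIT_LOG_SCALE,
                )
            ],
        ),
        max_trial_count=20,
        parallel_trial_count=4,
        trial_job_spec=aiplatform_v1beta1.CustomJobSpec(),  # worker_pool_specs elided
    )
    # client = aiplatform_v1beta1.JobServiceClient()
    # client.create_hyperparameter_tuning_job(
    #     parent="projects/my-project/locations/us-central1",
    #     hyperparameter_tuning_job=job,
    # )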
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import deployed_index_ref -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'Index', - }, -) - - -class Index(proto.Message): - r"""A representation of a collection of database items organized - in a way that allows for approximate nearest neighbor (ANN) - search algorithms. - - Attributes: - name (str): - Output only. The resource name of the Index. - display_name (str): - Required. The display name of the Index. - The name can be up to 128 characters long and - can consist of any UTF-8 characters. - description (str): - The description of the Index. - metadata_schema_uri (str): - Immutable. Points to a YAML file stored on Google Cloud - Storage describing additional information about the Index, - that is specific to it. Unset if the Index does not have any - additional information. The schema is defined as an OpenAPI - 3.0.2 `Schema - Object `__. - Note: The URI given on output will be immutable and probably - different from the one given on input, including the URI - scheme. The output URI will point to a location where the - user has read access only. - metadata (google.protobuf.struct_pb2.Value): - Additional information about the Index; the schema of the - metadata can be found in - [metadata_schema][google.cloud.aiplatform.v1beta1.Index.metadata_schema_uri]. - deployed_indexes (Sequence[google.cloud.aiplatform_v1beta1.types.DeployedIndexRef]): - Output only. The pointers to DeployedIndexes - created from this Index. An Index can only be - deleted if all of its DeployedIndexes have first - been undeployed. - etag (str): - Used to perform consistent read-modify-write - updates. If not set, a blind "overwrite" update - happens. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Index.LabelsEntry]): - The labels with user-defined metadata to - organize your Indexes. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Index was - created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Index was most recently - updated. This also includes any update to the contents of - the Index. Note that Operations working on this Index may - have their - [Operations.metadata.generic_metadata.update_time] - [google.cloud.aiplatform.v1beta1.GenericOperationMetadata.update_time] - a little after the value of this timestamp, yet that does - not mean their results are not already reflected in the - Index. Result of any successfully completed Operation on the - Index is reflected in it.
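A sketch of creating an Index with the fields documented above; the metadata payload depends on the schema referenced by ``metadata_schema_uri``, so the dict below is illustrative only, as are the resource names:

    from google.cloud import aiplatform_v1beta1

    index = aiplatform_v1beta1.Index(
        display_name="my-ann-index",
        description="Embeddings for approximate nearest neighbor search.",
        # Schema-dependent payload; this key is illustrative, not documented here.
        metadata={"contentsDeltaUri": "gs://my-bucket/embeddings/"},
    )
    # client = aiplatform_v1beta1.IndexServiceClient()
    # op = client.create_index(
    #     parent="projects/my-project/locations/us-central1", index=index
    # )
    # created = op.result()  # long-running; returns the created Index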
- """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - description = proto.Field( - proto.STRING, - number=3, - ) - metadata_schema_uri = proto.Field( - proto.STRING, - number=4, - ) - metadata = proto.Field( - proto.MESSAGE, - number=6, - message=struct_pb2.Value, - ) - deployed_indexes = proto.RepeatedField( - proto.MESSAGE, - number=7, - message=deployed_index_ref.DeployedIndexRef, - ) - etag = proto.Field( - proto.STRING, - number=8, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=9, - ) - create_time = proto.Field( - proto.MESSAGE, - number=10, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=11, - message=timestamp_pb2.Timestamp, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint.py deleted file mode 100644 index d496901da2..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint.py +++ /dev/null @@ -1,308 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import machine_resources -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'IndexEndpoint', - 'DeployedIndex', - 'DeployedIndexAuthConfig', - 'IndexPrivateEndpoints', - }, -) - - -class IndexEndpoint(proto.Message): - r"""Indexes are deployed into it. An IndexEndpoint can have - multiple DeployedIndexes. - - Attributes: - name (str): - Output only. The resource name of the - IndexEndpoint. - display_name (str): - Required. The display name of the - IndexEndpoint. The name can be up to 128 - characters long and can consist of any UTF-8 - characters. - description (str): - The description of the IndexEndpoint. - deployed_indexes (Sequence[google.cloud.aiplatform_v1beta1.types.DeployedIndex]): - Output only. The indexes deployed in this - endpoint. - etag (str): - Used to perform consistent read-modify-write - updates. If not set, a blind "overwrite" update - happens. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.IndexEndpoint.LabelsEntry]): - The labels with user-defined metadata to - organize your IndexEndpoints. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - IndexEndpoint was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - IndexEndpoint was last updated. 
This timestamp - is not updated when the endpoint's - DeployedIndexes are updated, e.g. due to updates - of the original Indexes they are the deployments - of. - network (str): - Required. Immutable. The full name of the Google Compute - Engine - `network `__ - to which the IndexEndpoint should be peered. - - Private services access must already be configured for the - network. If left unspecified, the Endpoint is not peered - with any network. - - `Format `__: - projects/{project}/global/networks/{network}. Where - {project} is a project number, as in '12345', and {network} - is the network name. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - description = proto.Field( - proto.STRING, - number=3, - ) - deployed_indexes = proto.RepeatedField( - proto.MESSAGE, - number=4, - message='DeployedIndex', - ) - etag = proto.Field( - proto.STRING, - number=5, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=6, - ) - create_time = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=8, - message=timestamp_pb2.Timestamp, - ) - network = proto.Field( - proto.STRING, - number=9, - ) - - -class DeployedIndex(proto.Message): - r"""A deployment of an Index. IndexEndpoints contain one or more - DeployedIndexes. - - Attributes: - id (str): - Required. The user-specified ID of the - DeployedIndex. The ID can be up to 128 - characters long and must start with a letter and - only contain letters, numbers, and underscores. - The ID must be unique within the project it is - created in. - index (str): - Required. The name of the Index this is the - deployment of. We may refer to this Index as the - DeployedIndex's "original" Index. - display_name (str): - The display name of the DeployedIndex. If not provided upon - creation, the Index's display_name is used. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when the DeployedIndex - was created. - private_endpoints (google.cloud.aiplatform_v1beta1.types.IndexPrivateEndpoints): - Output only. Provides paths for users to send requests - directly to the deployed index services running on Cloud via - private services access. This field is populated if - [network][google.cloud.aiplatform.v1beta1.IndexEndpoint.network] - is configured. - index_sync_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. The DeployedIndex may depend on various data on - its original Index. Additionally, when certain changes to the - original Index are being done (e.g. when what the Index - contains is being changed) the DeployedIndex may be - asynchronously updated in the background to reflect these - changes. If this timestamp's value is at least the - [Index.update_time][google.cloud.aiplatform.v1beta1.Index.update_time] - of the original Index, it means that this DeployedIndex and - the original Index are in sync. If this timestamp is older, - then to see which updates this DeployedIndex already - contains (and which it does not), one must - [list][Operations.ListOperations] [Operations][Operation] - [working][Operation.name] on the original Index. Only the - successfully completed Operations with - [Operations.metadata.generic_metadata.update_time] - [google.cloud.aiplatform.v1beta1.GenericOperationMetadata.update_time] - equal to or before this sync time are contained in this - DeployedIndex.
- automatic_resources (google.cloud.aiplatform_v1beta1.types.AutomaticResources): - Optional. A description of resources that the DeployedIndex - uses, which to a large degree are decided by Vertex AI, and - optionally allows only a modest additional configuration. If - min_replica_count is not set, the default value is 1. If - max_replica_count is not set, the default value is - min_replica_count. The max allowed replica count is 1000. - - The user is billed for the resources (at least their minimal - amount) even if the DeployedIndex receives no traffic. - enable_access_logging (bool): - Optional. If true, the private endpoint's access - logs are sent to Stackdriver Logging. - These logs are like standard server access logs, - containing information like timestamp and - latency for each MatchRequest. - Note that Stackdriver logs may incur a cost, - especially if the deployed index receives a high - rate of queries per second (QPS). Estimate your - costs before enabling this option. - deployed_index_auth_config (google.cloud.aiplatform_v1beta1.types.DeployedIndexAuthConfig): - Optional. If set, authentication is - enabled for the private endpoint. - """ - - id = proto.Field( - proto.STRING, - number=1, - ) - index = proto.Field( - proto.STRING, - number=2, - ) - display_name = proto.Field( - proto.STRING, - number=3, - ) - create_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - private_endpoints = proto.Field( - proto.MESSAGE, - number=5, - message='IndexPrivateEndpoints', - ) - index_sync_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - automatic_resources = proto.Field( - proto.MESSAGE, - number=7, - message=machine_resources.AutomaticResources, - ) - enable_access_logging = proto.Field( - proto.BOOL, - number=8, - ) - deployed_index_auth_config = proto.Field( - proto.MESSAGE, - number=9, - message='DeployedIndexAuthConfig', - ) - - -class DeployedIndexAuthConfig(proto.Message): - r"""Used to set up the auth on the DeployedIndex's private - endpoint. - - Attributes: - auth_provider (google.cloud.aiplatform_v1beta1.types.DeployedIndexAuthConfig.AuthProvider): - Defines the authentication provider that the - DeployedIndex uses. - """ - - class AuthProvider(proto.Message): - r"""Configuration for an authentication provider, including support for - `JSON Web Token - (JWT) `__. - - Attributes: - audiences (Sequence[str]): - The list of JWT - `audiences `__ - that are allowed access. A JWT containing any of these - audiences will be accepted. - allowed_issuers (Sequence[str]): - A list of allowed JWT issuers. Each entry must be a valid - Google service account, in the following format: - - ``service-account-name@project-id.iam.gserviceaccount.com`` - """ - - audiences = proto.RepeatedField( - proto.STRING, - number=1, - ) - allowed_issuers = proto.RepeatedField( - proto.STRING, - number=2, - ) - - auth_provider = proto.Field( - proto.MESSAGE, - number=1, - message=AuthProvider, - ) - - -class IndexPrivateEndpoints(proto.Message): - r"""IndexPrivateEndpoints proto is used to provide paths for - users to send requests via private services access. - - Attributes: - match_grpc_address (str): - Output only. The IP address used to send - match gRPC requests.
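A sketch combining the DeployedIndex fields above, with hypothetical resource names; ``allowed_issuers`` follows the service-account format documented above:

    from google.cloud import aiplatform_v1beta1

    deployed_index = aiplatform_v1beta1.DeployedIndex(
        id="my_deployment",
        index="projects/my-project/locations/us-central1/indexes/1234567890",
        automatic_resources=aiplatform_v1beta1.AutomaticResources(
            min_replica_count=2,
            max_replica_count=10,
        ),
        enable_access_logging=True,  # note: Stackdriver logs may incur a cost
        deployed_index_auth_config=aiplatform_v1beta1.DeployedIndexAuthConfig(
            auth_provider=aiplatform_v1beta1.DeployedIndexAuthConfig.AuthProvider(
                audiences=["my-audience"],
                allowed_issuers=["my-svc@my-project.iam.gserviceaccount.com"],
            )
        ),
    )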
- """ - - match_grpc_address = proto.Field( - proto.STRING, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py deleted file mode 100644 index fbf4cd9f7d..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py +++ /dev/null @@ -1,343 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint -from google.cloud.aiplatform_v1beta1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'CreateIndexEndpointRequest', - 'CreateIndexEndpointOperationMetadata', - 'GetIndexEndpointRequest', - 'ListIndexEndpointsRequest', - 'ListIndexEndpointsResponse', - 'UpdateIndexEndpointRequest', - 'DeleteIndexEndpointRequest', - 'DeployIndexRequest', - 'DeployIndexResponse', - 'DeployIndexOperationMetadata', - 'UndeployIndexRequest', - 'UndeployIndexResponse', - 'UndeployIndexOperationMetadata', - }, -) - - -class CreateIndexEndpointRequest(proto.Message): - r"""Request message for - [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - IndexEndpoint in. Format: - ``projects/{project}/locations/{location}`` - index_endpoint (google.cloud.aiplatform_v1beta1.types.IndexEndpoint): - Required. The IndexEndpoint to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - index_endpoint = proto.Field( - proto.MESSAGE, - number=2, - message=gca_index_endpoint.IndexEndpoint, - ) - - -class CreateIndexEndpointOperationMetadata(proto.Message): - r"""Runtime operation information for - [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The operation generic information. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class GetIndexEndpointRequest(proto.Message): - r"""Request message for - [IndexEndpointService.GetIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.GetIndexEndpoint] - - Attributes: - name (str): - Required. The name of the IndexEndpoint resource. 
Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListIndexEndpointsRequest(proto.Message): - r"""Request message for - [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]. - - Attributes: - parent (str): - Required. The resource name of the Location from which to - list the IndexEndpoints. Format: - ``projects/{project}/locations/{location}`` - filter (str): - Optional. An expression for filtering the results of the - request. For field names both snake_case and camelCase are - supported. - - - ``index_endpoint`` supports = and !=. ``index_endpoint`` - represents the IndexEndpoint ID, i.e. the last segment of - the IndexEndpoint's - [resource name][google.cloud.aiplatform.v1beta1.IndexEndpoint.name]. - - ``display_name`` supports =, != and regex() (uses - `re2 `__ - syntax) - - ``labels`` supports general map functions, that is: - ``labels.key=value`` - key:value equality - ``labels.key:*`` or ``labels:key`` - key existence - A key including a space must be quoted: ``labels."a key"``. - - Some examples: - - - ``index_endpoint="1"`` - - ``display_name="myDisplayName"`` - - ``regex(display_name, "^A")`` --> The display name starts - with an A. - - ``labels.myKey="myValue"`` - page_size (int): - Optional. The standard list page size. - page_token (str): - Optional. The standard list page token. Typically obtained - via - [ListIndexEndpointsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListIndexEndpointsResponse.next_page_token] - of the previous - [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Optional. Mask specifying which fields to - read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListIndexEndpointsResponse(proto.Message): - r"""Response message for - [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]. - - Attributes: - index_endpoints (Sequence[google.cloud.aiplatform_v1beta1.types.IndexEndpoint]): - List of IndexEndpoints in the requested page. - next_page_token (str): - A token to retrieve next page of results. Pass to - [ListIndexEndpointsRequest.page_token][google.cloud.aiplatform.v1beta1.ListIndexEndpointsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - index_endpoints = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_index_endpoint.IndexEndpoint, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateIndexEndpointRequest(proto.Message): - r"""Request message for - [IndexEndpointService.UpdateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.UpdateIndexEndpoint]. - - Attributes: - index_endpoint (google.cloud.aiplatform_v1beta1.types.IndexEndpoint): - Required. The IndexEndpoint which replaces - the resource on the server. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the resource. See - [google.protobuf.FieldMask][google.protobuf.FieldMask].
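A sketch of a filtered list call using the expression grammar above; the parent and label values are hypothetical:

    from google.cloud import aiplatform_v1beta1

    request = aiplatform_v1beta1.ListIndexEndpointsRequest(
        parent="projects/my-project/locations/us-central1",
        filter='labels.myKey="myValue"',  # one of the documented example forms
    )
    # client = aiplatform_v1beta1.IndexEndpointServiceClient()
    # for endpoint in client.list_index_endpoints(request=request):
    #     print(endpoint.display_name)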
- """ - - index_endpoint = proto.Field( - proto.MESSAGE, - number=1, - message=gca_index_endpoint.IndexEndpoint, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class DeleteIndexEndpointRequest(proto.Message): - r"""Request message for - [IndexEndpointService.DeleteIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeleteIndexEndpoint]. - - Attributes: - name (str): - Required. The name of the IndexEndpoint resource to be - deleted. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class DeployIndexRequest(proto.Message): - r"""Request message for - [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. - - Attributes: - index_endpoint (str): - Required. The name of the IndexEndpoint resource into which - to deploy an Index. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex): - Required. The DeployedIndex to be created - within the IndexEndpoint. - """ - - index_endpoint = proto.Field( - proto.STRING, - number=1, - ) - deployed_index = proto.Field( - proto.MESSAGE, - number=2, - message=gca_index_endpoint.DeployedIndex, - ) - - -class DeployIndexResponse(proto.Message): - r"""Response message for - [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. - - Attributes: - deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex): - The DeployedIndex that had been deployed in - the IndexEndpoint. - """ - - deployed_index = proto.Field( - proto.MESSAGE, - number=1, - message=gca_index_endpoint.DeployedIndex, - ) - - -class DeployIndexOperationMetadata(proto.Message): - r"""Runtime operation information for - [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The operation generic information. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class UndeployIndexRequest(proto.Message): - r"""Request message for - [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. - - Attributes: - index_endpoint (str): - Required. The name of the IndexEndpoint resource from which - to undeploy an Index. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - deployed_index_id (str): - Required. The ID of the DeployedIndex to be - undeployed from the IndexEndpoint. - """ - - index_endpoint = proto.Field( - proto.STRING, - number=1, - ) - deployed_index_id = proto.Field( - proto.STRING, - number=2, - ) - - -class UndeployIndexResponse(proto.Message): - r"""Response message for - [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. - """ - - -class UndeployIndexOperationMetadata(proto.Message): - r"""Runtime operation information for - [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The operation generic information. 
- """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_service.py deleted file mode 100644 index 01bca02406..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_service.py +++ /dev/null @@ -1,354 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import index as gca_index -from google.cloud.aiplatform_v1beta1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'CreateIndexRequest', - 'CreateIndexOperationMetadata', - 'GetIndexRequest', - 'ListIndexesRequest', - 'ListIndexesResponse', - 'UpdateIndexRequest', - 'UpdateIndexOperationMetadata', - 'DeleteIndexRequest', - 'NearestNeighborSearchOperationMetadata', - }, -) - - -class CreateIndexRequest(proto.Message): - r"""Request message for - [IndexService.CreateIndex][google.cloud.aiplatform.v1beta1.IndexService.CreateIndex]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - Index in. Format: - ``projects/{project}/locations/{location}`` - index (google.cloud.aiplatform_v1beta1.types.Index): - Required. The Index to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - index = proto.Field( - proto.MESSAGE, - number=2, - message=gca_index.Index, - ) - - -class CreateIndexOperationMetadata(proto.Message): - r"""Runtime operation information for - [IndexService.CreateIndex][google.cloud.aiplatform.v1beta1.IndexService.CreateIndex]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The operation generic information. - nearest_neighbor_search_operation_metadata (google.cloud.aiplatform_v1beta1.types.NearestNeighborSearchOperationMetadata): - The operation metadata with regard to - Matching Engine Index operation. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - nearest_neighbor_search_operation_metadata = proto.Field( - proto.MESSAGE, - number=2, - message='NearestNeighborSearchOperationMetadata', - ) - - -class GetIndexRequest(proto.Message): - r"""Request message for - [IndexService.GetIndex][google.cloud.aiplatform.v1beta1.IndexService.GetIndex] - - Attributes: - name (str): - Required. The name of the Index resource. 
Format: - ``projects/{project}/locations/{location}/indexes/{index}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListIndexesRequest(proto.Message): - r"""Request message for - [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]. - - Attributes: - parent (str): - Required. The resource name of the Location from which to - list the Indexes. Format: - ``projects/{project}/locations/{location}`` - filter (str): - The standard list filter. - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained via - [ListIndexesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListIndexesResponse.next_page_token] - of the previous - [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListIndexesResponse(proto.Message): - r"""Response message for - [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]. - - Attributes: - indexes (Sequence[google.cloud.aiplatform_v1beta1.types.Index]): - List of indexes in the requested page. - next_page_token (str): - A token to retrieve next page of results. Pass to - [ListIndexesRequest.page_token][google.cloud.aiplatform.v1beta1.ListIndexesRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - indexes = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_index.Index, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateIndexRequest(proto.Message): - r"""Request message for - [IndexService.UpdateIndex][google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex]. - - Attributes: - index (google.cloud.aiplatform_v1beta1.types.Index): - Required. The Index which updates the - resource on the server. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - The update mask applies to the resource. For the - ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - """ - - index = proto.Field( - proto.MESSAGE, - number=1, - message=gca_index.Index, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class UpdateIndexOperationMetadata(proto.Message): - r"""Runtime operation information for - [IndexService.UpdateIndex][google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The operation generic information. - nearest_neighbor_search_operation_metadata (google.cloud.aiplatform_v1beta1.types.NearestNeighborSearchOperationMetadata): - The operation metadata with regard to - Matching Engine Index operation. 
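The ``raw_page`` property above exists for the generated pagers; iterating a pager follows ``next_page_token`` transparently. A sketch (requires application default credentials; the parent is hypothetical):

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.IndexServiceClient()
    pager = client.list_indexes(parent="projects/my-project/locations/us-central1")
    for index in pager:  # each page is fetched lazily via next_page_token
        print(index.name)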
- """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - nearest_neighbor_search_operation_metadata = proto.Field( - proto.MESSAGE, - number=2, - message='NearestNeighborSearchOperationMetadata', - ) - - -class DeleteIndexRequest(proto.Message): - r"""Request message for - [IndexService.DeleteIndex][google.cloud.aiplatform.v1beta1.IndexService.DeleteIndex]. - - Attributes: - name (str): - Required. The name of the Index resource to be deleted. - Format: - ``projects/{project}/locations/{location}/indexes/{index}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class NearestNeighborSearchOperationMetadata(proto.Message): - r"""Runtime operation metadata with regard to Matching Engine - Index. - - Attributes: - content_validation_stats (Sequence[google.cloud.aiplatform_v1beta1.types.NearestNeighborSearchOperationMetadata.ContentValidationStats]): - The validation stats of the content (per file) to be - inserted or updated on the Matching Engine Index resource. - Populated if contentsDeltaUri is provided as part of - [Index.metadata][google.cloud.aiplatform.v1beta1.Index.metadata]. - Please note that, currently for those files that are broken - or has unsupported file format, we will not have the stats - for those files. - """ - - class RecordError(proto.Message): - r""" - Attributes: - error_type (google.cloud.aiplatform_v1beta1.types.NearestNeighborSearchOperationMetadata.RecordError.RecordErrorType): - The error type of this record. - error_message (str): - A human-readable message that is shown to the user to help - them fix the error. Note that this message may change from - time to time, your code should check against error_type as - the source of truth. - source_gcs_uri (str): - Cloud Storage URI pointing to the original - file in user's bucket. - embedding_id (str): - Empty if the embedding id is failed to parse. - raw_record (str): - The original content of this record. - """ - class RecordErrorType(proto.Enum): - r"""""" - ERROR_TYPE_UNSPECIFIED = 0 - EMPTY_LINE = 1 - INVALID_JSON_SYNTAX = 2 - INVALID_CSV_SYNTAX = 3 - INVALID_AVRO_SYNTAX = 4 - INVALID_EMBEDDING_ID = 5 - EMBEDDING_SIZE_MISMATCH = 6 - NAMESPACE_MISSING = 7 - - error_type = proto.Field( - proto.ENUM, - number=1, - enum='NearestNeighborSearchOperationMetadata.RecordError.RecordErrorType', - ) - error_message = proto.Field( - proto.STRING, - number=2, - ) - source_gcs_uri = proto.Field( - proto.STRING, - number=3, - ) - embedding_id = proto.Field( - proto.STRING, - number=4, - ) - raw_record = proto.Field( - proto.STRING, - number=5, - ) - - class ContentValidationStats(proto.Message): - r""" - Attributes: - source_gcs_uri (str): - Cloud Storage URI pointing to the original - file in user's bucket. - valid_record_count (int): - Number of records in this file that were - successfully processed. - invalid_record_count (int): - Number of records in this file we skipped due - to validate errors. - partial_errors (Sequence[google.cloud.aiplatform_v1beta1.types.NearestNeighborSearchOperationMetadata.RecordError]): - The detail information of the partial - failures encountered for those invalid records - that couldn't be parsed. Up to 50 partial errors - will be reported. 
- """ - - source_gcs_uri = proto.Field( - proto.STRING, - number=1, - ) - valid_record_count = proto.Field( - proto.INT64, - number=2, - ) - invalid_record_count = proto.Field( - proto.INT64, - number=3, - ) - partial_errors = proto.RepeatedField( - proto.MESSAGE, - number=4, - message='NearestNeighborSearchOperationMetadata.RecordError', - ) - - content_validation_stats = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=ContentValidationStats, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/io.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/io.py deleted file mode 100644 index c9dc988e79..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/io.py +++ /dev/null @@ -1,189 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'AvroSource', - 'CsvSource', - 'GcsSource', - 'GcsDestination', - 'BigQuerySource', - 'BigQueryDestination', - 'CsvDestination', - 'TFRecordDestination', - 'ContainerRegistryDestination', - }, -) - - -class AvroSource(proto.Message): - r"""The storage details for Avro input content. - Attributes: - gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): - Required. Google Cloud Storage location. - """ - - gcs_source = proto.Field( - proto.MESSAGE, - number=1, - message='GcsSource', - ) - - -class CsvSource(proto.Message): - r"""The storage details for CSV input content. - Attributes: - gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): - Required. Google Cloud Storage location. - """ - - gcs_source = proto.Field( - proto.MESSAGE, - number=1, - message='GcsSource', - ) - - -class GcsSource(proto.Message): - r"""The Google Cloud Storage location for the input content. - Attributes: - uris (Sequence[str]): - Required. Google Cloud Storage URI(-s) to the - input file(s). May contain wildcards. For more - information on wildcards, see - https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. - """ - - uris = proto.RepeatedField( - proto.STRING, - number=1, - ) - - -class GcsDestination(proto.Message): - r"""The Google Cloud Storage location where the output is to be - written to. - - Attributes: - output_uri_prefix (str): - Required. Google Cloud Storage URI to output - directory. If the uri doesn't end with '/', a - '/' will be automatically appended. The - directory is created if it doesn't exist. - """ - - output_uri_prefix = proto.Field( - proto.STRING, - number=1, - ) - - -class BigQuerySource(proto.Message): - r"""The BigQuery location for the input content. - Attributes: - input_uri (str): - Required. BigQuery URI to a table, up to 2000 characters - long. Accepted forms: - - - BigQuery path. For example: - ``bq://projectId.bqDatasetId.bqTableId``. 
- """ - - input_uri = proto.Field( - proto.STRING, - number=1, - ) - - -class BigQueryDestination(proto.Message): - r"""The BigQuery location for the output content. - Attributes: - output_uri (str): - Required. BigQuery URI to a project or table, up to 2000 - characters long. - - When only the project is specified, the Dataset and Table is - created. When the full table reference is specified, the - Dataset must exist and table must not exist. - - Accepted forms: - - - BigQuery path. For example: ``bq://projectId`` or - ``bq://projectId.bqDatasetId.bqTableId``. - """ - - output_uri = proto.Field( - proto.STRING, - number=1, - ) - - -class CsvDestination(proto.Message): - r"""The storage details for CSV output content. - Attributes: - gcs_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination): - Required. Google Cloud Storage location. - """ - - gcs_destination = proto.Field( - proto.MESSAGE, - number=1, - message='GcsDestination', - ) - - -class TFRecordDestination(proto.Message): - r"""The storage details for TFRecord output content. - Attributes: - gcs_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination): - Required. Google Cloud Storage location. - """ - - gcs_destination = proto.Field( - proto.MESSAGE, - number=1, - message='GcsDestination', - ) - - -class ContainerRegistryDestination(proto.Message): - r"""The Container Registry location for the container image. - Attributes: - output_uri (str): - Required. Container Registry URI of a container image. Only - Google Container Registry and Artifact Registry are - supported now. Accepted forms: - - - Google Container Registry path. For example: - ``gcr.io/projectId/imageName:tag``. - - - Artifact Registry path. For example: - ``us-central1-docker.pkg.dev/projectId/repoName/imageName:tag``. - - If a tag is not specified, "latest" will be used as the - default tag. - """ - - output_uri = proto.Field( - proto.STRING, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_service.py deleted file mode 100644 index 49932da1d8..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_service.py +++ /dev/null @@ -1,1068 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import batch_prediction_job as gca_batch_prediction_job -from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job -from google.cloud.aiplatform_v1beta1.types import data_labeling_job as gca_data_labeling_job -from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job -from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job -from google.cloud.aiplatform_v1beta1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'CreateCustomJobRequest', - 'GetCustomJobRequest', - 'ListCustomJobsRequest', - 'ListCustomJobsResponse', - 'DeleteCustomJobRequest', - 'CancelCustomJobRequest', - 'CreateDataLabelingJobRequest', - 'GetDataLabelingJobRequest', - 'ListDataLabelingJobsRequest', - 'ListDataLabelingJobsResponse', - 'DeleteDataLabelingJobRequest', - 'CancelDataLabelingJobRequest', - 'CreateHyperparameterTuningJobRequest', - 'GetHyperparameterTuningJobRequest', - 'ListHyperparameterTuningJobsRequest', - 'ListHyperparameterTuningJobsResponse', - 'DeleteHyperparameterTuningJobRequest', - 'CancelHyperparameterTuningJobRequest', - 'CreateBatchPredictionJobRequest', - 'GetBatchPredictionJobRequest', - 'ListBatchPredictionJobsRequest', - 'ListBatchPredictionJobsResponse', - 'DeleteBatchPredictionJobRequest', - 'CancelBatchPredictionJobRequest', - 'CreateModelDeploymentMonitoringJobRequest', - 'SearchModelDeploymentMonitoringStatsAnomaliesRequest', - 'SearchModelDeploymentMonitoringStatsAnomaliesResponse', - 'GetModelDeploymentMonitoringJobRequest', - 'ListModelDeploymentMonitoringJobsRequest', - 'ListModelDeploymentMonitoringJobsResponse', - 'UpdateModelDeploymentMonitoringJobRequest', - 'DeleteModelDeploymentMonitoringJobRequest', - 'PauseModelDeploymentMonitoringJobRequest', - 'ResumeModelDeploymentMonitoringJobRequest', - 'UpdateModelDeploymentMonitoringJobOperationMetadata', - }, -) - - -class CreateCustomJobRequest(proto.Message): - r"""Request message for - [JobService.CreateCustomJob][google.cloud.aiplatform.v1beta1.JobService.CreateCustomJob]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - CustomJob in. Format: - ``projects/{project}/locations/{location}`` - custom_job (google.cloud.aiplatform_v1beta1.types.CustomJob): - Required. The CustomJob to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - custom_job = proto.Field( - proto.MESSAGE, - number=2, - message=gca_custom_job.CustomJob, - ) - - -class GetCustomJobRequest(proto.Message): - r"""Request message for - [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob]. - - Attributes: - name (str): - Required. The name of the CustomJob resource. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListCustomJobsRequest(proto.Message): - r"""Request message for - [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs]. - - Attributes: - parent (str): - Required. The resource name of the Location to list the - CustomJobs from. Format: - ``projects/{project}/locations/{location}`` - filter (str): - The standard list filter. 
- - Supported fields: - - - ``display_name`` supports = and !=. - - - ``state`` supports = and !=. - - Some examples of using the filter are: - - - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` - - - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` - - - ``NOT display_name="my_job"`` - - - ``state="JOB_STATE_FAILED"`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained via - [ListCustomJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListCustomJobsResponse.next_page_token] - of the previous - [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListCustomJobsResponse(proto.Message): - r"""Response message for - [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs] - - Attributes: - custom_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.CustomJob]): - List of CustomJobs in the requested page. - next_page_token (str): - A token to retrieve the next page of results. Pass to - [ListCustomJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListCustomJobsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - custom_jobs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_custom_job.CustomJob, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteCustomJobRequest(proto.Message): - r"""Request message for - [JobService.DeleteCustomJob][google.cloud.aiplatform.v1beta1.JobService.DeleteCustomJob]. - - Attributes: - name (str): - Required. The name of the CustomJob resource to be deleted. - Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CancelCustomJobRequest(proto.Message): - r"""Request message for - [JobService.CancelCustomJob][google.cloud.aiplatform.v1beta1.JobService.CancelCustomJob]. - - Attributes: - name (str): - Required. The name of the CustomJob to cancel. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateDataLabelingJobRequest(proto.Message): - r"""Request message for - [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CreateDataLabelingJob]. - - Attributes: - parent (str): - Required. The parent of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}`` - data_labeling_job (google.cloud.aiplatform_v1beta1.types.DataLabelingJob): - Required. The DataLabelingJob to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - data_labeling_job = proto.Field( - proto.MESSAGE, - number=2, - message=gca_data_labeling_job.DataLabelingJob, - ) - - -class GetDataLabelingJobRequest(proto.Message): - r"""Request message for - [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.GetDataLabelingJob]. - - Attributes: - name (str): - Required. The name of the DataLabelingJob. 
Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListDataLabelingJobsRequest(proto.Message): - r"""Request message for - [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. - - Attributes: - parent (str): - Required. The parent of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}`` - filter (str): - The standard list filter. - - Supported fields: - - - ``display_name`` supports = and !=. - - - ``state`` supports = and !=. - - Some examples of using the filter are: - - - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` - - - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` - - - ``NOT display_name="my_job"`` - - - ``state="JOB_STATE_FAILED"`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. FieldMask represents a - set of symbolic field paths. For example, the mask can be - ``paths: "name"``. The "name" here is a field in - DataLabelingJob. If this field is not set, all fields of the - DataLabelingJob are returned. - order_by (str): - A comma-separated list of fields to order by, sorted in - ascending order by default. Use ``desc`` after a field name - for descending. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - order_by = proto.Field( - proto.STRING, - number=6, - ) - - -class ListDataLabelingJobsResponse(proto.Message): - r"""Response message for - [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. - - Attributes: - data_labeling_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.DataLabelingJob]): - A list of DataLabelingJobs that matches the - specified filter in the request. - next_page_token (str): - The standard List next-page token. - """ - - @property - def raw_page(self): - return self - - data_labeling_jobs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_data_labeling_job.DataLabelingJob, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteDataLabelingJobRequest(proto.Message): - r"""Request message for - [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.DeleteDataLabelingJob]. - - Attributes: - name (str): - Required. The name of the DataLabelingJob to be deleted. - Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CancelDataLabelingJobRequest(proto.Message): - r"""Request message for - [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CancelDataLabelingJob]. - - Attributes: - name (str): - Required. The name of the DataLabelingJob. 
Format:
-            ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}``
-    """
-
-    name = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-
-
-class CreateHyperparameterTuningJobRequest(proto.Message):
-    r"""Request message for
-    [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CreateHyperparameterTuningJob].
-
-    Attributes:
-        parent (str):
-            Required. The resource name of the Location to create the
-            HyperparameterTuningJob in. Format:
-            ``projects/{project}/locations/{location}``
-        hyperparameter_tuning_job (google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob):
-            Required. The HyperparameterTuningJob to
-            create.
-    """
-
-    parent = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-    hyperparameter_tuning_job = proto.Field(
-        proto.MESSAGE,
-        number=2,
-        message=gca_hyperparameter_tuning_job.HyperparameterTuningJob,
-    )
-
-
-class GetHyperparameterTuningJobRequest(proto.Message):
-    r"""Request message for
-    [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob].
-
-    Attributes:
-        name (str):
-            Required. The name of the HyperparameterTuningJob resource.
-            Format:
-            ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}``
-    """
-
-    name = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-
-
-class ListHyperparameterTuningJobsRequest(proto.Message):
-    r"""Request message for
-    [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs].
-
-    Attributes:
-        parent (str):
-            Required. The resource name of the Location to list the
-            HyperparameterTuningJobs from. Format:
-            ``projects/{project}/locations/{location}``
-        filter (str):
-            The standard list filter.
-
-            Supported fields:
-
-            -  ``display_name`` supports = and !=.
-
-            -  ``state`` supports = and !=.
-
-            Some examples of using the filter are:
-
-            -  ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"``
-
-            -  ``state="JOB_STATE_RUNNING" OR display_name="my_job"``
-
-            -  ``NOT display_name="my_job"``
-
-            -  ``state="JOB_STATE_FAILED"``
-        page_size (int):
-            The standard list page size.
-        page_token (str):
-            The standard list page token. Typically obtained via
-            [ListHyperparameterTuningJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListHyperparameterTuningJobsResponse.next_page_token]
-            of the previous
-            [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs]
-            call.
-        read_mask (google.protobuf.field_mask_pb2.FieldMask):
-            Mask specifying which fields to read.
-    """
-
-    parent = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-    filter = proto.Field(
-        proto.STRING,
-        number=2,
-    )
-    page_size = proto.Field(
-        proto.INT32,
-        number=3,
-    )
-    page_token = proto.Field(
-        proto.STRING,
-        number=4,
-    )
-    read_mask = proto.Field(
-        proto.MESSAGE,
-        number=5,
-        message=field_mask_pb2.FieldMask,
-    )
-
-
-class ListHyperparameterTuningJobsResponse(proto.Message):
-    r"""Response message for
-    [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs]
-
-    Attributes:
-        hyperparameter_tuning_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob]):
-            List of HyperparameterTuningJobs in the requested page.
-            [HyperparameterTuningJob.trials][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.trials]
-            of the jobs will not be returned.
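
The filter grammar documented above composes with the standard pagination fields. A brief
sketch (assuming the generated ``JobServiceClient``; the parent is hypothetical)::

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.JobServiceClient()
    request = aiplatform_v1beta1.ListHyperparameterTuningJobsRequest(
        parent="projects/my-project/locations/us-central1",
        filter='state="JOB_STATE_SUCCEEDED" AND display_name="my_job"',
    )
    # Note: trials are not populated on the listed jobs.
    for job in client.list_hyperparameter_tuning_jobs(request=request):
        print(job.name, job.state.name)
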
- next_page_token (str): - A token to retrieve the next page of results. Pass to - [ListHyperparameterTuningJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListHyperparameterTuningJobsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - hyperparameter_tuning_jobs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_hyperparameter_tuning_job.HyperparameterTuningJob, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteHyperparameterTuningJobRequest(proto.Message): - r"""Request message for - [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.DeleteHyperparameterTuningJob]. - - Attributes: - name (str): - Required. The name of the HyperparameterTuningJob resource - to be deleted. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CancelHyperparameterTuningJobRequest(proto.Message): - r"""Request message for - [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CancelHyperparameterTuningJob]. - - Attributes: - name (str): - Required. The name of the HyperparameterTuningJob to cancel. - Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateBatchPredictionJobRequest(proto.Message): - r"""Request message for - [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CreateBatchPredictionJob]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - BatchPredictionJob in. Format: - ``projects/{project}/locations/{location}`` - batch_prediction_job (google.cloud.aiplatform_v1beta1.types.BatchPredictionJob): - Required. The BatchPredictionJob to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - batch_prediction_job = proto.Field( - proto.MESSAGE, - number=2, - message=gca_batch_prediction_job.BatchPredictionJob, - ) - - -class GetBatchPredictionJobRequest(proto.Message): - r"""Request message for - [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob]. - - Attributes: - name (str): - Required. The name of the BatchPredictionJob resource. - Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListBatchPredictionJobsRequest(proto.Message): - r"""Request message for - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs]. - - Attributes: - parent (str): - Required. The resource name of the Location to list the - BatchPredictionJobs from. Format: - ``projects/{project}/locations/{location}`` - filter (str): - The standard list filter. - - Supported fields: - - - ``display_name`` supports = and !=. - - - ``state`` supports = and !=. - - - ``model_display_name`` supports = and != - - Some examples of using the filter are: - - - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` - - - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` - - - ``NOT display_name="my_job"`` - - - ``state="JOB_STATE_FAILED"`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. 
Typically obtained via - [ListBatchPredictionJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListBatchPredictionJobsResponse.next_page_token] - of the previous - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListBatchPredictionJobsResponse(proto.Message): - r"""Response message for - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs] - - Attributes: - batch_prediction_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.BatchPredictionJob]): - List of BatchPredictionJobs in the requested - page. - next_page_token (str): - A token to retrieve the next page of results. Pass to - [ListBatchPredictionJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListBatchPredictionJobsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - batch_prediction_jobs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_batch_prediction_job.BatchPredictionJob, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteBatchPredictionJobRequest(proto.Message): - r"""Request message for - [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.DeleteBatchPredictionJob]. - - Attributes: - name (str): - Required. The name of the BatchPredictionJob resource to be - deleted. Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CancelBatchPredictionJobRequest(proto.Message): - r"""Request message for - [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CancelBatchPredictionJob]. - - Attributes: - name (str): - Required. The name of the BatchPredictionJob to cancel. - Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateModelDeploymentMonitoringJobRequest(proto.Message): - r"""Request message for - [JobService.CreateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.CreateModelDeploymentMonitoringJob]. - - Attributes: - parent (str): - Required. The parent of the ModelDeploymentMonitoringJob. - Format: ``projects/{project}/locations/{location}`` - model_deployment_monitoring_job (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob): - Required. The ModelDeploymentMonitoringJob to - create - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - model_deployment_monitoring_job = proto.Field( - proto.MESSAGE, - number=2, - message=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, - ) - - -class SearchModelDeploymentMonitoringStatsAnomaliesRequest(proto.Message): - r"""Request message for - [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. - - Attributes: - model_deployment_monitoring_job (str): - Required. 
ModelDeploymentMonitoringJob resource name.
-            Format:
-            ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}``
-        deployed_model_id (str):
-            Required. The DeployedModel ID of the
-            [google.cloud.aiplatform.master.ModelDeploymentMonitoringObjectiveConfig.deployed_model_id].
-        feature_display_name (str):
-            The feature display name. If specified, only return the
-            stats belonging to this feature. Format:
-            [ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.feature_display_name][google.cloud.aiplatform.v1beta1.ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.feature_display_name],
-            example: "user_destination".
-        objectives (Sequence[google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest.StatsAnomaliesObjective]):
-            Required. Objectives of the stats to
-            retrieve.
-        page_size (int):
-            The standard list page size.
-        page_token (str):
-            A page token received from a previous
-            [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]
-            call.
-        start_time (google.protobuf.timestamp_pb2.Timestamp):
-            The earliest timestamp of stats being
-            generated. If not set, stats are fetched from
-            the earliest possible timestamp.
-        end_time (google.protobuf.timestamp_pb2.Timestamp):
-            The latest timestamp of stats being
-            generated. If not set, stats are fetched up to
-            the latest possible timestamp.
-    """
-
-    class StatsAnomaliesObjective(proto.Message):
-        r"""Stats requested for a specific objective.
-        Attributes:
-            type_ (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringObjectiveType):
-
-            top_feature_count (int):
-                If set, all attribution scores between
-                [SearchModelDeploymentMonitoringStatsAnomaliesRequest.start_time][google.cloud.aiplatform.v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest.start_time]
-                and
-                [SearchModelDeploymentMonitoringStatsAnomaliesRequest.end_time][google.cloud.aiplatform.v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest.end_time]
-                are fetched, and the page token doesn't take effect in this
-                case. Only used to retrieve attribution scores for the top
-                Features with the highest attribution scores in the latest
-                monitoring run.
-        """
-
-        type_ = proto.Field(
-            proto.ENUM,
-            number=1,
-            enum=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringObjectiveType,
-        )
-        top_feature_count = proto.Field(
-            proto.INT32,
-            number=4,
-        )
-
-    model_deployment_monitoring_job = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-    deployed_model_id = proto.Field(
-        proto.STRING,
-        number=2,
-    )
-    feature_display_name = proto.Field(
-        proto.STRING,
-        number=3,
-    )
-    objectives = proto.RepeatedField(
-        proto.MESSAGE,
-        number=4,
-        message=StatsAnomaliesObjective,
-    )
-    page_size = proto.Field(
-        proto.INT32,
-        number=5,
-    )
-    page_token = proto.Field(
-        proto.STRING,
-        number=6,
-    )
-    start_time = proto.Field(
-        proto.MESSAGE,
-        number=7,
-        message=timestamp_pb2.Timestamp,
-    )
-    end_time = proto.Field(
-        proto.MESSAGE,
-        number=8,
-        message=timestamp_pb2.Timestamp,
-    )
-
-
-class SearchModelDeploymentMonitoringStatsAnomaliesResponse(proto.Message):
-    r"""Response message for
-    [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies].
- - Attributes: - monitoring_stats (Sequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringStatsAnomalies]): - Stats retrieved for requested objectives. There are at most - 1000 - [ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.prediction_stats][google.cloud.aiplatform.v1beta1.ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.prediction_stats] - in the response. - next_page_token (str): - The page token that can be used by the next - [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies] - call. - """ - - @property - def raw_page(self): - return self - - monitoring_stats = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class GetModelDeploymentMonitoringJobRequest(proto.Message): - r"""Request message for - [JobService.GetModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.GetModelDeploymentMonitoringJob]. - - Attributes: - name (str): - Required. The resource name of the - ModelDeploymentMonitoringJob. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListModelDeploymentMonitoringJobsRequest(proto.Message): - r"""Request message for - [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. - - Attributes: - parent (str): - Required. The parent of the ModelDeploymentMonitoringJob. - Format: ``projects/{project}/locations/{location}`` - filter (str): - The standard list filter. - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListModelDeploymentMonitoringJobsResponse(proto.Message): - r"""Response message for - [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. - - Attributes: - model_deployment_monitoring_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob]): - A list of ModelDeploymentMonitoringJobs that - matches the specified filter in the request. - next_page_token (str): - The standard List next-page token. - """ - - @property - def raw_page(self): - return self - - model_deployment_monitoring_jobs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateModelDeploymentMonitoringJobRequest(proto.Message): - r"""Request message for - [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob]. - - Attributes: - model_deployment_monitoring_job (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob): - Required. 
The model monitoring configuration - which replaces the resource on the server. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the - resource. - """ - - model_deployment_monitoring_job = proto.Field( - proto.MESSAGE, - number=1, - message=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class DeleteModelDeploymentMonitoringJobRequest(proto.Message): - r"""Request message for - [JobService.DeleteModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.DeleteModelDeploymentMonitoringJob]. - - Attributes: - name (str): - Required. The resource name of the model monitoring job to - delete. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class PauseModelDeploymentMonitoringJobRequest(proto.Message): - r"""Request message for - [JobService.PauseModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.PauseModelDeploymentMonitoringJob]. - - Attributes: - name (str): - Required. The resource name of the - ModelDeploymentMonitoringJob to pause. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ResumeModelDeploymentMonitoringJobRequest(proto.Message): - r"""Request message for - [JobService.ResumeModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.ResumeModelDeploymentMonitoringJob]. - - Attributes: - name (str): - Required. The resource name of the - ModelDeploymentMonitoringJob to resume. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class UpdateModelDeploymentMonitoringJobOperationMetadata(proto.Message): - r"""Runtime operation information for - [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The operation generic information. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_state.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_state.py deleted file mode 100644 index 677ba3b002..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_state.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
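
The JobState enum deleted just below is shared by all job resources in this package.
Checking for a terminal state after a get call is the usual pattern (the job resource
name is hypothetical)::

    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.JobServiceClient()
    job = client.get_custom_job(
        name="projects/my-project/locations/us-central1/customJobs/123",
    )
    if job.state == aiplatform_v1beta1.JobState.JOB_STATE_SUCCEEDED:
        print("job finished successfully")
    elif job.state in (
        aiplatform_v1beta1.JobState.JOB_STATE_FAILED,
        aiplatform_v1beta1.JobState.JOB_STATE_CANCELLED,
    ):
        print("job reached a terminal failure state")
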
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'JobState', - }, -) - - -class JobState(proto.Enum): - r"""Describes the state of a job.""" - JOB_STATE_UNSPECIFIED = 0 - JOB_STATE_QUEUED = 1 - JOB_STATE_PENDING = 2 - JOB_STATE_RUNNING = 3 - JOB_STATE_SUCCEEDED = 4 - JOB_STATE_FAILED = 5 - JOB_STATE_CANCELLING = 6 - JOB_STATE_CANCELLED = 7 - JOB_STATE_PAUSED = 8 - JOB_STATE_EXPIRED = 9 - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/lineage_subgraph.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/lineage_subgraph.py deleted file mode 100644 index 597674dbab..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/lineage_subgraph.py +++ /dev/null @@ -1,62 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import artifact -from google.cloud.aiplatform_v1beta1.types import event -from google.cloud.aiplatform_v1beta1.types import execution - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'LineageSubgraph', - }, -) - - -class LineageSubgraph(proto.Message): - r"""A subgraph of the overall lineage graph. Event edges connect - Artifact and Execution nodes. - - Attributes: - artifacts (Sequence[google.cloud.aiplatform_v1beta1.types.Artifact]): - The Artifact nodes in the subgraph. - executions (Sequence[google.cloud.aiplatform_v1beta1.types.Execution]): - The Execution nodes in the subgraph. - events (Sequence[google.cloud.aiplatform_v1beta1.types.Event]): - The Event edges between Artifacts and - Executions in the subgraph. - """ - - artifacts = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=artifact.Artifact, - ) - executions = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=execution.Execution, - ) - events = proto.RepeatedField( - proto.MESSAGE, - number=3, - message=event.Event, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/machine_resources.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/machine_resources.py deleted file mode 100644 index fa6b9b1e1b..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/machine_resources.py +++ /dev/null @@ -1,308 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import proto  # type: ignore
-
-from google.cloud.aiplatform_v1beta1.types import accelerator_type as gca_accelerator_type
-
-
-__protobuf__ = proto.module(
-    package='google.cloud.aiplatform.v1beta1',
-    manifest={
-        'MachineSpec',
-        'DedicatedResources',
-        'AutomaticResources',
-        'BatchDedicatedResources',
-        'ResourcesConsumed',
-        'DiskSpec',
-        'AutoscalingMetricSpec',
-    },
-)
-
-
-class MachineSpec(proto.Message):
-    r"""Specification of a single machine.
-    Attributes:
-        machine_type (str):
-            Immutable. The type of the machine.
-
-            See the `list of machine types supported for
-            prediction `__
-
-            See the `list of machine types supported for custom
-            training `__.
-
-            For
-            [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel]
-            this field is optional, and the default value is
-            ``n1-standard-2``. For
-            [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]
-            or as part of
-            [WorkerPoolSpec][google.cloud.aiplatform.v1beta1.WorkerPoolSpec]
-            this field is required.
-        accelerator_type (google.cloud.aiplatform_v1beta1.types.AcceleratorType):
-            Immutable. The type of accelerator(s) that may be attached
-            to the machine as per
-            [accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count].
-        accelerator_count (int):
-            The number of accelerators to attach to the
-            machine.
-    """
-
-    machine_type = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-    accelerator_type = proto.Field(
-        proto.ENUM,
-        number=2,
-        enum=gca_accelerator_type.AcceleratorType,
-    )
-    accelerator_count = proto.Field(
-        proto.INT32,
-        number=3,
-    )
-
-
-class DedicatedResources(proto.Message):
-    r"""A description of resources that are dedicated to a
-    DeployedModel, and that need a higher degree of manual
-    configuration.
-
-    Attributes:
-        machine_spec (google.cloud.aiplatform_v1beta1.types.MachineSpec):
-            Required. Immutable. The specification of a
-            single machine used by the prediction.
-        min_replica_count (int):
-            Required. Immutable. The minimum number of machine replicas
-            this DeployedModel will always be deployed on. If traffic
-            against it increases, it may dynamically be deployed onto
-            more replicas, and as traffic decreases, some of these extra
-            replicas may be freed. Note: if
-            [machine_spec.accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count]
-            is above 0, currently the model will always be deployed on
-            precisely
-            [min_replica_count][google.cloud.aiplatform.v1beta1.DedicatedResources.min_replica_count].
-        max_replica_count (int):
-            Immutable. The maximum number of replicas this DeployedModel
-            may be deployed on when the traffic against it increases. If
-            the requested value is too large, the deployment will error,
-            but if deployment succeeds then the ability to scale the
-            model to that many replicas is guaranteed (barring service
-            outages). If traffic against the DeployedModel increases
-            beyond what its replicas at maximum may handle, a portion of
-            the traffic will be dropped. If this value is not provided,
-            [min_replica_count][google.cloud.aiplatform.v1beta1.DedicatedResources.min_replica_count]
-            is used as the default value.
-        autoscaling_metric_specs (Sequence[google.cloud.aiplatform_v1beta1.types.AutoscalingMetricSpec]):
-            Immutable. The metric specifications that override a
-            resource utilization metric (CPU utilization, accelerator's
-            duty cycle, and so on) target value (the default is 60 if
-            not set).
At most one entry is allowed per metric.
-
-            If
-            [machine_spec.accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count]
-            is above 0, the autoscaling will be based on both CPU
-            utilization and the accelerator's duty cycle metrics,
-            scaling up when either metric exceeds its target value and
-            scaling down when both metrics are under their target
-            values. The default target value is 60 for both metrics.
-
-            If
-            [machine_spec.accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count]
-            is 0, the autoscaling will be based on the CPU utilization
-            metric only, with a default target value of 60 if not
-            explicitly set.
-
-            For example, in the case of Online Prediction, if you want
-            to override the target CPU utilization to 80, you should set
-            [autoscaling_metric_specs.metric_name][google.cloud.aiplatform.v1beta1.AutoscalingMetricSpec.metric_name]
-            to
-            ``aiplatform.googleapis.com/prediction/online/cpu/utilization``
-            and
-            [autoscaling_metric_specs.target][google.cloud.aiplatform.v1beta1.AutoscalingMetricSpec.target]
-            to ``80``.
-    """
-
-    machine_spec = proto.Field(
-        proto.MESSAGE,
-        number=1,
-        message='MachineSpec',
-    )
-    min_replica_count = proto.Field(
-        proto.INT32,
-        number=2,
-    )
-    max_replica_count = proto.Field(
-        proto.INT32,
-        number=3,
-    )
-    autoscaling_metric_specs = proto.RepeatedField(
-        proto.MESSAGE,
-        number=4,
-        message='AutoscalingMetricSpec',
-    )
-
-
-class AutomaticResources(proto.Message):
-    r"""A description of resources that are, to a large degree,
-    decided by Vertex AI and require only a modest additional
-    configuration. Each Model supporting these resources documents
-    its specific guidelines.
-
-    Attributes:
-        min_replica_count (int):
-            Immutable. The minimum number of replicas this DeployedModel
-            will always be deployed on. If traffic against it increases,
-            it may dynamically be deployed onto more replicas up to
-            [max_replica_count][google.cloud.aiplatform.v1beta1.AutomaticResources.max_replica_count],
-            and as traffic decreases, some of these extra replicas may
-            be freed. If the requested value is too large, the
-            deployment will error.
-        max_replica_count (int):
-            Immutable. The maximum number of replicas
-            this DeployedModel may be deployed on when the
-            traffic against it increases. If the requested
-            value is too large, the deployment will error,
-            but if deployment succeeds then the ability to
-            scale the model to that many replicas is
-            guaranteed (barring service outages). If traffic
-            against the DeployedModel increases beyond what
-            its replicas at maximum may handle, a portion of
-            the traffic will be dropped. If this value is
-            not provided, no upper bound for scaling under
-            heavy traffic is assumed, though Vertex AI may
-            be unable to scale beyond a certain replica
-            number.
-    """
-
-    min_replica_count = proto.Field(
-        proto.INT32,
-        number=1,
-    )
-    max_replica_count = proto.Field(
-        proto.INT32,
-        number=2,
-    )
-
-
-class BatchDedicatedResources(proto.Message):
-    r"""A description of resources that are used for performing batch
-    operations, are dedicated to a Model, and need manual
-    configuration.
-
-    Attributes:
-        machine_spec (google.cloud.aiplatform_v1beta1.types.MachineSpec):
-            Required. Immutable. The specification of a
-            single machine.
-        starting_replica_count (int):
-            Immutable. The number of machine replicas used at the start
-            of the batch operation.
If not set, Vertex AI decides - starting number, not greater than - [max_replica_count][google.cloud.aiplatform.v1beta1.BatchDedicatedResources.max_replica_count] - max_replica_count (int): - Immutable. The maximum number of machine - replicas the batch operation may be scaled to. - The default value is 10. - """ - - machine_spec = proto.Field( - proto.MESSAGE, - number=1, - message='MachineSpec', - ) - starting_replica_count = proto.Field( - proto.INT32, - number=2, - ) - max_replica_count = proto.Field( - proto.INT32, - number=3, - ) - - -class ResourcesConsumed(proto.Message): - r"""Statistics information about resource consumption. - Attributes: - replica_hours (float): - Output only. The number of replica hours - used. Note that many replicas may run in - parallel, and additionally any given work may be - queued for some time. Therefore this value is - not strictly related to wall time. - """ - - replica_hours = proto.Field( - proto.DOUBLE, - number=1, - ) - - -class DiskSpec(proto.Message): - r"""Represents the spec of disk options. - Attributes: - boot_disk_type (str): - Type of the boot disk (default is "pd-ssd"). - Valid values: "pd-ssd" (Persistent Disk Solid - State Drive) or "pd-standard" (Persistent Disk - Hard Disk Drive). - boot_disk_size_gb (int): - Size in GB of the boot disk (default is - 100GB). - """ - - boot_disk_type = proto.Field( - proto.STRING, - number=1, - ) - boot_disk_size_gb = proto.Field( - proto.INT32, - number=2, - ) - - -class AutoscalingMetricSpec(proto.Message): - r"""The metric specification that defines the target resource - utilization (CPU utilization, accelerator's duty cycle, and so - on) for calculating the desired replica count. - - Attributes: - metric_name (str): - Required. The resource metric name. Supported metrics: - - - For Online Prediction: - - ``aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle`` - - ``aiplatform.googleapis.com/prediction/online/cpu/utilization`` - target (int): - The target resource utilization in percentage - (1% - 100%) for the given metric; once the real - usage deviates from the target by a certain - percentage, the machine replicas change. The - default value is 60 (representing 60%) if not - provided. - """ - - metric_name = proto.Field( - proto.STRING, - number=1, - ) - target = proto.Field( - proto.INT32, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/manual_batch_tuning_parameters.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/manual_batch_tuning_parameters.py deleted file mode 100644 index d0c6cfc111..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/manual_batch_tuning_parameters.py +++ /dev/null @@ -1,49 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
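
Tying together the machine_resources.py messages deleted above, a minimal
DedicatedResources sketch with an autoscaling override (the machine and accelerator
choices are illustrative)::

    from google.cloud import aiplatform_v1beta1

    dedicated = aiplatform_v1beta1.DedicatedResources(
        machine_spec=aiplatform_v1beta1.MachineSpec(
            machine_type="n1-standard-4",
            accelerator_type=aiplatform_v1beta1.AcceleratorType.NVIDIA_TESLA_T4,
            accelerator_count=1,
        ),
        min_replica_count=1,
        max_replica_count=3,
        autoscaling_metric_specs=[
            aiplatform_v1beta1.AutoscalingMetricSpec(
                metric_name="aiplatform.googleapis.com/prediction/online/cpu/utilization",
                target=80,  # overrides the default target of 60%
            ),
        ],
    )
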
-#
-import proto  # type: ignore
-
-
-__protobuf__ = proto.module(
-    package='google.cloud.aiplatform.v1beta1',
-    manifest={
-        'ManualBatchTuningParameters',
-    },
-)
-
-
-class ManualBatchTuningParameters(proto.Message):
-    r"""Manual batch tuning parameters.
-    Attributes:
-        batch_size (int):
-            Immutable. The number of records (e.g.
-            instances) of the operation given in each batch
-            to a machine replica. Machine type and the size
-            of a single record should be considered when
-            setting this parameter: a higher value speeds up
-            the batch operation's execution, but a value
-            that is too high will result in a whole batch
-            not fitting in a machine's memory, and the whole
-            operation will fail.
-            The default value is 4.
-    """
-
-    batch_size = proto.Field(
-        proto.INT32,
-        number=1,
-    )
-
-
-__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_schema.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_schema.py
deleted file mode 100644
index 01e89e428e..0000000000
--- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_schema.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import proto  # type: ignore
-
-from google.protobuf import timestamp_pb2  # type: ignore
-
-
-__protobuf__ = proto.module(
-    package='google.cloud.aiplatform.v1beta1',
-    manifest={
-        'MetadataSchema',
-    },
-)
-
-
-class MetadataSchema(proto.Message):
-    r"""Instance of a general MetadataSchema.
-    Attributes:
-        name (str):
-            Output only. The resource name of the
-            MetadataSchema.
-        schema_version (str):
-            The version of the MetadataSchema. The version's format must
-            match the following regular expression:
-            ``^[0-9]+[.][0-9]+[.][0-9]+$``, which allows versions to be
-            ordered and compared. Example: 1.0.0, 1.0.1, etc.
-        schema (str):
-            Required. The raw YAML string representation of the
-            MetadataSchema. The combination of [MetadataSchema.version]
-            and the schema name given by ``title`` in
-            [MetadataSchema.schema] must be unique within a
-            MetadataStore.
-
-            The schema is defined as an OpenAPI 3.0.2 `MetadataSchema
-            Object `__
-        schema_type (google.cloud.aiplatform_v1beta1.types.MetadataSchema.MetadataSchemaType):
-            The type of the MetadataSchema. This is a
-            property that identifies which metadata types
-            will use the MetadataSchema.
-        create_time (google.protobuf.timestamp_pb2.Timestamp):
-            Output only. Timestamp when this
-            MetadataSchema was created.
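
As a sketch, a MetadataSchema is typically constructed with a semver-style version and a
raw OpenAPI 3.0.2 YAML body; the schema content here is a hypothetical minimal example::

    from google.cloud import aiplatform_v1beta1

    metadata_schema = aiplatform_v1beta1.MetadataSchema(
        schema_version="1.0.0",  # must match ^[0-9]+[.][0-9]+[.][0-9]+$
        schema="title: my_namespace.MyType\ntype: object\n",
        schema_type=aiplatform_v1beta1.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE,
    )
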
- description (str): - Description of the Metadata Schema - """ - class MetadataSchemaType(proto.Enum): - r"""Describes the type of the MetadataSchema.""" - METADATA_SCHEMA_TYPE_UNSPECIFIED = 0 - ARTIFACT_TYPE = 1 - EXECUTION_TYPE = 2 - CONTEXT_TYPE = 3 - - name = proto.Field( - proto.STRING, - number=1, - ) - schema_version = proto.Field( - proto.STRING, - number=2, - ) - schema = proto.Field( - proto.STRING, - number=3, - ) - schema_type = proto.Field( - proto.ENUM, - number=4, - enum=MetadataSchemaType, - ) - create_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - description = proto.Field( - proto.STRING, - number=6, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_service.py deleted file mode 100644 index 2f9484546d..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_service.py +++ /dev/null @@ -1,1197 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact -from google.cloud.aiplatform_v1beta1.types import context as gca_context -from google.cloud.aiplatform_v1beta1.types import event -from google.cloud.aiplatform_v1beta1.types import execution as gca_execution -from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema -from google.cloud.aiplatform_v1beta1.types import metadata_store as gca_metadata_store -from google.cloud.aiplatform_v1beta1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'CreateMetadataStoreRequest', - 'CreateMetadataStoreOperationMetadata', - 'GetMetadataStoreRequest', - 'ListMetadataStoresRequest', - 'ListMetadataStoresResponse', - 'DeleteMetadataStoreRequest', - 'DeleteMetadataStoreOperationMetadata', - 'CreateArtifactRequest', - 'GetArtifactRequest', - 'ListArtifactsRequest', - 'ListArtifactsResponse', - 'UpdateArtifactRequest', - 'CreateContextRequest', - 'GetContextRequest', - 'ListContextsRequest', - 'ListContextsResponse', - 'UpdateContextRequest', - 'DeleteContextRequest', - 'AddContextArtifactsAndExecutionsRequest', - 'AddContextArtifactsAndExecutionsResponse', - 'AddContextChildrenRequest', - 'AddContextChildrenResponse', - 'QueryContextLineageSubgraphRequest', - 'CreateExecutionRequest', - 'GetExecutionRequest', - 'ListExecutionsRequest', - 'ListExecutionsResponse', - 'UpdateExecutionRequest', - 'AddExecutionEventsRequest', - 'AddExecutionEventsResponse', - 'QueryExecutionInputsAndOutputsRequest', - 'CreateMetadataSchemaRequest', - 'GetMetadataSchemaRequest', - 'ListMetadataSchemasRequest', - 'ListMetadataSchemasResponse', - 'QueryArtifactLineageSubgraphRequest', - }, -) - - -class 
CreateMetadataStoreRequest(proto.Message): - r"""Request message for - [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore]. - - Attributes: - parent (str): - Required. The resource name of the Location - where the MetadataStore should be created. - Format: projects/{project}/locations/{location}/ - metadata_store (google.cloud.aiplatform_v1beta1.types.MetadataStore): - Required. The MetadataStore to create. - metadata_store_id (str): - The {metadatastore} portion of the resource name with the - format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - If not provided, the MetadataStore's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are /[a-z][0-9]-/. Must be unique - across all MetadataStores in the parent Location. (Otherwise - the request will fail with ALREADY_EXISTS, or - PERMISSION_DENIED if the caller can't view the preexisting - MetadataStore.) - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - metadata_store = proto.Field( - proto.MESSAGE, - number=2, - message=gca_metadata_store.MetadataStore, - ) - metadata_store_id = proto.Field( - proto.STRING, - number=3, - ) - - -class CreateMetadataStoreOperationMetadata(proto.Message): - r"""Details of operations that perform - [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for creating a - MetadataStore. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class GetMetadataStoreRequest(proto.Message): - r"""Request message for - [MetadataService.GetMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataStore]. - - Attributes: - name (str): - Required. The resource name of the - MetadataStore to retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListMetadataStoresRequest(proto.Message): - r"""Request message for - [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. - - Attributes: - parent (str): - Required. The Location whose MetadataStores - should be listed. Format: - projects/{project}/locations/{location} - page_size (int): - The maximum number of Metadata Stores to - return. The service may return fewer. - Must be in range 1-1000, inclusive. Defaults to - 100. - page_token (str): - A page token, received from a previous - [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores] - call. Provide this to retrieve the subsequent page. - - When paginating, all other provided parameters must match - the call that provided the page token. (Otherwise the - request will fail with INVALID_ARGUMENT error.) - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_size = proto.Field( - proto.INT32, - number=2, - ) - page_token = proto.Field( - proto.STRING, - number=3, - ) - - -class ListMetadataStoresResponse(proto.Message): - r"""Response message for - [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. - - Attributes: - metadata_stores (Sequence[google.cloud.aiplatform_v1beta1.types.MetadataStore]): - The MetadataStores found for the Location. 
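A minimal usage sketch of the create flow above, with a hypothetical project and store ID; CreateMetadataStore returns a long-running operation whose result is the finished MetadataStore:

from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.MetadataServiceClient()
operation = client.create_metadata_store(
    parent="projects/my-project/locations/us-central1",  # hypothetical
    metadata_store=aiplatform_v1beta1.MetadataStore(),
    # 4-128 chars matching /[a-z][0-9]-/; the service generates a UUID if omitted.
    metadata_store_id="my-store",
)
store = operation.result()  # blocks until the LRO completes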
- next_page_token (str): - A token, which can be sent as - [ListMetadataStoresRequest.page_token][google.cloud.aiplatform.v1beta1.ListMetadataStoresRequest.page_token] - to retrieve the next page. If this field is not populated, - there are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - metadata_stores = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_metadata_store.MetadataStore, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteMetadataStoreRequest(proto.Message): - r"""Request message for - [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore]. - - Attributes: - name (str): - Required. The resource name of the - MetadataStore to delete. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - force (bool): - If set to true, any child resources of this MetadataStore - will be deleted. (Otherwise, the request will fail with a - FAILED_PRECONDITION error if the MetadataStore has any child - resources.) - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - force = proto.Field( - proto.BOOL, - number=2, - ) - - -class DeleteMetadataStoreOperationMetadata(proto.Message): - r"""Details of operations that perform - [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for deleting a - MetadataStore. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class CreateArtifactRequest(proto.Message): - r"""Request message for - [MetadataService.CreateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.CreateArtifact]. - - Attributes: - parent (str): - Required. The resource name of the - MetadataStore where the Artifact should be - created. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - artifact (google.cloud.aiplatform_v1beta1.types.Artifact): - Required. The Artifact to create. - artifact_id (str): - The {artifact} portion of the resource name with the format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} - If not provided, the Artifact's ID will be a UUID generated - by the service. Must be 4-128 characters in length. Valid - characters are /[a-z][0-9]-/. Must be unique across all - Artifacts in the parent MetadataStore. (Otherwise the - request will fail with ALREADY_EXISTS, or PERMISSION_DENIED - if the caller can't view the preexisting Artifact.) - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - artifact = proto.Field( - proto.MESSAGE, - number=2, - message=gca_artifact.Artifact, - ) - artifact_id = proto.Field( - proto.STRING, - number=3, - ) - - -class GetArtifactRequest(proto.Message): - r"""Request message for - [MetadataService.GetArtifact][google.cloud.aiplatform.v1beta1.MetadataService.GetArtifact]. - - Attributes: - name (str): - Required. The resource name of the Artifact - to retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListArtifactsRequest(proto.Message): - r"""Request message for - [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. - - Attributes: - parent (str): - Required. 
The MetadataStore whose Artifacts - should be listed. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - page_size (int): - The maximum number of Artifacts to return. - The service may return fewer. Must be in range - 1-1000, inclusive. Defaults to 100. - page_token (str): - A page token, received from a previous - [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts] - call. Provide this to retrieve the subsequent page. - - When paginating, all other provided parameters must match - the call that provided the page token. (Otherwise the - request will fail with INVALID_ARGUMENT error.) - filter (str): - Filter specifying the boolean condition for the Artifacts to - satisfy in order to be part of the result set. The syntax to - define filter query is based on https://google.aip.dev/160. - The supported set of filters include the following: - - - **Attribute filtering**: For example: - ``display_name = "test"``. Supported fields include: - ``name``, ``display_name``, ``uri``, ``state``, - ``schema_title``, ``create_time``, and ``update_time``. - Time fields, such as ``create_time`` and ``update_time``, - require values specified in RFC-3339 format. For example: - ``create_time = "2020-11-19T11:30:00-04:00"`` - - **Metadata field**: To filter on metadata fields use - traversal operation as follows: - ``metadata.<field_name>.<type_value>``. For example: - ``metadata.field_1.number_value = 10.0`` - - **Context based filtering**: To filter Artifacts based on - the contexts to which they belong, use the function - operator with the full resource name - ``in_context(<context-name>)``. For example: - ``in_context("projects/<project_number>/locations/<location>/metadataStores/<metadatastore_name>/contexts/<context-id>")`` - - Each of the above supported filter types can be combined - together using logical operators (``AND`` & ``OR``). - - For example: - ``display_name = "test" AND metadata.field1.bool_value = true``. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_size = proto.Field( - proto.INT32, - number=2, - ) - page_token = proto.Field( - proto.STRING, - number=3, - ) - filter = proto.Field( - proto.STRING, - number=4, - ) - - -class ListArtifactsResponse(proto.Message): - r"""Response message for - [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. - - Attributes: - artifacts (Sequence[google.cloud.aiplatform_v1beta1.types.Artifact]): - The Artifacts retrieved from the - MetadataStore. - next_page_token (str): - A token, which can be sent as - [ListArtifactsRequest.page_token][google.cloud.aiplatform.v1beta1.ListArtifactsRequest.page_token] - to retrieve the next page. If this field is not populated, - there are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - artifacts = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_artifact.Artifact, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateArtifactRequest(proto.Message): - r"""Request message for - [MetadataService.UpdateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.UpdateArtifact]. - - Attributes: - artifact (google.cloud.aiplatform_v1beta1.types.Artifact): - Required. The Artifact containing updates. The Artifact's - [Artifact.name][google.cloud.aiplatform.v1beta1.Artifact.name] - field is used to identify the Artifact to be updated. - Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required.
A FieldMask indicating which fields - should be updated. - allow_missing (bool): - If set to true, and the - [Artifact][google.cloud.aiplatform.v1beta1.Artifact] is not - found, a new - [Artifact][google.cloud.aiplatform.v1beta1.Artifact] will be - created. In this situation, ``update_mask`` is ignored. - """ - - artifact = proto.Field( - proto.MESSAGE, - number=1, - message=gca_artifact.Artifact, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - allow_missing = proto.Field( - proto.BOOL, - number=3, - ) - - -class CreateContextRequest(proto.Message): - r"""Request message for - [MetadataService.CreateContext][google.cloud.aiplatform.v1beta1.MetadataService.CreateContext]. - - Attributes: - parent (str): - Required. The resource name of the - MetadataStore where the Context should be - created. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - context (google.cloud.aiplatform_v1beta1.types.Context): - Required. The Context to create. - context_id (str): - The {context} portion of the resource name with the format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}. - If not provided, the Context's ID will be a UUID generated - by the service. Must be 4-128 characters in length. Valid - characters are /[a-z][0-9]-/. Must be unique across all - Contexts in the parent MetadataStore. (Otherwise the request - will fail with ALREADY_EXISTS, or PERMISSION_DENIED if the - caller can't view the preexisting Context.) - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - context = proto.Field( - proto.MESSAGE, - number=2, - message=gca_context.Context, - ) - context_id = proto.Field( - proto.STRING, - number=3, - ) - - -class GetContextRequest(proto.Message): - r"""Request message for - [MetadataService.GetContext][google.cloud.aiplatform.v1beta1.MetadataService.GetContext]. - - Attributes: - name (str): - Required. The resource name of the Context to - retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListContextsRequest(proto.Message): - r"""Request message for - [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts] - - Attributes: - parent (str): - Required. The MetadataStore whose Contexts - should be listed. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - page_size (int): - The maximum number of Contexts to return. The - service may return fewer. Must be in range - 1-1000, inclusive. Defaults to 100. - page_token (str): - A page token, received from a previous - [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts] - call. Provide this to retrieve the subsequent page. - - When paginating, all other provided parameters must match - the call that provided the page token. (Otherwise the - request will fail with INVALID_ARGUMENT error.) - filter (str): - Filter specifying the boolean condition for the Contexts to - satisfy in order to be part of the result set. The syntax to - define filter query is based on https://google.aip.dev/160. - Following are the supported set of filters: - - - **Attribute filtering**: For example: - ``display_name = "test"``. Supported fields include: - ``name``, ``display_name``, ``schema_title``, - ``create_time``, and ``update_time``. 
Time fields, such - as ``create_time`` and ``update_time``, require values - specified in RFC-3339 format. For example: - ``create_time = "2020-11-19T11:30:00-04:00"``. - - - **Metadata field**: To filter on metadata fields use - traversal operation as follows: - ``metadata..``. For example: - ``metadata.field_1.number_value = 10.0``. - - - **Parent Child filtering**: To filter Contexts based on - parent-child relationship use the HAS operator as - follows: - - :: - - parent_contexts: - "projects//locations//metadataStores//contexts/" - child_contexts: - "projects//locations//metadataStores//contexts/" - - Each of the above supported filters can be combined together - using logical operators (``AND`` & ``OR``). - - For example: - ``display_name = "test" AND metadata.field1.bool_value = true``. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_size = proto.Field( - proto.INT32, - number=2, - ) - page_token = proto.Field( - proto.STRING, - number=3, - ) - filter = proto.Field( - proto.STRING, - number=4, - ) - - -class ListContextsResponse(proto.Message): - r"""Response message for - [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts]. - - Attributes: - contexts (Sequence[google.cloud.aiplatform_v1beta1.types.Context]): - The Contexts retrieved from the - MetadataStore. - next_page_token (str): - A token, which can be sent as - [ListContextsRequest.page_token][google.cloud.aiplatform.v1beta1.ListContextsRequest.page_token] - to retrieve the next page. If this field is not populated, - there are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - contexts = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_context.Context, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateContextRequest(proto.Message): - r"""Request message for - [MetadataService.UpdateContext][google.cloud.aiplatform.v1beta1.MetadataService.UpdateContext]. - - Attributes: - context (google.cloud.aiplatform_v1beta1.types.Context): - Required. The Context containing updates. The Context's - [Context.name][google.cloud.aiplatform.v1beta1.Context.name] - field is used to identify the Context to be updated. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A FieldMask indicating which fields - should be updated. - allow_missing (bool): - If set to true, and the - [Context][google.cloud.aiplatform.v1beta1.Context] is not - found, a new - [Context][google.cloud.aiplatform.v1beta1.Context] will be - created. In this situation, ``update_mask`` is ignored. - """ - - context = proto.Field( - proto.MESSAGE, - number=1, - message=gca_context.Context, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - allow_missing = proto.Field( - proto.BOOL, - number=3, - ) - - -class DeleteContextRequest(proto.Message): - r"""Request message for - [MetadataService.DeleteContext][google.cloud.aiplatform.v1beta1.MetadataService.DeleteContext]. - - Attributes: - name (str): - Required. The resource name of the Context to - retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} - force (bool): - If set to true, any child resources of this Context will be - deleted. 
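A sketch of the upsert pattern that allow_missing enables on UpdateContext, with hypothetical resource names; allow_missing is not a flattened parameter, so it is set on the request message itself:

from google.cloud import aiplatform_v1beta1
from google.protobuf import field_mask_pb2

client = aiplatform_v1beta1.MetadataServiceClient()
context = aiplatform_v1beta1.Context(
    name=("projects/my-project/locations/us-central1/"
          "metadataStores/default/contexts/run-7"),  # hypothetical
    display_name="training run 7",
)
updated = client.update_context(
    request=aiplatform_v1beta1.UpdateContextRequest(
        context=context,
        update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
        allow_missing=True,  # create the Context if absent; update_mask is then ignored
    )
)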
(Otherwise, the request will fail with a - FAILED_PRECONDITION error if the Context has any child - resources, such as another Context, Artifact, or Execution). - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - force = proto.Field( - proto.BOOL, - number=2, - ) - - -class AddContextArtifactsAndExecutionsRequest(proto.Message): - r"""Request message for - [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. - - Attributes: - context (str): - Required. The resource name of the Context - that the Artifacts and Executions belong to. - Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} - artifacts (Sequence[str]): - The resource names of the Artifacts to - attribute to the Context. - Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} - executions (Sequence[str]): - The resource names of the Executions to - associate with the Context. - - Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} - """ - - context = proto.Field( - proto.STRING, - number=1, - ) - artifacts = proto.RepeatedField( - proto.STRING, - number=2, - ) - executions = proto.RepeatedField( - proto.STRING, - number=3, - ) - - -class AddContextArtifactsAndExecutionsResponse(proto.Message): - r"""Response message for - [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. - """ - - -class AddContextChildrenRequest(proto.Message): - r"""Request message for - [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. - - Attributes: - context (str): - Required. The resource name of the parent - Context. - Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} - child_contexts (Sequence[str]): - The resource names of the child Contexts. - """ - - context = proto.Field( - proto.STRING, - number=1, - ) - child_contexts = proto.RepeatedField( - proto.STRING, - number=2, - ) - - -class AddContextChildrenResponse(proto.Message): - r"""Response message for - [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. - """ - - -class QueryContextLineageSubgraphRequest(proto.Message): - r"""Request message for - [MetadataService.QueryContextLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryContextLineageSubgraph]. - - Attributes: - context (str): - Required. The resource name of the Context whose Artifacts - and Executions should be retrieved as a LineageSubgraph. - Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context} - - The request may error with FAILED_PRECONDITION if the number - of Artifacts, the number of Executions, or the number of - Events that would be returned for the Context exceeds 1000. - """ - - context = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateExecutionRequest(proto.Message): - r"""Request message for - [MetadataService.CreateExecution][google.cloud.aiplatform.v1beta1.MetadataService.CreateExecution]. - - Attributes: - parent (str): - Required. The resource name of the - MetadataStore where the Execution should be - created. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - execution (google.cloud.aiplatform_v1beta1.types.Execution): - Required. 
The Execution to create. - execution_id (str): - The {execution} portion of the resource name with the - format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} - If not provided, the Execution's ID will be a UUID generated - by the service. Must be 4-128 characters in length. Valid - characters are /[a-z][0-9]-/. Must be unique across all - Executions in the parent MetadataStore. (Otherwise the - request will fail with ALREADY_EXISTS, or PERMISSION_DENIED - if the caller can't view the preexisting Execution.) - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - execution = proto.Field( - proto.MESSAGE, - number=2, - message=gca_execution.Execution, - ) - execution_id = proto.Field( - proto.STRING, - number=3, - ) - - -class GetExecutionRequest(proto.Message): - r"""Request message for - [MetadataService.GetExecution][google.cloud.aiplatform.v1beta1.MetadataService.GetExecution]. - - Attributes: - name (str): - Required. The resource name of the Execution - to retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListExecutionsRequest(proto.Message): - r"""Request message for - [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. - - Attributes: - parent (str): - Required. The MetadataStore whose Executions - should be listed. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - page_size (int): - The maximum number of Executions to return. - The service may return fewer. Must be in range - 1-1000, inclusive. Defaults to 100. - page_token (str): - A page token, received from a previous - [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions] - call. Provide this to retrieve the subsequent page. - - When paginating, all other provided parameters must match - the call that provided the page token. (Otherwise the - request will fail with an INVALID_ARGUMENT error.) - filter (str): - Filter specifying the boolean condition for the Executions - to satisfy in order to be part of the result set. The syntax - to define filter query is based on - https://google.aip.dev/160. Following are the supported set - of filters: - - - **Attribute filtering**: For example: - ``display_name = "test"``. Supported fields include: - ``name``, ``display_name``, ``state``, ``schema_title``, - ``create_time``, and ``update_time``. Time fields, such - as ``create_time`` and ``update_time``, require values - specified in RFC-3339 format. For example: - ``create_time = "2020-11-19T11:30:00-04:00"``. - - **Metadata field**: To filter on metadata fields use - traversal operation as follows: - ``metadata..`` For example: - ``metadata.field_1.number_value = 10.0`` - - **Context based filtering**: To filter Executions based - on the contexts to which they belong use the function - operator with the full resource name: - ``in_context()``. For example: - ``in_context("projects//locations//metadataStores//contexts/")`` - - Each of the above supported filters can be combined together - using logical operators (``AND`` & ``OR``). For example: - ``display_name = "test" AND metadata.field1.bool_value = true``. 
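A sketch of listing with one of the AIP-160 filters documented above; the returned pager follows next_page_token transparently, so iteration crosses page boundaries (parent and filter values are illustrative):

from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.MetadataServiceClient()
pager = client.list_executions(
    request=aiplatform_v1beta1.ListExecutionsRequest(
        parent=("projects/my-project/locations/us-central1/"
                "metadataStores/default"),
        filter='display_name = "test" AND metadata.field1.bool_value = true',
        page_size=100,  # 1-1000 inclusive; the service may return fewer
    )
)
for execution in pager:  # subsequent pages are fetched on demand
    print(execution.name)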
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_size = proto.Field( - proto.INT32, - number=2, - ) - page_token = proto.Field( - proto.STRING, - number=3, - ) - filter = proto.Field( - proto.STRING, - number=4, - ) - - -class ListExecutionsResponse(proto.Message): - r"""Response message for - [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. - - Attributes: - executions (Sequence[google.cloud.aiplatform_v1beta1.types.Execution]): - The Executions retrieved from the - MetadataStore. - next_page_token (str): - A token, which can be sent as - [ListExecutionsRequest.page_token][google.cloud.aiplatform.v1beta1.ListExecutionsRequest.page_token] - to retrieve the next page. If this field is not populated, - there are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - executions = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_execution.Execution, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateExecutionRequest(proto.Message): - r"""Request message for - [MetadataService.UpdateExecution][google.cloud.aiplatform.v1beta1.MetadataService.UpdateExecution]. - - Attributes: - execution (google.cloud.aiplatform_v1beta1.types.Execution): - Required. The Execution containing updates. The Execution's - [Execution.name][google.cloud.aiplatform.v1beta1.Execution.name] - field is used to identify the Execution to be updated. - Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A FieldMask indicating which fields - should be updated. - allow_missing (bool): - If set to true, and the - [Execution][google.cloud.aiplatform.v1beta1.Execution] is - not found, a new - [Execution][google.cloud.aiplatform.v1beta1.Execution] will - be created. In this situation, ``update_mask`` is ignored. - """ - - execution = proto.Field( - proto.MESSAGE, - number=1, - message=gca_execution.Execution, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - allow_missing = proto.Field( - proto.BOOL, - number=3, - ) - - -class AddExecutionEventsRequest(proto.Message): - r"""Request message for - [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. - - Attributes: - execution (str): - Required. The resource name of the Execution - that the Events connect Artifacts with. - Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} - events (Sequence[google.cloud.aiplatform_v1beta1.types.Event]): - The Events to create and add. - """ - - execution = proto.Field( - proto.STRING, - number=1, - ) - events = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=event.Event, - ) - - -class AddExecutionEventsResponse(proto.Message): - r"""Response message for - [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. - """ - - -class QueryExecutionInputsAndOutputsRequest(proto.Message): - r"""Request message for - [MetadataService.QueryExecutionInputsAndOutputs][google.cloud.aiplatform.v1beta1.MetadataService.QueryExecutionInputsAndOutputs]. - - Attributes: - execution (str): - Required. The resource name of the Execution - whose input and output Artifacts should be - retrieved as a LineageSubgraph. 
Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution} - """ - - execution = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateMetadataSchemaRequest(proto.Message): - r"""Request message for - [MetadataService.CreateMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataSchema]. - - Attributes: - parent (str): - Required. The resource name of the - MetadataStore where the MetadataSchema should be - created. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - metadata_schema (google.cloud.aiplatform_v1beta1.types.MetadataSchema): - Required. The MetadataSchema to create. - metadata_schema_id (str): - The {metadata_schema} portion of the resource name with the - format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema} - If not provided, the MetadataStore's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are /[a-z][0-9]-/. Must be unique - across all MetadataSchemas in the parent Location. - (Otherwise the request will fail with ALREADY_EXISTS, or - PERMISSION_DENIED if the caller can't view the preexisting - MetadataSchema.) - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - metadata_schema = proto.Field( - proto.MESSAGE, - number=2, - message=gca_metadata_schema.MetadataSchema, - ) - metadata_schema_id = proto.Field( - proto.STRING, - number=3, - ) - - -class GetMetadataSchemaRequest(proto.Message): - r"""Request message for - [MetadataService.GetMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataSchema]. - - Attributes: - name (str): - Required. The resource name of the - MetadataSchema to retrieve. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema} - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListMetadataSchemasRequest(proto.Message): - r"""Request message for - [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. - - Attributes: - parent (str): - Required. The MetadataStore whose - MetadataSchemas should be listed. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore} - page_size (int): - The maximum number of MetadataSchemas to - return. The service may return fewer. - Must be in range 1-1000, inclusive. Defaults to - 100. - page_token (str): - A page token, received from a previous - [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas] - call. Provide this to retrieve the next page. - - When paginating, all other provided parameters must match - the call that provided the page token. (Otherwise the - request will fail with INVALID_ARGUMENT error.) - filter (str): - A query to filter available MetadataSchemas - for matching results. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_size = proto.Field( - proto.INT32, - number=2, - ) - page_token = proto.Field( - proto.STRING, - number=3, - ) - filter = proto.Field( - proto.STRING, - number=4, - ) - - -class ListMetadataSchemasResponse(proto.Message): - r"""Response message for - [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. - - Attributes: - metadata_schemas (Sequence[google.cloud.aiplatform_v1beta1.types.MetadataSchema]): - The MetadataSchemas found for the - MetadataStore. 
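A sketch of registering a custom type with CreateMetadataSchema; the YAML body and IDs are placeholders, and the schema's ``title`` together with the version is what must be unique within the store:

from google.cloud import aiplatform_v1beta1

SCHEMA_YAML = """\
title: my_org.MyDataset
type: object
properties:
  row_count:
    type: integer
"""

client = aiplatform_v1beta1.MetadataServiceClient()
schema = client.create_metadata_schema(
    parent=("projects/my-project/locations/us-central1/"
            "metadataStores/default"),
    metadata_schema=aiplatform_v1beta1.MetadataSchema(
        schema=SCHEMA_YAML,  # raw OpenAPI 3.0.2 schema object, as YAML
        schema_type=aiplatform_v1beta1.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE,
    ),
    metadata_schema_id="my-dataset-schema",  # optional; a UUID if omitted
)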
- next_page_token (str): - A token, which can be sent as - [ListMetadataSchemasRequest.page_token][google.cloud.aiplatform.v1beta1.ListMetadataSchemasRequest.page_token] - to retrieve the next page. If this field is not populated, - there are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - metadata_schemas = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_metadata_schema.MetadataSchema, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class QueryArtifactLineageSubgraphRequest(proto.Message): - r"""Request message for - [MetadataService.QueryArtifactLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryArtifactLineageSubgraph]. - - Attributes: - artifact (str): - Required. The resource name of the Artifact whose Lineage - needs to be retrieved as a LineageSubgraph. Format: - projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact} - - The request may error with FAILED_PRECONDITION if the number - of Artifacts, the number of Executions, or the number of - Events that would be returned for the Context exceeds 1000. - max_hops (int): - Specifies the size of the lineage graph in terms of number - of hops from the specified artifact. Negative Value: - INVALID_ARGUMENT error is returned. 0: Only input artifact - is returned. No value: Transitive closure is performed to - return the complete graph. - filter (str): - Filter specifying the boolean condition for the Artifacts to - satisfy in order to be part of the Lineage Subgraph. The - syntax to define filter query is based on - https://google.aip.dev/160. The supported set of filters - include the following: - - - **Attribute filtering**: For example: - ``display_name = "test"`` Supported fields include: - ``name``, ``display_name``, ``uri``, ``state``, - ``schema_title``, ``create_time``, and ``update_time``. - Time fields, such as ``create_time`` and ``update_time``, - require values specified in RFC-3339 format. For example: - ``create_time = "2020-11-19T11:30:00-04:00"`` - - **Metadata field**: To filter on metadata fields use - traversal operation as follows: - ``metadata.<field_name>.<type_value>``. For example: - ``metadata.field_1.number_value = 10.0`` - - Each of the above supported filter types can be combined - together using logical operators (``AND`` & ``OR``). - - For example: - ``display_name = "test" AND metadata.field1.bool_value = true``. - """ - - artifact = proto.Field( - proto.STRING, - number=1, - ) - max_hops = proto.Field( - proto.INT32, - number=2, - ) - filter = proto.Field( - proto.STRING, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_store.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_store.py deleted file mode 100644 index 1594416c19..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_store.py +++ /dev/null @@ -1,99 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'MetadataStore', - }, -) - - -class MetadataStore(proto.Message): - r"""Instance of a metadata store. Contains a set of metadata that - can be queried. - - Attributes: - name (str): - Output only. The resource name of the - MetadataStore instance. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - MetadataStore was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - MetadataStore was last updated. - encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): - Customer-managed encryption key spec for a - Metadata Store. If set, this Metadata Store and - all sub-resources of this Metadata Store are - secured using this key. - description (str): - Description of the MetadataStore. - state (google.cloud.aiplatform_v1beta1.types.MetadataStore.MetadataStoreState): - Output only. State information of the - MetadataStore. - """ - - class MetadataStoreState(proto.Message): - r"""Represents state information for a MetadataStore. - Attributes: - disk_utilization_bytes (int): - The disk utilization of the MetadataStore in - bytes. - """ - - disk_utilization_bytes = proto.Field( - proto.INT64, - number=1, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - create_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=5, - message=gca_encryption_spec.EncryptionSpec, - ) - description = proto.Field( - proto.STRING, - number=6, - ) - state = proto.Field( - proto.MESSAGE, - number=7, - message=MetadataStoreState, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migratable_resource.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migratable_resource.py deleted file mode 100644 index 0817f504dd..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migratable_resource.py +++ /dev/null @@ -1,209 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'MigratableResource', - }, -) - - -class MigratableResource(proto.Message): - r"""Represents one resource that exists in automl.googleapis.com, - datalabeling.googleapis.com or ml.googleapis.com. 
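The CMEK wiring described above, as a short sketch with a placeholder Cloud KMS key; when set at store creation, the key also secures the store's sub-resources:

from google.cloud import aiplatform_v1beta1

store = aiplatform_v1beta1.MetadataStore(
    description="Store secured with a customer-managed key",
    encryption_spec=aiplatform_v1beta1.EncryptionSpec(
        # Placeholder key name; the key must live in the store's region.
        kms_key_name=("projects/my-project/locations/us-central1/"
                      "keyRings/my-ring/cryptoKeys/my-key"),
    ),
)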
- - Attributes: - ml_engine_model_version (google.cloud.aiplatform_v1beta1.types.MigratableResource.MlEngineModelVersion): - Output only. Represents one Version in - ml.googleapis.com. - automl_model (google.cloud.aiplatform_v1beta1.types.MigratableResource.AutomlModel): - Output only. Represents one Model in - automl.googleapis.com. - automl_dataset (google.cloud.aiplatform_v1beta1.types.MigratableResource.AutomlDataset): - Output only. Represents one Dataset in - automl.googleapis.com. - data_labeling_dataset (google.cloud.aiplatform_v1beta1.types.MigratableResource.DataLabelingDataset): - Output only. Represents one Dataset in - datalabeling.googleapis.com. - last_migrate_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when the last - migration attempt on this MigratableResource - started. Will not be set if there's no migration - attempt on this MigratableResource. - last_update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - MigratableResource was last updated. - """ - - class MlEngineModelVersion(proto.Message): - r"""Represents one model Version in ml.googleapis.com. - Attributes: - endpoint (str): - The ml.googleapis.com endpoint that this model Version - currently lives in. Example values: - - - ml.googleapis.com - - us-centrall-ml.googleapis.com - - europe-west4-ml.googleapis.com - - asia-east1-ml.googleapis.com - version (str): - Full resource name of ml engine model Version. Format: - ``projects/{project}/models/{model}/versions/{version}``. - """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - version = proto.Field( - proto.STRING, - number=2, - ) - - class AutomlModel(proto.Message): - r"""Represents one Model in automl.googleapis.com. - Attributes: - model (str): - Full resource name of automl Model. Format: - ``projects/{project}/locations/{location}/models/{model}``. - model_display_name (str): - The Model's display name in - automl.googleapis.com. - """ - - model = proto.Field( - proto.STRING, - number=1, - ) - model_display_name = proto.Field( - proto.STRING, - number=3, - ) - - class AutomlDataset(proto.Message): - r"""Represents one Dataset in automl.googleapis.com. - Attributes: - dataset (str): - Full resource name of automl Dataset. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}``. - dataset_display_name (str): - The Dataset's display name in - automl.googleapis.com. - """ - - dataset = proto.Field( - proto.STRING, - number=1, - ) - dataset_display_name = proto.Field( - proto.STRING, - number=4, - ) - - class DataLabelingDataset(proto.Message): - r"""Represents one Dataset in datalabeling.googleapis.com. - Attributes: - dataset (str): - Full resource name of data labeling Dataset. Format: - ``projects/{project}/datasets/{dataset}``. - dataset_display_name (str): - The Dataset's display name in - datalabeling.googleapis.com. - data_labeling_annotated_datasets (Sequence[google.cloud.aiplatform_v1beta1.types.MigratableResource.DataLabelingDataset.DataLabelingAnnotatedDataset]): - The migratable AnnotatedDataset in - datalabeling.googleapis.com belongs to the data - labeling Dataset. - """ - - class DataLabelingAnnotatedDataset(proto.Message): - r"""Represents one AnnotatedDataset in - datalabeling.googleapis.com. - - Attributes: - annotated_dataset (str): - Full resource name of data labeling AnnotatedDataset. - Format: - ``projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}``. 
- annotated_dataset_display_name (str): - The AnnotatedDataset's display name in - datalabeling.googleapis.com. - """ - - annotated_dataset = proto.Field( - proto.STRING, - number=1, - ) - annotated_dataset_display_name = proto.Field( - proto.STRING, - number=3, - ) - - dataset = proto.Field( - proto.STRING, - number=1, - ) - dataset_display_name = proto.Field( - proto.STRING, - number=4, - ) - data_labeling_annotated_datasets = proto.RepeatedField( - proto.MESSAGE, - number=3, - message='MigratableResource.DataLabelingDataset.DataLabelingAnnotatedDataset', - ) - - ml_engine_model_version = proto.Field( - proto.MESSAGE, - number=1, - oneof='resource', - message=MlEngineModelVersion, - ) - automl_model = proto.Field( - proto.MESSAGE, - number=2, - oneof='resource', - message=AutomlModel, - ) - automl_dataset = proto.Field( - proto.MESSAGE, - number=3, - oneof='resource', - message=AutomlDataset, - ) - data_labeling_dataset = proto.Field( - proto.MESSAGE, - number=4, - oneof='resource', - message=DataLabelingDataset, - ) - last_migrate_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - last_update_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migration_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migration_service.py deleted file mode 100644 index c851edc95d..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migration_service.py +++ /dev/null @@ -1,439 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import migratable_resource as gca_migratable_resource -from google.cloud.aiplatform_v1beta1.types import operation -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'SearchMigratableResourcesRequest', - 'SearchMigratableResourcesResponse', - 'BatchMigrateResourcesRequest', - 'MigrateResourceRequest', - 'BatchMigrateResourcesResponse', - 'MigrateResourceResponse', - 'BatchMigrateResourcesOperationMetadata', - }, -) - - -class SearchMigratableResourcesRequest(proto.Message): - r"""Request message for - [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. - - Attributes: - parent (str): - Required. The location that the migratable resources should - be searched from. It's the Vertex AI location that the - resources can be migrated to, not the resources' original - location. Format: - ``projects/{project}/locations/{location}`` - page_size (int): - The standard page size. - The default and maximum value is 100. - page_token (str): - The standard page token. - filter (str): - A filter for your search. 
You can use the following types of - filters: - - - Resource type filters. The following strings filter for a - specific type of - [MigratableResource][google.cloud.aiplatform.v1beta1.MigratableResource]: - - - ``ml_engine_model_version:*`` - - ``automl_model:*`` - - ``automl_dataset:*`` - - ``data_labeling_dataset:*`` - - - "Migrated or not" filters. The following strings filter - for resources that either have or have not already been - migrated: - - - ``last_migrate_time:*`` filters for migrated - resources. - - ``NOT last_migrate_time:*`` filters for not yet - migrated resources. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_size = proto.Field( - proto.INT32, - number=2, - ) - page_token = proto.Field( - proto.STRING, - number=3, - ) - filter = proto.Field( - proto.STRING, - number=4, - ) - - -class SearchMigratableResourcesResponse(proto.Message): - r"""Response message for - [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. - - Attributes: - migratable_resources (Sequence[google.cloud.aiplatform_v1beta1.types.MigratableResource]): - All migratable resources that can be migrated - to the location specified in the request. - next_page_token (str): - The standard next-page token. The migratable_resources may - not fill page_size in SearchMigratableResourcesRequest even - when there are subsequent pages. - """ - - @property - def raw_page(self): - return self - - migratable_resources = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_migratable_resource.MigratableResource, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class BatchMigrateResourcesRequest(proto.Message): - r"""Request message for - [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. - - Attributes: - parent (str): - Required. The location of the migrated resource will live - in. Format: ``projects/{project}/locations/{location}`` - migrate_resource_requests (Sequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest]): - Required. The request messages specifying the - resources to migrate. They must be in the same - location as the destination. Up to 50 resources - can be migrated in one batch. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - migrate_resource_requests = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='MigrateResourceRequest', - ) - - -class MigrateResourceRequest(proto.Message): - r"""Config of migrating one resource from automl.googleapis.com, - datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. - - Attributes: - migrate_ml_engine_model_version_config (google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest.MigrateMlEngineModelVersionConfig): - Config for migrating Version in - ml.googleapis.com to Vertex AI's Model. - migrate_automl_model_config (google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest.MigrateAutomlModelConfig): - Config for migrating Model in - automl.googleapis.com to Vertex AI's Model. - migrate_automl_dataset_config (google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest.MigrateAutomlDatasetConfig): - Config for migrating Dataset in - automl.googleapis.com to Vertex AI's Dataset. - migrate_data_labeling_dataset_config (google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest.MigrateDataLabelingDatasetConfig): - Config for migrating Dataset in - datalabeling.googleapis.com to Vertex AI's - Dataset. 
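A sketch of the search flow, combining a resource-type filter from the list above with the returned pager (the parent is hypothetical):

from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.MigrationServiceClient()
pager = client.search_migratable_resources(
    request=aiplatform_v1beta1.SearchMigratableResourcesRequest(
        parent="projects/my-project/locations/us-central1",
        filter="automl_model:*",  # or "NOT last_migrate_time:*" for unmigrated resources
    )
)
for resource in pager:
    print(resource.automl_model.model)  # empty unless this oneof member is set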
- """ - - class MigrateMlEngineModelVersionConfig(proto.Message): - r"""Config for migrating version in ml.googleapis.com to Vertex - AI's Model. - - Attributes: - endpoint (str): - Required. The ml.googleapis.com endpoint that this model - version should be migrated from. Example values: - - - ml.googleapis.com - - - us-centrall-ml.googleapis.com - - - europe-west4-ml.googleapis.com - - - asia-east1-ml.googleapis.com - model_version (str): - Required. Full resource name of ml engine model version. - Format: - ``projects/{project}/models/{model}/versions/{version}``. - model_display_name (str): - Required. Display name of the model in Vertex - AI. System will pick a display name if - unspecified. - """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - model_version = proto.Field( - proto.STRING, - number=2, - ) - model_display_name = proto.Field( - proto.STRING, - number=3, - ) - - class MigrateAutomlModelConfig(proto.Message): - r"""Config for migrating Model in automl.googleapis.com to Vertex - AI's Model. - - Attributes: - model (str): - Required. Full resource name of automl Model. Format: - ``projects/{project}/locations/{location}/models/{model}``. - model_display_name (str): - Optional. Display name of the model in Vertex - AI. System will pick a display name if - unspecified. - """ - - model = proto.Field( - proto.STRING, - number=1, - ) - model_display_name = proto.Field( - proto.STRING, - number=2, - ) - - class MigrateAutomlDatasetConfig(proto.Message): - r"""Config for migrating Dataset in automl.googleapis.com to - Vertex AI's Dataset. - - Attributes: - dataset (str): - Required. Full resource name of automl Dataset. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}``. - dataset_display_name (str): - Required. Display name of the Dataset in - Vertex AI. System will pick a display name if - unspecified. - """ - - dataset = proto.Field( - proto.STRING, - number=1, - ) - dataset_display_name = proto.Field( - proto.STRING, - number=2, - ) - - class MigrateDataLabelingDatasetConfig(proto.Message): - r"""Config for migrating Dataset in datalabeling.googleapis.com - to AI Platform's Dataset. - - Attributes: - dataset (str): - Required. Full resource name of data labeling Dataset. - Format: ``projects/{project}/datasets/{dataset}``. - dataset_display_name (str): - Optional. Display name of the Dataset in - Vertex AI. System will pick a display name if - unspecified. - migrate_data_labeling_annotated_dataset_configs (Sequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest.MigrateDataLabelingDatasetConfig.MigrateDataLabelingAnnotatedDatasetConfig]): - Optional. Configs for migrating - AnnotatedDataset in datalabeling.googleapis.com - to Vertex AI's SavedQuery. The specified - AnnotatedDatasets have to belong to the - datalabeling Dataset. - """ - - class MigrateDataLabelingAnnotatedDatasetConfig(proto.Message): - r"""Config for migrating AnnotatedDataset in - datalabeling.googleapis.com to Vertex AI's SavedQuery. - - Attributes: - annotated_dataset (str): - Required. Full resource name of data labeling - AnnotatedDataset. Format: - ``projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}``. 
- """ - - annotated_dataset = proto.Field( - proto.STRING, - number=1, - ) - - dataset = proto.Field( - proto.STRING, - number=1, - ) - dataset_display_name = proto.Field( - proto.STRING, - number=2, - ) - migrate_data_labeling_annotated_dataset_configs = proto.RepeatedField( - proto.MESSAGE, - number=3, - message='MigrateResourceRequest.MigrateDataLabelingDatasetConfig.MigrateDataLabelingAnnotatedDatasetConfig', - ) - - migrate_ml_engine_model_version_config = proto.Field( - proto.MESSAGE, - number=1, - oneof='request', - message=MigrateMlEngineModelVersionConfig, - ) - migrate_automl_model_config = proto.Field( - proto.MESSAGE, - number=2, - oneof='request', - message=MigrateAutomlModelConfig, - ) - migrate_automl_dataset_config = proto.Field( - proto.MESSAGE, - number=3, - oneof='request', - message=MigrateAutomlDatasetConfig, - ) - migrate_data_labeling_dataset_config = proto.Field( - proto.MESSAGE, - number=4, - oneof='request', - message=MigrateDataLabelingDatasetConfig, - ) - - -class BatchMigrateResourcesResponse(proto.Message): - r"""Response message for - [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. - - Attributes: - migrate_resource_responses (Sequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceResponse]): - Successfully migrated resources. - """ - - migrate_resource_responses = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='MigrateResourceResponse', - ) - - -class MigrateResourceResponse(proto.Message): - r"""Describes a successfully migrated resource. - Attributes: - dataset (str): - Migrated Dataset's resource name. - model (str): - Migrated Model's resource name. - migratable_resource (google.cloud.aiplatform_v1beta1.types.MigratableResource): - Before migration, the identifier in - ml.googleapis.com, automl.googleapis.com or - datalabeling.googleapis.com. - """ - - dataset = proto.Field( - proto.STRING, - number=1, - oneof='migrated_resource', - ) - model = proto.Field( - proto.STRING, - number=2, - oneof='migrated_resource', - ) - migratable_resource = proto.Field( - proto.MESSAGE, - number=3, - message=gca_migratable_resource.MigratableResource, - ) - - -class BatchMigrateResourcesOperationMetadata(proto.Message): - r"""Runtime operation information for - [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The common part of the operation metadata. - partial_results (Sequence[google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesOperationMetadata.PartialResult]): - Partial results that reflect the latest - migration operation progress. - """ - - class PartialResult(proto.Message): - r"""Represents a partial result in batch migration operation for one - [MigrateResourceRequest][google.cloud.aiplatform.v1beta1.MigrateResourceRequest]. - - Attributes: - error (google.rpc.status_pb2.Status): - The error result of the migration request in - case of failure. - model (str): - Migrated model resource name. - dataset (str): - Migrated dataset resource name. - request (google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest): - It's the same as the value in - [MigrateResourceRequest.migrate_resource_requests][]. 
- """ - - error = proto.Field( - proto.MESSAGE, - number=2, - oneof='result', - message=status_pb2.Status, - ) - model = proto.Field( - proto.STRING, - number=3, - oneof='result', - ) - dataset = proto.Field( - proto.STRING, - number=4, - oneof='result', - ) - request = proto.Field( - proto.MESSAGE, - number=1, - message='MigrateResourceRequest', - ) - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - partial_results = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=PartialResult, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model.py deleted file mode 100644 index 1a068335a6..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model.py +++ /dev/null @@ -1,752 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import deployed_model_ref -from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1beta1.types import env_var -from google.cloud.aiplatform_v1beta1.types import explanation -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'Model', - 'PredictSchemata', - 'ModelContainerSpec', - 'Port', - }, -) - - -class Model(proto.Message): - r"""A trained machine learning Model. - Attributes: - name (str): - The resource name of the Model. - display_name (str): - Required. The display name of the Model. - The name can be up to 128 characters long and - can be consist of any UTF-8 characters. - description (str): - The description of the Model. - predict_schemata (google.cloud.aiplatform_v1beta1.types.PredictSchemata): - The schemata that describe formats of the Model's - predictions and explanations as given and returned via - [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] - and - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. - metadata_schema_uri (str): - Immutable. Points to a YAML file stored on Google Cloud - Storage describing additional information about the Model, - that is specific to it. Unset if the Model does not have any - additional information. The schema is defined as an OpenAPI - 3.0.2 `Schema - Object `__. - AutoML Models always have this field populated by Vertex AI, - if no additional metadata is needed, this field is set to an - empty string. Note: The URI given on output will be - immutable and probably different, including the URI scheme, - than the one given on input. The output URI will point to a - location where the user only has a read access. 
- metadata (google.protobuf.struct_pb2.Value): - Immutable. An additional information about the Model; the - schema of the metadata can be found in - [metadata_schema][google.cloud.aiplatform.v1beta1.Model.metadata_schema_uri]. - Unset if the Model does not have any additional information. - supported_export_formats (Sequence[google.cloud.aiplatform_v1beta1.types.Model.ExportFormat]): - Output only. The formats in which this Model - may be exported. If empty, this Model is not - available for export. - training_pipeline (str): - Output only. The resource name of the - TrainingPipeline that uploaded this Model, if - any. - container_spec (google.cloud.aiplatform_v1beta1.types.ModelContainerSpec): - Input only. The specification of the container that is to be - used when deploying this Model. The specification is - ingested upon - [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel], - and all binaries it contains are copied and stored - internally by Vertex AI. Not present for AutoML Models. - artifact_uri (str): - Immutable. The path to the directory - containing the Model artifact and any of its - supporting files. Not present for AutoML Models. - supported_deployment_resources_types (Sequence[google.cloud.aiplatform_v1beta1.types.Model.DeploymentResourcesType]): - Output only. When this Model is deployed, its prediction - resources are described by the ``prediction_resources`` - field of the - [Endpoint.deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] - object. Because not all Models support all resource - configuration types, the configuration types this Model - supports are listed here. If no configuration types are - listed, the Model cannot be deployed to an - [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] and - does not support online predictions - ([PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] - or - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]). - Such a Model can serve predictions by using a - [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob], - if it has at least one entry each in - [supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats] - and - [supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats]. - supported_input_storage_formats (Sequence[str]): - Output only. The formats this Model supports in - [BatchPredictionJob.input_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. - If - [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] - exists, the instances should be given as per that schema. - - The possible formats are: - - - ``jsonl`` The JSON Lines format, where each instance is a - single line. Uses - [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. - - - ``csv`` The CSV format, where each instance is a single - comma-separated line. The first line in the file is the - header, containing comma-separated field names. Uses - [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. - - - ``tf-record`` The TFRecord format, where each instance is - a single record in tfrecord syntax. Uses - [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. - - - ``tf-record-gzip`` Similar to ``tf-record``, but the file - is gzipped. 
Uses - [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. - - - ``bigquery`` Each instance is a single row in BigQuery. - Uses - [BigQuerySource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.bigquery_source]. - - - ``file-list`` Each line of the file is the location of an - instance to process, uses ``gcs_source`` field of the - [InputConfig][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig] - object. - - If this Model doesn't support any of these formats it means - it cannot be used with a - [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. - However, if it has - [supported_deployment_resources_types][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types], - it could serve online predictions by using - [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] - or - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. - supported_output_storage_formats (Sequence[str]): - Output only. The formats this Model supports in - [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config]. - If both - [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] - and - [PredictSchemata.prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri] - exist, the predictions are returned together with their - instances. In other words, the prediction has the original - instance data first, followed by the actual prediction - content (as per the schema). - - The possible formats are: - - - ``jsonl`` The JSON Lines format, where each prediction is - a single line. Uses - [GcsDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.gcs_destination]. - - - ``csv`` The CSV format, where each prediction is a single - comma-separated line. The first line in the file is the - header, containing comma-separated field names. Uses - [GcsDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.gcs_destination]. - - - ``bigquery`` Each prediction is a single row in a - BigQuery table, uses - [BigQueryDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.bigquery_destination] - . - - If this Model doesn't support any of these formats it means - it cannot be used with a - [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. - However, if it has - [supported_deployment_resources_types][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types], - it could serve online predictions by using - [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] - or - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Model was - uploaded into Vertex AI. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Model was - most recently updated. - deployed_models (Sequence[google.cloud.aiplatform_v1beta1.types.DeployedModelRef]): - Output only. The pointers to DeployedModels - created from this Model. Note that Model could - have been deployed to Endpoints in different - Locations. - explanation_spec (google.cloud.aiplatform_v1beta1.types.ExplanationSpec): - The default explanation specification for this Model. 
- - The Model can be used for [requesting - explanation][PredictionService.Explain] after being - [deployed][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel] - if it is populated. The Model can be used for [batch - explanation][BatchPredictionJob.generate_explanation] if it - is populated. - - All fields of the explanation_spec can be overridden by - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] - of - [DeployModelRequest.deployed_model][google.cloud.aiplatform.v1beta1.DeployModelRequest.deployed_model], - or - [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] - of - [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. - - If the default explanation specification is not set for this - Model, this Model can still be used for [requesting - explanation][PredictionService.Explain] by setting - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] - of - [DeployModelRequest.deployed_model][google.cloud.aiplatform.v1beta1.DeployModelRequest.deployed_model] - and for [batch - explanation][BatchPredictionJob.generate_explanation] by - setting - [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] - of - [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. - etag (str): - Used to perform consistent read-modify-write - updates. If not set, a blind "overwrite" update - happens. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Model.LabelsEntry]): - The labels with user-defined metadata to - organize your Models. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): - Customer-managed encryption key spec for a - Model. If set, this Model and all sub-resources - of this Model will be secured by this key. - """ - class DeploymentResourcesType(proto.Enum): - r"""Identifies a type of Model's prediction resources.""" - DEPLOYMENT_RESOURCES_TYPE_UNSPECIFIED = 0 - DEDICATED_RESOURCES = 1 - AUTOMATIC_RESOURCES = 2 - - class ExportFormat(proto.Message): - r"""Represents export format supported by the Model. - All formats export to Google Cloud Storage. - - Attributes: - id (str): - Output only. The ID of the export format. The possible - format IDs are: - - - ``tflite`` Used for Android mobile devices. - - - ``edgetpu-tflite`` Used for `Edge - TPU `__ devices. - - - ``tf-saved-model`` A tensorflow model in SavedModel - format. - - - ``tf-js`` A - `TensorFlow.js `__ model - that can be used in the browser and in Node.js using - JavaScript. - - - ``core-ml`` Used for iOS mobile devices. - - - ``custom-trained`` A Model that was uploaded or trained - by custom code. - exportable_contents (Sequence[google.cloud.aiplatform_v1beta1.types.Model.ExportFormat.ExportableContent]): - Output only. The content of this Model that - may be exported. 
- """ - class ExportableContent(proto.Enum): - r"""The Model content that can be exported.""" - EXPORTABLE_CONTENT_UNSPECIFIED = 0 - ARTIFACT = 1 - IMAGE = 2 - - id = proto.Field( - proto.STRING, - number=1, - ) - exportable_contents = proto.RepeatedField( - proto.ENUM, - number=2, - enum='Model.ExportFormat.ExportableContent', - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - description = proto.Field( - proto.STRING, - number=3, - ) - predict_schemata = proto.Field( - proto.MESSAGE, - number=4, - message='PredictSchemata', - ) - metadata_schema_uri = proto.Field( - proto.STRING, - number=5, - ) - metadata = proto.Field( - proto.MESSAGE, - number=6, - message=struct_pb2.Value, - ) - supported_export_formats = proto.RepeatedField( - proto.MESSAGE, - number=20, - message=ExportFormat, - ) - training_pipeline = proto.Field( - proto.STRING, - number=7, - ) - container_spec = proto.Field( - proto.MESSAGE, - number=9, - message='ModelContainerSpec', - ) - artifact_uri = proto.Field( - proto.STRING, - number=26, - ) - supported_deployment_resources_types = proto.RepeatedField( - proto.ENUM, - number=10, - enum=DeploymentResourcesType, - ) - supported_input_storage_formats = proto.RepeatedField( - proto.STRING, - number=11, - ) - supported_output_storage_formats = proto.RepeatedField( - proto.STRING, - number=12, - ) - create_time = proto.Field( - proto.MESSAGE, - number=13, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=14, - message=timestamp_pb2.Timestamp, - ) - deployed_models = proto.RepeatedField( - proto.MESSAGE, - number=15, - message=deployed_model_ref.DeployedModelRef, - ) - explanation_spec = proto.Field( - proto.MESSAGE, - number=23, - message=explanation.ExplanationSpec, - ) - etag = proto.Field( - proto.STRING, - number=16, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=17, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=24, - message=gca_encryption_spec.EncryptionSpec, - ) - - -class PredictSchemata(proto.Message): - r"""Contains the schemata used in Model's predictions and explanations - via - [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict], - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain] - and - [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. - - Attributes: - instance_schema_uri (str): - Immutable. Points to a YAML file stored on Google Cloud - Storage describing the format of a single instance, which - are used in - [PredictRequest.instances][google.cloud.aiplatform.v1beta1.PredictRequest.instances], - [ExplainRequest.instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances] - and - [BatchPredictionJob.input_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. - The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. - AutoML Models always have this field populated by Vertex AI. - Note: The URI given on output will be immutable and probably - different, including the URI scheme, than the one given on - input. The output URI will point to a location where the - user only has a read access. - parameters_schema_uri (str): - Immutable. 
Points to a YAML file stored on Google Cloud - Storage describing the parameters of prediction and - explanation via - [PredictRequest.parameters][google.cloud.aiplatform.v1beta1.PredictRequest.parameters], - [ExplainRequest.parameters][google.cloud.aiplatform.v1beta1.ExplainRequest.parameters] - and - [BatchPredictionJob.model_parameters][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model_parameters]. - The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. - AutoML Models always have this field populated by Vertex AI, - if no parameters are supported, then it is set to an empty - string. Note: The URI given on output will be immutable and - probably different, including the URI scheme, than the one - given on input. The output URI will point to a location - where the user only has a read access. - prediction_schema_uri (str): - Immutable. Points to a YAML file stored on Google Cloud - Storage describing the format of a single prediction - produced by this Model, which are returned via - [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions], - [ExplainResponse.explanations][google.cloud.aiplatform.v1beta1.ExplainResponse.explanations], - and - [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config]. - The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. - AutoML Models always have this field populated by Vertex AI. - Note: The URI given on output will be immutable and probably - different, including the URI scheme, than the one given on - input. The output URI will point to a location where the - user only has a read access. - """ - - instance_schema_uri = proto.Field( - proto.STRING, - number=1, - ) - parameters_schema_uri = proto.Field( - proto.STRING, - number=2, - ) - prediction_schema_uri = proto.Field( - proto.STRING, - number=3, - ) - - -class ModelContainerSpec(proto.Message): - r"""Specification of a container for serving predictions. Some fields in - this message correspond to fields in the `Kubernetes Container v1 - core - specification `__. - - Attributes: - image_uri (str): - Required. Immutable. URI of the Docker image to be used as - the custom container for serving predictions. This URI must - identify an image in Artifact Registry or Container - Registry. Learn more about the `container publishing - requirements `__, - including permissions requirements for the AI Platform - Service Agent. - - The container image is ingested upon - [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel], - stored internally, and this original path is afterwards not - used. - - To learn about the requirements for the Docker image itself, - see `Custom container - requirements `__. - - You can use the URI to one of Vertex AI's `pre-built - container images for - prediction `__ - in this field. - command (Sequence[str]): - Immutable. Specifies the command that runs when the - container starts. This overrides the container's - `ENTRYPOINT `__. - Specify this field as an array of executable and arguments, - similar to a Docker ``ENTRYPOINT``'s "exec" form, not its - "shell" form. - - If you do not specify this field, then the container's - ``ENTRYPOINT`` runs, in conjunction with the - [args][google.cloud.aiplatform.v1beta1.ModelContainerSpec.args] - field or the container's - ```CMD`` `__, - if either exists. 
If this field is not specified and the - container does not have an ``ENTRYPOINT``, then refer to the - Docker documentation about `how ``CMD`` and ``ENTRYPOINT`` - interact `__. - - If you specify this field, then you can also specify the - ``args`` field to provide additional arguments for this - command. However, if you specify this field, then the - container's ``CMD`` is ignored. See the `Kubernetes - documentation about how the ``command`` and ``args`` fields - interact with a container's ``ENTRYPOINT`` and - ``CMD`` `__. - - In this field, you can reference `environment variables set - by Vertex - AI `__ - and environment variables set in the - [env][google.cloud.aiplatform.v1beta1.ModelContainerSpec.env] - field. You cannot reference environment variables set in the - Docker image. In order for environment variables to be - expanded, reference them by using the following syntax: - $(VARIABLE_NAME) Note that this differs from Bash variable - expansion, which does not use parentheses. If a variable - cannot be resolved, the reference in the input string is - used unchanged. To avoid variable expansion, you can escape - this syntax with ``$$``; for example: $$(VARIABLE_NAME) This - field corresponds to the ``command`` field of the Kubernetes - Containers `v1 core - API `__. - args (Sequence[str]): - Immutable. Specifies arguments for the command that runs - when the container starts. This overrides the container's - ```CMD`` `__. - Specify this field as an array of executable and arguments, - similar to a Docker ``CMD``'s "default parameters" form. - - If you don't specify this field but do specify the - [command][google.cloud.aiplatform.v1beta1.ModelContainerSpec.command] - field, then the command from the ``command`` field runs - without any additional arguments. See the `Kubernetes - documentation about how the ``command`` and ``args`` fields - interact with a container's ``ENTRYPOINT`` and - ``CMD`` `__. - - If you don't specify this field and don't specify the - ``command`` field, then the container's - ```ENTRYPOINT`` `__ - and ``CMD`` determine what runs based on their default - behavior. See the Docker documentation about `how ``CMD`` - and ``ENTRYPOINT`` - interact `__. - - In this field, you can reference `environment variables set - by Vertex - AI `__ - and environment variables set in the - [env][google.cloud.aiplatform.v1beta1.ModelContainerSpec.env] - field. You cannot reference environment variables set in the - Docker image. In order for environment variables to be - expanded, reference them by using the following syntax: - $(VARIABLE_NAME) Note that this differs from Bash variable - expansion, which does not use parentheses. If a variable - cannot be resolved, the reference in the input string is - used unchanged. To avoid variable expansion, you can escape - this syntax with ``$$``; for example: $$(VARIABLE_NAME) This - field corresponds to the ``args`` field of the Kubernetes - Containers `v1 core - API `__. - env (Sequence[google.cloud.aiplatform_v1beta1.types.EnvVar]): - Immutable. List of environment variables to set in the - container. After the container starts running, code running - in the container can read these environment variables. - - Additionally, the - [command][google.cloud.aiplatform.v1beta1.ModelContainerSpec.command] - and - [args][google.cloud.aiplatform.v1beta1.ModelContainerSpec.args] - fields can reference these variables. Later entries in this - list can also reference earlier entries. 
For example, the - following example sets the variable ``VAR_2`` to have the - value ``foo bar``: - - .. code:: json - - [ - { - "name": "VAR_1", - "value": "foo" - }, - { - "name": "VAR_2", - "value": "$(VAR_1) bar" - } - ] - - If you switch the order of the variables in the example, - then the expansion does not occur. - - This field corresponds to the ``env`` field of the - Kubernetes Containers `v1 core - API `__. - ports (Sequence[google.cloud.aiplatform_v1beta1.types.Port]): - Immutable. List of ports to expose from the container. - Vertex AI sends any prediction requests that it receives to - the first port on this list. AI Platform also sends - `liveness and health - checks `__ - to this port. - - If you do not specify this field, it defaults to following - value: - - .. code:: json - - [ - { - "containerPort": 8080 - } - ] - - Vertex AI does not use ports other than the first one - listed. This field corresponds to the ``ports`` field of the - Kubernetes Containers `v1 core - API `__. - predict_route (str): - Immutable. HTTP path on the container to send prediction - requests to. Vertex AI forwards requests sent using - [projects.locations.endpoints.predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] - to this path on the container's IP address and port. Vertex - AI then returns the container's response in the API - response. - - For example, if you set this field to ``/foo``, then when - Vertex AI receives a prediction request, it forwards the - request body in a POST request to the ``/foo`` path on the - port of your container specified by the first value of this - ``ModelContainerSpec``'s - [ports][google.cloud.aiplatform.v1beta1.ModelContainerSpec.ports] - field. - - If you don't specify this field, it defaults to the - following value when you [deploy this Model to an - Endpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]: - /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict - The placeholders in this value are replaced as follows: - - - ENDPOINT: The last segment (following ``endpoints/``)of - the Endpoint.name][] field of the Endpoint where this - Model has been deployed. (Vertex AI makes this value - available to your container code as the - ```AIP_ENDPOINT_ID`` environment - variable `__.) - - - DEPLOYED_MODEL: - [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] - of the ``DeployedModel``. (Vertex AI makes this value - available to your container code as the - ```AIP_DEPLOYED_MODEL_ID`` environment - variable `__.) - health_route (str): - Immutable. HTTP path on the container to send health checks - to. Vertex AI intermittently sends GET requests to this path - on the container's IP address and port to check that the - container is healthy. Read more about `health - checks `__. - - For example, if you set this field to ``/bar``, then Vertex - AI intermittently sends a GET request to the ``/bar`` path - on the port of your container specified by the first value - of this ``ModelContainerSpec``'s - [ports][google.cloud.aiplatform.v1beta1.ModelContainerSpec.ports] - field. - - If you don't specify this field, it defaults to the - following value when you [deploy this Model to an - Endpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]: - /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict - The placeholders in this value are replaced as follows: - - - ENDPOINT: The last segment (following ``endpoints/``)of - the Endpoint.name][] field of the Endpoint where this - Model has been deployed. 
(Vertex AI makes this value - available to your container code as the - ```AIP_ENDPOINT_ID`` environment - variable `__.) - - - DEPLOYED_MODEL: - [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] - of the ``DeployedModel``. (Vertex AI makes this value - available to your container code as the - ```AIP_DEPLOYED_MODEL_ID`` environment - variable `__.) - """ - - image_uri = proto.Field( - proto.STRING, - number=1, - ) - command = proto.RepeatedField( - proto.STRING, - number=2, - ) - args = proto.RepeatedField( - proto.STRING, - number=3, - ) - env = proto.RepeatedField( - proto.MESSAGE, - number=4, - message=env_var.EnvVar, - ) - ports = proto.RepeatedField( - proto.MESSAGE, - number=5, - message='Port', - ) - predict_route = proto.Field( - proto.STRING, - number=6, - ) - health_route = proto.Field( - proto.STRING, - number=7, - ) - - -class Port(proto.Message): - r"""Represents a network port in a container. - Attributes: - container_port (int): - The number of the port to expose on the pod's - IP address. Must be a valid port number, between - 1 and 65535 inclusive. - """ - - container_port = proto.Field( - proto.INT32, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py deleted file mode 100644 index db9cad318c..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py +++ /dev/null @@ -1,407 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats -from google.cloud.aiplatform_v1beta1.types import io -from google.cloud.aiplatform_v1beta1.types import job_state -from google.cloud.aiplatform_v1beta1.types import model_monitoring -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'ModelDeploymentMonitoringObjectiveType', - 'ModelDeploymentMonitoringJob', - 'ModelDeploymentMonitoringBigQueryTable', - 'ModelDeploymentMonitoringObjectiveConfig', - 'ModelDeploymentMonitoringScheduleConfig', - 'ModelMonitoringStatsAnomalies', - }, -) - - -class ModelDeploymentMonitoringObjectiveType(proto.Enum): - r"""The Model Monitoring Objective types.""" - MODEL_DEPLOYMENT_MONITORING_OBJECTIVE_TYPE_UNSPECIFIED = 0 - RAW_FEATURE_SKEW = 1 - RAW_FEATURE_DRIFT = 2 - FEATURE_ATTRIBUTION_SKEW = 3 - FEATURE_ATTRIBUTION_DRIFT = 4 - - -class ModelDeploymentMonitoringJob(proto.Message): - r"""Represents a job that runs periodically to monitor the - deployed models in an endpoint. 
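For reference, a minimal sketch of the ``ModelContainerSpec`` and ``Port`` messages defined earlier (the image URI, routes, and variable values are hypothetical; this only illustrates the env-expansion and default-port behavior described in that docstring):

.. code:: python

    from google.cloud import aiplatform_v1beta1 as aip

    # Hypothetical serving container for a custom-trained Model.
    container_spec = aip.ModelContainerSpec(
        image_uri="us-docker.pkg.dev/my-project/my-repo/my-server:latest",
        command=["python3", "server.py"],
        args=["--model-dir", "$(AIP_STORAGE_URI)"],
        env=[
            aip.EnvVar(name="VAR_1", value="foo"),
            aip.EnvVar(name="VAR_2", value="$(VAR_1) bar"),  # expands VAR_1
        ],
        ports=[aip.Port(container_port=8080)],  # first port receives requests
        predict_route="/foo",
        health_route="/bar",
    )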
It will analyze the logged - training & prediction data to detect any abnormal behaviors. - - Attributes: - name (str): - Output only. Resource name of a - ModelDeploymentMonitoringJob. - display_name (str): - Required. The user-defined name of the - ModelDeploymentMonitoringJob. The name can be up - to 128 characters long and can consist of any - UTF-8 characters. - endpoint (str): - Required. Endpoint resource name. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - state (google.cloud.aiplatform_v1beta1.types.JobState): - Output only. The detailed state of the - monitoring job. While the job is being created, - the state is 'PENDING'. Once the job is - successfully created, the state becomes - 'RUNNING'. Pausing the job sets the state to - 'PAUSED'; - resuming it returns the state to - 'RUNNING'. - schedule_state (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob.MonitoringScheduleState): - Output only. Schedule state when the - monitoring job is in Running state. - model_deployment_monitoring_objective_configs (Sequence[google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringObjectiveConfig]): - Required. The config for monitoring - objectives. This is a per-DeployedModel config; - each DeployedModel needs to be configured - separately. - model_deployment_monitoring_schedule_config (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringScheduleConfig): - Required. Schedule config for running the - monitoring job. - logging_sampling_strategy (google.cloud.aiplatform_v1beta1.types.SamplingStrategy): - Required. Sampling strategy for logging. - model_monitoring_alert_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringAlertConfig): - Alert config for model monitoring. - predict_instance_schema_uri (str): - URI of a YAML schema file describing the format - of a single instance given to this Endpoint for - prediction (and explanation). If - not set, the predict schema is generated from - collected predict requests. - sample_predict_instance (google.protobuf.struct_pb2.Value): - Sample Predict instance, in the same format as - [PredictRequest.instances][google.cloud.aiplatform.v1beta1.PredictRequest.instances]; - this can be set as a replacement of - [ModelDeploymentMonitoringJob.predict_instance_schema_uri][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.predict_instance_schema_uri]. - If not set, the predict schema is generated from collected - predict requests. - analysis_instance_schema_uri (str): - URI of a YAML schema file describing the format of a single - instance that you want Tensorflow Data Validation (TFDV) to - analyze. - - If this field is empty, all the feature data types are - inferred from - [predict_instance_schema_uri][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.predict_instance_schema_uri], - meaning that TFDV will use the data in the exact format (data - type) as the prediction request/response. If there are any data - type differences between the predict instance and the TFDV - instance, this field can be used to override the schema. For - models trained with Vertex AI, this field must be set, with all - the fields of the predict instance formatted as strings. - bigquery_tables (Sequence[google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringBigQueryTable]): - Output only. The BigQuery tables created for - the job under the customer project. Customers - can run their own queries and analysis.
There could be 4 - log tables in maximum: - 1. Training data logging predict - request/response 2. Serving data logging predict - request/response - log_ttl (google.protobuf.duration_pb2.Duration): - The TTL of BigQuery tables in user projects - which stores logs. A day is the basic unit of - the TTL and we take the ceil of TTL/86400(a - day). e.g. { second: 3600} indicates ttl = 1 - day. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob.LabelsEntry]): - The labels with user-defined metadata to - organize your ModelDeploymentMonitoringJob. - - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - ModelDeploymentMonitoringJob was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - ModelDeploymentMonitoringJob was updated most - recently. - next_schedule_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this monitoring - pipeline will be scheduled to run for the next - round. - stats_anomalies_base_directory (google.cloud.aiplatform_v1beta1.types.GcsDestination): - Stats anomalies base folder path. - """ - class MonitoringScheduleState(proto.Enum): - r"""The state to Specify the monitoring pipeline.""" - MONITORING_SCHEDULE_STATE_UNSPECIFIED = 0 - PENDING = 1 - OFFLINE = 2 - RUNNING = 3 - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - endpoint = proto.Field( - proto.STRING, - number=3, - ) - state = proto.Field( - proto.ENUM, - number=4, - enum=job_state.JobState, - ) - schedule_state = proto.Field( - proto.ENUM, - number=5, - enum=MonitoringScheduleState, - ) - model_deployment_monitoring_objective_configs = proto.RepeatedField( - proto.MESSAGE, - number=6, - message='ModelDeploymentMonitoringObjectiveConfig', - ) - model_deployment_monitoring_schedule_config = proto.Field( - proto.MESSAGE, - number=7, - message='ModelDeploymentMonitoringScheduleConfig', - ) - logging_sampling_strategy = proto.Field( - proto.MESSAGE, - number=8, - message=model_monitoring.SamplingStrategy, - ) - model_monitoring_alert_config = proto.Field( - proto.MESSAGE, - number=15, - message=model_monitoring.ModelMonitoringAlertConfig, - ) - predict_instance_schema_uri = proto.Field( - proto.STRING, - number=9, - ) - sample_predict_instance = proto.Field( - proto.MESSAGE, - number=19, - message=struct_pb2.Value, - ) - analysis_instance_schema_uri = proto.Field( - proto.STRING, - number=16, - ) - bigquery_tables = proto.RepeatedField( - proto.MESSAGE, - number=10, - message='ModelDeploymentMonitoringBigQueryTable', - ) - log_ttl = proto.Field( - proto.MESSAGE, - number=17, - message=duration_pb2.Duration, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=11, - ) - create_time = proto.Field( - proto.MESSAGE, - number=12, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=13, - message=timestamp_pb2.Timestamp, - ) - next_schedule_time = proto.Field( - proto.MESSAGE, - number=14, - message=timestamp_pb2.Timestamp, - ) - stats_anomalies_base_directory = proto.Field( - proto.MESSAGE, - number=20, - message=io.GcsDestination, - ) - - -class 
ModelDeploymentMonitoringBigQueryTable(proto.Message): - r"""ModelDeploymentMonitoringBigQueryTable specifies the BigQuery - table name as well as some information about the logs stored in - this table. - - Attributes: - log_source (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringBigQueryTable.LogSource): - The source of the log. - log_type (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringBigQueryTable.LogType): - The type of the log. - bigquery_table_path (str): - The created BigQuery table to store logs. Customers can run - their own queries and analysis. Format: - ``bq://.model_deployment_monitoring_._`` - """ - class LogSource(proto.Enum): - r"""Indicates where the log comes from.""" - LOG_SOURCE_UNSPECIFIED = 0 - TRAINING = 1 - SERVING = 2 - - class LogType(proto.Enum): - r"""Indicates what type of traffic the log belongs to.""" - LOG_TYPE_UNSPECIFIED = 0 - PREDICT = 1 - EXPLAIN = 2 - - log_source = proto.Field( - proto.ENUM, - number=1, - enum=LogSource, - ) - log_type = proto.Field( - proto.ENUM, - number=2, - enum=LogType, - ) - bigquery_table_path = proto.Field( - proto.STRING, - number=3, - ) - - -class ModelDeploymentMonitoringObjectiveConfig(proto.Message): - r"""ModelDeploymentMonitoringObjectiveConfig pairs a - deployed_model_id with a ModelMonitoringObjectiveConfig. - - Attributes: - deployed_model_id (str): - The DeployedModel ID of the objective config. - objective_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig): - The objective config for the model - monitoring job of this deployed model. - """ - - deployed_model_id = proto.Field( - proto.STRING, - number=1, - ) - objective_config = proto.Field( - proto.MESSAGE, - number=2, - message=model_monitoring.ModelMonitoringObjectiveConfig, - ) - - -class ModelDeploymentMonitoringScheduleConfig(proto.Message): - r"""The config for scheduling the monitoring job. - Attributes: - monitor_interval (google.protobuf.duration_pb2.Duration): - Required. The model monitoring job running - interval. It will be rounded up to the next full - hour. - """ - - monitor_interval = proto.Field( - proto.MESSAGE, - number=1, - message=duration_pb2.Duration, - ) - - -class ModelMonitoringStatsAnomalies(proto.Message): - r"""Statistics and anomalies generated by Model Monitoring. - Attributes: - objective (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringObjectiveType): - The Model Monitoring Objective these stats - and anomalies belong to. - deployed_model_id (str): - Deployed Model ID. - anomaly_count (int): - Number of anomalies within all stats. - feature_stats (Sequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies]): - A list of historical Stats and Anomalies - generated for all Features. - """ - - class FeatureHistoricStatsAnomalies(proto.Message): - r"""Historical Stats (and Anomalies) for a specific Feature. - Attributes: - feature_display_name (str): - Display Name of the Feature. - threshold (google.cloud.aiplatform_v1beta1.types.ThresholdConfig): - Threshold for anomaly detection. - training_stats (google.cloud.aiplatform_v1beta1.types.FeatureStatsAnomaly): - Stats calculated for the Training Dataset. - prediction_stats (Sequence[google.cloud.aiplatform_v1beta1.types.FeatureStatsAnomaly]): - A list of historical stats generated for the - Prediction Dataset over different time windows.
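As a rough consumption sketch (``stats_anomalies`` is assumed to have been obtained elsewhere, e.g. from a JobService stats-anomalies search; nothing below is prescribed by this message itself):

.. code:: python

    # Hypothetical: walk the per-feature historical stats described above.
    for feature in stats_anomalies.feature_stats:
        print(feature.feature_display_name, feature.threshold.value)
        for window_stats in feature.prediction_stats:
            print(window_stats)  # one FeatureStatsAnomaly per time window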
- """ - - feature_display_name = proto.Field( - proto.STRING, - number=1, - ) - threshold = proto.Field( - proto.MESSAGE, - number=3, - message=model_monitoring.ThresholdConfig, - ) - training_stats = proto.Field( - proto.MESSAGE, - number=4, - message=feature_monitoring_stats.FeatureStatsAnomaly, - ) - prediction_stats = proto.RepeatedField( - proto.MESSAGE, - number=5, - message=feature_monitoring_stats.FeatureStatsAnomaly, - ) - - objective = proto.Field( - proto.ENUM, - number=1, - enum='ModelDeploymentMonitoringObjectiveType', - ) - deployed_model_id = proto.Field( - proto.STRING, - number=2, - ) - anomaly_count = proto.Field( - proto.INT32, - number=3, - ) - feature_stats = proto.RepeatedField( - proto.MESSAGE, - number=4, - message=FeatureHistoricStatsAnomalies, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation.py deleted file mode 100644 index e2f6641aba..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation.py +++ /dev/null @@ -1,133 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import explanation -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'ModelEvaluation', - }, -) - - -class ModelEvaluation(proto.Message): - r"""A collection of metrics calculated by comparing Model's - predictions on all of the test data against annotations from the - test data. - - Attributes: - name (str): - Output only. The resource name of the - ModelEvaluation. - metrics_schema_uri (str): - Output only. Points to a YAML file stored on Google Cloud - Storage describing the - [metrics][google.cloud.aiplatform.v1beta1.ModelEvaluation.metrics] - of this ModelEvaluation. The schema is defined as an OpenAPI - 3.0.2 `Schema - Object `__. - metrics (google.protobuf.struct_pb2.Value): - Output only. Evaluation metrics of the Model. The schema of - the metrics is stored in - [metrics_schema_uri][google.cloud.aiplatform.v1beta1.ModelEvaluation.metrics_schema_uri] - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - ModelEvaluation was created. - slice_dimensions (Sequence[str]): - Output only. All possible - [dimensions][ModelEvaluationSlice.slice.dimension] of - ModelEvaluationSlices. The dimensions can be used as the - filter of the - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices] - request, in the form of ``slice.dimension = ``. - model_explanation (google.cloud.aiplatform_v1beta1.types.ModelExplanation): - Output only. 
Aggregated explanation metrics - for the Model's prediction output over the data - this ModelEvaluation uses. This field is - populated only if the Model is evaluated with - explanations, and only for AutoML tabular - Models. - explanation_specs (Sequence[google.cloud.aiplatform_v1beta1.types.ModelEvaluation.ModelEvaluationExplanationSpec]): - Output only. Describes the values of - [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec] - that are used for explaining the predicted values on the - evaluated data. - """ - - class ModelEvaluationExplanationSpec(proto.Message): - r""" - Attributes: - explanation_type (str): - Explanation type. - - For AutoML Image Classification models, possible values are: - - - ``image-integrated-gradients`` - - ``image-xrai`` - explanation_spec (google.cloud.aiplatform_v1beta1.types.ExplanationSpec): - Explanation spec details. - """ - - explanation_type = proto.Field( - proto.STRING, - number=1, - ) - explanation_spec = proto.Field( - proto.MESSAGE, - number=2, - message=explanation.ExplanationSpec, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - metrics_schema_uri = proto.Field( - proto.STRING, - number=2, - ) - metrics = proto.Field( - proto.MESSAGE, - number=3, - message=struct_pb2.Value, - ) - create_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - slice_dimensions = proto.RepeatedField( - proto.STRING, - number=5, - ) - model_explanation = proto.Field( - proto.MESSAGE, - number=8, - message=explanation.ModelExplanation, - ) - explanation_specs = proto.RepeatedField( - proto.MESSAGE, - number=9, - message=ModelEvaluationExplanationSpec, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py deleted file mode 100644 index ffa3738946..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py +++ /dev/null @@ -1,109 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'ModelEvaluationSlice', - }, -) - - -class ModelEvaluationSlice(proto.Message): - r"""A collection of metrics calculated by comparing Model's - predictions on a slice of the test data against ground truth - annotations. - - Attributes: - name (str): - Output only. The resource name of the - ModelEvaluationSlice. - slice_ (google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice.Slice): - Output only. The slice of the test data that - is used to evaluate the Model. - metrics_schema_uri (str): - Output only. 
Points to a YAML file stored on Google Cloud - Storage describing the - [metrics][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.metrics] - of this ModelEvaluationSlice. The schema is defined as an - OpenAPI 3.0.2 `Schema - Object `__. - metrics (google.protobuf.struct_pb2.Value): - Output only. Sliced evaluation metrics of the Model. The - schema of the metrics is stored in - [metrics_schema_uri][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.metrics_schema_uri] - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - ModelEvaluationSlice was created. - """ - - class Slice(proto.Message): - r"""Definition of a slice. - Attributes: - dimension (str): - Output only. The dimension of the slice. Well-known - dimensions are: - - - ``annotationSpec``: This slice is on the test data that - has either ground truth or prediction with - [AnnotationSpec.display_name][google.cloud.aiplatform.v1beta1.AnnotationSpec.display_name] - equals to - [value][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.Slice.value]. - value (str): - Output only. The value of the dimension in - this slice. - """ - - dimension = proto.Field( - proto.STRING, - number=1, - ) - value = proto.Field( - proto.STRING, - number=2, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - slice_ = proto.Field( - proto.MESSAGE, - number=2, - message=Slice, - ) - metrics_schema_uri = proto.Field( - proto.STRING, - number=3, - ) - metrics = proto.Field( - proto.MESSAGE, - number=4, - message=struct_pb2.Value, - ) - create_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_monitoring.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_monitoring.py deleted file mode 100644 index 897ec7a0b6..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_monitoring.py +++ /dev/null @@ -1,251 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import io - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'ModelMonitoringObjectiveConfig', - 'ModelMonitoringAlertConfig', - 'ThresholdConfig', - 'SamplingStrategy', - }, -) - - -class ModelMonitoringObjectiveConfig(proto.Message): - r"""Next ID: 6 - Attributes: - training_dataset (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.TrainingDataset): - Training dataset for models. This field has - to be set only if - TrainingPredictionSkewDetectionConfig is - specified. - training_prediction_skew_detection_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig): - The config for skew between training data and - prediction data. 
- prediction_drift_detection_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig): - The config for drift detection on prediction data. - """ - - class TrainingDataset(proto.Message): - r"""Training Dataset information. - Attributes: - dataset (str): - The resource name of the Dataset used to - train this Model. - gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): - The Google Cloud Storage URI of the unmanaged - Dataset used to train this Model. - bigquery_source (google.cloud.aiplatform_v1beta1.types.BigQuerySource): - The BigQuery table of the unmanaged Dataset - used to train this Model. - data_format (str): - Data format of the dataset, only applicable - if the input is from Google Cloud Storage. - The possible formats are: - - "tf-record" - The source file is a TFRecord file. - - "csv" - The source file is a CSV file. - target_field (str): - The name of the target field that the model is to - predict. This field will be excluded when doing - Predict and/or Explain on the training data. - logging_sampling_strategy (google.cloud.aiplatform_v1beta1.types.SamplingStrategy): - Strategy to sample data from the Training - Dataset. If not set, the whole - dataset is processed. - """ - - dataset = proto.Field( - proto.STRING, - number=3, - oneof='data_source', - ) - gcs_source = proto.Field( - proto.MESSAGE, - number=4, - oneof='data_source', - message=io.GcsSource, - ) - bigquery_source = proto.Field( - proto.MESSAGE, - number=5, - oneof='data_source', - message=io.BigQuerySource, - ) - data_format = proto.Field( - proto.STRING, - number=2, - ) - target_field = proto.Field( - proto.STRING, - number=6, - ) - logging_sampling_strategy = proto.Field( - proto.MESSAGE, - number=7, - message='SamplingStrategy', - ) - - class TrainingPredictionSkewDetectionConfig(proto.Message): - r"""The config for Training & Prediction data skew detection. It - specifies the training dataset sources and the skew detection - parameters. - - Attributes: - skew_thresholds (Sequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig.SkewThresholdsEntry]): - Key is the feature name and value is the - threshold. If a feature needs to be monitored - for skew, a value threshold must be configured for - that feature. The threshold here is against the - feature distribution distance between the - training and prediction features. - """ - - skew_thresholds = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=1, - message='ThresholdConfig', - ) - - class PredictionDriftDetectionConfig(proto.Message): - r"""The config for Prediction data drift detection. - Attributes: - drift_thresholds (Sequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig.DriftThresholdsEntry]): - Key is the feature name and value is the - threshold. If a feature needs to be monitored - for drift, a value threshold must be configured - for that feature. The threshold here is against the - feature distribution distance between different - time windows.
- """ - - drift_thresholds = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=1, - message='ThresholdConfig', - ) - - training_dataset = proto.Field( - proto.MESSAGE, - number=1, - message=TrainingDataset, - ) - training_prediction_skew_detection_config = proto.Field( - proto.MESSAGE, - number=2, - message=TrainingPredictionSkewDetectionConfig, - ) - prediction_drift_detection_config = proto.Field( - proto.MESSAGE, - number=3, - message=PredictionDriftDetectionConfig, - ) - - -class ModelMonitoringAlertConfig(proto.Message): - r"""Next ID: 2 - Attributes: - email_alert_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringAlertConfig.EmailAlertConfig): - Email alert config. - """ - - class EmailAlertConfig(proto.Message): - r"""The config for email alerts. - Attributes: - user_emails (Sequence[str]): - The email addresses to send the alert to. - """ - - user_emails = proto.RepeatedField( - proto.STRING, - number=1, - ) - - email_alert_config = proto.Field( - proto.MESSAGE, - number=1, - oneof='alert', - message=EmailAlertConfig, - ) - - -class ThresholdConfig(proto.Message): - r"""The config for feature monitoring thresholds. - Next ID: 3 - - Attributes: - value (float): - Specify a threshold value that can trigger - the alert. If this threshold config is for the - feature distribution distance: 1. For - categorical features, the distribution distance - is calculated by the L-infinity norm. - 2. For numerical features, the distribution - distance is calculated by Jensen–Shannon - divergence. - Each feature that needs to be monitored must - have a non-zero threshold; otherwise no alert - will be triggered for that feature. - """ - - value = proto.Field( - proto.DOUBLE, - number=1, - oneof='threshold', - ) - - -class SamplingStrategy(proto.Message): - r"""Sampling strategy for logging; it can apply to both the - training and prediction datasets. - Next ID: 2 - - Attributes: - random_sample_config (google.cloud.aiplatform_v1beta1.types.SamplingStrategy.RandomSampleConfig): - Random sample config. Will support more - sampling strategies later. - """ - - class RandomSampleConfig(proto.Message): - r"""Requests are randomly selected. - Attributes: - sample_rate (float): - Sample rate in the range (0, 1]. - """ - - sample_rate = proto.Field( - proto.DOUBLE, - number=1, - ) - - random_sample_config = proto.Field( - proto.MESSAGE, - number=1, - message=RandomSampleConfig, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_service.py deleted file mode 100644 index 32300b287a..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_service.py +++ /dev/null @@ -1,569 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
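Taken together, a minimal sketch of the monitoring configuration messages defined above (the deployed-model ID, BigQuery URI, target field, thresholds, and sample rate are all hypothetical):

.. code:: python

    from google.cloud import aiplatform_v1beta1 as aip
    from google.protobuf import duration_pb2

    # Sample 10% of requests; alert when a feature's training/serving
    # distribution distance exceeds the (hypothetical) 0.3 threshold.
    objective = aip.ModelDeploymentMonitoringObjectiveConfig(
        deployed_model_id="1234567890",
        objective_config=aip.ModelMonitoringObjectiveConfig(
            training_dataset=aip.ModelMonitoringObjectiveConfig.TrainingDataset(
                bigquery_source=aip.BigQuerySource(input_uri="bq://my-project.my_dataset.train"),
                target_field="label",
            ),
            training_prediction_skew_detection_config=(
                aip.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig(
                    skew_thresholds={"age": aip.ThresholdConfig(value=0.3)},
                )
            ),
        ),
    )
    schedule = aip.ModelDeploymentMonitoringScheduleConfig(
        monitor_interval=duration_pb2.Duration(seconds=3600),  # rounded up to a full hour
    )
    sampling = aip.SamplingStrategy(
        random_sample_config=aip.SamplingStrategy.RandomSampleConfig(sample_rate=0.1),
    )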
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import io -from google.cloud.aiplatform_v1beta1.types import model as gca_model -from google.cloud.aiplatform_v1beta1.types import model_evaluation -from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice -from google.cloud.aiplatform_v1beta1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'UploadModelRequest', - 'UploadModelOperationMetadata', - 'UploadModelResponse', - 'GetModelRequest', - 'ListModelsRequest', - 'ListModelsResponse', - 'UpdateModelRequest', - 'DeleteModelRequest', - 'ExportModelRequest', - 'ExportModelOperationMetadata', - 'ExportModelResponse', - 'GetModelEvaluationRequest', - 'ListModelEvaluationsRequest', - 'ListModelEvaluationsResponse', - 'GetModelEvaluationSliceRequest', - 'ListModelEvaluationSlicesRequest', - 'ListModelEvaluationSlicesResponse', - }, -) - - -class UploadModelRequest(proto.Message): - r"""Request message for - [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel]. - - Attributes: - parent (str): - Required. The resource name of the Location into which to - upload the Model. Format: - ``projects/{project}/locations/{location}`` - model (google.cloud.aiplatform_v1beta1.types.Model): - Required. The Model to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - model = proto.Field( - proto.MESSAGE, - number=2, - message=gca_model.Model, - ) - - -class UploadModelOperationMetadata(proto.Message): - r"""Details of - [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] - operation. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The common part of the operation metadata. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class UploadModelResponse(proto.Message): - r"""Response message of - [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] - operation. - - Attributes: - model (str): - The name of the uploaded Model resource. Format: - ``projects/{project}/locations/{location}/models/{model}`` - """ - - model = proto.Field( - proto.STRING, - number=1, - ) - - -class GetModelRequest(proto.Message): - r"""Request message for - [ModelService.GetModel][google.cloud.aiplatform.v1beta1.ModelService.GetModel]. - - Attributes: - name (str): - Required. The name of the Model resource. Format: - ``projects/{project}/locations/{location}/models/{model}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListModelsRequest(proto.Message): - r"""Request message for - [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels]. - - Attributes: - parent (str): - Required. The resource name of the Location to list the - Models from. Format: - ``projects/{project}/locations/{location}`` - filter (str): - An expression for filtering the results of the request. For - field names both snake_case and camelCase are supported. - - - ``model`` supports = and !=. ``model`` represents the - Model ID, i.e. the last segment of the Model's [resource - name][google.cloud.aiplatform.v1beta1.Model.name]. 
- - ``display_name`` supports = and != - - ``labels`` supports general map functions that is: - - - ``labels.key=value`` - key:value equality - - \`labels.key:\* or labels:key - key existence - - A key including a space must be quoted. - ``labels."a key"``. - - Some examples: - - - ``model=1234`` - - ``displayName="myDisplayName"`` - - ``labels.myKey="myValue"`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained via - [ListModelsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelsResponse.next_page_token] - of the previous - [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListModelsResponse(proto.Message): - r"""Response message for - [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels] - - Attributes: - models (Sequence[google.cloud.aiplatform_v1beta1.types.Model]): - List of Models in the requested page. - next_page_token (str): - A token to retrieve next page of results. Pass to - [ListModelsRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - models = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_model.Model, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateModelRequest(proto.Message): - r"""Request message for - [ModelService.UpdateModel][google.cloud.aiplatform.v1beta1.ModelService.UpdateModel]. - - Attributes: - model (google.cloud.aiplatform_v1beta1.types.Model): - Required. The Model which replaces the - resource on the server. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the resource. For the - ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - """ - - model = proto.Field( - proto.MESSAGE, - number=1, - message=gca_model.Model, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class DeleteModelRequest(proto.Message): - r"""Request message for - [ModelService.DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel]. - - Attributes: - name (str): - Required. The name of the Model resource to be deleted. - Format: - ``projects/{project}/locations/{location}/models/{model}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ExportModelRequest(proto.Message): - r"""Request message for - [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel]. - - Attributes: - name (str): - Required. The resource name of the Model to export. Format: - ``projects/{project}/locations/{location}/models/{model}`` - output_config (google.cloud.aiplatform_v1beta1.types.ExportModelRequest.OutputConfig): - Required. The desired output location and - configuration. - """ - - class OutputConfig(proto.Message): - r"""Output configuration for the Model export. 
-        Attributes:
-            export_format_id (str):
-                The ID of the format in which the Model must be exported.
-                Each Model lists the [export formats it
-                supports][google.cloud.aiplatform.v1beta1.Model.supported_export_formats].
-                If no value is provided here, then the first from the list
-                of the Model's supported formats is used by default.
-            artifact_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination):
-                The Cloud Storage location where the Model artifact is to be
-                written. Under the directory given as the destination a
-                new one with name
-                ``model-export-<model-display-name>-<timestamp-of-export-call>``,
-                where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601
-                format, will be created. Inside, the Model and any of its
-                supporting files will be written. This field should only be
-                set when the ``exportableContent`` field of the
-                [Model.supported_export_formats] object contains
-                ``ARTIFACT``.
-            image_destination (google.cloud.aiplatform_v1beta1.types.ContainerRegistryDestination):
-                The Google Container Registry or Artifact Registry URI where
-                the Model container image will be copied to. This field
-                should only be set when the ``exportableContent`` field of
-                the [Model.supported_export_formats] object contains
-                ``IMAGE``.
-        """
-
-        export_format_id = proto.Field(
-            proto.STRING,
-            number=1,
-        )
-        artifact_destination = proto.Field(
-            proto.MESSAGE,
-            number=3,
-            message=io.GcsDestination,
-        )
-        image_destination = proto.Field(
-            proto.MESSAGE,
-            number=4,
-            message=io.ContainerRegistryDestination,
-        )
-
-    name = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-    output_config = proto.Field(
-        proto.MESSAGE,
-        number=2,
-        message=OutputConfig,
-    )
-
-
-class ExportModelOperationMetadata(proto.Message):
-    r"""Details of
-    [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel]
-    operation.
-
-    Attributes:
-        generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata):
-            The common part of the operation metadata.
-        output_info (google.cloud.aiplatform_v1beta1.types.ExportModelOperationMetadata.OutputInfo):
-            Output only. Information further describing
-            the output of this Model export.
-    """
-
-    class OutputInfo(proto.Message):
-        r"""Further describes the output of the ExportModel. Supplements
-        [ExportModelRequest.OutputConfig][google.cloud.aiplatform.v1beta1.ExportModelRequest.OutputConfig].
-
-        Attributes:
-            artifact_output_uri (str):
-                Output only. If the Model artifact is being
-                exported to Google Cloud Storage, this is the
-                full path of the directory created, into which
-                the Model files are being written.
-            image_output_uri (str):
-                Output only. If the Model image is being
-                exported to Google Container Registry or
-                Artifact Registry, this is the full path of the
-                image created.
-        """
-
-        artifact_output_uri = proto.Field(
-            proto.STRING,
-            number=2,
-        )
-        image_output_uri = proto.Field(
-            proto.STRING,
-            number=3,
-        )
-
-    generic_metadata = proto.Field(
-        proto.MESSAGE,
-        number=1,
-        message=operation.GenericOperationMetadata,
-    )
-    output_info = proto.Field(
-        proto.MESSAGE,
-        number=2,
-        message=OutputInfo,
-    )
-
-
-class ExportModelResponse(proto.Message):
-    r"""Response message of
-    [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel]
-    operation.
-    """
-
-
-class GetModelEvaluationRequest(proto.Message):
-    r"""Request message for
-    [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation].
-
-    Attributes:
-        name (str):
-            Required. The name of the ModelEvaluation resource.
Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListModelEvaluationsRequest(proto.Message): - r"""Request message for - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. - - Attributes: - parent (str): - Required. The resource name of the Model to list the - ModelEvaluations from. Format: - ``projects/{project}/locations/{location}/models/{model}`` - filter (str): - The standard list filter. - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained via - [ListModelEvaluationsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationsResponse.next_page_token] - of the previous - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListModelEvaluationsResponse(proto.Message): - r"""Response message for - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. - - Attributes: - model_evaluations (Sequence[google.cloud.aiplatform_v1beta1.types.ModelEvaluation]): - List of ModelEvaluations in the requested - page. - next_page_token (str): - A token to retrieve next page of results. Pass to - [ListModelEvaluationsRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - model_evaluations = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=model_evaluation.ModelEvaluation, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class GetModelEvaluationSliceRequest(proto.Message): - r"""Request message for - [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluationSlice]. - - Attributes: - name (str): - Required. The name of the ModelEvaluationSlice resource. - Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListModelEvaluationSlicesRequest(proto.Message): - r"""Request message for - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. - - Attributes: - parent (str): - Required. The resource name of the ModelEvaluation to list - the ModelEvaluationSlices from. Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` - filter (str): - The standard list filter. - - - ``slice.dimension`` - for =. - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained via - [ListModelEvaluationSlicesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationSlicesResponse.next_page_token] - of the previous - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices] - call. 
-        read_mask (google.protobuf.field_mask_pb2.FieldMask):
-            Mask specifying which fields to read.
-    """
-
-    parent = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-    filter = proto.Field(
-        proto.STRING,
-        number=2,
-    )
-    page_size = proto.Field(
-        proto.INT32,
-        number=3,
-    )
-    page_token = proto.Field(
-        proto.STRING,
-        number=4,
-    )
-    read_mask = proto.Field(
-        proto.MESSAGE,
-        number=5,
-        message=field_mask_pb2.FieldMask,
-    )
-
-
-class ListModelEvaluationSlicesResponse(proto.Message):
-    r"""Response message for
-    [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices].
-
-    Attributes:
-        model_evaluation_slices (Sequence[google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice]):
-            List of ModelEvaluationSlices in the
-            requested page.
-        next_page_token (str):
-            A token to retrieve the next page of results. Pass to
-            [ListModelEvaluationSlicesRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationSlicesRequest.page_token]
-            to obtain that page.
-    """
-
-    @property
-    def raw_page(self):
-        return self
-
-    model_evaluation_slices = proto.RepeatedField(
-        proto.MESSAGE,
-        number=1,
-        message=model_evaluation_slice.ModelEvaluationSlice,
-    )
-    next_page_token = proto.Field(
-        proto.STRING,
-        number=2,
-    )
-
-
-__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/operation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/operation.py
deleted file mode 100644
index c047e3c60c..0000000000
--- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/operation.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import proto  # type: ignore
-
-from google.protobuf import timestamp_pb2  # type: ignore
-from google.rpc import status_pb2  # type: ignore
-
-
-__protobuf__ = proto.module(
-    package='google.cloud.aiplatform.v1beta1',
-    manifest={
-        'GenericOperationMetadata',
-        'DeleteOperationMetadata',
-    },
-)
-
-
-class GenericOperationMetadata(proto.Message):
-    r"""Generic Metadata shared by all operations.
-    Attributes:
-        partial_failures (Sequence[google.rpc.status_pb2.Status]):
-            Output only. Partial failures encountered.
-            E.g. single files that couldn't be read.
-            This field should never exceed 20 entries.
-            The Status details field will contain standard
-            GCP error details.
-        create_time (google.protobuf.timestamp_pb2.Timestamp):
-            Output only. Time when the operation was
-            created.
-        update_time (google.protobuf.timestamp_pb2.Timestamp):
-            Output only. Time when the operation was
-            updated for the last time. If the operation has
-            finished (successfully or not), this is the
-            finish time.
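-
-    For illustration only, a minimal sketch of reading this metadata from a
-    long-running operation returned by one of the service clients; the
-    client, the ``model_name`` value, and the delete call are assumptions
-    for the example, not part of this module::
-
-        from google.cloud import aiplatform_v1beta1
-
-        client = aiplatform_v1beta1.ModelServiceClient()
-        operation = client.delete_model(name=model_name)
-        # operation.metadata is a DeleteOperationMetadata message that
-        # embeds the GenericOperationMetadata described above.
-        print(operation.metadata.generic_metadata.create_time)
-        operation.result()  # block until the delete finishes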
- """ - - partial_failures = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=status_pb2.Status, - ) - create_time = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - - -class DeleteOperationMetadata(proto.Message): - r"""Details of operations that perform deletes of any entities. - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The common part of the operation metadata. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message='GenericOperationMetadata', - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_job.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_job.py deleted file mode 100644 index fff759149d..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_job.py +++ /dev/null @@ -1,436 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import artifact -from google.cloud.aiplatform_v1beta1.types import context -from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1beta1.types import execution as gca_execution -from google.cloud.aiplatform_v1beta1.types import pipeline_state -from google.cloud.aiplatform_v1beta1.types import value as gca_value -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'PipelineJob', - 'PipelineJobDetail', - 'PipelineTaskDetail', - 'PipelineTaskExecutorDetail', - }, -) - - -class PipelineJob(proto.Message): - r"""An instance of a machine learning PipelineJob. - Attributes: - name (str): - Output only. The resource name of the - PipelineJob. - display_name (str): - The display name of the Pipeline. - The name can be up to 128 characters long and - can be consist of any UTF-8 characters. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Pipeline creation time. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Pipeline start time. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Pipeline end time. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this PipelineJob - was most recently updated. - pipeline_spec (google.protobuf.struct_pb2.Struct): - Required. The spec of the pipeline. - state (google.cloud.aiplatform_v1beta1.types.PipelineState): - Output only. The detailed state of the job. - job_detail (google.cloud.aiplatform_v1beta1.types.PipelineJobDetail): - Output only. The details of pipeline run. 
-            Not available in the list view.
-        error (google.rpc.status_pb2.Status):
-            Output only. The error that occurred during
-            pipeline execution. Only populated when the
-            pipeline's state is FAILED or CANCELLED.
-        labels (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineJob.LabelsEntry]):
-            The labels with user-defined metadata to
-            organize PipelineJob.
-            Label keys and values can be no longer than 64
-            characters (Unicode codepoints), can only
-            contain lowercase letters, numeric characters,
-            underscores and dashes. International characters
-            are allowed.
-            See https://goo.gl/xmQnxf for more information
-            and examples of labels.
-        runtime_config (google.cloud.aiplatform_v1beta1.types.PipelineJob.RuntimeConfig):
-            Runtime config of the pipeline.
-        encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec):
-            Customer-managed encryption key spec for a
-            pipelineJob. If set, this PipelineJob and all of
-            its sub-resources will be secured by this key.
-        service_account (str):
-            The service account that the pipeline workload runs as. If
-            not specified, the Compute Engine default service account in
-            the project will be used. See
-            https://cloud.google.com/compute/docs/access/service-accounts#default_service_account
-
-            Users starting the pipeline must have the
-            ``iam.serviceAccounts.actAs`` permission on this service
-            account.
-        network (str):
-            The full name of the Compute Engine
-            `network <https://cloud.google.com/compute/docs/networks-and-firewalls#networks>`__
-            to which the Pipeline Job's workload should be peered. For
-            example, ``projects/12345/global/networks/myVPC``. The
-            `Format <https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert>`__
-            is of the form
-            ``projects/{project}/global/networks/{network}``. Where
-            {project} is a project number, as in ``12345``, and
-            {network} is a network name.
-
-            Private services access must already be configured for the
-            network. The Pipeline job will apply the network
-            configuration to the GCP resources being launched, if any,
-            such as a Vertex AI Training or Dataflow job. If left
-            unspecified, the workload is not peered with any network.
-    """
-
-    class RuntimeConfig(proto.Message):
-        r"""The runtime config of a PipelineJob.
-        Attributes:
-            parameters (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineJob.RuntimeConfig.ParametersEntry]):
-                The runtime parameters of the PipelineJob. The parameters
-                will be passed into
-                [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec]
-                to replace the placeholders at runtime.
-            gcs_output_directory (str):
-                Required. A path in a Cloud Storage bucket, which will be
-                treated as the root output directory of the pipeline. It is
-                used by the system to generate the paths of output
-                artifacts. The artifact paths are generated with a sub-path
-                pattern ``{job_id}/{task_id}/{output_key}`` under the
-                specified output directory. The service account specified in
-                this pipeline must have the ``storage.objects.get`` and
-                ``storage.objects.create`` permissions for this bucket.
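-
-        A minimal, illustrative sketch of building this message; the
-        bucket path and parameter name below are placeholders, not
-        defaults::
-
-            from google.cloud import aiplatform_v1beta1
-
-            runtime_config = aiplatform_v1beta1.PipelineJob.RuntimeConfig(
-                gcs_output_directory="gs://my-bucket/pipeline-root",
-                parameters={
-                    "learning_rate": aiplatform_v1beta1.Value(double_value=0.01),
-                },
-            )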
- """ - - parameters = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=1, - message=gca_value.Value, - ) - gcs_output_directory = proto.Field( - proto.STRING, - number=2, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - create_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - start_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - pipeline_spec = proto.Field( - proto.MESSAGE, - number=7, - message=struct_pb2.Struct, - ) - state = proto.Field( - proto.ENUM, - number=8, - enum=pipeline_state.PipelineState, - ) - job_detail = proto.Field( - proto.MESSAGE, - number=9, - message='PipelineJobDetail', - ) - error = proto.Field( - proto.MESSAGE, - number=10, - message=status_pb2.Status, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=11, - ) - runtime_config = proto.Field( - proto.MESSAGE, - number=12, - message=RuntimeConfig, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=16, - message=gca_encryption_spec.EncryptionSpec, - ) - service_account = proto.Field( - proto.STRING, - number=17, - ) - network = proto.Field( - proto.STRING, - number=18, - ) - - -class PipelineJobDetail(proto.Message): - r"""The runtime detail of PipelineJob. - Attributes: - pipeline_context (google.cloud.aiplatform_v1beta1.types.Context): - Output only. The context of the pipeline. - pipeline_run_context (google.cloud.aiplatform_v1beta1.types.Context): - Output only. The context of the current - pipeline run. - task_details (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail]): - Output only. The runtime details of the tasks - under the pipeline. - """ - - pipeline_context = proto.Field( - proto.MESSAGE, - number=1, - message=context.Context, - ) - pipeline_run_context = proto.Field( - proto.MESSAGE, - number=2, - message=context.Context, - ) - task_details = proto.RepeatedField( - proto.MESSAGE, - number=3, - message='PipelineTaskDetail', - ) - - -class PipelineTaskDetail(proto.Message): - r"""The runtime detail of a task execution. - Attributes: - task_id (int): - Output only. The system generated ID of the - task. - parent_task_id (int): - Output only. The id of the parent task if the - task is within a component scope. Empty if the - task is at the root level. - task_name (str): - Output only. The user specified name of the task that is - defined in [PipelineJob.spec][]. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Task create time. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Task start time. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Task end time. - executor_detail (google.cloud.aiplatform_v1beta1.types.PipelineTaskExecutorDetail): - Output only. The detailed execution info. - state (google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail.State): - Output only. State of the task. - execution (google.cloud.aiplatform_v1beta1.types.Execution): - Output only. The execution metadata of the - task. - error (google.rpc.status_pb2.Status): - Output only. The error that occurred during - task execution. Only populated when the task's - state is FAILED or CANCELLED. 
- inputs (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail.InputsEntry]): - Output only. The runtime input artifacts of - the task. - outputs (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail.OutputsEntry]): - Output only. The runtime output artifacts of - the task. - """ - class State(proto.Enum): - r"""Specifies state of TaskExecution""" - STATE_UNSPECIFIED = 0 - PENDING = 1 - RUNNING = 2 - SUCCEEDED = 3 - CANCEL_PENDING = 4 - CANCELLING = 5 - CANCELLED = 6 - FAILED = 7 - SKIPPED = 8 - NOT_TRIGGERED = 9 - - class ArtifactList(proto.Message): - r"""A list of artifact metadata. - Attributes: - artifacts (Sequence[google.cloud.aiplatform_v1beta1.types.Artifact]): - Output only. A list of artifact metadata. - """ - - artifacts = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=artifact.Artifact, - ) - - task_id = proto.Field( - proto.INT64, - number=1, - ) - parent_task_id = proto.Field( - proto.INT64, - number=12, - ) - task_name = proto.Field( - proto.STRING, - number=2, - ) - create_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - start_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - executor_detail = proto.Field( - proto.MESSAGE, - number=6, - message='PipelineTaskExecutorDetail', - ) - state = proto.Field( - proto.ENUM, - number=7, - enum=State, - ) - execution = proto.Field( - proto.MESSAGE, - number=8, - message=gca_execution.Execution, - ) - error = proto.Field( - proto.MESSAGE, - number=9, - message=status_pb2.Status, - ) - inputs = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=10, - message=ArtifactList, - ) - outputs = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=11, - message=ArtifactList, - ) - - -class PipelineTaskExecutorDetail(proto.Message): - r"""The runtime detail of a pipeline executor. - Attributes: - container_detail (google.cloud.aiplatform_v1beta1.types.PipelineTaskExecutorDetail.ContainerDetail): - Output only. The detailed info for a - container executor. - custom_job_detail (google.cloud.aiplatform_v1beta1.types.PipelineTaskExecutorDetail.CustomJobDetail): - Output only. The detailed info for a custom - job executor. - """ - - class ContainerDetail(proto.Message): - r"""The detail of a container execution. It contains the job - names of the lifecycle of a container execution. - - Attributes: - main_job (str): - Output only. The name of the - [CustomJob][google.cloud.aiplatform.v1beta1.CustomJob] for - the main container execution. - pre_caching_check_job (str): - Output only. The name of the - [CustomJob][google.cloud.aiplatform.v1beta1.CustomJob] for - the pre-caching-check container execution. This job will be - available if the - [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec] - specifies the ``pre_caching_check`` hook in the lifecycle - events. - """ - - main_job = proto.Field( - proto.STRING, - number=1, - ) - pre_caching_check_job = proto.Field( - proto.STRING, - number=2, - ) - - class CustomJobDetail(proto.Message): - r"""The detailed info for a custom job executor. - Attributes: - job (str): - Output only. The name of the - [CustomJob][google.cloud.aiplatform.v1beta1.CustomJob]. 
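-
-        As an illustrative sketch (not part of this module), the enclosing
-        ``PipelineTaskExecutorDetail``'s ``details`` oneof might be
-        inspected like this, where ``task`` is assumed to be a
-        ``PipelineTaskDetail`` taken from a PipelineJob's ``job_detail``::
-
-            detail = task.executor_detail
-            # proto-plus messages support ``in`` for field presence checks,
-            # which distinguishes which member of the oneof is set.
-            if "container_detail" in detail:
-                print(detail.container_detail.main_job)
-            elif "custom_job_detail" in detail:
-                print(detail.custom_job_detail.job)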
- """ - - job = proto.Field( - proto.STRING, - number=1, - ) - - container_detail = proto.Field( - proto.MESSAGE, - number=1, - oneof='details', - message=ContainerDetail, - ) - custom_job_detail = proto.Field( - proto.MESSAGE, - number=2, - oneof='details', - message=CustomJobDetail, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_service.py deleted file mode 100644 index 2043ad1221..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_service.py +++ /dev/null @@ -1,367 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job -from google.cloud.aiplatform_v1beta1.types import training_pipeline as gca_training_pipeline -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'CreateTrainingPipelineRequest', - 'GetTrainingPipelineRequest', - 'ListTrainingPipelinesRequest', - 'ListTrainingPipelinesResponse', - 'DeleteTrainingPipelineRequest', - 'CancelTrainingPipelineRequest', - 'CreatePipelineJobRequest', - 'GetPipelineJobRequest', - 'ListPipelineJobsRequest', - 'ListPipelineJobsResponse', - 'DeletePipelineJobRequest', - 'CancelPipelineJobRequest', - }, -) - - -class CreateTrainingPipelineRequest(proto.Message): - r"""Request message for - [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - TrainingPipeline in. Format: - ``projects/{project}/locations/{location}`` - training_pipeline (google.cloud.aiplatform_v1beta1.types.TrainingPipeline): - Required. The TrainingPipeline to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - training_pipeline = proto.Field( - proto.MESSAGE, - number=2, - message=gca_training_pipeline.TrainingPipeline, - ) - - -class GetTrainingPipelineRequest(proto.Message): - r"""Request message for - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline]. - - Attributes: - name (str): - Required. The name of the TrainingPipeline resource. Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListTrainingPipelinesRequest(proto.Message): - r"""Request message for - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines]. - - Attributes: - parent (str): - Required. The resource name of the Location to list the - TrainingPipelines from. 
Format: - ``projects/{project}/locations/{location}`` - filter (str): - The standard list filter. Supported fields: - - - ``display_name`` supports = and !=. - - - ``state`` supports = and !=. - - Some examples of using the filter are: - - - ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"`` - - - ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"`` - - - ``NOT display_name="my_pipeline"`` - - - ``state="PIPELINE_STATE_FAILED"`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained via - [ListTrainingPipelinesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListTrainingPipelinesResponse.next_page_token] - of the previous - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListTrainingPipelinesResponse(proto.Message): - r"""Response message for - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines] - - Attributes: - training_pipelines (Sequence[google.cloud.aiplatform_v1beta1.types.TrainingPipeline]): - List of TrainingPipelines in the requested - page. - next_page_token (str): - A token to retrieve the next page of results. Pass to - [ListTrainingPipelinesRequest.page_token][google.cloud.aiplatform.v1beta1.ListTrainingPipelinesRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - training_pipelines = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_training_pipeline.TrainingPipeline, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteTrainingPipelineRequest(proto.Message): - r"""Request message for - [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline]. - - Attributes: - name (str): - Required. The name of the TrainingPipeline resource to be - deleted. Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CancelTrainingPipelineRequest(proto.Message): - r"""Request message for - [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline]. - - Attributes: - name (str): - Required. The name of the TrainingPipeline to cancel. - Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CreatePipelineJobRequest(proto.Message): - r"""Request message for - [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CreatePipelineJob]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - PipelineJob in. Format: - ``projects/{project}/locations/{location}`` - pipeline_job (google.cloud.aiplatform_v1beta1.types.PipelineJob): - Required. The PipelineJob to create. 
- pipeline_job_id (str): - The ID to use for the PipelineJob, which will become the - final component of the PipelineJob name. If not provided, an - ID will be automatically generated. - - This value should be less than 128 characters, and valid - characters are /[a-z][0-9]-/. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - pipeline_job = proto.Field( - proto.MESSAGE, - number=2, - message=gca_pipeline_job.PipelineJob, - ) - pipeline_job_id = proto.Field( - proto.STRING, - number=3, - ) - - -class GetPipelineJobRequest(proto.Message): - r"""Request message for - [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob]. - - Attributes: - name (str): - Required. The name of the PipelineJob resource. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListPipelineJobsRequest(proto.Message): - r"""Request message for - [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs]. - - Attributes: - parent (str): - Required. The resource name of the Location to list the - PipelineJobs from. Format: - ``projects/{project}/locations/{location}`` - filter (str): - The standard list filter. Supported fields: - - - ``display_name`` supports ``=`` and ``!=``. - - ``state`` supports ``=`` and ``!=``. - - The following examples demonstrate how to filter the list of - PipelineJobs: - - - ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"`` - - ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"`` - - ``NOT display_name="my_pipeline"`` - - ``state="PIPELINE_STATE_FAILED"`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained via - [ListPipelineJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListPipelineJobsResponse.next_page_token] - of the previous - [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs] - call. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - - -class ListPipelineJobsResponse(proto.Message): - r"""Response message for - [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs] - - Attributes: - pipeline_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineJob]): - List of PipelineJobs in the requested page. - next_page_token (str): - A token to retrieve the next page of results. Pass to - [ListPipelineJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListPipelineJobsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - pipeline_jobs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_pipeline_job.PipelineJob, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeletePipelineJobRequest(proto.Message): - r"""Request message for - [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.DeletePipelineJob]. - - Attributes: - name (str): - Required. The name of the PipelineJob resource to be - deleted. 
Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CancelPipelineJobRequest(proto.Message): - r"""Request message for - [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CancelPipelineJob]. - - Attributes: - name (str): - Required. The name of the PipelineJob to cancel. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_state.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_state.py deleted file mode 100644 index 83459cab69..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_state.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'PipelineState', - }, -) - - -class PipelineState(proto.Enum): - r"""Describes the state of a pipeline.""" - PIPELINE_STATE_UNSPECIFIED = 0 - PIPELINE_STATE_QUEUED = 1 - PIPELINE_STATE_PENDING = 2 - PIPELINE_STATE_RUNNING = 3 - PIPELINE_STATE_SUCCEEDED = 4 - PIPELINE_STATE_FAILED = 5 - PIPELINE_STATE_CANCELLING = 6 - PIPELINE_STATE_CANCELLED = 7 - PIPELINE_STATE_PAUSED = 8 - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/prediction_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/prediction_service.py deleted file mode 100644 index b38a2c1f34..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/prediction_service.py +++ /dev/null @@ -1,214 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import explanation -from google.protobuf import struct_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'PredictRequest', - 'PredictResponse', - 'ExplainRequest', - 'ExplainResponse', - }, -) - - -class PredictRequest(proto.Message): - r"""Request message for - [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. - - Attributes: - endpoint (str): - Required. The name of the Endpoint requested to serve the - prediction. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - instances (Sequence[google.protobuf.struct_pb2.Value]): - Required. The instances that are the input to the prediction - call. A DeployedModel may have an upper limit on the number - of instances it supports per request, and when it is - exceeded the prediction call errors in case of AutoML - Models, or, in case of customer created Models, the - behaviour is as documented by that Model. The schema of any - single instance may be specified via Endpoint's - DeployedModels' - [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. - parameters (google.protobuf.struct_pb2.Value): - The parameters that govern the prediction. The schema of the - parameters may be specified via Endpoint's DeployedModels' - [Model's - ][google.cloud.aiplatform.v1beta1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. - """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - instances = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=struct_pb2.Value, - ) - parameters = proto.Field( - proto.MESSAGE, - number=3, - message=struct_pb2.Value, - ) - - -class PredictResponse(proto.Message): - r"""Response message for - [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. - - Attributes: - predictions (Sequence[google.protobuf.struct_pb2.Value]): - The predictions that are the output of the predictions call. - The schema of any single prediction may be specified via - Endpoint's DeployedModels' [Model's - ][google.cloud.aiplatform.v1beta1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri]. - deployed_model_id (str): - ID of the Endpoint's DeployedModel that - served this prediction. - """ - - predictions = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=struct_pb2.Value, - ) - deployed_model_id = proto.Field( - proto.STRING, - number=2, - ) - - -class ExplainRequest(proto.Message): - r"""Request message for - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. - - Attributes: - endpoint (str): - Required. The name of the Endpoint requested to serve the - explanation. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - instances (Sequence[google.protobuf.struct_pb2.Value]): - Required. The instances that are the input to the - explanation call. 
A DeployedModel may have an upper limit on - the number of instances it supports per request, and when it - is exceeded the explanation call errors in case of AutoML - Models, or, in case of customer created Models, the - behaviour is as documented by that Model. The schema of any - single instance may be specified via Endpoint's - DeployedModels' - [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. - parameters (google.protobuf.struct_pb2.Value): - The parameters that govern the prediction. The schema of the - parameters may be specified via Endpoint's DeployedModels' - [Model's - ][google.cloud.aiplatform.v1beta1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. - explanation_spec_override (google.cloud.aiplatform_v1beta1.types.ExplanationSpecOverride): - If specified, overrides the - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] - of the DeployedModel. Can be used for explaining prediction - results with different configurations, such as: - - - Explaining top-5 predictions results as opposed to top-1; - - Increasing path count or step count of the attribution - methods to reduce approximate errors; - - Using different baselines for explaining the prediction - results. - deployed_model_id (str): - If specified, this ExplainRequest will be served by the - chosen DeployedModel, overriding - [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split]. - """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - instances = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=struct_pb2.Value, - ) - parameters = proto.Field( - proto.MESSAGE, - number=4, - message=struct_pb2.Value, - ) - explanation_spec_override = proto.Field( - proto.MESSAGE, - number=5, - message=explanation.ExplanationSpecOverride, - ) - deployed_model_id = proto.Field( - proto.STRING, - number=3, - ) - - -class ExplainResponse(proto.Message): - r"""Response message for - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. - - Attributes: - explanations (Sequence[google.cloud.aiplatform_v1beta1.types.Explanation]): - The explanations of the Model's - [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions]. - - It has the same number of elements as - [instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances] - to be explained. - deployed_model_id (str): - ID of the Endpoint's DeployedModel that - served this explanation. - predictions (Sequence[google.protobuf.struct_pb2.Value]): - The predictions that are the output of the predictions call. - Same as - [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions]. 
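-
-    A minimal, illustrative sketch of obtaining this response; the
-    endpoint name and instance payload below are placeholders, not
-    defaults::
-
-        from google.cloud import aiplatform_v1beta1
-        from google.protobuf import json_format
-        from google.protobuf import struct_pb2
-
-        client = aiplatform_v1beta1.PredictionServiceClient()
-        instance = json_format.ParseDict({"feature": 1.0}, struct_pb2.Value())
-        response = client.explain(
-            endpoint="projects/my-project/locations/us-central1/endpoints/123",
-            instances=[instance],
-        )
-        for explanation in response.explanations:
-            print(explanation.attributions)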
- """ - - explanations = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=explanation.Explanation, - ) - deployed_model_id = proto.Field( - proto.STRING, - number=2, - ) - predictions = proto.RepeatedField( - proto.MESSAGE, - number=3, - message=struct_pb2.Value, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool.py deleted file mode 100644 index a985c80f43..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool.py +++ /dev/null @@ -1,80 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'SpecialistPool', - }, -) - - -class SpecialistPool(proto.Message): - r"""SpecialistPool represents customers' own workforce to work on - their data labeling jobs. It includes a group of specialist - managers who are responsible for managing the labelers in this - pool as well as customers' data labeling jobs associated with - this pool. - Customers create specialist pool as well as start data labeling - jobs on Cloud, managers and labelers work with the jobs using - CrowdCompute console. - - Attributes: - name (str): - Required. The resource name of the - SpecialistPool. - display_name (str): - Required. The user-defined name of the - SpecialistPool. The name can be up to 128 - characters long and can be consist of any UTF-8 - characters. - This field should be unique on project-level. - specialist_managers_count (int): - Output only. The number of Specialists in - this SpecialistPool. - specialist_manager_emails (Sequence[str]): - The email addresses of the specialists in the - SpecialistPool. - pending_data_labeling_jobs (Sequence[str]): - Output only. The resource name of the pending - data labeling jobs. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - specialist_managers_count = proto.Field( - proto.INT32, - number=3, - ) - specialist_manager_emails = proto.RepeatedField( - proto.STRING, - number=4, - ) - pending_data_labeling_jobs = proto.RepeatedField( - proto.STRING, - number=5, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool_service.py deleted file mode 100644 index c6ebb83779..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool_service.py +++ /dev/null @@ -1,237 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import operation -from google.cloud.aiplatform_v1beta1.types import specialist_pool as gca_specialist_pool -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'CreateSpecialistPoolRequest', - 'CreateSpecialistPoolOperationMetadata', - 'GetSpecialistPoolRequest', - 'ListSpecialistPoolsRequest', - 'ListSpecialistPoolsResponse', - 'DeleteSpecialistPoolRequest', - 'UpdateSpecialistPoolRequest', - 'UpdateSpecialistPoolOperationMetadata', - }, -) - - -class CreateSpecialistPoolRequest(proto.Message): - r"""Request message for - [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool]. - - Attributes: - parent (str): - Required. The parent Project name for the new - SpecialistPool. The form is - ``projects/{project}/locations/{location}``. - specialist_pool (google.cloud.aiplatform_v1beta1.types.SpecialistPool): - Required. The SpecialistPool to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - specialist_pool = proto.Field( - proto.MESSAGE, - number=2, - message=gca_specialist_pool.SpecialistPool, - ) - - -class CreateSpecialistPoolOperationMetadata(proto.Message): - r"""Runtime operation information for - [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The operation generic information. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class GetSpecialistPoolRequest(proto.Message): - r"""Request message for - [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool]. - - Attributes: - name (str): - Required. The name of the SpecialistPool resource. The form - is - ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListSpecialistPoolsRequest(proto.Message): - r"""Request message for - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. - - Attributes: - parent (str): - Required. The name of the SpecialistPool's parent resource. - Format: ``projects/{project}/locations/{location}`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained by - [ListSpecialistPoolsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListSpecialistPoolsResponse.next_page_token] - of the previous - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools] - call. Return first page if empty. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. 
-            FieldMask represents a set of symbolic field
-            paths.
-    """
-
-    parent = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-    page_size = proto.Field(
-        proto.INT32,
-        number=2,
-    )
-    page_token = proto.Field(
-        proto.STRING,
-        number=3,
-    )
-    read_mask = proto.Field(
-        proto.MESSAGE,
-        number=4,
-        message=field_mask_pb2.FieldMask,
-    )
-
-
-class ListSpecialistPoolsResponse(proto.Message):
-    r"""Response message for
-    [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools].
-
-    Attributes:
-        specialist_pools (Sequence[google.cloud.aiplatform_v1beta1.types.SpecialistPool]):
-            A list of SpecialistPools that matches the
-            specified filter in the request.
-        next_page_token (str):
-            The standard List next-page token.
-    """
-
-    @property
-    def raw_page(self):
-        return self
-
-    specialist_pools = proto.RepeatedField(
-        proto.MESSAGE,
-        number=1,
-        message=gca_specialist_pool.SpecialistPool,
-    )
-    next_page_token = proto.Field(
-        proto.STRING,
-        number=2,
-    )
-
-
-class DeleteSpecialistPoolRequest(proto.Message):
-    r"""Request message for
-    [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool].
-
-    Attributes:
-        name (str):
-            Required. The resource name of the SpecialistPool to delete.
-            Format:
-            ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``
-        force (bool):
-            If set to true, any specialist managers in
-            this SpecialistPool will also be deleted.
-            (Otherwise, the request will only work if the
-            SpecialistPool has no specialist managers.)
-    """
-
-    name = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-    force = proto.Field(
-        proto.BOOL,
-        number=2,
-    )
-
-
-class UpdateSpecialistPoolRequest(proto.Message):
-    r"""Request message for
-    [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool].
-
-    Attributes:
-        specialist_pool (google.cloud.aiplatform_v1beta1.types.SpecialistPool):
-            Required. The SpecialistPool which replaces
-            the resource on the server.
-        update_mask (google.protobuf.field_mask_pb2.FieldMask):
-            Required. The update mask applies to the
-            resource.
-    """
-
-    specialist_pool = proto.Field(
-        proto.MESSAGE,
-        number=1,
-        message=gca_specialist_pool.SpecialistPool,
-    )
-    update_mask = proto.Field(
-        proto.MESSAGE,
-        number=2,
-        message=field_mask_pb2.FieldMask,
-    )
-
-
-class UpdateSpecialistPoolOperationMetadata(proto.Message):
-    r"""Runtime operation metadata for
-    [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool].
-
-    Attributes:
-        specialist_pool (str):
-            Output only. The name of the SpecialistPool to which the
-            specialists are being added. Format:
-            ``projects/{project_id}/locations/{location_id}/specialistPools/{specialist_pool}``
-        generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata):
-            The operation generic information.
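-
-    For illustration, a sketch of the update call that produces this
-    metadata; the pool name and display name below are placeholders::
-
-        from google.cloud import aiplatform_v1beta1
-        from google.protobuf import field_mask_pb2
-
-        client = aiplatform_v1beta1.SpecialistPoolServiceClient()
-        pool = aiplatform_v1beta1.SpecialistPool(
-            name="projects/123/locations/us-central1/specialistPools/456",
-            display_name="updated display name",
-        )
-        operation = client.update_specialist_pool(
-            specialist_pool=pool,
-            update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
-        )
-        updated_pool = operation.result()  # SpecialistPool after the update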
- """ - - specialist_pool = proto.Field( - proto.STRING, - number=1, - ) - generic_metadata = proto.Field( - proto.MESSAGE, - number=2, - message=operation.GenericOperationMetadata, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/study.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/study.py deleted file mode 100644 index 4e2a1161ba..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/study.py +++ /dev/null @@ -1,748 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'Study', - 'Trial', - 'StudySpec', - 'Measurement', - }, -) - - -class Study(proto.Message): - r"""A message representing a Study. - Attributes: - name (str): - Output only. The name of a study. The study's globally - unique identifier. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - display_name (str): - Required. Describes the Study, default value - is empty string. - study_spec (google.cloud.aiplatform_v1beta1.types.StudySpec): - Required. Configuration of the Study. - state (google.cloud.aiplatform_v1beta1.types.Study.State): - Output only. The detailed state of a Study. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time at which the study was - created. - inactive_reason (str): - Output only. A human readable reason why the - Study is inactive. This should be empty if a - study is ACTIVE or COMPLETED. - """ - class State(proto.Enum): - r"""Describes the Study state.""" - STATE_UNSPECIFIED = 0 - ACTIVE = 1 - INACTIVE = 2 - COMPLETED = 3 - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - study_spec = proto.Field( - proto.MESSAGE, - number=3, - message='StudySpec', - ) - state = proto.Field( - proto.ENUM, - number=4, - enum=State, - ) - create_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - inactive_reason = proto.Field( - proto.STRING, - number=6, - ) - - -class Trial(proto.Message): - r"""A message representing a Trial. A Trial contains a unique set - of Parameters that has been or will be evaluated, along with the - objective metrics got by running the Trial. - - Attributes: - name (str): - Output only. Resource name of the Trial - assigned by the service. - id (str): - Output only. The identifier of the Trial - assigned by the service. - state (google.cloud.aiplatform_v1beta1.types.Trial.State): - Output only. The detailed state of the Trial. - parameters (Sequence[google.cloud.aiplatform_v1beta1.types.Trial.Parameter]): - Output only. The parameters of the Trial. 
- final_measurement (google.cloud.aiplatform_v1beta1.types.Measurement): - Output only. The final measurement containing - the objective value. - measurements (Sequence[google.cloud.aiplatform_v1beta1.types.Measurement]): - Output only. A list of measurements that are strictly - lexicographically ordered by their induced tuples (steps, - elapsed_duration). These are used for early stopping - computations. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the Trial was started. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the Trial's status changed to - ``SUCCEEDED`` or ``INFEASIBLE``. - client_id (str): - Output only. The identifier of the client that originally - requested this Trial. Each client is identified by a unique - client_id. When a client asks for a suggestion, Vizier will - assign it a Trial. The client should evaluate the Trial, - complete it, and report back to Vizier. If suggestion is - asked again by same client_id before the Trial is completed, - the same Trial will be returned. Multiple clients with - different client_ids can ask for suggestions simultaneously, - each of them will get their own Trial. - infeasible_reason (str): - Output only. A human readable string describing why the - Trial is infeasible. This is set only if Trial state is - ``INFEASIBLE``. - custom_job (str): - Output only. The CustomJob name linked to the - Trial. It's set for a HyperparameterTuningJob's - Trial. - """ - class State(proto.Enum): - r"""Describes a Trial state.""" - STATE_UNSPECIFIED = 0 - REQUESTED = 1 - ACTIVE = 2 - STOPPING = 3 - SUCCEEDED = 4 - INFEASIBLE = 5 - - class Parameter(proto.Message): - r"""A message representing a parameter to be tuned. - Attributes: - parameter_id (str): - Output only. The ID of the parameter. The parameter should - be defined in [StudySpec's - Parameters][google.cloud.aiplatform.v1beta1.StudySpec.parameters]. - value (google.protobuf.struct_pb2.Value): - Output only. The value of the parameter. ``number_value`` - will be set if a parameter defined in StudySpec is in type - 'INTEGER', 'DOUBLE' or 'DISCRETE'. ``string_value`` will be - set if a parameter defined in StudySpec is in type - 'CATEGORICAL'. - """ - - parameter_id = proto.Field( - proto.STRING, - number=1, - ) - value = proto.Field( - proto.MESSAGE, - number=2, - message=struct_pb2.Value, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - id = proto.Field( - proto.STRING, - number=2, - ) - state = proto.Field( - proto.ENUM, - number=3, - enum=State, - ) - parameters = proto.RepeatedField( - proto.MESSAGE, - number=4, - message=Parameter, - ) - final_measurement = proto.Field( - proto.MESSAGE, - number=5, - message='Measurement', - ) - measurements = proto.RepeatedField( - proto.MESSAGE, - number=6, - message='Measurement', - ) - start_time = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=8, - message=timestamp_pb2.Timestamp, - ) - client_id = proto.Field( - proto.STRING, - number=9, - ) - infeasible_reason = proto.Field( - proto.STRING, - number=10, - ) - custom_job = proto.Field( - proto.STRING, - number=11, - ) - - -class StudySpec(proto.Message): - r"""Represents specification of a Study. - Attributes: - decay_curve_stopping_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.DecayCurveAutomatedStoppingSpec): - The automated early stopping spec using decay - curve rule. 
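
# A minimal sketch (not part of the generated file) of reading the suggested
# parameters back from a Trial. Each value arrives as a google.protobuf.Value,
# so which field is set depends on the parameter type declared in the
# StudySpec; `trial` is assumed to be a Trial message obtained from a
# suggestion call.
for param in trial.parameters:
    if param.value.WhichOneof("kind") == "number_value":
        # 'INTEGER', 'DOUBLE', or 'DISCRETE' parameters
        print(param.parameter_id, param.value.number_value)
    else:
        # 'CATEGORICAL' parameters
        print(param.parameter_id, param.value.string_value)
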
-        median_automated_stopping_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.MedianAutomatedStoppingSpec):
-            The automated early stopping spec using
-            median rule.
-        convex_stop_config (google.cloud.aiplatform_v1beta1.types.StudySpec.ConvexStopConfig):
-            The automated early stopping spec using the
-            convex stopping rule.
-        metrics (Sequence[google.cloud.aiplatform_v1beta1.types.StudySpec.MetricSpec]):
-            Required. Metric specs for the Study.
-        parameters (Sequence[google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec]):
-            Required. The set of parameters to tune.
-        algorithm (google.cloud.aiplatform_v1beta1.types.StudySpec.Algorithm):
-            The search algorithm specified for the Study.
-        observation_noise (google.cloud.aiplatform_v1beta1.types.StudySpec.ObservationNoise):
-            The observation noise level of the study.
-            Currently only supported by the Vizier service.
-            Not supported by HyperparameterTuningJob or
-            TrainingPipeline.
-        measurement_selection_type (google.cloud.aiplatform_v1beta1.types.StudySpec.MeasurementSelectionType):
-            Describes which measurement selection type
-            will be used.
-    """
-    class Algorithm(proto.Enum):
-        r"""The available search algorithms for the Study."""
-        ALGORITHM_UNSPECIFIED = 0
-        GRID_SEARCH = 2
-        RANDOM_SEARCH = 3
-
-    class ObservationNoise(proto.Enum):
-        r"""Describes the noise level of the repeated observations.
-        "Noisy" means that the repeated observations with the same Trial
-        parameters may lead to different metric evaluations.
-        """
-        OBSERVATION_NOISE_UNSPECIFIED = 0
-        LOW = 1
-        HIGH = 2
-
-    class MeasurementSelectionType(proto.Enum):
-        r"""This indicates which measurement to use if/when the service
-        automatically selects the final measurement from previously reported
-        intermediate measurements. Choose this based on two considerations:
-        A) Do you expect your measurements to monotonically improve? If so,
-        choose LAST_MEASUREMENT. On the other hand, if you're in a situation
-        where your system can "over-train" and you expect the performance to
-        get better for a while but then start declining, choose
-        BEST_MEASUREMENT. B) Are your measurements significantly noisy
-        and/or irreproducible? If so, BEST_MEASUREMENT will tend to be
-        over-optimistic, and it may be better to choose LAST_MEASUREMENT. If
-        both or neither of (A) and (B) apply, it doesn't matter which
-        selection type is chosen.
-        """
-        MEASUREMENT_SELECTION_TYPE_UNSPECIFIED = 0
-        LAST_MEASUREMENT = 1
-        BEST_MEASUREMENT = 2
-
-    class MetricSpec(proto.Message):
-        r"""Represents a metric to optimize.
-        Attributes:
-            metric_id (str):
-                Required. The ID of the metric. Must not
-                contain whitespaces and must be unique amongst
-                all MetricSpecs.
-            goal (google.cloud.aiplatform_v1beta1.types.StudySpec.MetricSpec.GoalType):
-                Required. The optimization goal of the
-                metric.
-        """
-        class GoalType(proto.Enum):
-            r"""The available types of optimization goals."""
-            GOAL_TYPE_UNSPECIFIED = 0
-            MAXIMIZE = 1
-            MINIMIZE = 2
-
-        metric_id = proto.Field(
-            proto.STRING,
-            number=1,
-        )
-        goal = proto.Field(
-            proto.ENUM,
-            number=2,
-            enum='StudySpec.MetricSpec.GoalType',
-        )
-
-    class ParameterSpec(proto.Message):
-        r"""Represents a single parameter to optimize.
-        Attributes:
-            double_value_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.DoubleValueSpec):
-                The value spec for a 'DOUBLE' parameter.
-            integer_value_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.IntegerValueSpec):
-                The value spec for an 'INTEGER' parameter.
- categorical_value_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.CategoricalValueSpec): - The value spec for a 'CATEGORICAL' parameter. - discrete_value_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.DiscreteValueSpec): - The value spec for a 'DISCRETE' parameter. - parameter_id (str): - Required. The ID of the parameter. Must not - contain whitespaces and must be unique amongst - all ParameterSpecs. - scale_type (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.ScaleType): - How the parameter should be scaled. Leave unset for - ``CATEGORICAL`` parameters. - conditional_parameter_specs (Sequence[google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.ConditionalParameterSpec]): - A conditional parameter node is active if the parameter's - value matches the conditional node's parent_value_condition. - - If two items in conditional_parameter_specs have the same - name, they must have disjoint parent_value_condition. - """ - class ScaleType(proto.Enum): - r"""The type of scaling that should be applied to this parameter.""" - SCALE_TYPE_UNSPECIFIED = 0 - UNIT_LINEAR_SCALE = 1 - UNIT_LOG_SCALE = 2 - UNIT_REVERSE_LOG_SCALE = 3 - - class DoubleValueSpec(proto.Message): - r"""Value specification for a parameter in ``DOUBLE`` type. - Attributes: - min_value (float): - Required. Inclusive minimum value of the - parameter. - max_value (float): - Required. Inclusive maximum value of the - parameter. - """ - - min_value = proto.Field( - proto.DOUBLE, - number=1, - ) - max_value = proto.Field( - proto.DOUBLE, - number=2, - ) - - class IntegerValueSpec(proto.Message): - r"""Value specification for a parameter in ``INTEGER`` type. - Attributes: - min_value (int): - Required. Inclusive minimum value of the - parameter. - max_value (int): - Required. Inclusive maximum value of the - parameter. - """ - - min_value = proto.Field( - proto.INT64, - number=1, - ) - max_value = proto.Field( - proto.INT64, - number=2, - ) - - class CategoricalValueSpec(proto.Message): - r"""Value specification for a parameter in ``CATEGORICAL`` type. - Attributes: - values (Sequence[str]): - Required. The list of possible categories. - """ - - values = proto.RepeatedField( - proto.STRING, - number=1, - ) - - class DiscreteValueSpec(proto.Message): - r"""Value specification for a parameter in ``DISCRETE`` type. - Attributes: - values (Sequence[float]): - Required. A list of possible values. - The list should be in increasing order and at - least 1e-10 apart. For instance, this parameter - might have possible settings of 1.5, 2.5, and - 4.0. This list should not contain more than - 1,000 values. - """ - - values = proto.RepeatedField( - proto.DOUBLE, - number=1, - ) - - class ConditionalParameterSpec(proto.Message): - r"""Represents a parameter spec with condition from its parent - parameter. - - Attributes: - parent_discrete_values (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.ConditionalParameterSpec.DiscreteValueCondition): - The spec for matching values from a parent parameter of - ``DISCRETE`` type. - parent_int_values (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.ConditionalParameterSpec.IntValueCondition): - The spec for matching values from a parent parameter of - ``INTEGER`` type. - parent_categorical_values (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.ConditionalParameterSpec.CategoricalValueCondition): - The spec for matching values from a parent parameter of - ``CATEGORICAL`` type. 
- parameter_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec): - Required. The spec for a conditional - parameter. - """ - - class DiscreteValueCondition(proto.Message): - r"""Represents the spec to match discrete values from parent - parameter. - - Attributes: - values (Sequence[float]): - Required. Matches values of the parent parameter of - 'DISCRETE' type. All values must exist in - ``discrete_value_spec`` of parent parameter. - - The Epsilon of the value matching is 1e-10. - """ - - values = proto.RepeatedField( - proto.DOUBLE, - number=1, - ) - - class IntValueCondition(proto.Message): - r"""Represents the spec to match integer values from parent - parameter. - - Attributes: - values (Sequence[int]): - Required. Matches values of the parent parameter of - 'INTEGER' type. All values must lie in - ``integer_value_spec`` of parent parameter. - """ - - values = proto.RepeatedField( - proto.INT64, - number=1, - ) - - class CategoricalValueCondition(proto.Message): - r"""Represents the spec to match categorical values from parent - parameter. - - Attributes: - values (Sequence[str]): - Required. Matches values of the parent parameter of - 'CATEGORICAL' type. All values must exist in - ``categorical_value_spec`` of parent parameter. - """ - - values = proto.RepeatedField( - proto.STRING, - number=1, - ) - - parent_discrete_values = proto.Field( - proto.MESSAGE, - number=2, - oneof='parent_value_condition', - message='StudySpec.ParameterSpec.ConditionalParameterSpec.DiscreteValueCondition', - ) - parent_int_values = proto.Field( - proto.MESSAGE, - number=3, - oneof='parent_value_condition', - message='StudySpec.ParameterSpec.ConditionalParameterSpec.IntValueCondition', - ) - parent_categorical_values = proto.Field( - proto.MESSAGE, - number=4, - oneof='parent_value_condition', - message='StudySpec.ParameterSpec.ConditionalParameterSpec.CategoricalValueCondition', - ) - parameter_spec = proto.Field( - proto.MESSAGE, - number=1, - message='StudySpec.ParameterSpec', - ) - - double_value_spec = proto.Field( - proto.MESSAGE, - number=2, - oneof='parameter_value_spec', - message='StudySpec.ParameterSpec.DoubleValueSpec', - ) - integer_value_spec = proto.Field( - proto.MESSAGE, - number=3, - oneof='parameter_value_spec', - message='StudySpec.ParameterSpec.IntegerValueSpec', - ) - categorical_value_spec = proto.Field( - proto.MESSAGE, - number=4, - oneof='parameter_value_spec', - message='StudySpec.ParameterSpec.CategoricalValueSpec', - ) - discrete_value_spec = proto.Field( - proto.MESSAGE, - number=5, - oneof='parameter_value_spec', - message='StudySpec.ParameterSpec.DiscreteValueSpec', - ) - parameter_id = proto.Field( - proto.STRING, - number=1, - ) - scale_type = proto.Field( - proto.ENUM, - number=6, - enum='StudySpec.ParameterSpec.ScaleType', - ) - conditional_parameter_specs = proto.RepeatedField( - proto.MESSAGE, - number=10, - message='StudySpec.ParameterSpec.ConditionalParameterSpec', - ) - - class DecayCurveAutomatedStoppingSpec(proto.Message): - r"""The decay curve automated stopping rule builds a Gaussian - Process Regressor to predict the final objective value of a - Trial based on the already completed Trials and the intermediate - measurements of the current Trial. Early stopping is requested - for the current Trial if there is very low probability to exceed - the optimal value found so far. 
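
# A minimal sketch (not part of the generated file) showing how the message
# types above compose into a StudySpec: one metric to maximize, a log-scaled
# DOUBLE parameter, and a CATEGORICAL parameter. Metric and parameter IDs
# are illustrative.
from google.cloud.aiplatform_v1beta1.types import study

study_spec = study.StudySpec(
    metrics=[
        study.StudySpec.MetricSpec(
            metric_id="accuracy",
            goal=study.StudySpec.MetricSpec.GoalType.MAXIMIZE,
        ),
    ],
    parameters=[
        study.StudySpec.ParameterSpec(
            parameter_id="learning_rate",
            double_value_spec=study.StudySpec.ParameterSpec.DoubleValueSpec(
                min_value=1e-4,
                max_value=1e-1,
            ),
            scale_type=study.StudySpec.ParameterSpec.ScaleType.UNIT_LOG_SCALE,
        ),
        study.StudySpec.ParameterSpec(
            parameter_id="optimizer",
            categorical_value_spec=study.StudySpec.ParameterSpec.CategoricalValueSpec(
                values=["adam", "sgd"],
            ),
        ),
    ],
    algorithm=study.StudySpec.Algorithm.RANDOM_SEARCH,
)
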
-
-        Attributes:
-            use_elapsed_duration (bool):
-                True if
-                [Measurement.elapsed_duration][google.cloud.aiplatform.v1beta1.Measurement.elapsed_duration]
-                is used as the x-axis of each Trial's Decay Curve. Otherwise,
-                [Measurement.step_count][google.cloud.aiplatform.v1beta1.Measurement.step_count]
-                will be used as the x-axis.
-        """
-
-        use_elapsed_duration = proto.Field(
-            proto.BOOL,
-            number=1,
-        )
-
-    class MedianAutomatedStoppingSpec(proto.Message):
-        r"""The median automated stopping rule stops a pending Trial if the
-        Trial's best objective_value is strictly below the median
-        'performance' of all completed Trials reported up to the Trial's
-        last measurement. Currently, 'performance' refers to the running
-        average of the objective values reported by the Trial in each
-        measurement.
-
-        Attributes:
-            use_elapsed_duration (bool):
-                True if the median automated stopping rule applies on
-                [Measurement.elapsed_duration][google.cloud.aiplatform.v1beta1.Measurement.elapsed_duration].
-                If true, the elapsed_duration field of the current Trial's
-                latest measurement is used to compute the median objective
-                value for each completed Trial.
-        """
-
-        use_elapsed_duration = proto.Field(
-            proto.BOOL,
-            number=1,
-        )
-
-    class ConvexStopConfig(proto.Message):
-        r"""Configuration for ConvexStopPolicy.
-        Attributes:
-            max_num_steps (int):
-                Steps used in predicting the final objective for early
-                stopped trials. In general, it's set to be the same as the
-                defined steps in training / tuning. When use_seconds is
-                true, this field is interpreted as the maximum elapsed
-                seconds.
-            min_num_steps (int):
-                Minimum number of steps for a trial to complete. Trials
-                which do not have a measurement with num_steps >
-                min_num_steps won't be considered for early stopping. It's
-                ok to set it to 0, and a trial can be early stopped at any
-                stage. By default, min_num_steps is set to be one-tenth of
-                the max_num_steps. When use_seconds is true, this field is
-                interpreted as the minimum elapsed seconds.
-            autoregressive_order (int):
-                The number of Trial measurements used in the
-                autoregressive model for value prediction. A
-                trial won't be considered for early stopping if
-                it has fewer measurement points.
-            learning_rate_parameter_name (str):
-                The hyper-parameter name used in the tuning job that stands
-                for learning rate. Leave it blank if learning rate is not a
-                parameter in the tuning job. The learning_rate is used to
-                estimate the objective value of the ongoing trial.
-            use_seconds (bool):
-                This bool determines whether or not the rule is applied
-                based on elapsed_secs or steps. If use_seconds==false, the
-                early stopping decision is made according to the predicted
-                objective values according to the target steps. If
-                use_seconds==true, elapsed_secs is used instead of steps.
-                Also, in this case, the parameters max_num_steps and
-                min_num_steps are overloaded to contain max_elapsed_seconds
-                and min_elapsed_seconds.
-        """
-
-        max_num_steps = proto.Field(
-            proto.INT64,
-            number=1,
-        )
-        min_num_steps = proto.Field(
-            proto.INT64,
-            number=2,
-        )
-        autoregressive_order = proto.Field(
-            proto.INT64,
-            number=3,
-        )
-        learning_rate_parameter_name = proto.Field(
-            proto.STRING,
-            number=4,
-        )
-        use_seconds = proto.Field(
-            proto.BOOL,
-            number=5,
-        )
-
-    decay_curve_stopping_spec = proto.Field(
-        proto.MESSAGE,
-        number=4,
-        oneof='automated_stopping_spec',
-        message=DecayCurveAutomatedStoppingSpec,
-    )
-    median_automated_stopping_spec = proto.Field(
-        proto.MESSAGE,
-        number=5,
-        oneof='automated_stopping_spec',
-        message=MedianAutomatedStoppingSpec,
-    )
-    convex_stop_config = proto.Field(
-        proto.MESSAGE,
-        number=8,
-        oneof='automated_stopping_spec',
-        message=ConvexStopConfig,
-    )
-    metrics = proto.RepeatedField(
-        proto.MESSAGE,
-        number=1,
-        message=MetricSpec,
-    )
-    parameters = proto.RepeatedField(
-        proto.MESSAGE,
-        number=2,
-        message=ParameterSpec,
-    )
-    algorithm = proto.Field(
-        proto.ENUM,
-        number=3,
-        enum=Algorithm,
-    )
-    observation_noise = proto.Field(
-        proto.ENUM,
-        number=6,
-        enum=ObservationNoise,
-    )
-    measurement_selection_type = proto.Field(
-        proto.ENUM,
-        number=7,
-        enum=MeasurementSelectionType,
-    )
-
-
-class Measurement(proto.Message):
-    r"""A message representing a Measurement of a Trial. A
-    Measurement contains the Metrics obtained by executing a Trial
-    using suggested hyperparameter values.
-
-    Attributes:
-        elapsed_duration (google.protobuf.duration_pb2.Duration):
-            Output only. Time that the Trial has been
-            running at the point of this Measurement.
-        step_count (int):
-            Output only. The number of steps the machine
-            learning model has been trained for. Must be
-            non-negative.
-        metrics (Sequence[google.cloud.aiplatform_v1beta1.types.Measurement.Metric]):
-            Output only. A list of metrics obtained by
-            evaluating the objective functions using
-            suggested Parameter values.
-    """
-
-    class Metric(proto.Message):
-        r"""A message representing a metric in the measurement.
-        Attributes:
-            metric_id (str):
-                Output only. The ID of the Metric. The Metric should be
-                defined in [StudySpec's
-                Metrics][google.cloud.aiplatform.v1beta1.StudySpec.metrics].
-            value (float):
-                Output only. The value for this metric.
-        """
-
-        metric_id = proto.Field(
-            proto.STRING,
-            number=1,
-        )
-        value = proto.Field(
-            proto.DOUBLE,
-            number=2,
-        )
-
-    elapsed_duration = proto.Field(
-        proto.MESSAGE,
-        number=1,
-        message=duration_pb2.Duration,
-    )
-    step_count = proto.Field(
-        proto.INT64,
-        number=2,
-    )
-    metrics = proto.RepeatedField(
-        proto.MESSAGE,
-        number=3,
-        message=Metric,
-    )
-
-
-__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard.py
deleted file mode 100644
index a984eb652d..0000000000
--- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
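
# A minimal sketch (not part of the generated file) of the Measurement shape
# a Trial would report back; the duration, step count, and metric value are
# illustrative.
from google.cloud.aiplatform_v1beta1.types import study
from google.protobuf import duration_pb2

measurement = study.Measurement(
    elapsed_duration=duration_pb2.Duration(seconds=120),
    step_count=1000,
    metrics=[study.Measurement.Metric(metric_id="accuracy", value=0.91)],
)
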
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import proto  # type: ignore
-
-from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec
-from google.protobuf import timestamp_pb2  # type: ignore
-
-
-__protobuf__ = proto.module(
-    package='google.cloud.aiplatform.v1beta1',
-    manifest={
-        'Tensorboard',
-    },
-)
-
-
-class Tensorboard(proto.Message):
-    r"""Tensorboard is a physical database that stores users'
-    training metrics. A default Tensorboard is provided in each
-    region of a GCP project. If needed, users can also create extra
-    Tensorboards in their projects.
-
-    Attributes:
-        name (str):
-            Output only. Name of the Tensorboard. Format:
-            ``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
-        display_name (str):
-            Required. User provided name of this
-            Tensorboard.
-        description (str):
-            Description of this Tensorboard.
-        encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec):
-            Customer-managed encryption key spec for a
-            Tensorboard. If set, this Tensorboard and all
-            sub-resources of this Tensorboard will be
-            secured by this key.
-        blob_storage_path_prefix (str):
-            Output only. Consumer project Cloud Storage
-            path prefix used to store blob data, which can
-            either be a bucket or directory. Does not end
-            with a '/'.
-        run_count (int):
-            Output only. The number of Runs stored in
-            this Tensorboard.
-        create_time (google.protobuf.timestamp_pb2.Timestamp):
-            Output only. Timestamp when this Tensorboard
-            was created.
-        update_time (google.protobuf.timestamp_pb2.Timestamp):
-            Output only. Timestamp when this Tensorboard
-            was last updated.
-        labels (Sequence[google.cloud.aiplatform_v1beta1.types.Tensorboard.LabelsEntry]):
-            The labels with user-defined metadata to
-            organize your Tensorboards.
-            Label keys and values can be no longer than 64
-            characters (Unicode codepoints), can only
-            contain lowercase letters, numeric characters,
-            underscores and dashes. International characters
-            are allowed. No more than 64 user labels can be
-            associated with one Tensorboard (System labels
-            are excluded).
-
-            See https://goo.gl/xmQnxf for more information
-            and examples of labels. System reserved label
-            keys are prefixed with
-            "aiplatform.googleapis.com/" and are immutable.
-        etag (str):
-            Used to perform consistent read-modify-write
-            updates. If not set, a blind "overwrite" update
-            happens.
- """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - description = proto.Field( - proto.STRING, - number=3, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=11, - message=gca_encryption_spec.EncryptionSpec, - ) - blob_storage_path_prefix = proto.Field( - proto.STRING, - number=10, - ) - run_count = proto.Field( - proto.INT32, - number=5, - ) - create_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=8, - ) - etag = proto.Field( - proto.STRING, - number=9, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_data.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_data.py deleted file mode 100644 index c9336e93b3..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_data.py +++ /dev/null @@ -1,187 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'TimeSeriesData', - 'TimeSeriesDataPoint', - 'Scalar', - 'TensorboardTensor', - 'TensorboardBlobSequence', - 'TensorboardBlob', - }, -) - - -class TimeSeriesData(proto.Message): - r"""All the data stored in a TensorboardTimeSeries. - Attributes: - tensorboard_time_series_id (str): - Required. The ID of the - TensorboardTimeSeries, which will become the - final component of the TensorboardTimeSeries' - resource name - value_type (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries.ValueType): - Required. Immutable. The value type of this - time series. All the values in this time series - data must match this value type. - values (Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesDataPoint]): - Required. Data points in this time series. - """ - - tensorboard_time_series_id = proto.Field( - proto.STRING, - number=1, - ) - value_type = proto.Field( - proto.ENUM, - number=2, - enum=tensorboard_time_series.TensorboardTimeSeries.ValueType, - ) - values = proto.RepeatedField( - proto.MESSAGE, - number=3, - message='TimeSeriesDataPoint', - ) - - -class TimeSeriesDataPoint(proto.Message): - r"""A TensorboardTimeSeries data point. - Attributes: - scalar (google.cloud.aiplatform_v1beta1.types.Scalar): - A scalar value. - tensor (google.cloud.aiplatform_v1beta1.types.TensorboardTensor): - A tensor value. - blobs (google.cloud.aiplatform_v1beta1.types.TensorboardBlobSequence): - A blob sequence value. 
-        wall_time (google.protobuf.timestamp_pb2.Timestamp):
-            Wall clock timestamp when this data point was
-            generated by the end user.
-        step (int):
-            Step index of this data point within the run.
-    """
-
-    scalar = proto.Field(
-        proto.MESSAGE,
-        number=3,
-        oneof='value',
-        message='Scalar',
-    )
-    tensor = proto.Field(
-        proto.MESSAGE,
-        number=4,
-        oneof='value',
-        message='TensorboardTensor',
-    )
-    blobs = proto.Field(
-        proto.MESSAGE,
-        number=5,
-        oneof='value',
-        message='TensorboardBlobSequence',
-    )
-    wall_time = proto.Field(
-        proto.MESSAGE,
-        number=1,
-        message=timestamp_pb2.Timestamp,
-    )
-    step = proto.Field(
-        proto.INT64,
-        number=2,
-    )
-
-
-class Scalar(proto.Message):
-    r"""One point viewable on a scalar metric plot.
-    Attributes:
-        value (float):
-            Value of the point at this step / timestamp.
-    """
-
-    value = proto.Field(
-        proto.DOUBLE,
-        number=1,
-    )
-
-
-class TensorboardTensor(proto.Message):
-    r"""One point viewable on a tensor metric plot.
-    Attributes:
-        value (bytes):
-            Required. Serialized form of
-            https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/tensor.proto
-        version_number (int):
-            Optional. Version number of TensorProto used to serialize
-            [value][google.cloud.aiplatform.v1beta1.TensorboardTensor.value].
-    """
-
-    value = proto.Field(
-        proto.BYTES,
-        number=1,
-    )
-    version_number = proto.Field(
-        proto.INT32,
-        number=2,
-    )
-
-
-class TensorboardBlobSequence(proto.Message):
-    r"""One point viewable on a blob metric plot, but mostly just a wrapper
-    message to work around the fact that repeated fields can't be used
-    directly within ``oneof`` fields.
-
-    Attributes:
-        values (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardBlob]):
-            List of blobs contained within the sequence.
-    """
-
-    values = proto.RepeatedField(
-        proto.MESSAGE,
-        number=1,
-        message='TensorboardBlob',
-    )
-
-
-class TensorboardBlob(proto.Message):
-    r"""One blob (e.g., image, graph) viewable on a blob metric plot.
-    Attributes:
-        id (str):
-            Output only. A URI-safe key uniquely
-            identifying a blob. Can be used to locate the
-            blob stored in the Cloud Storage bucket of the
-            consumer project.
-        data (bytes):
-            Optional. The bytes of the blob are not
-            present unless returned by the
-            ReadTensorboardBlobData endpoint.
-    """
-
-    id = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-    data = proto.Field(
-        proto.BYTES,
-        number=2,
-    )
-
-
-__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_experiment.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_experiment.py
deleted file mode 100644
index 498bb15565..0000000000
--- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_experiment.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
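
# A minimal sketch (not part of the generated file) of scalar points for one
# time series, in the shape the TimeSeriesData/TimeSeriesDataPoint messages
# above describe (for example, as they would be sent via the
# WriteTensorboardRunData RPC). Step numbers, timestamps, and loss values are
# illustrative.
from google.cloud.aiplatform_v1beta1.types import (
    tensorboard_data,
    tensorboard_time_series,
)
from google.protobuf import timestamp_pb2

data = tensorboard_data.TimeSeriesData(
    tensorboard_time_series_id="loss",
    value_type=tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
    values=[
        tensorboard_data.TimeSeriesDataPoint(
            step=step,
            wall_time=timestamp_pb2.Timestamp(seconds=1626000000 + step),
            scalar=tensorboard_data.Scalar(value=loss),
        )
        for step, loss in [(1, 0.9), (2, 0.7), (3, 0.55)]
    ],
)
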
-#
-import proto  # type: ignore
-
-from google.protobuf import timestamp_pb2  # type: ignore
-
-
-__protobuf__ = proto.module(
-    package='google.cloud.aiplatform.v1beta1',
-    manifest={
-        'TensorboardExperiment',
-    },
-)
-
-
-class TensorboardExperiment(proto.Message):
-    r"""A TensorboardExperiment is a group of TensorboardRuns in a
-    Tensorboard, typically the results of a training job run.
-
-    Attributes:
-        name (str):
-            Output only. Name of the TensorboardExperiment. Format:
-            ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}``
-        display_name (str):
-            User provided name of this
-            TensorboardExperiment.
-        description (str):
-            Description of this TensorboardExperiment.
-        create_time (google.protobuf.timestamp_pb2.Timestamp):
-            Output only. Timestamp when this
-            TensorboardExperiment was created.
-        update_time (google.protobuf.timestamp_pb2.Timestamp):
-            Output only. Timestamp when this
-            TensorboardExperiment was last updated.
-        labels (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardExperiment.LabelsEntry]):
-            The labels with user-defined metadata to organize your
-            TensorboardExperiments.
-
-            Label keys and values can be no longer than 64 characters
-            (Unicode codepoints), can only contain lowercase letters,
-            numeric characters, underscores and dashes. International
-            characters are allowed. No more than 64 user labels can be
-            associated with one TensorboardExperiment (System labels
-            are excluded).
-
-            See https://goo.gl/xmQnxf for more information and examples
-            of labels. System reserved label keys are prefixed with
-            "aiplatform.googleapis.com/" and are immutable. The
-            following system labels exist for each Dataset:
-
-            -  "aiplatform.googleapis.com/dataset_metadata_schema":
-
-               -  output only, its value is the
-                  [metadata_schema's][metadata_schema_uri] title.
-        etag (str):
-            Used to perform consistent read-modify-write
-            updates. If not set, a blind "overwrite" update
-            happens.
-        source (str):
-            Immutable. Source of the
-            TensorboardExperiment. Example: a custom
-            training job.
-    """
-
-    name = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-    display_name = proto.Field(
-        proto.STRING,
-        number=2,
-    )
-    description = proto.Field(
-        proto.STRING,
-        number=3,
-    )
-    create_time = proto.Field(
-        proto.MESSAGE,
-        number=4,
-        message=timestamp_pb2.Timestamp,
-    )
-    update_time = proto.Field(
-        proto.MESSAGE,
-        number=5,
-        message=timestamp_pb2.Timestamp,
-    )
-    labels = proto.MapField(
-        proto.STRING,
-        proto.STRING,
-        number=6,
-    )
-    etag = proto.Field(
-        proto.STRING,
-        number=7,
-    )
-    source = proto.Field(
-        proto.STRING,
-        number=8,
-    )
-
-
-__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_run.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_run.py
deleted file mode 100644
index 566908bba3..0000000000
--- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_run.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import proto  # type: ignore
-
-from google.protobuf import timestamp_pb2  # type: ignore
-
-
-__protobuf__ = proto.module(
-    package='google.cloud.aiplatform.v1beta1',
-    manifest={
-        'TensorboardRun',
-    },
-)
-
-
-class TensorboardRun(proto.Message):
-    r"""TensorboardRun maps to a specific execution of a training job
-    with a given set of hyperparameter values, model definition,
-    dataset, etc.
-
-    Attributes:
-        name (str):
-            Output only. Name of the TensorboardRun. Format:
-            ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}``
-        display_name (str):
-            Required. User provided name of this
-            TensorboardRun. This value must be unique among
-            all TensorboardRuns belonging to the same parent
-            TensorboardExperiment.
-        description (str):
-            Description of this TensorboardRun.
-        create_time (google.protobuf.timestamp_pb2.Timestamp):
-            Output only. Timestamp when this
-            TensorboardRun was created.
-        update_time (google.protobuf.timestamp_pb2.Timestamp):
-            Output only. Timestamp when this
-            TensorboardRun was last updated.
-        labels (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardRun.LabelsEntry]):
-
-        etag (str):
-            Used to perform consistent read-modify-write
-            updates. If not set, a blind "overwrite" update
-            happens.
-    """
-
-    name = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-    display_name = proto.Field(
-        proto.STRING,
-        number=2,
-    )
-    description = proto.Field(
-        proto.STRING,
-        number=3,
-    )
-    create_time = proto.Field(
-        proto.MESSAGE,
-        number=6,
-        message=timestamp_pb2.Timestamp,
-    )
-    update_time = proto.Field(
-        proto.MESSAGE,
-        number=7,
-        message=timestamp_pb2.Timestamp,
-    )
-    labels = proto.MapField(
-        proto.STRING,
-        proto.STRING,
-        number=8,
-    )
-    etag = proto.Field(
-        proto.STRING,
-        number=9,
-    )
-
-
-__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py
deleted file mode 100644
index 5f9eb0a856..0000000000
--- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py
+++ /dev/null
@@ -1,1048 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
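
# A minimal sketch (not part of the generated file) of a TensorboardRun
# resource as it would be passed to CreateTensorboardRun; per the docstring
# above, display_name must be unique within the parent TensorboardExperiment.
# All values are illustrative.
from google.cloud.aiplatform_v1beta1.types import tensorboard_run

run = tensorboard_run.TensorboardRun(
    display_name="trial-7",
    description="Learning-rate sweep, run 7",
    labels={"team": "research"},
)
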
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import operation -from google.cloud.aiplatform_v1beta1.types import tensorboard as gca_tensorboard -from google.cloud.aiplatform_v1beta1.types import tensorboard_data -from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment as gca_tensorboard_experiment -from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run -from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series as gca_tensorboard_time_series -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'CreateTensorboardRequest', - 'GetTensorboardRequest', - 'ListTensorboardsRequest', - 'ListTensorboardsResponse', - 'UpdateTensorboardRequest', - 'DeleteTensorboardRequest', - 'CreateTensorboardExperimentRequest', - 'GetTensorboardExperimentRequest', - 'ListTensorboardExperimentsRequest', - 'ListTensorboardExperimentsResponse', - 'UpdateTensorboardExperimentRequest', - 'DeleteTensorboardExperimentRequest', - 'CreateTensorboardRunRequest', - 'GetTensorboardRunRequest', - 'ReadTensorboardBlobDataRequest', - 'ReadTensorboardBlobDataResponse', - 'ListTensorboardRunsRequest', - 'ListTensorboardRunsResponse', - 'UpdateTensorboardRunRequest', - 'DeleteTensorboardRunRequest', - 'CreateTensorboardTimeSeriesRequest', - 'GetTensorboardTimeSeriesRequest', - 'ListTensorboardTimeSeriesRequest', - 'ListTensorboardTimeSeriesResponse', - 'UpdateTensorboardTimeSeriesRequest', - 'DeleteTensorboardTimeSeriesRequest', - 'ReadTensorboardTimeSeriesDataRequest', - 'ReadTensorboardTimeSeriesDataResponse', - 'WriteTensorboardRunDataRequest', - 'WriteTensorboardRunDataResponse', - 'ExportTensorboardTimeSeriesDataRequest', - 'ExportTensorboardTimeSeriesDataResponse', - 'CreateTensorboardOperationMetadata', - 'UpdateTensorboardOperationMetadata', - }, -) - - -class CreateTensorboardRequest(proto.Message): - r"""Request message for - [TensorboardService.CreateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboard]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - Tensorboard in. Format: - ``projects/{project}/locations/{location}`` - tensorboard (google.cloud.aiplatform_v1beta1.types.Tensorboard): - Required. The Tensorboard to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - tensorboard = proto.Field( - proto.MESSAGE, - number=2, - message=gca_tensorboard.Tensorboard, - ) - - -class GetTensorboardRequest(proto.Message): - r"""Request message for - [TensorboardService.GetTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboard]. - - Attributes: - name (str): - Required. The name of the Tensorboard resource. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListTensorboardsRequest(proto.Message): - r"""Request message for - [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. - - Attributes: - parent (str): - Required. The resource name of the Location - to list Tensorboards. Format: - 'projects/{project}/locations/{location}' - filter (str): - Lists the Tensorboards that match the filter - expression. - page_size (int): - The maximum number of Tensorboards to return. - The service may return fewer than this value. 
If
-            unspecified, at most 100 Tensorboards will be
-            returned. The maximum value is 100; values above
-            100 will be coerced to 100.
-        page_token (str):
-            A page token, received from a previous
-            [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]
-            call. Provide this to retrieve the subsequent page.
-
-            When paginating, all other parameters provided to
-            [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]
-            must match the call that provided the page token.
-        order_by (str):
-            Field to use to sort the list.
-        read_mask (google.protobuf.field_mask_pb2.FieldMask):
-            Mask specifying which fields to read.
-    """
-
-    parent = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-    filter = proto.Field(
-        proto.STRING,
-        number=2,
-    )
-    page_size = proto.Field(
-        proto.INT32,
-        number=3,
-    )
-    page_token = proto.Field(
-        proto.STRING,
-        number=4,
-    )
-    order_by = proto.Field(
-        proto.STRING,
-        number=5,
-    )
-    read_mask = proto.Field(
-        proto.MESSAGE,
-        number=6,
-        message=field_mask_pb2.FieldMask,
-    )
-
-
-class ListTensorboardsResponse(proto.Message):
-    r"""Response message for
-    [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards].
-
-    Attributes:
-        tensorboards (Sequence[google.cloud.aiplatform_v1beta1.types.Tensorboard]):
-            The Tensorboards matching the request.
-        next_page_token (str):
-            A token, which can be sent as
-            [ListTensorboardsRequest.page_token][google.cloud.aiplatform.v1beta1.ListTensorboardsRequest.page_token]
-            to retrieve the next page. If this field is omitted, there
-            are no subsequent pages.
-    """
-
-    @property
-    def raw_page(self):
-        return self
-
-    tensorboards = proto.RepeatedField(
-        proto.MESSAGE,
-        number=1,
-        message=gca_tensorboard.Tensorboard,
-    )
-    next_page_token = proto.Field(
-        proto.STRING,
-        number=2,
-    )
-
-
-class UpdateTensorboardRequest(proto.Message):
-    r"""Request message for
-    [TensorboardService.UpdateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboard].
-
-    Attributes:
-        update_mask (google.protobuf.field_mask_pb2.FieldMask):
-            Required. Field mask is used to specify the fields to be
-            overwritten in the Tensorboard resource by the update. The
-            fields specified in the update_mask are relative to the
-            resource, not the full request. A field will be overwritten
-            if it is in the mask. If the user does not provide a mask
-            then all fields will be overwritten if new values are
-            specified.
-        tensorboard (google.cloud.aiplatform_v1beta1.types.Tensorboard):
-            Required. The Tensorboard's ``name`` field is used to
-            identify the Tensorboard to be updated. Format:
-            ``projects/{project}/locations/{location}/tensorboards/{tensorboard}``
-    """
-
-    update_mask = proto.Field(
-        proto.MESSAGE,
-        number=1,
-        message=field_mask_pb2.FieldMask,
-    )
-    tensorboard = proto.Field(
-        proto.MESSAGE,
-        number=2,
-        message=gca_tensorboard.Tensorboard,
-    )
-
-
-class DeleteTensorboardRequest(proto.Message):
-    r"""Request message for
-    [TensorboardService.DeleteTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboard].
-
-    Attributes:
-        name (str):
-            Required. The name of the Tensorboard to be deleted.
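
# A minimal sketch (not part of the generated file) of the update-mask
# pattern described above: only the fields named in update_mask are
# overwritten, everything else is left untouched. The resource name is a
# placeholder.
from google.cloud import aiplatform_v1beta1
from google.protobuf import field_mask_pb2

request = aiplatform_v1beta1.UpdateTensorboardRequest(
    tensorboard=aiplatform_v1beta1.Tensorboard(
        name="projects/my-project/locations/us-central1/tensorboards/123",
        display_name="renamed",
    ),
    update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
)
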
Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateTensorboardExperimentRequest(proto.Message): - r"""Request message for - [TensorboardService.CreateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardExperiment]. - - Attributes: - parent (str): - Required. The resource name of the Tensorboard to create the - TensorboardExperiment in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - tensorboard_experiment (google.cloud.aiplatform_v1beta1.types.TensorboardExperiment): - The TensorboardExperiment to create. - tensorboard_experiment_id (str): - Required. The ID to use for the Tensorboard experiment, - which will become the final component of the Tensorboard - experiment's resource name. - - This value should be 1-128 characters, and valid characters - are /[a-z][0-9]-/. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - tensorboard_experiment = proto.Field( - proto.MESSAGE, - number=2, - message=gca_tensorboard_experiment.TensorboardExperiment, - ) - tensorboard_experiment_id = proto.Field( - proto.STRING, - number=3, - ) - - -class GetTensorboardExperimentRequest(proto.Message): - r"""Request message for - [TensorboardService.GetTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardExperiment]. - - Attributes: - name (str): - Required. The name of the TensorboardExperiment resource. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListTensorboardExperimentsRequest(proto.Message): - r"""Request message for - [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]. - - Attributes: - parent (str): - Required. The resource name of the - Tensorboard to list TensorboardExperiments. - Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}' - filter (str): - Lists the TensorboardExperiments that match - the filter expression. - page_size (int): - The maximum number of TensorboardExperiments - to return. The service may return fewer than - this value. If unspecified, at most 50 - TensorboardExperiments will be returned. The - maximum value is 1000; values above 1000 will be - coerced to 1000. - page_token (str): - A page token, received from a previous - [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments] - call. Provide this to retrieve the subsequent page. - - When paginating, all other parameters provided to - [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments] - must match the call that provided the page token. - order_by (str): - Field to use to sort the list. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. 
-    """
-
-    parent = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-    filter = proto.Field(
-        proto.STRING,
-        number=2,
-    )
-    page_size = proto.Field(
-        proto.INT32,
-        number=3,
-    )
-    page_token = proto.Field(
-        proto.STRING,
-        number=4,
-    )
-    order_by = proto.Field(
-        proto.STRING,
-        number=5,
-    )
-    read_mask = proto.Field(
-        proto.MESSAGE,
-        number=6,
-        message=field_mask_pb2.FieldMask,
-    )
-
-
-class ListTensorboardExperimentsResponse(proto.Message):
-    r"""Response message for
-    [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments].
-
-    Attributes:
-        tensorboard_experiments (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardExperiment]):
-            The TensorboardExperiments matching the
-            request.
-        next_page_token (str):
-            A token, which can be sent as
-            [ListTensorboardExperimentsRequest.page_token][google.cloud.aiplatform.v1beta1.ListTensorboardExperimentsRequest.page_token]
-            to retrieve the next page. If this field is omitted, there
-            are no subsequent pages.
-    """
-
-    @property
-    def raw_page(self):
-        return self
-
-    tensorboard_experiments = proto.RepeatedField(
-        proto.MESSAGE,
-        number=1,
-        message=gca_tensorboard_experiment.TensorboardExperiment,
-    )
-    next_page_token = proto.Field(
-        proto.STRING,
-        number=2,
-    )
-
-
-class UpdateTensorboardExperimentRequest(proto.Message):
-    r"""Request message for
-    [TensorboardService.UpdateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardExperiment].
-
-    Attributes:
-        update_mask (google.protobuf.field_mask_pb2.FieldMask):
-            Required. Field mask is used to specify the fields to be
-            overwritten in the TensorboardExperiment resource by the
-            update. The fields specified in the update_mask are relative
-            to the resource, not the full request. A field will be
-            overwritten if it is in the mask. If the user does not
-            provide a mask then all fields will be overwritten if new
-            values are specified.
-        tensorboard_experiment (google.cloud.aiplatform_v1beta1.types.TensorboardExperiment):
-            Required. The TensorboardExperiment's ``name`` field is used
-            to identify the TensorboardExperiment to be updated. Format:
-            ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}``
-    """
-
-    update_mask = proto.Field(
-        proto.MESSAGE,
-        number=1,
-        message=field_mask_pb2.FieldMask,
-    )
-    tensorboard_experiment = proto.Field(
-        proto.MESSAGE,
-        number=2,
-        message=gca_tensorboard_experiment.TensorboardExperiment,
-    )
-
-
-class DeleteTensorboardExperimentRequest(proto.Message):
-    r"""Request message for
-    [TensorboardService.DeleteTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardExperiment].
-
-    Attributes:
-        name (str):
-            Required. The name of the TensorboardExperiment to be
-            deleted. Format:
-            ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}``
-    """
-
-    name = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-
-
-class CreateTensorboardRunRequest(proto.Message):
-    r"""Request message for
-    [TensorboardService.CreateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun].
-
-    Attributes:
-        parent (str):
-            Required. The resource name of the Tensorboard to create the
-            TensorboardRun in. Format:
-            ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}``
-        tensorboard_run (google.cloud.aiplatform_v1beta1.types.TensorboardRun):
-            Required.
The TensorboardRun to create. - tensorboard_run_id (str): - Required. The ID to use for the Tensorboard run, which will - become the final component of the Tensorboard run's resource - name. - - This value should be 1-128 characters, and valid characters - are /[a-z][0-9]-/. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - tensorboard_run = proto.Field( - proto.MESSAGE, - number=2, - message=gca_tensorboard_run.TensorboardRun, - ) - tensorboard_run_id = proto.Field( - proto.STRING, - number=3, - ) - - -class GetTensorboardRunRequest(proto.Message): - r"""Request message for - [TensorboardService.GetTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardRun]. - - Attributes: - name (str): - Required. The name of the TensorboardRun resource. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ReadTensorboardBlobDataRequest(proto.Message): - r"""Request message for - [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData]. - - Attributes: - time_series (str): - Required. The resource name of the TensorboardTimeSeries to - list Blobs. Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}' - blob_ids (Sequence[str]): - IDs of the blobs to read. - """ - - time_series = proto.Field( - proto.STRING, - number=1, - ) - blob_ids = proto.RepeatedField( - proto.STRING, - number=2, - ) - - -class ReadTensorboardBlobDataResponse(proto.Message): - r"""Response message for - [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData]. - - Attributes: - blobs (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardBlob]): - Blob messages containing blob bytes. - """ - - blobs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=tensorboard_data.TensorboardBlob, - ) - - -class ListTensorboardRunsRequest(proto.Message): - r"""Request message for - [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]. - - Attributes: - parent (str): - Required. The resource name of the - Tensorboard to list TensorboardRuns. Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}' - filter (str): - Lists the TensorboardRuns that match the - filter expression. - page_size (int): - The maximum number of TensorboardRuns to - return. The service may return fewer than this - value. If unspecified, at most 50 - TensorboardRuns will be returned. The maximum - value is 1000; values above 1000 will be coerced - to 1000. - page_token (str): - A page token, received from a previous - [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns] - call. Provide this to retrieve the subsequent page. - - When paginating, all other parameters provided to - [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns] - must match the call that provided the page token. - order_by (str): - Field to use to sort the list. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. 
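
# A minimal sketch (not part of the generated file) of the streaming read
# described above: ReadTensorboardBlobData is a server-streaming RPC, so the
# client call returns an iterable of ReadTensorboardBlobDataResponse
# messages, each carrying a batch of TensorboardBlob values. The resource
# name and blob IDs are placeholders.
from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.TensorboardServiceClient()
request = aiplatform_v1beta1.ReadTensorboardBlobDataRequest(
    time_series=(
        "projects/my-project/locations/us-central1/tensorboards/123"
        "/experiments/exp/runs/run/timeSeries/images"
    ),
    blob_ids=["blob-1", "blob-2"],
)
for response in client.read_tensorboard_blob_data(request=request):
    for blob in response.blobs:
        print(blob.id, len(blob.data))
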
-    """
-
-    parent = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-    filter = proto.Field(
-        proto.STRING,
-        number=2,
-    )
-    page_size = proto.Field(
-        proto.INT32,
-        number=3,
-    )
-    page_token = proto.Field(
-        proto.STRING,
-        number=4,
-    )
-    order_by = proto.Field(
-        proto.STRING,
-        number=5,
-    )
-    read_mask = proto.Field(
-        proto.MESSAGE,
-        number=6,
-        message=field_mask_pb2.FieldMask,
-    )
-
-
-class ListTensorboardRunsResponse(proto.Message):
-    r"""Response message for
-    [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns].
-
-    Attributes:
-        tensorboard_runs (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardRun]):
-            The TensorboardRuns matching the request.
-        next_page_token (str):
-            A token, which can be sent as
-            [ListTensorboardRunsRequest.page_token][google.cloud.aiplatform.v1beta1.ListTensorboardRunsRequest.page_token]
-            to retrieve the next page. If this field is omitted, there
-            are no subsequent pages.
-    """
-
-    @property
-    def raw_page(self):
-        return self
-
-    tensorboard_runs = proto.RepeatedField(
-        proto.MESSAGE,
-        number=1,
-        message=gca_tensorboard_run.TensorboardRun,
-    )
-    next_page_token = proto.Field(
-        proto.STRING,
-        number=2,
-    )
-
-
-class UpdateTensorboardRunRequest(proto.Message):
-    r"""Request message for
-    [TensorboardService.UpdateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardRun].
-
-    Attributes:
-        update_mask (google.protobuf.field_mask_pb2.FieldMask):
-            Required. Field mask is used to specify the fields to be
-            overwritten in the TensorboardRun resource by the update.
-            The fields specified in the update_mask are relative to the
-            resource, not the full request. A field will be overwritten
-            if it is in the mask. If the user does not provide a mask
-            then all fields will be overwritten if new values are
-            specified.
-        tensorboard_run (google.cloud.aiplatform_v1beta1.types.TensorboardRun):
-            Required. The TensorboardRun's ``name`` field is used to
-            identify the TensorboardRun to be updated. Format:
-            ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}``
-    """
-
-    update_mask = proto.Field(
-        proto.MESSAGE,
-        number=1,
-        message=field_mask_pb2.FieldMask,
-    )
-    tensorboard_run = proto.Field(
-        proto.MESSAGE,
-        number=2,
-        message=gca_tensorboard_run.TensorboardRun,
-    )
-
-
-class DeleteTensorboardRunRequest(proto.Message):
-    r"""Request message for
-    [TensorboardService.DeleteTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardRun].
-
-    Attributes:
-        name (str):
-            Required. The name of the TensorboardRun to be deleted.
-            Format:
-            ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}``
-    """
-
-    name = proto.Field(
-        proto.STRING,
-        number=1,
-    )
-
-
-class CreateTensorboardTimeSeriesRequest(proto.Message):
-    r"""Request message for
-    [TensorboardService.CreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardTimeSeries].
-
-    Attributes:
-        parent (str):
-            Required. The resource name of the TensorboardRun to create
-            the TensorboardTimeSeries in. Format:
-            ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}``
-        tensorboard_time_series_id (str):
-            Optional. The user specified unique ID to use for the
-            TensorboardTimeSeries, which will become the final component
-            of the TensorboardTimeSeries's resource name.
Ref: - go/ucaip-user-specified-id - - This value should match "[a-z0-9][a-z0-9-]{0, 127}". - tensorboard_time_series (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries): - Required. The TensorboardTimeSeries to - create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - tensorboard_time_series_id = proto.Field( - proto.STRING, - number=3, - ) - tensorboard_time_series = proto.Field( - proto.MESSAGE, - number=2, - message=gca_tensorboard_time_series.TensorboardTimeSeries, - ) - - -class GetTensorboardTimeSeriesRequest(proto.Message): - r"""Request message for - [TensorboardService.GetTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardTimeSeries]. - - Attributes: - name (str): - Required. The name of the TensorboardTimeSeries resource. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListTensorboardTimeSeriesRequest(proto.Message): - r"""Request message for - [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries]. - - Attributes: - parent (str): - Required. The resource name of the - TensorboardRun to list TensorboardTimeSeries. - Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}' - filter (str): - Lists the TensorboardTimeSeries that match - the filter expression. - page_size (int): - The maximum number of TensorboardTimeSeries - to return. The service may return fewer than - this value. If unspecified, at most 50 - TensorboardTimeSeries will be returned. The - maximum value is 1000; values above 1000 will be - coerced to 1000. - page_token (str): - A page token, received from a previous - [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries] - call. Provide this to retrieve the subsequent page. - - When paginating, all other parameters provided to - [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries] - must match the call that provided the page token. - order_by (str): - Field to use to sort the list. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - order_by = proto.Field( - proto.STRING, - number=5, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=6, - message=field_mask_pb2.FieldMask, - ) - - -class ListTensorboardTimeSeriesResponse(proto.Message): - r"""Response message for - [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries]. - - Attributes: - tensorboard_time_series (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries]): - The TensorboardTimeSeries matching the - request. - next_page_token (str): - A token, which can be sent as - [ListTensorboardTimeSeriesRequest.page_token][google.cloud.aiplatform.v1beta1.ListTensorboardTimeSeriesRequest.page_token] - to retrieve the next page. If this field is omitted, there - are no subsequent pages.
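As an editorial aside: the page_token/next_page_token handshake documented above is normally consumed through the generated pager rather than driven by hand. A minimal sketch, assuming the generated v1beta1 TensorboardServiceClient; the project, location, and resource IDs are illustrative:

from google.cloud import aiplatform_v1beta1

client = aiplatform_v1beta1.TensorboardServiceClient()
parent = (
    "projects/my-project/locations/us-central1/tensorboards/123"
    "/experiments/my-experiment/runs/my-run"
)
# The pager re-issues the request with each next_page_token until the
# service stops returning one, so callers just iterate.
pager = client.list_tensorboard_time_series(
    request={"parent": parent, "page_size": 50}
)
for time_series in pager:
    print(time_series.name)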
- """ - - @property - def raw_page(self): - return self - - tensorboard_time_series = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_tensorboard_time_series.TensorboardTimeSeries, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateTensorboardTimeSeriesRequest(proto.Message): - r"""Request message for - [TensorboardService.UpdateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardTimeSeries]. - - Attributes: - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. Field mask is used to specify the fields to be - overwritten in the TensorboardTimeSeries resource by the - update. The fields specified in the update_mask are relative - to the resource, not the full request. A field will be - overwritten if it is in the mask. If the user does not - provide a mask then all fields will be overwritten if new - values are specified. - tensorboard_time_series (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries): - Required. The TensorboardTimeSeries' ``name`` field is used - to identify the TensorboardTimeSeries to be updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - """ - - update_mask = proto.Field( - proto.MESSAGE, - number=1, - message=field_mask_pb2.FieldMask, - ) - tensorboard_time_series = proto.Field( - proto.MESSAGE, - number=2, - message=gca_tensorboard_time_series.TensorboardTimeSeries, - ) - - -class DeleteTensorboardTimeSeriesRequest(proto.Message): - r"""Request message for - [TensorboardService.DeleteTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardTimeSeries]. - - Attributes: - name (str): - Required. The name of the TensorboardTimeSeries to be - deleted. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ReadTensorboardTimeSeriesDataRequest(proto.Message): - r"""Request message for - [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. - - Attributes: - tensorboard_time_series (str): - Required. The resource name of the TensorboardTimeSeries to - read data from. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - max_data_points (int): - The maximum number of TensorboardTimeSeries' - data to return. - This value should be a positive integer. - This value can be set to -1 to return all data. - filter (str): - Reads the TensorboardTimeSeries' data that - match the filter expression. - """ - - tensorboard_time_series = proto.Field( - proto.STRING, - number=1, - ) - max_data_points = proto.Field( - proto.INT32, - number=2, - ) - filter = proto.Field( - proto.STRING, - number=3, - ) - - -class ReadTensorboardTimeSeriesDataResponse(proto.Message): - r"""Response message for - [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. - - Attributes: - time_series_data (google.cloud.aiplatform_v1beta1.types.TimeSeriesData): - The returned time series data. 
- """ - - time_series_data = proto.Field( - proto.MESSAGE, - number=1, - message=tensorboard_data.TimeSeriesData, - ) - - -class WriteTensorboardRunDataRequest(proto.Message): - r"""Request message for - [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData]. - - Attributes: - tensorboard_run (str): - Required. The resource name of the TensorboardRun to write - data to. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - time_series_data (Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesData]): - Required. The TensorboardTimeSeries data to - write. Values with in a time series are indexed - by their step value. Repeated writes to the same - step will overwrite the existing value for that - step. - The upper limit of data points per write request - is 5000. - """ - - tensorboard_run = proto.Field( - proto.STRING, - number=1, - ) - time_series_data = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=tensorboard_data.TimeSeriesData, - ) - - -class WriteTensorboardRunDataResponse(proto.Message): - r"""Response message for - [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData]. - """ - - -class ExportTensorboardTimeSeriesDataRequest(proto.Message): - r"""Request message for - [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. - - Attributes: - tensorboard_time_series (str): - Required. The resource name of the TensorboardTimeSeries to - export data from. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - filter (str): - Exports the TensorboardTimeSeries' data that - match the filter expression. - page_size (int): - The maximum number of data points to return per page. The - default page_size will be 1000. Values must be between 1 and - 10000. Values above 10000 will be coerced to 10000. - page_token (str): - A page token, received from a previous - [TensorboardService.ExportTensorboardTimeSeries][] call. - Provide this to retrieve the subsequent page. - - When paginating, all other parameters provided to - [TensorboardService.ExportTensorboardTimeSeries][] must - match the call that provided the page token. - order_by (str): - Field to use to sort the - TensorboardTimeSeries' data. By default, - TensorboardTimeSeries' data will be returned in - a pseudo random order. - """ - - tensorboard_time_series = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - order_by = proto.Field( - proto.STRING, - number=5, - ) - - -class ExportTensorboardTimeSeriesDataResponse(proto.Message): - r"""Response message for - [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. - - Attributes: - time_series_data_points (Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesDataPoint]): - The returned time series data points. - next_page_token (str): - A token, which can be sent as - [ExportTensorboardTimeSeriesRequest.page_token][] to - retrieve the next page. If this field is omitted, there are - no subsequent pages. 
- """ - - @property - def raw_page(self): - return self - - time_series_data_points = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=tensorboard_data.TimeSeriesDataPoint, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class CreateTensorboardOperationMetadata(proto.Message): - r"""Details of operations that perform create Tensorboard. - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for Tensorboard. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class UpdateTensorboardOperationMetadata(proto.Message): - r"""Details of operations that perform update Tensorboard. - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for Tensorboard. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_time_series.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_time_series.py deleted file mode 100644 index 298c631fb4..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_time_series.py +++ /dev/null @@ -1,152 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'TensorboardTimeSeries', - }, -) - - -class TensorboardTimeSeries(proto.Message): - r"""TensorboardTimeSeries maps to times series produced in - training runs - - Attributes: - name (str): - Output only. Name of the - TensorboardTimeSeries. - display_name (str): - Required. User provided name of this - TensorboardTimeSeries. This value should be - unique among all TensorboardTimeSeries resources - belonging to the same TensorboardRun resource - (parent resource). - description (str): - Description of this TensorboardTimeSeries. - value_type (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries.ValueType): - Required. Immutable. Type of - TensorboardTimeSeries value. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - TensorboardTimeSeries was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - TensorboardTimeSeries was last updated. - etag (str): - Used to perform a consistent read-modify- - rite updates. If not set, a blind "overwrite" - update happens. - plugin_name (str): - Immutable. Name of the plugin this time - series pertain to. Such as Scalar, Tensor, Blob - plugin_data (bytes): - Data of the current plugin, with the size - limited to 65KB. 
- metadata (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries.Metadata): - Output only. Scalar, Tensor, or Blob metadata - for this TensorboardTimeSeries. - """ - class ValueType(proto.Enum): - r"""An enum representing the value type of a - TensorboardTimeSeries. - """ - VALUE_TYPE_UNSPECIFIED = 0 - SCALAR = 1 - TENSOR = 2 - BLOB_SEQUENCE = 3 - - class Metadata(proto.Message): - r"""Describes metadata for a TensorboardTimeSeries. - Attributes: - max_step (int): - Output only. Max step index of all data - points within a TensorboardTimeSeries. - max_wall_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Max wall clock timestamp of all - data points within a TensorboardTimeSeries. - max_blob_sequence_length (int): - Output only. The largest blob sequence length (number of - blobs) of all data points in this time series, if its - ValueType is BLOB_SEQUENCE. - """ - - max_step = proto.Field( - proto.INT64, - number=1, - ) - max_wall_time = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - max_blob_sequence_length = proto.Field( - proto.INT64, - number=3, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - description = proto.Field( - proto.STRING, - number=3, - ) - value_type = proto.Field( - proto.ENUM, - number=4, - enum=ValueType, - ) - create_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - etag = proto.Field( - proto.STRING, - number=7, - ) - plugin_name = proto.Field( - proto.STRING, - number=8, - ) - plugin_data = proto.Field( - proto.BYTES, - number=9, - ) - metadata = proto.Field( - proto.MESSAGE, - number=10, - message=Metadata, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/training_pipeline.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/training_pipeline.py deleted file mode 100644 index 7f53b63004..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/training_pipeline.py +++ /dev/null @@ -1,548 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
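Since the etag field above promises a consistent read-modify-write flow, a hedged sketch of that flow follows; whether the service actually rejects a stale etag is my reading of the docstring, and the names are illustrative:

from google.cloud import aiplatform_v1beta1
from google.protobuf import field_mask_pb2

client = aiplatform_v1beta1.TensorboardServiceClient()
ts_name = (
    "projects/my-project/locations/us-central1/tensorboards/123"
    "/experiments/my-experiment/runs/my-run/timeSeries/456"
)
# Read the current resource (its etag comes back with it), mutate it
# locally, then write it back. Carrying the etag lets the service
# detect a concurrent change instead of blindly overwriting it.
time_series = client.get_tensorboard_time_series(name=ts_name)
time_series.description = "Smoothed training loss"
client.update_tensorboard_time_series(
    tensorboard_time_series=time_series,
    update_mask=field_mask_pb2.FieldMask(paths=["description"]),
)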
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1beta1.types import io -from google.cloud.aiplatform_v1beta1.types import model -from google.cloud.aiplatform_v1beta1.types import pipeline_state -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'TrainingPipeline', - 'InputDataConfig', - 'FractionSplit', - 'FilterSplit', - 'PredefinedSplit', - 'TimestampSplit', - }, -) - - -class TrainingPipeline(proto.Message): - r"""The TrainingPipeline orchestrates tasks associated with training a - Model. It always executes the training task, and optionally may also - export data from Vertex AI's Dataset which becomes the training - input, - [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] - the Model to Vertex AI, and evaluate the Model. - - Attributes: - name (str): - Output only. Resource name of the - TrainingPipeline. - display_name (str): - Required. The user-defined name of this - TrainingPipeline. - input_data_config (google.cloud.aiplatform_v1beta1.types.InputDataConfig): - Specifies Vertex AI owned input data that may be used for - training the Model. The TrainingPipeline's - [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition] - should make clear whether this config is used and if there - are any special requirements on how it should be filled. If - nothing about this config is mentioned in the - [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition], - then it should be assumed that the TrainingPipeline does not - depend on this configuration. - training_task_definition (str): - Required. A Google Cloud Storage path to the - YAML file that defines the training task which - is responsible for producing the model artifact, - and may also include additional auxiliary work. - The definition files that can be used here are - found in gs://google-cloud- - aiplatform/schema/trainingjob/definition/. Note: - The URI given on output will be immutable and - probably different, including the URI scheme, - than the one given on input. The output URI will - point to a location where the user only has a - read access. - training_task_inputs (google.protobuf.struct_pb2.Value): - Required. The training task's parameter(s), as specified in - the - [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]'s - ``inputs``. - training_task_metadata (google.protobuf.struct_pb2.Value): - Output only. The metadata information as specified in the - [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]'s - ``metadata``. This metadata is an auxiliary runtime and - final information about the training task. While the - pipeline is running this information is populated only at a - best effort basis. Only present if the pipeline's - [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition] - contains ``metadata`` object. - model_to_upload (google.cloud.aiplatform_v1beta1.types.Model): - Describes the Model that may be uploaded (via - [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel]) - by this TrainingPipeline. 
The TrainingPipeline's - [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition] - should make clear whether this Model description should be - populated, and if there are any special requirements - regarding how it should be filled. If nothing is mentioned - in the - [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition], - then it should be assumed that this field should not be - filled and the training task either uploads the Model - without a need of this information, or that training task - does not support uploading a Model as part of the pipeline. - When the Pipeline's state becomes - ``PIPELINE_STATE_SUCCEEDED`` and the trained Model had been - uploaded into Vertex AI, then the model_to_upload's resource - [name][google.cloud.aiplatform.v1beta1.Model.name] is - populated. The Model is always uploaded into the Project and - Location in which this pipeline is. - state (google.cloud.aiplatform_v1beta1.types.PipelineState): - Output only. The detailed state of the - pipeline. - error (google.rpc.status_pb2.Status): - Output only. Only populated when the pipeline's state is - ``PIPELINE_STATE_FAILED`` or ``PIPELINE_STATE_CANCELLED``. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the TrainingPipeline - was created. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the TrainingPipeline for the first - time entered the ``PIPELINE_STATE_RUNNING`` state. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the TrainingPipeline entered any of - the following states: ``PIPELINE_STATE_SUCCEEDED``, - ``PIPELINE_STATE_FAILED``, ``PIPELINE_STATE_CANCELLED``. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the TrainingPipeline - was most recently updated. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.TrainingPipeline.LabelsEntry]): - The labels with user-defined metadata to - organize TrainingPipelines. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): - Customer-managed encryption key spec for a TrainingPipeline. - If set, this TrainingPipeline will be secured by this key. - - Note: Model trained by this TrainingPipeline is also secured - by this key if - [model_to_upload][google.cloud.aiplatform.v1beta1.TrainingPipeline.encryption_spec] - is not set separately. 
- """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - input_data_config = proto.Field( - proto.MESSAGE, - number=3, - message='InputDataConfig', - ) - training_task_definition = proto.Field( - proto.STRING, - number=4, - ) - training_task_inputs = proto.Field( - proto.MESSAGE, - number=5, - message=struct_pb2.Value, - ) - training_task_metadata = proto.Field( - proto.MESSAGE, - number=6, - message=struct_pb2.Value, - ) - model_to_upload = proto.Field( - proto.MESSAGE, - number=7, - message=model.Model, - ) - state = proto.Field( - proto.ENUM, - number=9, - enum=pipeline_state.PipelineState, - ) - error = proto.Field( - proto.MESSAGE, - number=10, - message=status_pb2.Status, - ) - create_time = proto.Field( - proto.MESSAGE, - number=11, - message=timestamp_pb2.Timestamp, - ) - start_time = proto.Field( - proto.MESSAGE, - number=12, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=13, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=14, - message=timestamp_pb2.Timestamp, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=15, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=18, - message=gca_encryption_spec.EncryptionSpec, - ) - - -class InputDataConfig(proto.Message): - r"""Specifies Vertex AI owned input data to be used for training, - and possibly evaluating, the Model. - - Attributes: - fraction_split (google.cloud.aiplatform_v1beta1.types.FractionSplit): - Split based on fractions defining the size of - each set. - filter_split (google.cloud.aiplatform_v1beta1.types.FilterSplit): - Split based on the provided filters for each - set. - predefined_split (google.cloud.aiplatform_v1beta1.types.PredefinedSplit): - Supported only for tabular Datasets. - Split based on a predefined key. - timestamp_split (google.cloud.aiplatform_v1beta1.types.TimestampSplit): - Supported only for tabular Datasets. - Split based on the timestamp of the input data - pieces. - gcs_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination): - The Cloud Storage location where the training data is to be - written to. In the given directory a new directory is - created with name: - ``dataset---`` - where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 - format. All training input data is written into that - directory. - - The Vertex AI environment variables representing Cloud - Storage data URIs are represented in the Cloud Storage - wildcard format to support sharded data. e.g.: - "gs://.../training-*.jsonl" - - - AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for - tabular data - - - AIP_TRAINING_DATA_URI = - "gcs_destination/dataset---/training-*.${AIP_DATA_FORMAT}" - - - AIP_VALIDATION_DATA_URI = - "gcs_destination/dataset---/validation-*.${AIP_DATA_FORMAT}" - - - AIP_TEST_DATA_URI = - "gcs_destination/dataset---/test-*.${AIP_DATA_FORMAT}". - bigquery_destination (google.cloud.aiplatform_v1beta1.types.BigQueryDestination): - Only applicable to custom training with tabular Dataset with - BigQuery source. - - The BigQuery project location where the training data is to - be written to. In the given project a new dataset is created - with name - ``dataset___`` - where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All - training input data is written into that dataset. In the - dataset three tables are created, ``training``, - ``validation`` and ``test``. - - - AIP_DATA_FORMAT = "bigquery". 
- - - AIP_TRAINING_DATA_URI = - "bigquery_destination.dataset\_\ **\ .training" - - - AIP_VALIDATION_DATA_URI = - "bigquery_destination.dataset\_\ **\ .validation" - - - AIP_TEST_DATA_URI = - "bigquery_destination.dataset\_\ **\ .test". - dataset_id (str): - Required. The ID of the Dataset in the same Project and - Location which data will be used to train the Model. The - Dataset must use schema compatible with Model being trained, - and what is compatible should be described in the used - TrainingPipeline's [training_task_definition] - [google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]. - For tabular Datasets, all their data is exported to - training, to pick and choose from. - annotations_filter (str): - Applicable only to Datasets that have DataItems and - Annotations. - - A filter on Annotations of the Dataset. Only Annotations - that both match this filter and belong to DataItems not - ignored by the split method are used in respectively - training, validation or test role, depending on the role of - the DataItem they are on (for the auto-assigned that role is - decided by Vertex AI). A filter with same syntax as the one - used in - [ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations] - may be used, but note here it filters across all Annotations - of the Dataset, and not just within a single DataItem. - annotation_schema_uri (str): - Applicable only to custom training with Datasets that have - DataItems and Annotations. - - Cloud Storage URI that points to a YAML file describing the - annotation schema. The schema is defined as an OpenAPI 3.0.2 - `Schema - Object `__. - The schema files that can be used here are found in - gs://google-cloud-aiplatform/schema/dataset/annotation/ , - note that the chosen schema must be consistent with - [metadata][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri] - of the Dataset specified by - [dataset_id][google.cloud.aiplatform.v1beta1.InputDataConfig.dataset_id]. - - Only Annotations that both match this schema and belong to - DataItems not ignored by the split method are used in - respectively training, validation or test role, depending on - the role of the DataItem they are on. - - When used in conjunction with - [annotations_filter][google.cloud.aiplatform.v1beta1.InputDataConfig.annotations_filter], - the Annotations used for training are filtered by both - [annotations_filter][google.cloud.aiplatform.v1beta1.InputDataConfig.annotations_filter] - and - [annotation_schema_uri][google.cloud.aiplatform.v1beta1.InputDataConfig.annotation_schema_uri]. 
- """ - - fraction_split = proto.Field( - proto.MESSAGE, - number=2, - oneof='split', - message='FractionSplit', - ) - filter_split = proto.Field( - proto.MESSAGE, - number=3, - oneof='split', - message='FilterSplit', - ) - predefined_split = proto.Field( - proto.MESSAGE, - number=4, - oneof='split', - message='PredefinedSplit', - ) - timestamp_split = proto.Field( - proto.MESSAGE, - number=5, - oneof='split', - message='TimestampSplit', - ) - gcs_destination = proto.Field( - proto.MESSAGE, - number=8, - oneof='destination', - message=io.GcsDestination, - ) - bigquery_destination = proto.Field( - proto.MESSAGE, - number=10, - oneof='destination', - message=io.BigQueryDestination, - ) - dataset_id = proto.Field( - proto.STRING, - number=1, - ) - annotations_filter = proto.Field( - proto.STRING, - number=6, - ) - annotation_schema_uri = proto.Field( - proto.STRING, - number=9, - ) - - -class FractionSplit(proto.Message): - r"""Assigns the input data to training, validation, and test sets as per - the given fractions. Any of ``training_fraction``, - ``validation_fraction`` and ``test_fraction`` may optionally be - provided, they must sum to up to 1. If the provided ones sum to less - than 1, the remainder is assigned to sets as decided by Vertex AI. - If none of the fractions are set, by default roughly 80% of data is - used for training, 10% for validation, and 10% for test. - - Attributes: - training_fraction (float): - The fraction of the input data that is to be - used to train the Model. - validation_fraction (float): - The fraction of the input data that is to be - used to validate the Model. - test_fraction (float): - The fraction of the input data that is to be - used to evaluate the Model. - """ - - training_fraction = proto.Field( - proto.DOUBLE, - number=1, - ) - validation_fraction = proto.Field( - proto.DOUBLE, - number=2, - ) - test_fraction = proto.Field( - proto.DOUBLE, - number=3, - ) - - -class FilterSplit(proto.Message): - r"""Assigns input data to training, validation, and test sets - based on the given filters, data pieces not matched by any - filter are ignored. Currently only supported for Datasets - containing DataItems. - If any of the filters in this message are to match nothing, then - they can be set as '-' (the minus sign). - - Supported only for unstructured Datasets. - - Attributes: - training_filter (str): - Required. A filter on DataItems of the Dataset. DataItems - that match this filter are used to train the Model. A filter - with same syntax as the one used in - [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems] - may be used. If a single DataItem is matched by more than - one of the FilterSplit filters, then it is assigned to the - first set that applies to it in the training, validation, - test order. - validation_filter (str): - Required. A filter on DataItems of the Dataset. DataItems - that match this filter are used to validate the Model. A - filter with same syntax as the one used in - [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems] - may be used. If a single DataItem is matched by more than - one of the FilterSplit filters, then it is assigned to the - first set that applies to it in the training, validation, - test order. - test_filter (str): - Required. A filter on DataItems of the Dataset. DataItems - that match this filter are used to test the Model. 
A filter - with same syntax as the one used in - [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems] - may be used. If a single DataItem is matched by more than - one of the FilterSplit filters, then it is assigned to the - first set that applies to it in the training, validation, - test order. - """ - - training_filter = proto.Field( - proto.STRING, - number=1, - ) - validation_filter = proto.Field( - proto.STRING, - number=2, - ) - test_filter = proto.Field( - proto.STRING, - number=3, - ) - - -class PredefinedSplit(proto.Message): - r"""Assigns input data to training, validation, and test sets - based on the value of a provided key. - - Supported only for tabular Datasets. - - Attributes: - key (str): - Required. The key is a name of one of the Dataset's data - columns. The value of the key (either the label's value or - value in the column) must be one of {``training``, - ``validation``, ``test``}, and it defines to which set the - given piece of data is assigned. If for a piece of data the - key is not present or has an invalid value, that piece is - ignored by the pipeline. - """ - - key = proto.Field( - proto.STRING, - number=1, - ) - - -class TimestampSplit(proto.Message): - r"""Assigns input data to training, validation, and test sets - based on the provided timestamps. The youngest data pieces are - assigned to the training set, the next to the validation set, and the oldest - to the test set. - Supported only for tabular Datasets. - - Attributes: - training_fraction (float): - The fraction of the input data that is to be - used to train the Model. - validation_fraction (float): - The fraction of the input data that is to be - used to validate the Model. - test_fraction (float): - The fraction of the input data that is to be - used to evaluate the Model. - key (str): - Required. The key is a name of one of the Dataset's data - columns. The values of the key (the values in the column) - must be in RFC 3339 ``date-time`` format, where - ``time-offset`` = ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z). If - for a piece of data the key is not present or has an invalid - value, that piece is ignored by the pipeline. - """ - - training_fraction = proto.Field( - proto.DOUBLE, - number=1, - ) - validation_fraction = proto.Field( - proto.DOUBLE, - number=2, - ) - test_fraction = proto.Field( - proto.DOUBLE, - number=3, - ) - key = proto.Field( - proto.STRING, - number=4, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/types.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/types.py deleted file mode 100644 index 45df0b2e21..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/types.py +++ /dev/null @@ -1,82 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
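To tie the split types above together, here is a hedged sketch of wiring a FractionSplit into an InputDataConfig on a TrainingPipeline; the schema URI, project, and dataset ID are illustrative, and a real pipeline would also need valid training_task_inputs for the chosen task:

from google.cloud import aiplatform_v1beta1

pipeline = aiplatform_v1beta1.TrainingPipeline(
    display_name="my-training-pipeline",
    training_task_definition=(
        "gs://google-cloud-aiplatform/schema/trainingjob/definition/"
        "automl_tabular_1.0.0.yaml"
    ),
    input_data_config=aiplatform_v1beta1.InputDataConfig(
        dataset_id="1234567890",
        # 'split' is a oneof: setting fraction_split clears any other split.
        fraction_split=aiplatform_v1beta1.FractionSplit(
            training_fraction=0.8,
            validation_fraction=0.1,
            test_fraction=0.1,
        ),
    ),
)
client = aiplatform_v1beta1.PipelineServiceClient()
client.create_training_pipeline(
    parent="projects/my-project/locations/us-central1",
    training_pipeline=pipeline,
)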
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'BoolArray', - 'DoubleArray', - 'Int64Array', - 'StringArray', - }, -) - - -class BoolArray(proto.Message): - r"""A list of boolean values. - Attributes: - values (Sequence[bool]): - A list of bool values. - """ - - values = proto.RepeatedField( - proto.BOOL, - number=1, - ) - - -class DoubleArray(proto.Message): - r"""A list of double values. - Attributes: - values (Sequence[float]): - A list of double values. - """ - - values = proto.RepeatedField( - proto.DOUBLE, - number=1, - ) - - -class Int64Array(proto.Message): - r"""A list of int64 values. - Attributes: - values (Sequence[int]): - A list of int64 values. - """ - - values = proto.RepeatedField( - proto.INT64, - number=1, - ) - - -class StringArray(proto.Message): - r"""A list of string values. - Attributes: - values (Sequence[str]): - A list of string values. - """ - - values = proto.RepeatedField( - proto.STRING, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/user_action_reference.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/user_action_reference.py deleted file mode 100644 index 804f7e883d..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/user_action_reference.py +++ /dev/null @@ -1,64 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'UserActionReference', - }, -) - - -class UserActionReference(proto.Message): - r"""References an API call. It contains more information about - the long-running operation and Jobs that are triggered by the API - call. - - Attributes: - operation (str): - For API calls that return a long running - operation. Resource name of the long running - operation. Format: - 'projects/{project}/locations/{location}/operations/{operation}' - data_labeling_job (str): - For API calls that start a LabelingJob. Resource name of the - LabelingJob. Format: - 'projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}' - method (str): - The method name of the API RPC call. For - example, - "/google.cloud.aiplatform.{apiVersion}.DatasetService.CreateDataset".
- """ - - operation = proto.Field( - proto.STRING, - number=1, - oneof='reference', - ) - data_labeling_job = proto.Field( - proto.STRING, - number=2, - oneof='reference', - ) - method = proto.Field( - proto.STRING, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/value.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/value.py deleted file mode 100644 index 789f7c0840..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/value.py +++ /dev/null @@ -1,55 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'Value', - }, -) - - -class Value(proto.Message): - r"""Value is the value of the field. - Attributes: - int_value (int): - An integer value. - double_value (float): - A double value. - string_value (str): - A string value. - """ - - int_value = proto.Field( - proto.INT64, - number=1, - oneof='value', - ) - double_value = proto.Field( - proto.DOUBLE, - number=2, - oneof='value', - ) - string_value = proto.Field( - proto.STRING, - number=3, - oneof='value', - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/vizier_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/vizier_service.py deleted file mode 100644 index 883b908d5e..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/vizier_service.py +++ /dev/null @@ -1,588 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import operation -from google.cloud.aiplatform_v1beta1.types import study as gca_study -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'GetStudyRequest', - 'CreateStudyRequest', - 'ListStudiesRequest', - 'ListStudiesResponse', - 'DeleteStudyRequest', - 'LookupStudyRequest', - 'SuggestTrialsRequest', - 'SuggestTrialsResponse', - 'SuggestTrialsMetadata', - 'CreateTrialRequest', - 'GetTrialRequest', - 'ListTrialsRequest', - 'ListTrialsResponse', - 'AddTrialMeasurementRequest', - 'CompleteTrialRequest', - 'DeleteTrialRequest', - 'CheckTrialEarlyStoppingStateRequest', - 'CheckTrialEarlyStoppingStateResponse', - 'CheckTrialEarlyStoppingStateMetatdata', - 'StopTrialRequest', - 'ListOptimalTrialsRequest', - 'ListOptimalTrialsResponse', - }, -) - - -class GetStudyRequest(proto.Message): - r"""Request message for - [VizierService.GetStudy][google.cloud.aiplatform.v1beta1.VizierService.GetStudy]. - - Attributes: - name (str): - Required. The name of the Study resource. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateStudyRequest(proto.Message): - r"""Request message for - [VizierService.CreateStudy][google.cloud.aiplatform.v1beta1.VizierService.CreateStudy]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - Study in. Format: - ``projects/{project}/locations/{location}`` - study (google.cloud.aiplatform_v1beta1.types.Study): - Required. The Study configuration used to - create the Study. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - study = proto.Field( - proto.MESSAGE, - number=2, - message=gca_study.Study, - ) - - -class ListStudiesRequest(proto.Message): - r"""Request message for - [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies]. - - Attributes: - parent (str): - Required. The resource name of the Location to list the - Study from. Format: - ``projects/{project}/locations/{location}`` - page_token (str): - Optional. A page token to request the next - page of results. If unspecified, there are no - subsequent pages. - page_size (int): - Optional. The maximum number of studies to - return per "page" of results. If unspecified, - service will pick an appropriate default. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_token = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - - -class ListStudiesResponse(proto.Message): - r"""Response message for - [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies]. - - Attributes: - studies (Sequence[google.cloud.aiplatform_v1beta1.types.Study]): - The studies associated with the project. - next_page_token (str): - Passes this token as the ``page_token`` field of the request - for a subsequent call. If this field is omitted, there are - no subsequent pages. - """ - - @property - def raw_page(self): - return self - - studies = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_study.Study, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteStudyRequest(proto.Message): - r"""Request message for - [VizierService.DeleteStudy][google.cloud.aiplatform.v1beta1.VizierService.DeleteStudy]. - - Attributes: - name (str): - Required.
The name of the Study resource to be deleted. - Format: - ``projects/{project}/locations/{location}/studies/{study}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class LookupStudyRequest(proto.Message): - r"""Request message for - [VizierService.LookupStudy][google.cloud.aiplatform.v1beta1.VizierService.LookupStudy]. - - Attributes: - parent (str): - Required. The resource name of the Location to get the Study - from. Format: ``projects/{project}/locations/{location}`` - display_name (str): - Required. The user-defined display name of - the Study - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - - -class SuggestTrialsRequest(proto.Message): - r"""Request message for - [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials]. - - Attributes: - parent (str): - Required. The project and location that the Study belongs - to. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - suggestion_count (int): - Required. The number of suggestions - requested. - client_id (str): - Required. The identifier of the client that is requesting - the suggestion. - - If multiple SuggestTrialsRequests have the same - ``client_id``, the service will return the identical - suggested Trial if the Trial is pending, and provide a new - Trial if the last suggested Trial was completed. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - suggestion_count = proto.Field( - proto.INT32, - number=2, - ) - client_id = proto.Field( - proto.STRING, - number=3, - ) - - -class SuggestTrialsResponse(proto.Message): - r"""Response message for - [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials]. - - Attributes: - trials (Sequence[google.cloud.aiplatform_v1beta1.types.Trial]): - A list of Trials. - study_state (google.cloud.aiplatform_v1beta1.types.Study.State): - The state of the Study. - start_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the operation was started. - end_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which operation processing - completed. - """ - - trials = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_study.Trial, - ) - study_state = proto.Field( - proto.ENUM, - number=2, - enum=gca_study.Study.State, - ) - start_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - - -class SuggestTrialsMetadata(proto.Message): - r"""Details of operations that perform Trials suggestion. - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for suggesting Trials. - client_id (str): - The identifier of the client that is requesting the - suggestion. - - If multiple SuggestTrialsRequests have the same - ``client_id``, the service will return the identical - suggested Trial if the Trial is pending, and provide a new - Trial if the last suggested Trial was completed. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - client_id = proto.Field( - proto.STRING, - number=2, - ) - - -class CreateTrialRequest(proto.Message): - r"""Request message for - [VizierService.CreateTrial][google.cloud.aiplatform.v1beta1.VizierService.CreateTrial]. - - Attributes: - parent (str): - Required. 
The resource name of the Study to create the Trial - in. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - trial (google.cloud.aiplatform_v1beta1.types.Trial): - Required. The Trial to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - trial = proto.Field( - proto.MESSAGE, - number=2, - message=gca_study.Trial, - ) - - -class GetTrialRequest(proto.Message): - r"""Request message for - [VizierService.GetTrial][google.cloud.aiplatform.v1beta1.VizierService.GetTrial]. - - Attributes: - name (str): - Required. The name of the Trial resource. Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListTrialsRequest(proto.Message): - r"""Request message for - [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials]. - - Attributes: - parent (str): - Required. The resource name of the Study to list the Trial - from. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - page_token (str): - Optional. A page token to request the next - page of results. If unspecified, there are no - subsequent pages. - page_size (int): - Optional. The number of Trials to retrieve - per "page" of results. If unspecified, the - service will pick an appropriate default. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_token = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - - -class ListTrialsResponse(proto.Message): - r"""Response message for - [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials]. - - Attributes: - trials (Sequence[google.cloud.aiplatform_v1beta1.types.Trial]): - The Trials associated with the Study. - next_page_token (str): - Pass this token as the ``page_token`` field of the request - for a subsequent call. If this field is omitted, there are - no subsequent pages. - """ - - @property - def raw_page(self): - return self - - trials = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_study.Trial, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class AddTrialMeasurementRequest(proto.Message): - r"""Request message for - [VizierService.AddTrialMeasurement][google.cloud.aiplatform.v1beta1.VizierService.AddTrialMeasurement]. - - Attributes: - trial_name (str): - Required. The name of the trial to add measurement. Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - measurement (google.cloud.aiplatform_v1beta1.types.Measurement): - Required. The measurement to be added to a - Trial. - """ - - trial_name = proto.Field( - proto.STRING, - number=1, - ) - measurement = proto.Field( - proto.MESSAGE, - number=3, - message=gca_study.Measurement, - ) - - -class CompleteTrialRequest(proto.Message): - r"""Request message for - [VizierService.CompleteTrial][google.cloud.aiplatform.v1beta1.VizierService.CompleteTrial]. - - Attributes: - name (str): - Required. The Trial's name. Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - final_measurement (google.cloud.aiplatform_v1beta1.types.Measurement): - Optional. If provided, it will be used as the completed - Trial's final_measurement; Otherwise, the service will - auto-select a previously reported measurement as the - final-measurement - trial_infeasible (bool): - Optional. 
True if the Trial cannot be run with the given - Parameter, and final_measurement will be ignored. - infeasible_reason (str): - Optional. A human readable reason why the trial was - infeasible. This should only be provided if - ``trial_infeasible`` is true. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - final_measurement = proto.Field( - proto.MESSAGE, - number=2, - message=gca_study.Measurement, - ) - trial_infeasible = proto.Field( - proto.BOOL, - number=3, - ) - infeasible_reason = proto.Field( - proto.STRING, - number=4, - ) - - -class DeleteTrialRequest(proto.Message): - r"""Request message for - [VizierService.DeleteTrial][google.cloud.aiplatform.v1beta1.VizierService.DeleteTrial]. - - Attributes: - name (str): - Required. The Trial's name. Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CheckTrialEarlyStoppingStateRequest(proto.Message): - r"""Request message for - [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState]. - - Attributes: - trial_name (str): - Required. The Trial's name. Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - """ - - trial_name = proto.Field( - proto.STRING, - number=1, - ) - - -class CheckTrialEarlyStoppingStateResponse(proto.Message): - r"""Response message for - [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState]. - - Attributes: - should_stop (bool): - True if the Trial should stop. - """ - - should_stop = proto.Field( - proto.BOOL, - number=1, - ) - - -class CheckTrialEarlyStoppingStateMetatdata(proto.Message): - r"""This message will be placed in the metadata field of a - google.longrunning.Operation associated with a - CheckTrialEarlyStoppingState request. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for suggesting Trials. - study (str): - The name of the Study that the Trial belongs - to. - trial (str): - The Trial name. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - study = proto.Field( - proto.STRING, - number=2, - ) - trial = proto.Field( - proto.STRING, - number=3, - ) - - -class StopTrialRequest(proto.Message): - r"""Request message for - [VizierService.StopTrial][google.cloud.aiplatform.v1beta1.VizierService.StopTrial]. - - Attributes: - name (str): - Required. The Trial's name. Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListOptimalTrialsRequest(proto.Message): - r"""Request message for - [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials]. - - Attributes: - parent (str): - Required. The name of the Study that the - optimal Trial belongs to. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - - -class ListOptimalTrialsResponse(proto.Message): - r"""Response message for - [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials]. - - Attributes: - optimal_trials (Sequence[google.cloud.aiplatform_v1beta1.types.Trial]): - The pareto-optimal Trials for multiple objective Study or - the optimal trial for single objective Study. The definition - of pareto-optimal can be checked in wiki page. 
- https://en.wikipedia.org/wiki/Pareto_efficiency - """ - - optimal_trials = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_study.Trial, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/mypy.ini b/owl-bot-staging/v1beta1/mypy.ini deleted file mode 100644 index 4505b48543..0000000000 --- a/owl-bot-staging/v1beta1/mypy.ini +++ /dev/null @@ -1,3 +0,0 @@ -[mypy] -python_version = 3.6 -namespace_packages = True diff --git a/owl-bot-staging/v1beta1/noxfile.py b/owl-bot-staging/v1beta1/noxfile.py deleted file mode 100644 index ff8279ad3e..0000000000 --- a/owl-bot-staging/v1beta1/noxfile.py +++ /dev/null @@ -1,132 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -import pathlib -import shutil -import subprocess -import sys - - -import nox # type: ignore - -CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() - -LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" -PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") - - -nox.sessions = [ - "unit", - "cover", - "mypy", - "check_lower_bounds", - # exclude update_lower_bounds from default - "docs", -] - -@nox.session(python=['3.6', '3.7', '3.8', '3.9']) -def unit(session): - """Run the unit test suite.""" - - session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio') - session.install('-e', '.') - - session.run( - 'py.test', - '--quiet', - '--cov=google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/', - '--cov-config=.coveragerc', - '--cov-report=term', - '--cov-report=html', - os.path.join('tests', 'unit', ''.join(session.posargs)) - ) - - -@nox.session(python='3.7') -def cover(session): - """Run the final coverage report. - This outputs the coverage report aggregating coverage from the unit - test runs (not system test runs), and then erases coverage data.
- """ - session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=100") - - session.run("coverage", "erase") - - -@nox.session(python=['3.6', '3.7']) -def mypy(session): - """Run the type checker.""" - session.install('mypy', 'types-pkg_resources') - session.install('.') - session.run( - 'mypy', - '--explicit-package-bases', - 'google', - ) - - -@nox.session -def update_lower_bounds(session): - """Update lower bounds in constraints.txt to match setup.py""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'update', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - - -@nox.session -def check_lower_bounds(session): - """Check lower bounds in setup.py are reflected in constraints file""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'check', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - -@nox.session(python='3.6') -def docs(session): - """Build the docs for this library.""" - - session.install("-e", ".") - session.install("sphinx<3.0.0", "alabaster", "recommonmark") - - shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) - session.run( - "sphinx-build", - "-W", # warnings as errors - "-T", # show full traceback on exception - "-N", # no colors - "-b", - "html", - "-d", - os.path.join("docs", "_build", "doctrees", ""), - os.path.join("docs", ""), - os.path.join("docs", "_build", "html", ""), - ) diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/.coveragerc b/owl-bot-staging/v1beta1/schema/predict/instance/.coveragerc deleted file mode 100644 index 39419c276e..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/.coveragerc +++ /dev/null @@ -1,17 +0,0 @@ -[run] -branch = True - -[report] -show_missing = True -omit = - google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py -exclude_lines = - # Re-enable the standard pragma - pragma: NO COVER - # Ignore debug-only repr - def __repr__ - # Ignore pkg_resources exceptions. - # This is added at the module level as a safeguard for if someone - # generates the code and tries to run it without pip installing. This - # makes it virtually impossible to test properly. - except pkg_resources.DistributionNotFound diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/MANIFEST.in b/owl-bot-staging/v1beta1/schema/predict/instance/MANIFEST.in deleted file mode 100644 index 8b11497385..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -recursive-include google/cloud/aiplatform/v1beta1/schema/predict/instance *.py -recursive-include google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1 *.py diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/README.rst b/owl-bot-staging/v1beta1/schema/predict/instance/README.rst deleted file mode 100644 index a45a5dce1c..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/README.rst +++ /dev/null @@ -1,49 +0,0 @@ -Python Client for Google Cloud Aiplatform V1beta1 Schema Predict Instance API -================================================= - -Quick Start ------------ - -In order to use this library, you first need to go through the following steps: - -1. `Select or create a Cloud Platform project.`_ -2. `Enable billing for your project.`_ -3. 
Enable the Google Cloud Aiplatform V1beta1 Schema Predict Instance API. -4. `Setup Authentication.`_ - -.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project -.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project -.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html - -Installation -~~~~~~~~~~~~ - -Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to -create isolated Python environments. The basic problem it addresses is one of -dependencies and versions, and indirectly permissions. - -With `virtualenv`_, it's possible to install this library without needing system -install permissions, and without clashing with the installed system -dependencies. - -.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ - - -Mac/Linux -^^^^^^^^^ - -.. code-block:: console - - python3 -m venv <your-env> - source <your-env>/bin/activate - <your-env>/bin/pip install /path/to/library - - -Windows -^^^^^^^ - -.. code-block:: console - - python3 -m venv <your-env> - <your-env>\Scripts\activate - <your-env>\Scripts\pip.exe install \path\to\library diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/docs/conf.py b/owl-bot-staging/v1beta1/schema/predict/instance/docs/conf.py deleted file mode 100644 index 54743ec93b..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/docs/conf.py +++ /dev/null @@ -1,376 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# -# google-cloud-aiplatform-v1beta1-schema-predict-instance documentation build configuration file -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys -import os -import shlex - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath("..")) - -__version__ = "0.1.0" - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "1.6.3" - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.autosummary", - "sphinx.ext.intersphinx", - "sphinx.ext.coverage", - "sphinx.ext.napoleon", - "sphinx.ext.todo", - "sphinx.ext.viewcode", -] - -# autodoc/autosummary flags -autoclass_content = "both" -autodoc_default_flags = ["members"] -autosummary_generate = True - - -# Add any paths that contain templates here, relative to this directory.
-templates_path = ["_templates"] - -# Allow markdown includes (so releases.md can include CHANGELOG.md) -# http://www.sphinx-doc.org/en/master/markdown.html -source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -source_suffix = [".rst", ".md"] - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = "index" - -# General information about the project. -project = u"google-cloud-aiplatform-v1beta1-schema-predict-instance" -copyright = u"2020, Google, LLC" -author = u"Google APIs" # TODO: autogenerate this bit - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The full version, including alpha/beta/rc tags. -release = __version__ -# The short X.Y version. -version = ".".join(release.split(".")[0:2]) - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ["_build"] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = True - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = "alabaster" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = { - "description": "Google Cloud Aiplatform V1beta1 Schema Predict Client Libraries for Python", - "github_user": "googleapis", - "github_repo": "google-cloud-python", - "github_banner": True, - "font_family": "'Roboto', Georgia, sans", - "head_font_family": "'Roboto', Georgia, serif", - "code_font_family": "'Roboto Mono', 'Consolas', monospace", -} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# "<project> v<release> documentation".
-# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a <link> tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. -# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -# html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# Now only 'ja' uses this config value -# html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -# html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = "google-cloud-aiplatform-v1beta1-schema-predict-instance-doc" - -# -- Options for warnings ------------------------------------------------------ - - -suppress_warnings = [ - # Temporarily suppress this to avoid "more than one target found for - # cross-reference" warning, which are intractable for us to avoid while in - # a mono-repo.
- # See https://github.com/sphinx-doc/sphinx/blob - # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 - "ref.python" -] - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - # 'preamble': '', - # Latex figure (float) alignment - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ( - master_doc, - "google-cloud-aiplatform-v1beta1-schema-predict-instance.tex", - u"google-cloud-aiplatform-v1beta1-schema-predict-instance Documentation", - author, - "manual", - ) -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ( - master_doc, - "google-cloud-aiplatform-v1beta1-schema-predict-instance", - u"Google Cloud Aiplatform V1beta1 Schema Predict Instance Documentation", - [author], - 1, - ) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - master_doc, - "google-cloud-aiplatform-v1beta1-schema-predict-instance", - u"google-cloud-aiplatform-v1beta1-schema-predict-instance Documentation", - author, - "google-cloud-aiplatform-v1beta1-schema-predict-instance", - "GAPIC library for Google Cloud Aiplatform V1beta1 Schema Predict Instance API", - "APIs", - ) -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - - -# Example configuration for intersphinx: refer to the Python standard library. 
-intersphinx_mapping = { - "python": ("http://python.readthedocs.org/en/latest/", None), - "gax": ("https://gax-python.readthedocs.org/en/latest/", None), - "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), - "grpc": ("https://grpc.io/grpc/python/", None), - "requests": ("http://requests.kennethreitz.org/en/stable/", None), - "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), - "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), -} - - -# Napoleon settings -napoleon_google_docstring = True -napoleon_numpy_docstring = True -napoleon_include_private_with_doc = False -napoleon_include_special_with_doc = True -napoleon_use_admonition_for_examples = False -napoleon_use_admonition_for_notes = False -napoleon_use_admonition_for_references = False -napoleon_use_ivar = False -napoleon_use_param = True -napoleon_use_rtype = True diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/docs/index.rst b/owl-bot-staging/v1beta1/schema/predict/instance/docs/index.rst deleted file mode 100644 index 8702e5d3aa..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/docs/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -API Reference -------------- -.. toctree:: - :maxdepth: 2 - - instance_v1beta1/services - instance_v1beta1/types diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/docs/instance_v1beta1/services.rst b/owl-bot-staging/v1beta1/schema/predict/instance/docs/instance_v1beta1/services.rst deleted file mode 100644 index 941dbcca59..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/docs/instance_v1beta1/services.rst +++ /dev/null @@ -1,4 +0,0 @@ -Services for Google Cloud Aiplatform V1beta1 Schema Predict Instance v1beta1 API -================================================================================ -.. toctree:: - :maxdepth: 2 diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/docs/instance_v1beta1/types.rst b/owl-bot-staging/v1beta1/schema/predict/instance/docs/instance_v1beta1/types.rst deleted file mode 100644 index 7caa088065..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/docs/instance_v1beta1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Cloud Aiplatform V1beta1 Schema Predict Instance v1beta1 API -============================================================================= - -.. automodule:: google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py deleted file mode 100644 index 5f9e065de0..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_classification import ImageClassificationPredictionInstance -from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_object_detection import ImageObjectDetectionPredictionInstance -from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_segmentation import ImageSegmentationPredictionInstance -from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.text_classification import TextClassificationPredictionInstance -from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.text_extraction import TextExtractionPredictionInstance -from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.text_sentiment import TextSentimentPredictionInstance -from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.video_action_recognition import VideoActionRecognitionPredictionInstance -from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.video_classification import VideoClassificationPredictionInstance -from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.video_object_tracking import VideoObjectTrackingPredictionInstance - -__all__ = ('ImageClassificationPredictionInstance', - 'ImageObjectDetectionPredictionInstance', - 'ImageSegmentationPredictionInstance', - 'TextClassificationPredictionInstance', - 'TextExtractionPredictionInstance', - 'TextSentimentPredictionInstance', - 'VideoActionRecognitionPredictionInstance', - 'VideoClassificationPredictionInstance', - 'VideoObjectTrackingPredictionInstance', -) diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed deleted file mode 100644 index 46ccbaf568..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1beta1-schema-predict-instance package uses inline types. diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py deleted file mode 100644 index 41ab5407a7..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -from .types.image_classification import ImageClassificationPredictionInstance -from .types.image_object_detection import ImageObjectDetectionPredictionInstance -from .types.image_segmentation import ImageSegmentationPredictionInstance -from .types.text_classification import TextClassificationPredictionInstance -from .types.text_extraction import TextExtractionPredictionInstance -from .types.text_sentiment import TextSentimentPredictionInstance -from .types.video_action_recognition import VideoActionRecognitionPredictionInstance -from .types.video_classification import VideoClassificationPredictionInstance -from .types.video_object_tracking import VideoObjectTrackingPredictionInstance - -__all__ = ( -'ImageClassificationPredictionInstance', -'ImageObjectDetectionPredictionInstance', -'ImageSegmentationPredictionInstance', -'TextClassificationPredictionInstance', -'TextExtractionPredictionInstance', -'TextSentimentPredictionInstance', -'VideoActionRecognitionPredictionInstance', -'VideoClassificationPredictionInstance', -'VideoObjectTrackingPredictionInstance', -) diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_metadata.json deleted file mode 100644 index 38379e8208..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_metadata.json +++ /dev/null @@ -1,7 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1", - "protoPackage": "google.cloud.aiplatform.v1beta1.schema.predict.instance", - "schema": "1.0" -} diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed deleted file mode 100644 index 46ccbaf568..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1beta1-schema-predict-instance package uses inline types. 
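The instance_v1beta1 messages above are plain proto-plus types, so they can be constructed directly and converted into the google.protobuf.Value payloads that the prediction endpoints accept. A minimal sketch of that flow, assuming the package is installed; the file name, bucket URI, and the json_format round-trip are illustrative assumptions, not part of the generated sources:

import base64

from google.protobuf import json_format
from google.protobuf import struct_pb2

from google.cloud.aiplatform.v1beta1.schema.predict import instance_v1beta1

# Image classification: content carries base64-encoded image bytes and
# mime_type must be one of the types listed in the docstrings above.
with open("image.jpg", "rb") as f:  # hypothetical local file
    image_instance = instance_v1beta1.ImageClassificationPredictionInstance(
        content=base64.b64encode(f.read()).decode("utf-8"),
        mime_type="image/jpeg",
    )

# Video classification: content is a Cloud Storage URI; the time segment
# bounds are seconds from the start of the video with an "s" suffix, per
# the docstrings above.
video_instance = instance_v1beta1.VideoClassificationPredictionInstance(
    content="gs://example-bucket/clip.mp4",  # hypothetical URI
    mime_type="video/mp4",
    time_segment_start="0.0s",
    time_segment_end="10.0s",
)


def to_value(msg):
    # proto-plus exposes to_dict() as a classmethod on the message type;
    # the resulting dict parses cleanly into the Value well-known type.
    return json_format.ParseDict(type(msg).to_dict(msg), struct_pb2.Value())


image_payload = to_value(image_instance)
video_payload = to_value(video_instance)

Building payloads through these schema classes rather than handwritten dicts gives field-name checking for free: a misspelled keyword such as mimetype= fails at construction time instead of being silently dropped by the service.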
diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/services/__init__.py deleted file mode 100644 index 4de65971c2..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py deleted file mode 100644 index 80a5332604..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .image_classification import ( - ImageClassificationPredictionInstance, -) -from .image_object_detection import ( - ImageObjectDetectionPredictionInstance, -) -from .image_segmentation import ( - ImageSegmentationPredictionInstance, -) -from .text_classification import ( - TextClassificationPredictionInstance, -) -from .text_extraction import ( - TextExtractionPredictionInstance, -) -from .text_sentiment import ( - TextSentimentPredictionInstance, -) -from .video_action_recognition import ( - VideoActionRecognitionPredictionInstance, -) -from .video_classification import ( - VideoClassificationPredictionInstance, -) -from .video_object_tracking import ( - VideoObjectTrackingPredictionInstance, -) - -__all__ = ( - 'ImageClassificationPredictionInstance', - 'ImageObjectDetectionPredictionInstance', - 'ImageSegmentationPredictionInstance', - 'TextClassificationPredictionInstance', - 'TextExtractionPredictionInstance', - 'TextSentimentPredictionInstance', - 'VideoActionRecognitionPredictionInstance', - 'VideoClassificationPredictionInstance', - 'VideoObjectTrackingPredictionInstance', -) diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py deleted file mode 100644 index aac9e2bc91..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py +++ /dev/null @@ -1,55 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.instance', - manifest={ - 'ImageClassificationPredictionInstance', - }, -) - - -class ImageClassificationPredictionInstance(proto.Message): - r"""Prediction input format for Image Classification. - Attributes: - content (str): - The image bytes or Cloud Storage URI to make - the prediction on. - mime_type (str): - The MIME type of the content of the image. - Only the images in below listed MIME types are - supported. 
- image/jpeg - - image/gif - - image/png - - image/webp - - image/bmp - - image/tiff - - image/vnd.microsoft.icon - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py deleted file mode 100644 index 80a5d797a1..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py +++ /dev/null @@ -1,55 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.instance', - manifest={ - 'ImageObjectDetectionPredictionInstance', - }, -) - - -class ImageObjectDetectionPredictionInstance(proto.Message): - r"""Prediction input format for Image Object Detection. - Attributes: - content (str): - The image bytes or Cloud Storage URI to make - the prediction on. - mime_type (str): - The MIME type of the content of the image. - Only the images in below listed MIME types are - supported. - image/jpeg - - image/gif - - image/png - - image/webp - - image/bmp - - image/tiff - - image/vnd.microsoft.icon - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py deleted file mode 100644 index e1b5cfc21f..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py +++ /dev/null @@ -1,49 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.instance', - manifest={ - 'ImageSegmentationPredictionInstance', - }, -) - - -class ImageSegmentationPredictionInstance(proto.Message): - r"""Prediction input format for Image Segmentation. - Attributes: - content (str): - The image bytes to make the predictions on. - mime_type (str): - The MIME type of the content of the image. - Only the images in below listed MIME types are - supported. - image/jpeg - - image/png - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py deleted file mode 100644 index 0c1ea43a72..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py +++ /dev/null @@ -1,48 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.instance', - manifest={ - 'TextClassificationPredictionInstance', - }, -) - - -class TextClassificationPredictionInstance(proto.Message): - r"""Prediction input format for Text Classification. - Attributes: - content (str): - The text snippet to make the predictions on. - mime_type (str): - The MIME type of the text snippet. The - supported MIME types are listed below. - - text/plain - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py deleted file mode 100644 index 8f47b27080..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.instance', - manifest={ - 'TextExtractionPredictionInstance', - }, -) - - -class TextExtractionPredictionInstance(proto.Message): - r"""Prediction input format for Text Extraction. - Attributes: - content (str): - The text snippet to make the predictions on. - mime_type (str): - The MIME type of the text snippet. The - supported MIME types are listed below. - - text/plain - key (str): - This field is only used for batch prediction. - If a key is provided, the batch prediction - result will be mapped to this key. If omitted, - then the batch prediction result will contain - the entire input instance. Vertex AI will not - check if keys in the request are duplicates, so - it is up to the caller to ensure the keys are - unique. - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - key = proto.Field( - proto.STRING, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py deleted file mode 100644 index ab416779b6..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py +++ /dev/null @@ -1,48 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.instance', - manifest={ - 'TextSentimentPredictionInstance', - }, -) - - -class TextSentimentPredictionInstance(proto.Message): - r"""Prediction input format for Text Sentiment. - Attributes: - content (str): - The text snippet to make the predictions on. - mime_type (str): - The MIME type of the text snippet. The - supported MIME types are listed below.
- - text/plain - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py deleted file mode 100644 index c7a76efda2..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py +++ /dev/null @@ -1,72 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.instance', - manifest={ - 'VideoActionRecognitionPredictionInstance', - }, -) - - -class VideoActionRecognitionPredictionInstance(proto.Message): - r"""Prediction input format for Video Action Recognition. - Attributes: - content (str): - The Google Cloud Storage location of the - video on which to perform the prediction. - mime_type (str): - The MIME type of the content of the video. - Only the following are supported: video/mp4 - video/avi video/quicktime - time_segment_start (str): - The beginning, inclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision. - time_segment_end (str): - The end, exclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision, and "inf" or "Infinity" - is allowed, which means the end of the video. 
- """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.STRING, - number=3, - ) - time_segment_end = proto.Field( - proto.STRING, - number=4, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py deleted file mode 100644 index 56d662ef88..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py +++ /dev/null @@ -1,72 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.instance', - manifest={ - 'VideoClassificationPredictionInstance', - }, -) - - -class VideoClassificationPredictionInstance(proto.Message): - r"""Prediction input format for Video Classification. - Attributes: - content (str): - The Google Cloud Storage location of the - video on which to perform the prediction. - mime_type (str): - The MIME type of the content of the video. - Only the following are supported: video/mp4 - video/avi video/quicktime - time_segment_start (str): - The beginning, inclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision. - time_segment_end (str): - The end, exclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision, and "inf" or "Infinity" - is allowed, which means the end of the video. 
- """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.STRING, - number=3, - ) - time_segment_end = proto.Field( - proto.STRING, - number=4, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py b/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py deleted file mode 100644 index 7344d419a8..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py +++ /dev/null @@ -1,72 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.instance', - manifest={ - 'VideoObjectTrackingPredictionInstance', - }, -) - - -class VideoObjectTrackingPredictionInstance(proto.Message): - r"""Prediction input format for Video Object Tracking. - Attributes: - content (str): - The Google Cloud Storage location of the - video on which to perform the prediction. - mime_type (str): - The MIME type of the content of the video. - Only the following are supported: video/mp4 - video/avi video/quicktime - time_segment_start (str): - The beginning, inclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision. - time_segment_end (str): - The end, exclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision, and "inf" or "Infinity" - is allowed, which means the end of the video. 
- """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.STRING, - number=3, - ) - time_segment_end = proto.Field( - proto.STRING, - number=4, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/mypy.ini b/owl-bot-staging/v1beta1/schema/predict/instance/mypy.ini deleted file mode 100644 index 4505b48543..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/mypy.ini +++ /dev/null @@ -1,3 +0,0 @@ -[mypy] -python_version = 3.6 -namespace_packages = True diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/noxfile.py b/owl-bot-staging/v1beta1/schema/predict/instance/noxfile.py deleted file mode 100644 index c6550a646b..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/noxfile.py +++ /dev/null @@ -1,132 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -import pathlib -import shutil -import subprocess -import sys - - -import nox # type: ignore - -CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() - -LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" -PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") - - -nox.sessions = [ - "unit", - "cover", - "mypy", - "check_lower_bounds" - # exclude update_lower_bounds from default - "docs", -] - -@nox.session(python=['3.6', '3.7', '3.8', '3.9']) -def unit(session): - """Run the unit test suite.""" - - session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio') - session.install('-e', '.') - - session.run( - 'py.test', - '--quiet', - '--cov=google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/', - '--cov-config=.coveragerc', - '--cov-report=term', - '--cov-report=html', - os.path.join('tests', 'unit', ''.join(session.posargs)) - ) - - -@nox.session(python='3.7') -def cover(session): - """Run the final coverage report. - This outputs the coverage report aggregating coverage from the unit - test runs (not system test runs), and then erases coverage data. 
- """ - session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=100") - - session.run("coverage", "erase") - - -@nox.session(python=['3.6', '3.7']) -def mypy(session): - """Run the type checker.""" - session.install('mypy', 'types-pkg_resources') - session.install('.') - session.run( - 'mypy', - '--explicit-package-bases', - 'google', - ) - - -@nox.session -def update_lower_bounds(session): - """Update lower bounds in constraints.txt to match setup.py""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'update', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - - -@nox.session -def check_lower_bounds(session): - """Check lower bounds in setup.py are reflected in constraints file""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'check', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - -@nox.session(python='3.6') -def docs(session): - """Build the docs for this library.""" - - session.install("-e", ".") - session.install("sphinx<3.0.0", "alabaster", "recommonmark") - - shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) - session.run( - "sphinx-build", - "-W", # warnings as errors - "-T", # show full traceback on exception - "-N", # no colors - "-b", - "html", - "-d", - os.path.join("docs", "_build", "doctrees", ""), - os.path.join("docs", ""), - os.path.join("docs", "_build", "html", ""), - ) diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/scripts/fixup_instance_v1beta1_keywords.py b/owl-bot-staging/v1beta1/schema/predict/instance/scripts/fixup_instance_v1beta1_keywords.py deleted file mode 100644 index d9e8bd0b17..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/scripts/fixup_instance_v1beta1_keywords.py +++ /dev/null @@ -1,175 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class instanceCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. 
- return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. - return updated - - kwargs, ctrl_kwargs = partition( - lambda a: not a.keyword.value in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=instanceCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the instance client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. - -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. 
-""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/setup.py b/owl-bot-staging/v1beta1/schema/predict/instance/setup.py deleted file mode 100644 index 203e4d1710..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/setup.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import io -import os -import setuptools # type: ignore - -version = '0.1.0' - -package_root = os.path.abspath(os.path.dirname(__file__)) - -readme_filename = os.path.join(package_root, 'README.rst') -with io.open(readme_filename, encoding='utf-8') as readme_file: - readme = readme_file.read() - -setuptools.setup( - name='google-cloud-aiplatform-v1beta1-schema-predict-instance', - version=version, - long_description=readme, - packages=setuptools.PEP420PackageFinder.find(), - namespace_packages=('google', 'google.cloud', 'google.cloud.aiplatform', 'google.cloud.aiplatform.v1beta1', 'google.cloud.aiplatform.v1beta1.schema', 'google.cloud.aiplatform.v1beta1.schema.predict'), - platforms='Posix; MacOS X; Windows', - include_package_data=True, - install_requires=( - 'google-api-core[grpc] >= 1.27.0, < 2.0.0dev', - 'libcst >= 0.2.5', - 'proto-plus >= 1.15.0', - 'packaging >= 14.3', ), - python_requires='>=3.6', - classifiers=[ - 'Development Status :: 3 - Alpha', - 'Intended Audience :: Developers', - 'Operating System :: OS Independent', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Topic :: Internet', - 'Topic :: Software Development :: Libraries :: Python Modules', - ], - zip_safe=False, -) diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/tests/__init__.py b/owl-bot-staging/v1beta1/schema/predict/instance/tests/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/tests/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/tests/unit/__init__.py b/owl-bot-staging/v1beta1/schema/predict/instance/tests/unit/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/tests/unit/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
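The `namespace_packages` declaration in the setup above is what lets each schema sub-package ship as its own distribution while sharing the `google.cloud.aiplatform` import tree with the main library. Assuming the distribution is installed, imports resolve through the shared namespace, e.g. (the class name is hypothetical for the v1beta1 instance schema):

    from google.cloud.aiplatform.v1beta1.schema.predict import instance
    print(instance.ImageClassificationPredictionInstance)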
-# diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/tests/unit/gapic/__init__.py b/owl-bot-staging/v1beta1/schema/predict/instance/tests/unit/gapic/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/tests/unit/gapic/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/schema/predict/instance/tests/unit/gapic/instance_v1beta1/__init__.py b/owl-bot-staging/v1beta1/schema/predict/instance/tests/unit/gapic/instance_v1beta1/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/instance/tests/unit/gapic/instance_v1beta1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/schema/predict/params/.coveragerc b/owl-bot-staging/v1beta1/schema/predict/params/.coveragerc deleted file mode 100644 index c065601f87..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/params/.coveragerc +++ /dev/null @@ -1,17 +0,0 @@ -[run] -branch = True - -[report] -show_missing = True -omit = - google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py -exclude_lines = - # Re-enable the standard pragma - pragma: NO COVER - # Ignore debug-only repr - def __repr__ - # Ignore pkg_resources exceptions. - # This is added at the module level as a safeguard for if someone - # generates the code and tries to run it without pip installing. This - # makes it virtually impossible to test properly. 
- except pkg_resources.DistributionNotFound diff --git a/owl-bot-staging/v1beta1/schema/predict/params/MANIFEST.in b/owl-bot-staging/v1beta1/schema/predict/params/MANIFEST.in deleted file mode 100644 index d5e40e84cb..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/params/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -recursive-include google/cloud/aiplatform/v1beta1/schema/predict/params *.py -recursive-include google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1 *.py diff --git a/owl-bot-staging/v1beta1/schema/predict/params/README.rst b/owl-bot-staging/v1beta1/schema/predict/params/README.rst deleted file mode 100644 index 7f0e1dfe6c..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/params/README.rst +++ /dev/null @@ -1,49 +0,0 @@ -Python Client for Google Cloud Aiplatform V1beta1 Schema Predict Params API -================================================= - -Quick Start ----------- - -In order to use this library, you first need to go through the following steps: - -1. `Select or create a Cloud Platform project.`_ -2. `Enable billing for your project.`_ -3. Enable the Google Cloud Aiplatform V1beta1 Schema Predict Params API. -4. `Setup Authentication.`_ - -.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project -.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project -.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html - -Installation -~~~~~~~~~~~~ - -Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to -create isolated Python environments. The basic problem it addresses is one of -dependencies and versions, and indirectly permissions. - -With `virtualenv`_, it's possible to install this library without needing system -install permissions, and without clashing with the installed system -dependencies. - -.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ - - -Mac/Linux -^^^^^^^^^ - -.. code-block:: console - - python3 -m venv <your-env> - source <your-env>/bin/activate - <your-env>/bin/pip install /path/to/library - - -Windows -^^^^^^^ - -.. code-block:: console - - python3 -m venv <your-env> - <your-env>\Scripts\activate - <your-env>\Scripts\pip.exe install \path\to\library diff --git a/owl-bot-staging/v1beta1/schema/predict/params/docs/conf.py b/owl-bot-staging/v1beta1/schema/predict/params/docs/conf.py deleted file mode 100644 index 2c264e47b8..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/params/docs/conf.py +++ /dev/null @@ -1,376 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# -# google-cloud-aiplatform-v1beta1-schema-predict-params documentation build configuration file -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default.
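The `.coveragerc` files in these packages (branch coverage, with the pragma and `__repr__` exclusions shown earlier) pair with each noxfile's `unit` and `cover` sessions. Outside of nox, the equivalent manual run would be roughly:

    coverage run -m pytest tests/unit
    coverage report --show-missing --fail-under=100
    coverage erase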
- -import sys -import os -import shlex - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath("..")) - -__version__ = "0.1.0" - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "1.6.3" - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.autosummary", - "sphinx.ext.intersphinx", - "sphinx.ext.coverage", - "sphinx.ext.napoleon", - "sphinx.ext.todo", - "sphinx.ext.viewcode", -] - -# autodoc/autosummary flags -autoclass_content = "both" -autodoc_default_flags = ["members"] -autosummary_generate = True - - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# Allow markdown includes (so releases.md can include CHANGELOG.md) -# http://www.sphinx-doc.org/en/master/markdown.html -source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} - -# The suffix(es) of source filenames. -# You can specify multiple suffixes as a list of strings: -source_suffix = [".rst", ".md"] - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = "index" - -# General information about the project. -project = u"google-cloud-aiplatform-v1beta1-schema-predict-params" -copyright = u"2020, Google, LLC" -author = u"Google APIs" # TODO: autogenerate this bit - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The full version, including alpha/beta/rc tags. -release = __version__ -# The short X.Y version. -version = ".".join(release.split(".")[0:2]) - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ["_build"] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents.
-# keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = True - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = "alabaster" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = { - "description": "Google Cloud Aiplatform V1beta1 Schema Predict Client Libraries for Python", - "github_user": "googleapis", - "github_repo": "google-cloud-python", - "github_banner": True, - "font_family": "'Roboto', Georgia, sans", - "head_font_family": "'Roboto', Georgia, serif", - "code_font_family": "'Roboto Mono', 'Consolas', monospace", -} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# "<project> v<release> documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a <link> tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Language to be used for generating the HTML full-text search index.
-# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -# html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# Now only 'ja' uses this config value -# html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -# html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = "google-cloud-aiplatform-v1beta1-schema-predict-params-doc" - -# -- Options for warnings ------------------------------------------------------ - - -suppress_warnings = [ - # Temporarily suppress this to avoid "more than one target found for - # cross-reference" warning, which are intractable for us to avoid while in - # a mono-repo. - # See https://github.com/sphinx-doc/sphinx/blob - # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 - "ref.python" -] - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - # 'preamble': '', - # Latex figure (float) alignment - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ( - master_doc, - "google-cloud-aiplatform-v1beta1-schema-predict-params.tex", - u"google-cloud-aiplatform-v1beta1-schema-predict-params Documentation", - author, - "manual", - ) -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ( - master_doc, - "google-cloud-aiplatform-v1beta1-schema-predict-params", - u"Google Cloud Aiplatform V1beta1 Schema Predict Params Documentation", - [author], - 1, - ) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - master_doc, - "google-cloud-aiplatform-v1beta1-schema-predict-params", - u"google-cloud-aiplatform-v1beta1-schema-predict-params Documentation", - author, - "google-cloud-aiplatform-v1beta1-schema-predict-params", - "GAPIC library for Google Cloud Aiplatform V1beta1 Schema Predict Params API", - "APIs", - ) -] - -# Documents to append as an appendix to all manuals. 
-# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = { - "python": ("http://python.readthedocs.org/en/latest/", None), - "gax": ("https://gax-python.readthedocs.org/en/latest/", None), - "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), - "grpc": ("https://grpc.io/grpc/python/", None), - "requests": ("http://requests.kennethreitz.org/en/stable/", None), - "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), - "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), -} - - -# Napoleon settings -napoleon_google_docstring = True -napoleon_numpy_docstring = True -napoleon_include_private_with_doc = False -napoleon_include_special_with_doc = True -napoleon_use_admonition_for_examples = False -napoleon_use_admonition_for_notes = False -napoleon_use_admonition_for_references = False -napoleon_use_ivar = False -napoleon_use_param = True -napoleon_use_rtype = True diff --git a/owl-bot-staging/v1beta1/schema/predict/params/docs/index.rst b/owl-bot-staging/v1beta1/schema/predict/params/docs/index.rst deleted file mode 100644 index 7f7e36e347..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/params/docs/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -API Reference -------------- -.. toctree:: - :maxdepth: 2 - - params_v1beta1/services - params_v1beta1/types diff --git a/owl-bot-staging/v1beta1/schema/predict/params/docs/params_v1beta1/services.rst b/owl-bot-staging/v1beta1/schema/predict/params/docs/params_v1beta1/services.rst deleted file mode 100644 index b3b897a0f4..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/params/docs/params_v1beta1/services.rst +++ /dev/null @@ -1,4 +0,0 @@ -Services for Google Cloud Aiplatform V1beta1 Schema Predict Params v1beta1 API -============================================================================== -.. toctree:: - :maxdepth: 2 diff --git a/owl-bot-staging/v1beta1/schema/predict/params/docs/params_v1beta1/types.rst b/owl-bot-staging/v1beta1/schema/predict/params/docs/params_v1beta1/types.rst deleted file mode 100644 index 722a1d8ba0..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/params/docs/params_v1beta1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Cloud Aiplatform V1beta1 Schema Predict Params v1beta1 API -=========================================================================== - -.. 
automodule:: google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py deleted file mode 100644 index 464c39f26c..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_classification import ImageClassificationPredictionParams -from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_object_detection import ImageObjectDetectionPredictionParams -from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_segmentation import ImageSegmentationPredictionParams -from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.video_action_recognition import VideoActionRecognitionPredictionParams -from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.video_classification import VideoClassificationPredictionParams -from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.video_object_tracking import VideoObjectTrackingPredictionParams - -__all__ = ('ImageClassificationPredictionParams', - 'ImageObjectDetectionPredictionParams', - 'ImageSegmentationPredictionParams', - 'VideoActionRecognitionPredictionParams', - 'VideoClassificationPredictionParams', - 'VideoObjectTrackingPredictionParams', -) diff --git a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed deleted file mode 100644 index acdcd7bc60..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1beta1-schema-predict-params package uses inline types. diff --git a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py deleted file mode 100644 index 91b718b437..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -from .types.image_classification import ImageClassificationPredictionParams -from .types.image_object_detection import ImageObjectDetectionPredictionParams -from .types.image_segmentation import ImageSegmentationPredictionParams -from .types.video_action_recognition import VideoActionRecognitionPredictionParams -from .types.video_classification import VideoClassificationPredictionParams -from .types.video_object_tracking import VideoObjectTrackingPredictionParams - -__all__ = ( -'ImageClassificationPredictionParams', -'ImageObjectDetectionPredictionParams', -'ImageSegmentationPredictionParams', -'VideoActionRecognitionPredictionParams', -'VideoClassificationPredictionParams', -'VideoObjectTrackingPredictionParams', -) diff --git a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_metadata.json deleted file mode 100644 index 6b925dd9dc..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_metadata.json +++ /dev/null @@ -1,7 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1", - "protoPackage": "google.cloud.aiplatform.v1beta1.schema.predict.params", - "schema": "1.0" -} diff --git a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed deleted file mode 100644 index acdcd7bc60..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1beta1-schema-predict-params package uses inline types. diff --git a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/services/__init__.py deleted file mode 100644 index 4de65971c2..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
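The two `__init__.py` modules above give the same six message classes two import paths: the versioned `params_v1beta1` package defines them, and the versionless `params` package re-exports them. Assuming the distribution is installed, both statements below name the same proto-plus message class:

    from google.cloud.aiplatform.v1beta1.schema.predict.params import VideoClassificationPredictionParams
    from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1 import VideoClassificationPredictionParams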
-# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py deleted file mode 100644 index 70a92bb59c..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py +++ /dev/null @@ -1,42 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .image_classification import ( - ImageClassificationPredictionParams, -) -from .image_object_detection import ( - ImageObjectDetectionPredictionParams, -) -from .image_segmentation import ( - ImageSegmentationPredictionParams, -) -from .video_action_recognition import ( - VideoActionRecognitionPredictionParams, -) -from .video_classification import ( - VideoClassificationPredictionParams, -) -from .video_object_tracking import ( - VideoObjectTrackingPredictionParams, -) - -__all__ = ( - 'ImageClassificationPredictionParams', - 'ImageObjectDetectionPredictionParams', - 'ImageSegmentationPredictionParams', - 'VideoActionRecognitionPredictionParams', - 'VideoClassificationPredictionParams', - 'VideoObjectTrackingPredictionParams', -) diff --git a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py deleted file mode 100644 index 67c5453a93..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py +++ /dev/null @@ -1,51 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.params', - manifest={ - 'ImageClassificationPredictionParams', - }, -) - - -class ImageClassificationPredictionParams(proto.Message): - r"""Prediction model parameters for Image Classification. - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. 
Default value is - 0.0 - max_predictions (int): - The Model only returns up to that many top, - by confidence score, predictions per instance. - If this number is very high, the Model may - return fewer predictions. Default value is 10. - """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py deleted file mode 100644 index baed8905ee..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.params', - manifest={ - 'ImageObjectDetectionPredictionParams', - }, -) - - -class ImageObjectDetectionPredictionParams(proto.Message): - r"""Prediction model parameters for Image Object Detection. - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. Default value is - 0.0 - max_predictions (int): - The Model only returns up to that many top, - by confidence score, predictions per instance. - Note that number of returned predictions is also - limited by metadata's predictionsLimit. Default - value is 10. - """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py deleted file mode 100644 index 8a5e999504..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py +++ /dev/null @@ -1,44 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
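These params messages are plain proto-plus types, so constructing them is ordinary keyword initialization. A sketch using the fields documented above (the values are illustrative):

    from google.cloud.aiplatform.v1beta1.schema.predict import params_v1beta1 as params

    image_params = params.ImageClassificationPredictionParams(
        confidence_threshold=0.5,  # only return predictions scored >= 0.5
        max_predictions=5,         # at most 5 top-scoring labels per instance
    )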
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.params', - manifest={ - 'ImageSegmentationPredictionParams', - }, -) - - -class ImageSegmentationPredictionParams(proto.Message): - r"""Prediction model parameters for Image Segmentation. - Attributes: - confidence_threshold (float): - When the model predicts category of pixels of - the image, it will only provide predictions for - pixels that it is at least this much confident - about. All other pixels will be classified as - background. Default value is 0.5. - """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py deleted file mode 100644 index 37a8c2bc9c..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.params', - manifest={ - 'VideoActionRecognitionPredictionParams', - }, -) - - -class VideoActionRecognitionPredictionParams(proto.Message): - r"""Prediction model parameters for Video Action Recognition. - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. Default value is - 0.0 - max_predictions (int): - The model only returns up to that many top, - by confidence score, predictions per frame of - the video. If this number is very high, the - Model may return fewer predictions per frame. - Default value is 50. 
- """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py deleted file mode 100644 index ff7bbd1b86..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py +++ /dev/null @@ -1,94 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.params', - manifest={ - 'VideoClassificationPredictionParams', - }, -) - - -class VideoClassificationPredictionParams(proto.Message): - r"""Prediction model parameters for Video Classification. - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. Default value is - 0.0 - max_predictions (int): - The Model only returns up to that many top, - by confidence score, predictions per instance. - If this number is very high, the Model may - return fewer predictions. Default value is - 10,000. - segment_classification (bool): - Set to true to request segment-level - classification. Vertex AI returns labels and - their confidence scores for the entire time - segment of the video that user specified in the - input instance. Default value is true - shot_classification (bool): - Set to true to request shot-level - classification. Vertex AI determines the - boundaries for each camera shot in the entire - time segment of the video that user specified in - the input instance. Vertex AI then returns - labels and their confidence scores for each - detected shot, along with the start and end time - of the shot. - WARNING: Model evaluation is not done for this - classification type, the quality of it depends - on the training data, but there are no metrics - provided to describe that quality. - Default value is false - one_sec_interval_classification (bool): - Set to true to request classification for a - video at one-second intervals. Vertex AI returns - labels and their confidence scores for each - second of the entire time segment of the video - that user specified in the input WARNING: Model - evaluation is not done for this classification - type, the quality of it depends on the training - data, but there are no metrics provided to - describe that quality. 
Default value is false - """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - segment_classification = proto.Field( - proto.BOOL, - number=3, - ) - shot_classification = proto.Field( - proto.BOOL, - number=4, - ) - one_sec_interval_classification = proto.Field( - proto.BOOL, - number=5, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py b/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py deleted file mode 100644 index 4e0e97f8d6..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.params', - manifest={ - 'VideoObjectTrackingPredictionParams', - }, -) - - -class VideoObjectTrackingPredictionParams(proto.Message): - r"""Prediction model parameters for Video Object Tracking. - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. Default value is - 0.0 - max_predictions (int): - The model only returns up to that many top, - by confidence score, predictions per frame of - the video. If this number is very high, the - Model may return fewer predictions per frame. - Default value is 50. - min_bounding_box_size (float): - Only bounding boxes with shortest edge at - least that long as a relative value of video - frame size are returned. Default value is 0.0. - """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - min_bounding_box_size = proto.Field( - proto.FLOAT, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/params/mypy.ini b/owl-bot-staging/v1beta1/schema/predict/params/mypy.ini deleted file mode 100644 index 4505b48543..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/params/mypy.ini +++ /dev/null @@ -1,3 +0,0 @@ -[mypy] -python_version = 3.6 -namespace_packages = True diff --git a/owl-bot-staging/v1beta1/schema/predict/params/noxfile.py b/owl-bot-staging/v1beta1/schema/predict/params/noxfile.py deleted file mode 100644 index 8af100d51e..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/params/noxfile.py +++ /dev/null @@ -1,132 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
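The three boolean switches documented above select the granularity of video classification output: whole segment, per detected camera shot, or per one-second interval. A sketch reusing the `params` alias from the earlier example (values illustrative):

    video_params = params.VideoClassificationPredictionParams(
        confidence_threshold=0.3,
        max_predictions=100,
        segment_classification=True,            # labels for the whole input segment
        shot_classification=True,               # labels per detected camera shot
        one_sec_interval_classification=False,  # skip per-second labels
    )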
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -import pathlib -import shutil -import subprocess -import sys - - -import nox # type: ignore - -CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() - -LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" -PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") - - -nox.sessions = [ - "unit", - "cover", - "mypy", - "check_lower_bounds" - # exclude update_lower_bounds from default - "docs", -] - -@nox.session(python=['3.6', '3.7', '3.8', '3.9']) -def unit(session): - """Run the unit test suite.""" - - session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio') - session.install('-e', '.') - - session.run( - 'py.test', - '--quiet', - '--cov=google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/', - '--cov-config=.coveragerc', - '--cov-report=term', - '--cov-report=html', - os.path.join('tests', 'unit', ''.join(session.posargs)) - ) - - -@nox.session(python='3.7') -def cover(session): - """Run the final coverage report. - This outputs the coverage report aggregating coverage from the unit - test runs (not system test runs), and then erases coverage data. - """ - session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=100") - - session.run("coverage", "erase") - - -@nox.session(python=['3.6', '3.7']) -def mypy(session): - """Run the type checker.""" - session.install('mypy', 'types-pkg_resources') - session.install('.') - session.run( - 'mypy', - '--explicit-package-bases', - 'google', - ) - - -@nox.session -def update_lower_bounds(session): - """Update lower bounds in constraints.txt to match setup.py""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'update', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - - -@nox.session -def check_lower_bounds(session): - """Check lower bounds in setup.py are reflected in constraints file""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'check', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - -@nox.session(python='3.6') -def docs(session): - """Build the docs for this library.""" - - session.install("-e", ".") - session.install("sphinx<3.0.0", "alabaster", "recommonmark") - - shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) - session.run( - "sphinx-build", - "-W", # warnings as errors - "-T", # show full traceback on exception - "-N", # no colors - "-b", - "html", - "-d", - os.path.join("docs", "_build", "doctrees", ""), - os.path.join("docs", ""), - os.path.join("docs", "_build", "html", ""), - ) diff --git a/owl-bot-staging/v1beta1/schema/predict/params/scripts/fixup_params_v1beta1_keywords.py b/owl-bot-staging/v1beta1/schema/predict/params/scripts/fixup_params_v1beta1_keywords.py deleted file mode 100644 index 17915abed8..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/params/scripts/fixup_params_v1beta1_keywords.py +++ 
/dev/null @@ -1,175 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class paramsCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. - return updated - - kwargs, ctrl_kwargs = partition( - lambda a: not a.keyword.value in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=paramsCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. 
- updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the params client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. - -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. -""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta1/schema/predict/params/setup.py b/owl-bot-staging/v1beta1/schema/predict/params/setup.py deleted file mode 100644 index 573175f19b..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/params/setup.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
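The parse/visit/`.code` round trip in `fix_files` above is the whole rewriting pipeline: libcst preserves formatting and comments for anything the transformer leaves alone. A minimal standalone sketch (the input string is illustrative, and with METHOD_TO_PARAMS empty the call is left untouched):

    import libcst as cst

    tree = cst.parse_module("x = do_thing(1, 2)  # comment survives\n")
    updated = tree.visit(paramsCallTransformer())
    print(updated.code)  # identical to the input here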
-# -import io -import os -import setuptools # type: ignore - -version = '0.1.0' - -package_root = os.path.abspath(os.path.dirname(__file__)) - -readme_filename = os.path.join(package_root, 'README.rst') -with io.open(readme_filename, encoding='utf-8') as readme_file: - readme = readme_file.read() - -setuptools.setup( - name='google-cloud-aiplatform-v1beta1-schema-predict-params', - version=version, - long_description=readme, - packages=setuptools.PEP420PackageFinder.find(), - namespace_packages=('google', 'google.cloud', 'google.cloud.aiplatform', 'google.cloud.aiplatform.v1beta1', 'google.cloud.aiplatform.v1beta1.schema', 'google.cloud.aiplatform.v1beta1.schema.predict'), - platforms='Posix; MacOS X; Windows', - include_package_data=True, - install_requires=( - 'google-api-core[grpc] >= 1.27.0, < 2.0.0dev', - 'libcst >= 0.2.5', - 'proto-plus >= 1.15.0', - 'packaging >= 14.3', ), - python_requires='>=3.6', - classifiers=[ - 'Development Status :: 3 - Alpha', - 'Intended Audience :: Developers', - 'Operating System :: OS Independent', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Topic :: Internet', - 'Topic :: Software Development :: Libraries :: Python Modules', - ], - zip_safe=False, -) diff --git a/owl-bot-staging/v1beta1/schema/predict/params/tests/__init__.py b/owl-bot-staging/v1beta1/schema/predict/params/tests/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/params/tests/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/schema/predict/params/tests/unit/__init__.py b/owl-bot-staging/v1beta1/schema/predict/params/tests/unit/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/params/tests/unit/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
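The ``namespace_packages`` declaration in the setup.py above is what lets this leaf distribution share the ``google.cloud.aiplatform`` prefix with sibling packages; a small sketch of the resulting import surface, assuming the package is installed:

.. code-block:: python

    # Only the `params` / `params_v1beta1` leaves belong to this distribution;
    # every prefix up to `...schema.predict` is a shared namespace package.
    from google.cloud.aiplatform.v1beta1.schema.predict import params_v1beta1

    print(params_v1beta1.__name__)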
-# diff --git a/owl-bot-staging/v1beta1/schema/predict/params/tests/unit/gapic/__init__.py b/owl-bot-staging/v1beta1/schema/predict/params/tests/unit/gapic/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/params/tests/unit/gapic/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/schema/predict/params/tests/unit/gapic/params_v1beta1/__init__.py b/owl-bot-staging/v1beta1/schema/predict/params/tests/unit/gapic/params_v1beta1/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/params/tests/unit/gapic/params_v1beta1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/.coveragerc b/owl-bot-staging/v1beta1/schema/predict/prediction/.coveragerc deleted file mode 100644 index 1e0399e149..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/.coveragerc +++ /dev/null @@ -1,17 +0,0 @@ -[run] -branch = True - -[report] -show_missing = True -omit = - google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py -exclude_lines = - # Re-enable the standard pragma - pragma: NO COVER - # Ignore debug-only repr - def __repr__ - # Ignore pkg_resources exceptions. - # This is added at the module level as a safeguard for if someone - # generates the code and tries to run it without pip installing. This - # makes it virtually impossible to test properly. 
- except pkg_resources.DistributionNotFound diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/MANIFEST.in b/owl-bot-staging/v1beta1/schema/predict/prediction/MANIFEST.in deleted file mode 100644 index 0a3bcaf660..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -recursive-include google/cloud/aiplatform/v1beta1/schema/predict/prediction *.py -recursive-include google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1 *.py diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/README.rst b/owl-bot-staging/v1beta1/schema/predict/prediction/README.rst deleted file mode 100644 index 6c2ca66e04..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/README.rst +++ /dev/null @@ -1,49 +0,0 @@ -Python Client for Google Cloud Aiplatform V1beta1 Schema Predict Prediction API -================================================================================ -ated file.

-Quick Start ------------ - -In order to use this library, you first need to go through the following steps: - -1. `Select or create a Cloud Platform project.`_ -2. `Enable billing for your project.`_ -3. Enable the Google Cloud Aiplatform V1beta1 Schema Predict Prediction API. -4. `Setup Authentication.`_ - -.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project -.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project -.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html - -Installation -~~~~~~~~~~~~ - -Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to -create isolated Python environments. The basic problem it addresses is one of -dependencies and versions, and indirectly permissions. - -With `virtualenv`_, it's possible to install this library without needing system -install permissions, and without clashing with the installed system -dependencies. - -.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ - - -Mac/Linux -^^^^^^^^^ - -.. code-block:: console - - python3 -m venv <your-env> - source <your-env>/bin/activate - <your-env>/bin/pip install /path/to/library - - -Windows -^^^^^^^ - -.. code-block:: console - - python3 -m venv <your-env> - <your-env>\Scripts\activate - <your-env>\Scripts\pip.exe install \path\to\library diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/docs/conf.py b/owl-bot-staging/v1beta1/schema/predict/prediction/docs/conf.py deleted file mode 100644 index f0ddfdbe71..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/docs/conf.py +++ /dev/null @@ -1,376 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# -# google-cloud-aiplatform-v1beta1-schema-predict-prediction documentation build configuration file -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file.
-# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys -import os -import shlex - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath("..")) - -__version__ = "0.1.0" - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "1.6.3" - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.autosummary", - "sphinx.ext.intersphinx", - "sphinx.ext.coverage", - "sphinx.ext.napoleon", - "sphinx.ext.todo", - "sphinx.ext.viewcode", -] - -# autodoc/autosummary flags -autoclass_content = "both" -autodoc_default_flags = ["members"] -autosummary_generate = True - - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# Allow markdown includes (so releases.md can include CHANGELOG.md) -# http://www.sphinx-doc.org/en/master/markdown.html -source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} - -# The suffix(es) of source filenames. -# You can specify multiple suffixes as a list of strings: -source_suffix = [".rst", ".md"] - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = "index" - -# General information about the project. -project = u"google-cloud-aiplatform-v1beta1-schema-predict-prediction" -copyright = u"2020, Google, LLC" -author = u"Google APIs" # TODO: autogenerate this bit - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The full version, including alpha/beta/rc tags. -release = __version__ -# The short X.Y version. -version = ".".join(release.split(".")[0:2]) - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ["_build"] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting.
-# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = True - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = "alabaster" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = { - "description": "Google Cloud Aiplatform V1beta1 Schema Predict Client Libraries for Python", - "github_user": "googleapis", - "github_repo": "google-cloud-python", - "github_banner": True, - "font_family": "'Roboto', Georgia, sans", - "head_font_family": "'Roboto', Georgia, serif", - "code_font_family": "'Roboto Mono', 'Consolas', monospace", -} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). 
-# html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. -# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -# html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# Now only 'ja' uses this config value -# html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -# html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = "google-cloud-aiplatform-v1beta1-schema-predict-prediction-doc" - -# -- Options for warnings ------------------------------------------------------ - - -suppress_warnings = [ - # Temporarily suppress this to avoid "more than one target found for - # cross-reference" warning, which are intractable for us to avoid while in - # a mono-repo. - # See https://github.com/sphinx-doc/sphinx/blob - # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 - "ref.python" -] - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - # 'preamble': '', - # Latex figure (float) alignment - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ( - master_doc, - "google-cloud-aiplatform-v1beta1-schema-predict-prediction.tex", - u"google-cloud-aiplatform-v1beta1-schema-predict-prediction Documentation", - author, - "manual", - ) -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ( - master_doc, - "google-cloud-aiplatform-v1beta1-schema-predict-prediction", - u"Google Cloud Aiplatform V1beta1 Schema Predict Prediction Documentation", - [author], - 1, - ) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. 
List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - master_doc, - "google-cloud-aiplatform-v1beta1-schema-predict-prediction", - u"google-cloud-aiplatform-v1beta1-schema-predict-prediction Documentation", - author, - "google-cloud-aiplatform-v1beta1-schema-predict-prediction", - "GAPIC library for Google Cloud Aiplatform V1beta1 Schema Predict Prediction API", - "APIs", - ) -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = { - "python": ("http://python.readthedocs.org/en/latest/", None), - "gax": ("https://gax-python.readthedocs.org/en/latest/", None), - "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), - "grpc": ("https://grpc.io/grpc/python/", None), - "requests": ("http://requests.kennethreitz.org/en/stable/", None), - "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), - "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), -} - - -# Napoleon settings -napoleon_google_docstring = True -napoleon_numpy_docstring = True -napoleon_include_private_with_doc = False -napoleon_include_special_with_doc = True -napoleon_use_admonition_for_examples = False -napoleon_use_admonition_for_notes = False -napoleon_use_admonition_for_references = False -napoleon_use_ivar = False -napoleon_use_param = True -napoleon_use_rtype = True diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/docs/index.rst b/owl-bot-staging/v1beta1/schema/predict/prediction/docs/index.rst deleted file mode 100644 index 7fc86a6ed5..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/docs/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -API Reference -------------- -.. toctree:: - :maxdepth: 2 - - prediction_v1beta1/services - prediction_v1beta1/types diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/docs/prediction_v1beta1/services.rst b/owl-bot-staging/v1beta1/schema/predict/prediction/docs/prediction_v1beta1/services.rst deleted file mode 100644 index 6de5e17520..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/docs/prediction_v1beta1/services.rst +++ /dev/null @@ -1,4 +0,0 @@ -Services for Google Cloud Aiplatform V1beta1 Schema Predict Prediction v1beta1 API -================================================================================== -.. toctree:: - :maxdepth: 2 diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/docs/prediction_v1beta1/types.rst b/owl-bot-staging/v1beta1/schema/predict/prediction/docs/prediction_v1beta1/types.rst deleted file mode 100644 index b14182d6d7..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/docs/prediction_v1beta1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Cloud Aiplatform V1beta1 Schema Predict Prediction v1beta1 API -=============================================================================== - -.. 
automodule:: google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py deleted file mode 100644 index 0b54451ca0..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.classification import ClassificationPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.image_object_detection import ImageObjectDetectionPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.image_segmentation import ImageSegmentationPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.tabular_classification import TabularClassificationPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.tabular_regression import TabularRegressionPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.text_extraction import TextExtractionPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.text_sentiment import TextSentimentPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.time_series_forecasting import TimeSeriesForecastingPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.video_action_recognition import VideoActionRecognitionPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.video_classification import VideoClassificationPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.video_object_tracking import VideoObjectTrackingPredictionResult - -__all__ = ('ClassificationPredictionResult', - 'ImageObjectDetectionPredictionResult', - 'ImageSegmentationPredictionResult', - 'TabularClassificationPredictionResult', - 'TabularRegressionPredictionResult', - 'TextExtractionPredictionResult', - 'TextSentimentPredictionResult', - 'TimeSeriesForecastingPredictionResult', - 'VideoActionRecognitionPredictionResult', - 'VideoClassificationPredictionResult', - 'VideoObjectTrackingPredictionResult', -) diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed deleted file mode 100644 index 8cf97d7107..0000000000 --- 
a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1beta1-schema-predict-prediction package uses inline types. diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py deleted file mode 100644 index 495759c24b..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py +++ /dev/null @@ -1,42 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -from .types.classification import ClassificationPredictionResult -from .types.image_object_detection import ImageObjectDetectionPredictionResult -from .types.image_segmentation import ImageSegmentationPredictionResult -from .types.tabular_classification import TabularClassificationPredictionResult -from .types.tabular_regression import TabularRegressionPredictionResult -from .types.text_extraction import TextExtractionPredictionResult -from .types.text_sentiment import TextSentimentPredictionResult -from .types.time_series_forecasting import TimeSeriesForecastingPredictionResult -from .types.video_action_recognition import VideoActionRecognitionPredictionResult -from .types.video_classification import VideoClassificationPredictionResult -from .types.video_object_tracking import VideoObjectTrackingPredictionResult - -__all__ = ( -'ClassificationPredictionResult', -'ImageObjectDetectionPredictionResult', -'ImageSegmentationPredictionResult', -'TabularClassificationPredictionResult', -'TabularRegressionPredictionResult', -'TextExtractionPredictionResult', -'TextSentimentPredictionResult', -'TimeSeriesForecastingPredictionResult', -'VideoActionRecognitionPredictionResult', -'VideoClassificationPredictionResult', -'VideoObjectTrackingPredictionResult', -) diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_metadata.json deleted file mode 100644 index 99d3dc6402..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_metadata.json +++ /dev/null @@ -1,7 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1", - "protoPackage": "google.cloud.aiplatform.v1beta1.schema.predict.prediction", - "schema": "1.0" -} diff --git 
a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed deleted file mode 100644 index 8cf97d7107..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1beta1-schema-predict-prediction package uses inline types. diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/services/__init__.py deleted file mode 100644 index 4de65971c2..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py deleted file mode 100644 index f3b70f66dd..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py +++ /dev/null @@ -1,62 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
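The ``prediction`` and ``prediction_v1beta1`` packages above re-export the same proto-plus messages, so either import path works once the distribution is installed; a brief sketch:

.. code-block:: python

    # Versioned module, explicit about the v1beta1 schema...
    from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1 import (
        ClassificationPredictionResult,
    )
    # ...or the unversioned alias, which re-exports the same class objects.
    from google.cloud.aiplatform.v1beta1.schema.predict.prediction import (
        ClassificationPredictionResult as AliasedResult,
    )

    assert ClassificationPredictionResult is AliasedResult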
-# -from .classification import ( - ClassificationPredictionResult, -) -from .image_object_detection import ( - ImageObjectDetectionPredictionResult, -) -from .image_segmentation import ( - ImageSegmentationPredictionResult, -) -from .tabular_classification import ( - TabularClassificationPredictionResult, -) -from .tabular_regression import ( - TabularRegressionPredictionResult, -) -from .text_extraction import ( - TextExtractionPredictionResult, -) -from .text_sentiment import ( - TextSentimentPredictionResult, -) -from .time_series_forecasting import ( - TimeSeriesForecastingPredictionResult, -) -from .video_action_recognition import ( - VideoActionRecognitionPredictionResult, -) -from .video_classification import ( - VideoClassificationPredictionResult, -) -from .video_object_tracking import ( - VideoObjectTrackingPredictionResult, -) - -__all__ = ( - 'ClassificationPredictionResult', - 'ImageObjectDetectionPredictionResult', - 'ImageSegmentationPredictionResult', - 'TabularClassificationPredictionResult', - 'TabularRegressionPredictionResult', - 'TextExtractionPredictionResult', - 'TextSentimentPredictionResult', - 'TimeSeriesForecastingPredictionResult', - 'VideoActionRecognitionPredictionResult', - 'VideoClassificationPredictionResult', - 'VideoObjectTrackingPredictionResult', -) diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py deleted file mode 100644 index ecd16c7250..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', - manifest={ - 'ClassificationPredictionResult', - }, -) - - -class ClassificationPredictionResult(proto.Message): - r"""Prediction output format for Image and Text Classification. - Attributes: - ids (Sequence[int]): - The resource IDs of the AnnotationSpecs that - had been identified. - display_names (Sequence[str]): - The display names of the AnnotationSpecs that - had been identified, order matches the IDs. - confidences (Sequence[float]): - The Model's confidences in correctness of the - predicted IDs, higher value means higher - confidence. Order matches the Ids. 
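A hedged sketch of constructing the message documented above (its fields are defined just below); proto-plus messages take their fields as keyword arguments:

.. code-block:: python

    from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1 import (
        ClassificationPredictionResult,
    )

    # The three lists are index-aligned: the i-th id, display name, and
    # confidence all describe the same predicted AnnotationSpec.
    result = ClassificationPredictionResult(
        ids=[123, 456],
        display_names=["daisy", "tulip"],
        confidences=[0.92, 0.05],
    )
    best = max(range(len(result.ids)), key=lambda i: result.confidences[i])
    print(result.display_names[best])  # daisy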
- """ - - ids = proto.RepeatedField( - proto.INT64, - number=1, - ) - display_names = proto.RepeatedField( - proto.STRING, - number=2, - ) - confidences = proto.RepeatedField( - proto.FLOAT, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py deleted file mode 100644 index d787871e99..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py +++ /dev/null @@ -1,72 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import struct_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', - manifest={ - 'ImageObjectDetectionPredictionResult', - }, -) - - -class ImageObjectDetectionPredictionResult(proto.Message): - r"""Prediction output format for Image Object Detection. - Attributes: - ids (Sequence[int]): - The resource IDs of the AnnotationSpecs that - had been identified, ordered by the confidence - score descendingly. - display_names (Sequence[str]): - The display names of the AnnotationSpecs that - had been identified, order matches the IDs. - confidences (Sequence[float]): - The Model's confidences in correctness of the - predicted IDs, higher value means higher - confidence. Order matches the Ids. - bboxes (Sequence[google.protobuf.struct_pb2.ListValue]): - Bounding boxes, i.e. the rectangles over the image, that - pinpoint the found AnnotationSpecs. Given in order that - matches the IDs. Each bounding box is an array of 4 numbers - ``xMin``, ``xMax``, ``yMin``, and ``yMax``, which represent - the extremal coordinates of the box. They are relative to - the image size, and the point 0,0 is in the top left of the - image. 
- """ - - ids = proto.RepeatedField( - proto.INT64, - number=1, - ) - display_names = proto.RepeatedField( - proto.STRING, - number=2, - ) - confidences = proto.RepeatedField( - proto.FLOAT, - number=3, - ) - bboxes = proto.RepeatedField( - proto.MESSAGE, - number=4, - message=struct_pb2.ListValue, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py deleted file mode 100644 index 92cc20720c..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', - manifest={ - 'ImageSegmentationPredictionResult', - }, -) - - -class ImageSegmentationPredictionResult(proto.Message): - r"""Prediction output format for Image Segmentation. - Attributes: - category_mask (str): - A PNG image where each pixel in the mask - represents the category in which the pixel in - the original image was predicted to belong to. - The size of this image will be the same as the - original image. The mapping between the - AnntoationSpec and the color can be found in - model's metadata. The model will choose the most - likely category and if none of the categories - reach the confidence threshold, the pixel will - be marked as background. - confidence_mask (str): - A one channel image which is encoded as an - 8bit lossless PNG. The size of the image will be - the same as the original image. For a specific - pixel, darker color means less confidence in - correctness of the cateogry in the categoryMask - for the corresponding pixel. Black means no - confidence and white means complete confidence. 
- """ - - category_mask = proto.Field( - proto.STRING, - number=1, - ) - confidence_mask = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py deleted file mode 100644 index 8a437022fd..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py +++ /dev/null @@ -1,51 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', - manifest={ - 'TabularClassificationPredictionResult', - }, -) - - -class TabularClassificationPredictionResult(proto.Message): - r"""Prediction output format for Tabular Classification. - Attributes: - classes (Sequence[str]): - The name of the classes being classified, - contains all possible values of the target - column. - scores (Sequence[float]): - The model's confidence in each class being - correct, higher value means higher confidence. - The N-th score corresponds to the N-th class in - classes. - """ - - classes = proto.RepeatedField( - proto.STRING, - number=1, - ) - scores = proto.RepeatedField( - proto.FLOAT, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py deleted file mode 100644 index a49f6f55ce..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', - manifest={ - 'TabularRegressionPredictionResult', - }, -) - - -class TabularRegressionPredictionResult(proto.Message): - r"""Prediction output format for Tabular Regression. - Attributes: - value (float): - The regression value. - lower_bound (float): - The lower bound of the prediction interval. - upper_bound (float): - The upper bound of the prediction interval. - """ - - value = proto.Field( - proto.FLOAT, - number=1, - ) - lower_bound = proto.Field( - proto.FLOAT, - number=2, - ) - upper_bound = proto.Field( - proto.FLOAT, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py deleted file mode 100644 index a92d9caefa..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py +++ /dev/null @@ -1,77 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', - manifest={ - 'TextExtractionPredictionResult', - }, -) - - -class TextExtractionPredictionResult(proto.Message): - r"""Prediction output format for Text Extraction. - Attributes: - ids (Sequence[int]): - The resource IDs of the AnnotationSpecs that - had been identified, ordered by the confidence - score descendingly. - display_names (Sequence[str]): - The display names of the AnnotationSpecs that - had been identified, order matches the IDs. - text_segment_start_offsets (Sequence[int]): - The start offsets, inclusive, of the text - segment in which the AnnotationSpec has been - identified. Expressed as a zero-based number of - characters as measured from the start of the - text snippet. - text_segment_end_offsets (Sequence[int]): - The end offsets, inclusive, of the text - segment in which the AnnotationSpec has been - identified. Expressed as a zero-based number of - characters as measured from the start of the - text snippet. - confidences (Sequence[float]): - The Model's confidences in correctness of the - predicted IDs, higher value means higher - confidence. Order matches the Ids. 
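Since both offset lists above are zero-based and inclusive, recovering each identified span needs an end-plus-one slice; a hedged sketch, where ``text`` is the original snippet and ``result`` a hypothetical TextExtractionPredictionResult:

.. code-block:: python

    for name, start, end in zip(
        result.display_names,
        result.text_segment_start_offsets,
        result.text_segment_end_offsets,
    ):
        # Both offsets are inclusive, hence the end + 1 in the slice.
        print(name, repr(text[start:end + 1]))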
- """ - - ids = proto.RepeatedField( - proto.INT64, - number=1, - ) - display_names = proto.RepeatedField( - proto.STRING, - number=2, - ) - text_segment_start_offsets = proto.RepeatedField( - proto.INT64, - number=3, - ) - text_segment_end_offsets = proto.RepeatedField( - proto.INT64, - number=4, - ) - confidences = proto.RepeatedField( - proto.FLOAT, - number=5, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py deleted file mode 100644 index 4967b02aae..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py +++ /dev/null @@ -1,47 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', - manifest={ - 'TextSentimentPredictionResult', - }, -) - - -class TextSentimentPredictionResult(proto.Message): - r"""Prediction output format for Text Sentiment - Attributes: - sentiment (int): - The integer sentiment labels between 0 - (inclusive) and sentimentMax label (inclusive), - while 0 maps to the least positive sentiment and - sentimentMax maps to the most positive one. The - higher the score is, the more positive the - sentiment in the text snippet is. Note: - sentimentMax is an integer value between 1 - (inclusive) and 10 (inclusive). - """ - - sentiment = proto.Field( - proto.INT32, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py deleted file mode 100644 index 67a3cd9dff..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', - manifest={ - 'TimeSeriesForecastingPredictionResult', - }, -) - - -class TimeSeriesForecastingPredictionResult(proto.Message): - r"""Prediction output format for Time Series Forecasting. - Attributes: - value (float): - The regression value. - """ - - value = proto.Field( - proto.FLOAT, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py deleted file mode 100644 index bc53328da4..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py +++ /dev/null @@ -1,84 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', - manifest={ - 'VideoActionRecognitionPredictionResult', - }, -) - - -class VideoActionRecognitionPredictionResult(proto.Message): - r"""Prediction output format for Video Action Recognition. - Attributes: - id (str): - The resource ID of the AnnotationSpec that - had been identified. - display_name (str): - The display name of the AnnotationSpec that - had been identified. - time_segment_start (google.protobuf.duration_pb2.Duration): - The beginning, inclusive, of the video's time - segment in which the AnnotationSpec has been - identified. Expressed as a number of seconds as - measured from the start of the video, with - fractions up to a microsecond precision, and - with "s" appended at the end. - time_segment_end (google.protobuf.duration_pb2.Duration): - The end, exclusive, of the video's time - segment in which the AnnotationSpec has been - identified. Expressed as a number of seconds as - measured from the start of the video, with - fractions up to a microsecond precision, and - with "s" appended at the end. - confidence (google.protobuf.wrappers_pb2.FloatValue): - The Model's confidence in the correctness of this - prediction, higher value means higher - confidence.
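The two ``Duration`` fields above make segment length straightforward to compute; a hedged sketch, assuming proto-plus surfaces them as ``datetime.timedelta`` (its usual behavior) and that ``result`` is a VideoActionRecognitionPredictionResult:

.. code-block:: python

    # timedelta arithmetic works directly on the two segment boundaries.
    segment = result.time_segment_end - result.time_segment_start
    print(f"{result.display_name}: {segment.total_seconds():.3f}s segment")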
- """ - - id = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.MESSAGE, - number=4, - message=duration_pb2.Duration, - ) - time_segment_end = proto.Field( - proto.MESSAGE, - number=5, - message=duration_pb2.Duration, - ) - confidence = proto.Field( - proto.MESSAGE, - number=6, - message=wrappers_pb2.FloatValue, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py deleted file mode 100644 index 95439add5e..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py +++ /dev/null @@ -1,102 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', - manifest={ - 'VideoClassificationPredictionResult', - }, -) - - -class VideoClassificationPredictionResult(proto.Message): - r"""Prediction output format for Video Classification. - Attributes: - id (str): - The resource ID of the AnnotationSpec that - had been identified. - display_name (str): - The display name of the AnnotationSpec that - had been identified. - type_ (str): - The type of the prediction. The requested - types can be configured via parameters. This - will be one of - segment-classification - - shot-classification - - one-sec-interval-classification - time_segment_start (google.protobuf.duration_pb2.Duration): - The beginning, inclusive, of the video's time - segment in which the AnnotationSpec has been - identified. Expressed as a number of seconds as - measured from the start of the video, with - fractions up to a microsecond precision, and - with "s" appended at the end. Note that for - 'segment-classification' prediction type, this - equals the original 'timeSegmentStart' from the - input instance, for other types it is the start - of a shot or a 1 second interval respectively. - time_segment_end (google.protobuf.duration_pb2.Duration): - The end, exclusive, of the video's time - segment in which the AnnotationSpec has been - identified. Expressed as a number of seconds as - measured from the start of the video, with - fractions up to a microsecond precision, and - with "s" appended at the end. Note that for - 'segment-classification' prediction type, this - equals the original 'timeSegmentEnd' from the - input instance, for other types it is the end of - a shot or a 1 second interval respectively. 
- confidence (google.protobuf.wrappers_pb2.FloatValue): - The Model's confidence in correction of this - prediction, higher value means higher - confidence. - """ - - id = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - type_ = proto.Field( - proto.STRING, - number=3, - ) - time_segment_start = proto.Field( - proto.MESSAGE, - number=4, - message=duration_pb2.Duration, - ) - time_segment_end = proto.Field( - proto.MESSAGE, - number=5, - message=duration_pb2.Duration, - ) - confidence = proto.Field( - proto.MESSAGE, - number=6, - message=wrappers_pb2.FloatValue, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py b/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py deleted file mode 100644 index 34cf7ab1b9..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py +++ /dev/null @@ -1,144 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', - manifest={ - 'VideoObjectTrackingPredictionResult', - }, -) - - -class VideoObjectTrackingPredictionResult(proto.Message): - r"""Prediction output format for Video Object Tracking. - Attributes: - id (str): - The resource ID of the AnnotationSpec that - had been identified. - display_name (str): - The display name of the AnnotationSpec that - had been identified. - time_segment_start (google.protobuf.duration_pb2.Duration): - The beginning, inclusive, of the video's time - segment in which the object instance has been - detected. Expressed as a number of seconds as - measured from the start of the video, with - fractions up to a microsecond precision, and - with "s" appended at the end. - time_segment_end (google.protobuf.duration_pb2.Duration): - The end, inclusive, of the video's time - segment in which the object instance has been - detected. Expressed as a number of seconds as - measured from the start of the video, with - fractions up to a microsecond precision, and - with "s" appended at the end. - confidence (google.protobuf.wrappers_pb2.FloatValue): - The Model's confidence in correction of this - prediction, higher value means higher - confidence. - frames (Sequence[google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.VideoObjectTrackingPredictionResult.Frame]): - All of the frames of the video in which a - single object instance has been detected. 
The - bounding boxes in the frames identify the same - object. - """ - - class Frame(proto.Message): - r"""The fields ``xMin``, ``xMax``, ``yMin``, and ``yMax`` refer to a - bounding box, i.e. the rectangle over the video frame pinpointing - the found AnnotationSpec. The coordinates are relative to the frame - size, and the point 0,0 is in the top left of the frame. - - Attributes: - time_offset (google.protobuf.duration_pb2.Duration): - A time (frame) of a video in which the object - has been detected. Expressed as a number of - seconds as measured from the start of the video, - with fractions up to a microsecond precision, - and with "s" appended at the end. - x_min (google.protobuf.wrappers_pb2.FloatValue): - The leftmost coordinate of the bounding box. - x_max (google.protobuf.wrappers_pb2.FloatValue): - The rightmost coordinate of the bounding box. - y_min (google.protobuf.wrappers_pb2.FloatValue): - The topmost coordinate of the bounding box. - y_max (google.protobuf.wrappers_pb2.FloatValue): - The bottommost coordinate of the bounding - box. - """ - - time_offset = proto.Field( - proto.MESSAGE, - number=1, - message=duration_pb2.Duration, - ) - x_min = proto.Field( - proto.MESSAGE, - number=2, - message=wrappers_pb2.FloatValue, - ) - x_max = proto.Field( - proto.MESSAGE, - number=3, - message=wrappers_pb2.FloatValue, - ) - y_min = proto.Field( - proto.MESSAGE, - number=4, - message=wrappers_pb2.FloatValue, - ) - y_max = proto.Field( - proto.MESSAGE, - number=5, - message=wrappers_pb2.FloatValue, - ) - - id = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.MESSAGE, - number=3, - message=duration_pb2.Duration, - ) - time_segment_end = proto.Field( - proto.MESSAGE, - number=4, - message=duration_pb2.Duration, - ) - confidence = proto.Field( - proto.MESSAGE, - number=5, - message=wrappers_pb2.FloatValue, - ) - frames = proto.RepeatedField( - proto.MESSAGE, - number=6, - message=Frame, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/mypy.ini b/owl-bot-staging/v1beta1/schema/predict/prediction/mypy.ini deleted file mode 100644 index 4505b48543..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/mypy.ini +++ /dev/null @@ -1,3 +0,0 @@ -[mypy] -python_version = 3.6 -namespace_packages = True diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/noxfile.py b/owl-bot-staging/v1beta1/schema/predict/prediction/noxfile.py deleted file mode 100644 index 112aeac644..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/noxfile.py +++ /dev/null @@ -1,132 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
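
The Frame message above stores its bounding box as relative coordinates (xMin/xMax/yMin/yMax in [0, 1], origin at the top left of the frame, wrapped in FloatValue). A minimal sketch of recovering pixel coordinates from one frame; frame_width and frame_height are assumed caller-supplied inputs, not fields of the message, and the sketch assumes proto-plus's usual marshaling of FloatValue fields to plain Python floats:

    # Sketch: convert a VideoObjectTrackingPredictionResult.Frame's relative
    # bounding box into pixel coordinates. Assumption: proto-plus marshals
    # FloatValue wrapper fields to plain Python floats (or None).
    def frame_box_to_pixels(frame, frame_width, frame_height):
        left = frame.x_min * frame_width
        right = frame.x_max * frame_width
        top = frame.y_min * frame_height
        bottom = frame.y_max * frame_height
        return left, top, right, bottom
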
-# -import os -import pathlib -import shutil -import subprocess -import sys - - -import nox # type: ignore - -CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() - -LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" -PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") - - -nox.sessions = [ - "unit", - "cover", - "mypy", - "check_lower_bounds" - # exclude update_lower_bounds from default - "docs", -] - -@nox.session(python=['3.6', '3.7', '3.8', '3.9']) -def unit(session): - """Run the unit test suite.""" - - session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio') - session.install('-e', '.') - - session.run( - 'py.test', - '--quiet', - '--cov=google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/', - '--cov-config=.coveragerc', - '--cov-report=term', - '--cov-report=html', - os.path.join('tests', 'unit', ''.join(session.posargs)) - ) - - -@nox.session(python='3.7') -def cover(session): - """Run the final coverage report. - This outputs the coverage report aggregating coverage from the unit - test runs (not system test runs), and then erases coverage data. - """ - session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=100") - - session.run("coverage", "erase") - - -@nox.session(python=['3.6', '3.7']) -def mypy(session): - """Run the type checker.""" - session.install('mypy', 'types-pkg_resources') - session.install('.') - session.run( - 'mypy', - '--explicit-package-bases', - 'google', - ) - - -@nox.session -def update_lower_bounds(session): - """Update lower bounds in constraints.txt to match setup.py""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'update', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - - -@nox.session -def check_lower_bounds(session): - """Check lower bounds in setup.py are reflected in constraints file""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'check', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - -@nox.session(python='3.6') -def docs(session): - """Build the docs for this library.""" - - session.install("-e", ".") - session.install("sphinx<3.0.0", "alabaster", "recommonmark") - - shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) - session.run( - "sphinx-build", - "-W", # warnings as errors - "-T", # show full traceback on exception - "-N", # no colors - "-b", - "html", - "-d", - os.path.join("docs", "_build", "doctrees", ""), - os.path.join("docs", ""), - os.path.join("docs", "_build", "html", ""), - ) diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/scripts/fixup_prediction_v1beta1_keywords.py b/owl-bot-staging/v1beta1/schema/predict/prediction/scripts/fixup_prediction_v1beta1_keywords.py deleted file mode 100644 index 7860bb63cf..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/scripts/fixup_prediction_v1beta1_keywords.py +++ /dev/null @@ -1,175 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
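
One caveat in the noxfile deleted above: "check_lower_bounds" in the nox.sessions list has no trailing comma, so Python's implicit string concatenation fuses it with "docs" into the single entry "check_lower_boundsdocs", and neither session is selected by default. The list as presumably intended:

    # As presumably intended; note the comma after "check_lower_bounds",
    # without which the adjacent string literals concatenate into
    # "check_lower_boundsdocs".
    nox.sessions = [
        "unit",
        "cover",
        "mypy",
        "check_lower_bounds",
        # exclude update_lower_bounds from default
        "docs",
    ]
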
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class predictionCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. - return updated - - kwargs, ctrl_kwargs = partition( - lambda a: not a.keyword.value in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=predictionCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the prediction client library. 
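
The partition() helper defined above is repeated verbatim in each fixup script in this patch. A quick illustration of its contract (the true-list is returned first, then the false-list):

    # Assuming partition() from the script above is in scope.
    evens, odds = partition(lambda n: n % 2 == 0, [1, 2, 3, 4, 5])
    assert evens == [2, 4]
    assert odds == [1, 3, 5]
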
- -The existing sources are NOT overwritten but are copied to output_dir with changes made. - -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. -""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/setup.py b/owl-bot-staging/v1beta1/schema/predict/prediction/setup.py deleted file mode 100644 index 4044a5396a..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/setup.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import io -import os -import setuptools # type: ignore - -version = '0.1.0' - -package_root = os.path.abspath(os.path.dirname(__file__)) - -readme_filename = os.path.join(package_root, 'README.rst') -with io.open(readme_filename, encoding='utf-8') as readme_file: - readme = readme_file.read() - -setuptools.setup( - name='google-cloud-aiplatform-v1beta1-schema-predict-prediction', - version=version, - long_description=readme, - packages=setuptools.PEP420PackageFinder.find(), - namespace_packages=('google', 'google.cloud', 'google.cloud.aiplatform', 'google.cloud.aiplatform.v1beta1', 'google.cloud.aiplatform.v1beta1.schema', 'google.cloud.aiplatform.v1beta1.schema.predict'), - platforms='Posix; MacOS X; Windows', - include_package_data=True, - install_requires=( - 'google-api-core[grpc] >= 1.27.0, < 2.0.0dev', - 'libcst >= 0.2.5', - 'proto-plus >= 1.15.0', - 'packaging >= 14.3', ), - python_requires='>=3.6', - classifiers=[ - 'Development Status :: 3 - Alpha', - 'Intended Audience :: Developers', - 'Operating System :: OS Independent', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Topic :: Internet', - 'Topic :: Software Development :: Libraries :: Python Modules', - ], - zip_safe=False, -) diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/tests/__init__.py b/owl-bot-staging/v1beta1/schema/predict/prediction/tests/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/tests/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/tests/unit/__init__.py b/owl-bot-staging/v1beta1/schema/predict/prediction/tests/unit/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/tests/unit/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
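
For context, a hedged sketch of how the namespace package defined by the setup.py above would typically be consumed once installed. The import path follows the package layout in this diff; that prediction_v1beta1's __init__ re-exports the result types is an assumption:

    # Sketch only: assumes prediction_v1beta1/__init__.py re-exports the
    # prediction result types deleted earlier in this diff.
    from google.cloud.aiplatform.v1beta1.schema.predict import prediction_v1beta1

    result = prediction_v1beta1.VideoClassificationPredictionResult(
        id='123',
        display_name='cat',
        type_='segment-classification',
    )
    print(result)
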
-# diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/tests/unit/gapic/__init__.py b/owl-bot-staging/v1beta1/schema/predict/prediction/tests/unit/gapic/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/tests/unit/gapic/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/schema/predict/prediction/tests/unit/gapic/prediction_v1beta1/__init__.py b/owl-bot-staging/v1beta1/schema/predict/prediction/tests/unit/gapic/prediction_v1beta1/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1beta1/schema/predict/prediction/tests/unit/gapic/prediction_v1beta1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/scripts/fixup_aiplatform_v1beta1_keywords.py b/owl-bot-staging/v1beta1/scripts/fixup_aiplatform_v1beta1_keywords.py deleted file mode 100644 index 8fdb92d15f..0000000000 --- a/owl-bot-staging/v1beta1/scripts/fixup_aiplatform_v1beta1_keywords.py +++ /dev/null @@ -1,348 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class aiplatformCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - 'add_context_artifacts_and_executions': ('context', 'artifacts', 'executions', ), - 'add_context_children': ('context', 'child_contexts', ), - 'add_execution_events': ('execution', 'events', ), - 'add_trial_measurement': ('trial_name', 'measurement', ), - 'batch_create_features': ('parent', 'requests', ), - 'batch_migrate_resources': ('parent', 'migrate_resource_requests', ), - 'batch_read_feature_values': ('featurestore', 'destination', 'entity_type_specs', 'csv_read_instances', ), - 'cancel_batch_prediction_job': ('name', ), - 'cancel_custom_job': ('name', ), - 'cancel_data_labeling_job': ('name', ), - 'cancel_hyperparameter_tuning_job': ('name', ), - 'cancel_pipeline_job': ('name', ), - 'cancel_training_pipeline': ('name', ), - 'check_trial_early_stopping_state': ('trial_name', ), - 'complete_trial': ('name', 'final_measurement', 'trial_infeasible', 'infeasible_reason', ), - 'create_artifact': ('parent', 'artifact', 'artifact_id', ), - 'create_batch_prediction_job': ('parent', 'batch_prediction_job', ), - 'create_context': ('parent', 'context', 'context_id', ), - 'create_custom_job': ('parent', 'custom_job', ), - 'create_data_labeling_job': ('parent', 'data_labeling_job', ), - 'create_dataset': ('parent', 'dataset', ), - 'create_endpoint': ('parent', 'endpoint', ), - 'create_entity_type': ('parent', 'entity_type_id', 'entity_type', ), - 'create_execution': ('parent', 'execution', 'execution_id', ), - 'create_feature': ('parent', 'feature', 'feature_id', ), - 'create_featurestore': ('parent', 'featurestore', 'featurestore_id', ), - 'create_hyperparameter_tuning_job': ('parent', 'hyperparameter_tuning_job', ), - 'create_index': ('parent', 'index', ), - 'create_index_endpoint': ('parent', 'index_endpoint', ), - 'create_metadata_schema': ('parent', 'metadata_schema', 'metadata_schema_id', ), - 'create_metadata_store': ('parent', 'metadata_store', 'metadata_store_id', ), - 'create_model_deployment_monitoring_job': ('parent', 'model_deployment_monitoring_job', ), - 'create_pipeline_job': ('parent', 'pipeline_job', 'pipeline_job_id', ), - 'create_specialist_pool': ('parent', 'specialist_pool', ), - 'create_study': ('parent', 'study', ), - 'create_tensorboard': ('parent', 'tensorboard', ), - 'create_tensorboard_experiment': ('parent', 'tensorboard_experiment_id', 'tensorboard_experiment', ), - 'create_tensorboard_run': ('parent', 'tensorboard_run', 'tensorboard_run_id', ), - 'create_tensorboard_time_series': ('parent', 'tensorboard_time_series', 'tensorboard_time_series_id', ), - 'create_training_pipeline': ('parent', 'training_pipeline', ), - 'create_trial': ('parent', 'trial', ), - 'delete_batch_prediction_job': ('name', ), - 'delete_context': ('name', 'force', ), - 'delete_custom_job': ('name', ), - 'delete_data_labeling_job': ('name', ), - 'delete_dataset': ('name', ), - 'delete_endpoint': ('name', ), - 'delete_entity_type': ('name', 'force', ), - 'delete_feature': ('name', ), - 
'delete_featurestore': ('name', 'force', ), - 'delete_hyperparameter_tuning_job': ('name', ), - 'delete_index': ('name', ), - 'delete_index_endpoint': ('name', ), - 'delete_metadata_store': ('name', 'force', ), - 'delete_model': ('name', ), - 'delete_model_deployment_monitoring_job': ('name', ), - 'delete_pipeline_job': ('name', ), - 'delete_specialist_pool': ('name', 'force', ), - 'delete_study': ('name', ), - 'delete_tensorboard': ('name', ), - 'delete_tensorboard_experiment': ('name', ), - 'delete_tensorboard_run': ('name', ), - 'delete_tensorboard_time_series': ('name', ), - 'delete_training_pipeline': ('name', ), - 'delete_trial': ('name', ), - 'deploy_index': ('index_endpoint', 'deployed_index', ), - 'deploy_model': ('endpoint', 'deployed_model', 'traffic_split', ), - 'explain': ('endpoint', 'instances', 'parameters', 'explanation_spec_override', 'deployed_model_id', ), - 'export_data': ('name', 'export_config', ), - 'export_feature_values': ('entity_type', 'destination', 'feature_selector', 'snapshot_export', 'settings', ), - 'export_model': ('name', 'output_config', ), - 'export_tensorboard_time_series_data': ('tensorboard_time_series', 'filter', 'page_size', 'page_token', 'order_by', ), - 'get_annotation_spec': ('name', 'read_mask', ), - 'get_artifact': ('name', ), - 'get_batch_prediction_job': ('name', ), - 'get_context': ('name', ), - 'get_custom_job': ('name', ), - 'get_data_labeling_job': ('name', ), - 'get_dataset': ('name', 'read_mask', ), - 'get_endpoint': ('name', ), - 'get_entity_type': ('name', ), - 'get_execution': ('name', ), - 'get_feature': ('name', ), - 'get_featurestore': ('name', ), - 'get_hyperparameter_tuning_job': ('name', ), - 'get_index': ('name', ), - 'get_index_endpoint': ('name', ), - 'get_metadata_schema': ('name', ), - 'get_metadata_store': ('name', ), - 'get_model': ('name', ), - 'get_model_deployment_monitoring_job': ('name', ), - 'get_model_evaluation': ('name', ), - 'get_model_evaluation_slice': ('name', ), - 'get_pipeline_job': ('name', ), - 'get_specialist_pool': ('name', ), - 'get_study': ('name', ), - 'get_tensorboard': ('name', ), - 'get_tensorboard_experiment': ('name', ), - 'get_tensorboard_run': ('name', ), - 'get_tensorboard_time_series': ('name', ), - 'get_training_pipeline': ('name', ), - 'get_trial': ('name', ), - 'import_data': ('name', 'import_configs', ), - 'import_feature_values': ('entity_type', 'feature_specs', 'avro_source', 'bigquery_source', 'csv_source', 'feature_time_field', 'feature_time', 'entity_id_field', 'disable_online_serving', 'worker_count', ), - 'list_annotations': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), - 'list_artifacts': ('parent', 'page_size', 'page_token', 'filter', ), - 'list_batch_prediction_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_contexts': ('parent', 'page_size', 'page_token', 'filter', ), - 'list_custom_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_data_items': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), - 'list_data_labeling_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), - 'list_datasets': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), - 'list_endpoints': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_entity_types': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), - 'list_executions': ('parent', 'page_size', 'page_token', 'filter', ), - 'list_features': 
('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', 'latest_stats_count', ), - 'list_featurestores': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), - 'list_hyperparameter_tuning_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_index_endpoints': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_indexes': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_metadata_schemas': ('parent', 'page_size', 'page_token', 'filter', ), - 'list_metadata_stores': ('parent', 'page_size', 'page_token', ), - 'list_model_deployment_monitoring_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_model_evaluations': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_model_evaluation_slices': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_models': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_optimal_trials': ('parent', ), - 'list_pipeline_jobs': ('parent', 'filter', 'page_size', 'page_token', ), - 'list_specialist_pools': ('parent', 'page_size', 'page_token', 'read_mask', ), - 'list_studies': ('parent', 'page_token', 'page_size', ), - 'list_tensorboard_experiments': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), - 'list_tensorboard_runs': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), - 'list_tensorboards': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), - 'list_tensorboard_time_series': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), - 'list_training_pipelines': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_trials': ('parent', 'page_token', 'page_size', ), - 'lookup_study': ('parent', 'display_name', ), - 'pause_model_deployment_monitoring_job': ('name', ), - 'predict': ('endpoint', 'instances', 'parameters', ), - 'query_artifact_lineage_subgraph': ('artifact', 'max_hops', 'filter', ), - 'query_context_lineage_subgraph': ('context', ), - 'query_execution_inputs_and_outputs': ('execution', ), - 'read_feature_values': ('entity_type', 'entity_id', 'feature_selector', ), - 'read_tensorboard_blob_data': ('time_series', 'blob_ids', ), - 'read_tensorboard_time_series_data': ('tensorboard_time_series', 'max_data_points', 'filter', ), - 'resume_model_deployment_monitoring_job': ('name', ), - 'search_features': ('location', 'query', 'page_size', 'page_token', ), - 'search_migratable_resources': ('parent', 'page_size', 'page_token', 'filter', ), - 'search_model_deployment_monitoring_stats_anomalies': ('model_deployment_monitoring_job', 'deployed_model_id', 'objectives', 'feature_display_name', 'page_size', 'page_token', 'start_time', 'end_time', ), - 'stop_trial': ('name', ), - 'streaming_read_feature_values': ('entity_type', 'entity_ids', 'feature_selector', ), - 'suggest_trials': ('parent', 'suggestion_count', 'client_id', ), - 'undeploy_index': ('index_endpoint', 'deployed_index_id', ), - 'undeploy_model': ('endpoint', 'deployed_model_id', 'traffic_split', ), - 'update_artifact': ('artifact', 'update_mask', 'allow_missing', ), - 'update_context': ('context', 'update_mask', 'allow_missing', ), - 'update_dataset': ('dataset', 'update_mask', ), - 'update_endpoint': ('endpoint', 'update_mask', ), - 'update_entity_type': ('entity_type', 'update_mask', ), - 'update_execution': ('execution', 'update_mask', 'allow_missing', ), - 'update_feature': ('feature', 
'update_mask', ), - 'update_featurestore': ('featurestore', 'update_mask', ), - 'update_index': ('index', 'update_mask', ), - 'update_index_endpoint': ('index_endpoint', 'update_mask', ), - 'update_model': ('model', 'update_mask', ), - 'update_model_deployment_monitoring_job': ('model_deployment_monitoring_job', 'update_mask', ), - 'update_specialist_pool': ('specialist_pool', 'update_mask', ), - 'update_tensorboard': ('update_mask', 'tensorboard', ), - 'update_tensorboard_experiment': ('update_mask', 'tensorboard_experiment', ), - 'update_tensorboard_run': ('update_mask', 'tensorboard_run', ), - 'update_tensorboard_time_series': ('update_mask', 'tensorboard_time_series', ), - 'upload_model': ('parent', 'model', ), - 'write_tensorboard_run_data': ('tensorboard_run', 'time_series_data', ), - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. - return updated - - kwargs, ctrl_kwargs = partition( - lambda a: not a.keyword.value in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=aiplatformCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the aiplatform client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. - -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. 
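
To make the rewrite concrete, a before/after sketch of what leave_Call above produces for a flattened positional call, using the ('parent', 'dataset') signature registered for create_dataset in METHOD_TO_PARAMS; the client and variable names are illustrative only:

    # Before: positional arguments plus a control parameter.
    client.create_dataset(parent, my_dataset, timeout=30.0)

    # After the fixup: positional args are folded into a single 'request'
    # dict, while control params (retry/timeout/metadata) stay as keywords.
    client.create_dataset(request={
        'parent': parent,
        'dataset': my_dataset,
    }, timeout=30.0)
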
- Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. -""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta1/scripts/fixup_definition_v1beta1_keywords.py b/owl-bot-staging/v1beta1/scripts/fixup_definition_v1beta1_keywords.py deleted file mode 100644 index 4759d2731c..0000000000 --- a/owl-bot-staging/v1beta1/scripts/fixup_definition_v1beta1_keywords.py +++ /dev/null @@ -1,175 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class definitionCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. 
- return updated - - kwargs, ctrl_kwargs = partition( - lambda a: not a.keyword.value in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=definitionCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the definition client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. - -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. 
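
An illustration of the failure modes enumerated above, i.e. call shapes the transformer cannot convert correctly (its documented false negatives); the method and resource names here are hypothetical:

    params = {'name': 'projects/p/locations/us-central1/studies/s1'}
    client.get_study(**params)          # (A) ** expansion in the call
    lookup = client.get_study
    lookup('projects/p/locations/us-central1/studies/s1')  # (B) call via alias
    getattr(client, 'get_study')(       # (C) indirect/dispatched call
        'projects/p/locations/us-central1/studies/s1')
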
-""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta1/scripts/fixup_instance_v1beta1_keywords.py b/owl-bot-staging/v1beta1/scripts/fixup_instance_v1beta1_keywords.py deleted file mode 100644 index d9e8bd0b17..0000000000 --- a/owl-bot-staging/v1beta1/scripts/fixup_instance_v1beta1_keywords.py +++ /dev/null @@ -1,175 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class instanceCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. 
- return updated - - kwargs, ctrl_kwargs = partition( - lambda a: not a.keyword.value in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=instanceCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the instance client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. - -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. 
-""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta1/scripts/fixup_params_v1beta1_keywords.py b/owl-bot-staging/v1beta1/scripts/fixup_params_v1beta1_keywords.py deleted file mode 100644 index 17915abed8..0000000000 --- a/owl-bot-staging/v1beta1/scripts/fixup_params_v1beta1_keywords.py +++ /dev/null @@ -1,175 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class paramsCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. 
- return updated - - kwargs, ctrl_kwargs = partition( - lambda a: not a.keyword.value in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=paramsCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the params client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. - -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. 
-""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta1/scripts/fixup_prediction_v1beta1_keywords.py b/owl-bot-staging/v1beta1/scripts/fixup_prediction_v1beta1_keywords.py deleted file mode 100644 index 7860bb63cf..0000000000 --- a/owl-bot-staging/v1beta1/scripts/fixup_prediction_v1beta1_keywords.py +++ /dev/null @@ -1,175 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class predictionCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. 
- return updated - - kwargs, ctrl_kwargs = partition( - lambda a: not a.keyword.value in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=predictionCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the prediction client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. - -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. 
-""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta1/setup.py b/owl-bot-staging/v1beta1/setup.py deleted file mode 100644 index 601333bef6..0000000000 --- a/owl-bot-staging/v1beta1/setup.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import io -import os -import setuptools # type: ignore - -version = '0.1.0' - -package_root = os.path.abspath(os.path.dirname(__file__)) - -readme_filename = os.path.join(package_root, 'README.rst') -with io.open(readme_filename, encoding='utf-8') as readme_file: - readme = readme_file.read() - -setuptools.setup( - name='google-cloud-aiplatform-v1beta1-schema-trainingjob-definition', - version=version, - long_description=readme, - packages=setuptools.PEP420PackageFinder.find(), - namespace_packages=('google', 'google.cloud', 'google.cloud.aiplatform', 'google.cloud.aiplatform.v1beta1', 'google.cloud.aiplatform.v1beta1.schema', 'google.cloud.aiplatform.v1beta1.schema.trainingjob'), - platforms='Posix; MacOS X; Windows', - include_package_data=True, - install_requires=( - 'google-api-core[grpc] >= 1.27.0, < 2.0.0dev', - 'libcst >= 0.2.5', - 'proto-plus >= 1.15.0', - 'packaging >= 14.3', ), - python_requires='>=3.6', - classifiers=[ - 'Development Status :: 3 - Alpha', - 'Intended Audience :: Developers', - 'Operating System :: OS Independent', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Topic :: Internet', - 'Topic :: Software Development :: Libraries :: Python Modules', - ], - zip_safe=False, -) diff --git a/owl-bot-staging/v1beta1/tests/__init__.py b/owl-bot-staging/v1beta1/tests/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1beta1/tests/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/tests/unit/__init__.py b/owl-bot-staging/v1beta1/tests/unit/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py deleted file mode 100644 index e66a15462d..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py +++ /dev/null @@ -1,3983 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.dataset_service import DatasetServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.dataset_service import DatasetServiceClient -from google.cloud.aiplatform_v1beta1.services.dataset_service import pagers -from google.cloud.aiplatform_v1beta1.services.dataset_service import transports -from google.cloud.aiplatform_v1beta1.services.dataset_service.transports.base import _GOOGLE_AUTH_VERSION -from google.cloud.aiplatform_v1beta1.types import annotation -from google.cloud.aiplatform_v1beta1.types import annotation_spec -from google.cloud.aiplatform_v1beta1.types import data_item -from google.cloud.aiplatform_v1beta1.types import dataset -from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset -from google.cloud.aiplatform_v1beta1.types import dataset_service -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import io -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). 
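-# The markers defined below gate individual tests on the installed google-auth
-# version; a minimal sketch of their intended use (the real tests below also
-# parametrize over client classes and transports):
-#
-#     @requires_google_auth_gte_1_25_0
-#     def test_new_scopes_behavior():
-#         ...  # exercises behavior only available on google-auth >= 1.25.0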
-requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert DatasetServiceClient._get_default_mtls_endpoint(None) is None - assert DatasetServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert DatasetServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert DatasetServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert DatasetServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert DatasetServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - DatasetServiceClient, - DatasetServiceAsyncClient, -]) -def test_dataset_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("client_class", [ - DatasetServiceClient, - DatasetServiceAsyncClient, -]) -def test_dataset_service_client_service_account_always_use_jwt(client_class): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.DatasetServiceGrpcTransport, "grpc"), - (transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_dataset_service_client_service_account_always_use_jwt_true(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - -@pytest.mark.parametrize("client_class", [ - DatasetServiceClient, - DatasetServiceAsyncClient, -]) -def test_dataset_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 
'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_dataset_service_client_get_transport_class(): - transport = DatasetServiceClient.get_transport_class() - available_transports = [ - transports.DatasetServiceGrpcTransport, - ] - assert transport in available_transports - - transport = DatasetServiceClient.get_transport_class("grpc") - assert transport == transports.DatasetServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc"), - (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(DatasetServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceClient)) -@mock.patch.object(DatasetServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceAsyncClient)) -def test_dataset_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(DatasetServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(DatasetServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". 
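-    # ("always" selects client.DEFAULT_MTLS_ENDPOINT even when no client
-    # certificate is configured; certificate-dependent endpoint switching is
-    # covered by the mtls_env_auto test below.)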
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc", "true"), - (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc", "false"), - (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(DatasetServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceClient)) -@mock.patch.object(DatasetServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_dataset_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. 
Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc"), - (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_dataset_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc"), - (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_dataset_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
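-    # (The path is passed through to the transport, which is responsible for
-    # loading the credentials, so the test only asserts pass-through.)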
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_dataset_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.dataset_service.transports.DatasetServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = DatasetServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_create_dataset(transport: str = 'grpc', request_type=dataset_service.CreateDatasetRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.CreateDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_dataset_from_dict(): - test_create_dataset(request_type=dict) - - -def test_create_dataset_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - client.create_dataset() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.CreateDatasetRequest() - - -@pytest.mark.asyncio -async def test_create_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.CreateDatasetRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. 
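-        # (FakeUnaryUnaryCall wraps the message so that awaiting the mocked
-        # stub yields it, mimicking a grpc.aio unary-unary call.)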
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.CreateDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_dataset_async_from_dict(): - await test_create_dataset_async(request_type=dict) - - -def test_create_dataset_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.CreateDatasetRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_dataset_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.CreateDatasetRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_dataset_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_dataset( - parent='parent_value', - dataset=gca_dataset.Dataset(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
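-        # (The flattened keyword arguments are copied onto a freshly built
-        # CreateDatasetRequest, so the assertions read fields of the request
-        # object itself rather than separate call arguments.)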
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].dataset == gca_dataset.Dataset(name='name_value') - - -def test_create_dataset_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_dataset( - dataset_service.CreateDatasetRequest(), - parent='parent_value', - dataset=gca_dataset.Dataset(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_dataset_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_dataset( - parent='parent_value', - dataset=gca_dataset.Dataset(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].dataset == gca_dataset.Dataset(name='name_value') - - -@pytest.mark.asyncio -async def test_create_dataset_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_dataset( - dataset_service.CreateDatasetRequest(), - parent='parent_value', - dataset=gca_dataset.Dataset(name='name_value'), - ) - - -def test_get_dataset(transport: str = 'grpc', request_type=dataset_service.GetDatasetRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset.Dataset( - name='name_value', - display_name='display_name_value', - metadata_schema_uri='metadata_schema_uri_value', - etag='etag_value', - ) - response = client.get_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.GetDatasetRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, dataset.Dataset) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.etag == 'etag_value' - - -def test_get_dataset_from_dict(): - test_get_dataset(request_type=dict) - - -def test_get_dataset_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - client.get_dataset() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.GetDatasetRequest() - - -@pytest.mark.asyncio -async def test_get_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.GetDatasetRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset( - name='name_value', - display_name='display_name_value', - metadata_schema_uri='metadata_schema_uri_value', - etag='etag_value', - )) - response = await client.get_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.GetDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, dataset.Dataset) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_dataset_async_from_dict(): - await test_get_dataset_async(request_type=dict) - - -def test_get_dataset_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.GetDatasetRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - call.return_value = dataset.Dataset() - client.get_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
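-        # (Routing parameters travel in the x-goog-request-params entry of the
-        # call's `metadata` kwarg, which is unpacked and inspected here.)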
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_dataset_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.GetDatasetRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset()) - await client.get_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_dataset_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset.Dataset() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_dataset( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_get_dataset_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_dataset( - dataset_service.GetDatasetRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_dataset_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset.Dataset() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_dataset( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_get_dataset_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
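-    # (The client cannot merge a full request object with individually
-    # flattened fields unambiguously, so it raises ValueError instead of
-    # guessing which value should win.)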
- with pytest.raises(ValueError): - await client.get_dataset( - dataset_service.GetDatasetRequest(), - name='name_value', - ) - - -def test_update_dataset(transport: str = 'grpc', request_type=dataset_service.UpdateDatasetRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_dataset.Dataset( - name='name_value', - display_name='display_name_value', - metadata_schema_uri='metadata_schema_uri_value', - etag='etag_value', - ) - response = client.update_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.UpdateDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_dataset.Dataset) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.etag == 'etag_value' - - -def test_update_dataset_from_dict(): - test_update_dataset(request_type=dict) - - -def test_update_dataset_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - client.update_dataset() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.UpdateDatasetRequest() - - -@pytest.mark.asyncio -async def test_update_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.UpdateDatasetRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset( - name='name_value', - display_name='display_name_value', - metadata_schema_uri='metadata_schema_uri_value', - etag='etag_value', - )) - response = await client.update_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.UpdateDatasetRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_dataset.Dataset) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_update_dataset_async_from_dict(): - await test_update_dataset_async(request_type=dict) - - -def test_update_dataset_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.UpdateDatasetRequest() - - request.dataset.name = 'dataset.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - call.return_value = gca_dataset.Dataset() - client.update_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'dataset.name=dataset.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_dataset_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.UpdateDatasetRequest() - - request.dataset.name = 'dataset.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) - await client.update_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'dataset.name=dataset.name/value', - ) in kw['metadata'] - - -def test_update_dataset_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_dataset.Dataset() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_dataset( - dataset=gca_dataset.Dataset(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].dataset == gca_dataset.Dataset(name='name_value') - assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) - - -def test_update_dataset_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_dataset( - dataset_service.UpdateDatasetRequest(), - dataset=gca_dataset.Dataset(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_dataset_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_dataset.Dataset() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_dataset( - dataset=gca_dataset.Dataset(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].dataset == gca_dataset.Dataset(name='name_value') - assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) - - -@pytest.mark.asyncio -async def test_update_dataset_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_dataset( - dataset_service.UpdateDatasetRequest(), - dataset=gca_dataset.Dataset(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_list_datasets(transport: str = 'grpc', request_type=dataset_service.ListDatasetsRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListDatasetsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_datasets(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListDatasetsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListDatasetsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_datasets_from_dict(): - test_list_datasets(request_type=dict) - - -def test_list_datasets_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
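-    # (__call__ is patched on the *type* of the stub because Python looks up
-    # special methods on the class, not the instance, so patching the instance
-    # would not intercept the invocation.)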
- with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - client.list_datasets() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListDatasetsRequest() - - -@pytest.mark.asyncio -async def test_list_datasets_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ListDatasetsRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDatasetsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_datasets(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListDatasetsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListDatasetsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_datasets_async_from_dict(): - await test_list_datasets_async(request_type=dict) - - -def test_list_datasets_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ListDatasetsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - call.return_value = dataset_service.ListDatasetsResponse() - client.list_datasets(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_datasets_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ListDatasetsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDatasetsResponse()) - await client.list_datasets(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_datasets_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListDatasetsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_datasets( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -def test_list_datasets_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_datasets( - dataset_service.ListDatasetsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_datasets_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListDatasetsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDatasetsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_datasets( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -@pytest.mark.asyncio -async def test_list_datasets_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_datasets( - dataset_service.ListDatasetsRequest(), - parent='parent_value', - ) - - -def test_list_datasets_pager(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Set the response to a series of pages. 
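-        # Each element of side_effect is consumed by one stub invocation, so
-        # the pager issues one RPC per page; the trailing RuntimeError would
-        # surface if the pager ever requested a page beyond those provided.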
- call.side_effect = ( - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - dataset.Dataset(), - ], - next_page_token='abc', - ), - dataset_service.ListDatasetsResponse( - datasets=[], - next_page_token='def', - ), - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - ], - next_page_token='ghi', - ), - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_datasets(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, dataset.Dataset) - for i in results) - -def test_list_datasets_pages(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - dataset.Dataset(), - ], - next_page_token='abc', - ), - dataset_service.ListDatasetsResponse( - datasets=[], - next_page_token='def', - ), - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - ], - next_page_token='ghi', - ), - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - ], - ), - RuntimeError, - ) - pages = list(client.list_datasets(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_datasets_async_pager(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - dataset.Dataset(), - ], - next_page_token='abc', - ), - dataset_service.ListDatasetsResponse( - datasets=[], - next_page_token='def', - ), - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - ], - next_page_token='ghi', - ), - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_datasets(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, dataset.Dataset) - for i in responses) - -@pytest.mark.asyncio -async def test_list_datasets_async_pages(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
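-        # The awaited paged call returns the AsyncPager itself; its `.pages`
-        # attribute is an async iterator that fetches one page per RPC when
-        # consumed with `async for`.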
- call.side_effect = ( - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - dataset.Dataset(), - ], - next_page_token='abc', - ), - dataset_service.ListDatasetsResponse( - datasets=[], - next_page_token='def', - ), - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - ], - next_page_token='ghi', - ), - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_datasets(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_dataset(transport: str = 'grpc', request_type=dataset_service.DeleteDatasetRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.DeleteDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_dataset_from_dict(): - test_delete_dataset(request_type=dict) - - -def test_delete_dataset_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - client.delete_dataset() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.DeleteDatasetRequest() - - -@pytest.mark.asyncio -async def test_delete_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.DeleteDatasetRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.DeleteDatasetRequest() - - # Establish that the response is the type that we expect. 
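-    # DeleteDataset is a long-running operation: the stub hands back an
-    # operations_pb2.Operation, which the client surfaces as an api_core future.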
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_dataset_async_from_dict(): - await test_delete_dataset_async(request_type=dict) - - -def test_delete_dataset_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.DeleteDatasetRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_dataset_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.DeleteDatasetRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_dataset_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_dataset( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_delete_dataset_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_dataset( - dataset_service.DeleteDatasetRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_dataset_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
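-    # Patching type(client.transport.delete_dataset).__call__ intercepts the
-    # request at the gRPC stub boundary, so no network traffic is generated.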
- with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_dataset( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_delete_dataset_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_dataset( - dataset_service.DeleteDatasetRequest(), - name='name_value', - ) - - -def test_import_data(transport: str = 'grpc', request_type=dataset_service.ImportDataRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.import_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ImportDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_import_data_from_dict(): - test_import_data(request_type=dict) - - -def test_import_data_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - client.import_data() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ImportDataRequest() - - -@pytest.mark.asyncio -async def test_import_data_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ImportDataRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - # Designate an appropriate return value for the call. 
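-        # FakeUnaryUnaryCall wraps the response so that awaiting the mocked
-        # stub yields it, mimicking a real unary-unary gRPC call object.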
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.import_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ImportDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_import_data_async_from_dict(): - await test_import_data_async(request_type=dict) - - -def test_import_data_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ImportDataRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.import_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_import_data_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ImportDataRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.import_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_import_data_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.import_data( - name='name_value', - import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
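-        # The flattened kwargs are merged into a single ImportDataRequest
-        # behind the scenes, so the assertions inspect args[0] field by field.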
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-        assert args[0].import_configs == [dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))]
-
-
-def test_import_data_flattened_error():
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.import_data(
-            dataset_service.ImportDataRequest(),
-            name='name_value',
-            import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))],
-        )
-
-
-@pytest.mark.asyncio
-async def test_import_data_flattened_async():
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.import_data),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/op')
-
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.import_data(
-            name='name_value',
-            import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))],
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-        assert args[0].import_configs == [dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))]
-
-
-@pytest.mark.asyncio
-async def test_import_data_flattened_error_async():
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.import_data(
-            dataset_service.ImportDataRequest(),
-            name='name_value',
-            import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))],
-        )
-
-
-def test_export_data(transport: str = 'grpc', request_type=dataset_service.ExportDataRequest):
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.export_data),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/spam')
-        response = client.export_data(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == dataset_service.ExportDataRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, future.Future)
-
-
-def test_export_data_from_dict():
-    test_export_data(request_type=dict)
-
-
-def test_export_data_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.export_data),
-            '__call__') as call:
-        client.export_data()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == dataset_service.ExportDataRequest()
-
-
-@pytest.mark.asyncio
-async def test_export_data_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ExportDataRequest):
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.export_data),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        response = await client.export_data(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == dataset_service.ExportDataRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, future.Future)
-
-
-@pytest.mark.asyncio
-async def test_export_data_async_from_dict():
-    await test_export_data_async(request_type=dict)
-
-
-def test_export_data_field_headers():
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = dataset_service.ExportDataRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.export_data),
-            '__call__') as call:
-        call.return_value = operations_pb2.Operation(name='operations/op')
-        client.export_data(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_export_data_field_headers_async():
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = dataset_service.ExportDataRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.export_data),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
-        await client.export_data(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
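-    # The request's resource name is mirrored into the x-goog-request-params
-    # metadata entry, e.g. ('x-goog-request-params', 'name=name/value'), which
-    # lets the server route the call without parsing the request payload.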
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_export_data_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.export_data( - name='name_value', - export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - assert args[0].export_config == dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) - - -def test_export_data_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.export_data( - dataset_service.ExportDataRequest(), - name='name_value', - export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - -@pytest.mark.asyncio -async def test_export_data_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.export_data( - name='name_value', - export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - assert args[0].export_config == dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) - - -@pytest.mark.asyncio -async def test_export_data_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
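-    # Callers must pick exactly one style, e.g. either
-    #   client.export_data(request=dataset_service.ExportDataRequest(name=...))
-    # or
-    #   client.export_data(name=..., export_config=...)
-    # Mixing the two, as exercised below, raises ValueError.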
- with pytest.raises(ValueError): - await client.export_data( - dataset_service.ExportDataRequest(), - name='name_value', - export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - -def test_list_data_items(transport: str = 'grpc', request_type=dataset_service.ListDataItemsRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListDataItemsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_data_items(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListDataItemsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListDataItemsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_data_items_from_dict(): - test_list_data_items(request_type=dict) - - -def test_list_data_items_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - client.list_data_items() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListDataItemsRequest() - - -@pytest.mark.asyncio -async def test_list_data_items_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ListDataItemsRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDataItemsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_data_items(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListDataItemsRequest() - - # Establish that the response is the type that we expect. 
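-    # List methods return pager objects rather than raw responses; iterating
-    # one (e.g. `async for item in response:`) fetches further pages on demand.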
- assert isinstance(response, pagers.ListDataItemsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_data_items_async_from_dict(): - await test_list_data_items_async(request_type=dict) - - -def test_list_data_items_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ListDataItemsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - call.return_value = dataset_service.ListDataItemsResponse() - client.list_data_items(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_data_items_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ListDataItemsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDataItemsResponse()) - await client.list_data_items(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_data_items_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListDataItemsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_data_items( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -def test_list_data_items_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_data_items( - dataset_service.ListDataItemsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_data_items_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListDataItemsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDataItemsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_data_items( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -@pytest.mark.asyncio -async def test_list_data_items_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_data_items( - dataset_service.ListDataItemsRequest(), - parent='parent_value', - ) - - -def test_list_data_items_pager(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - data_item.DataItem(), - ], - next_page_token='abc', - ), - dataset_service.ListDataItemsResponse( - data_items=[], - next_page_token='def', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - ], - next_page_token='ghi', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_data_items(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, data_item.DataItem) - for i in results) - -def test_list_data_items_pages(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - # Set the response to a series of pages. 
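-        # An empty next_page_token marks the final page, which is why the
-        # last expected token in the zip() below is ''.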
- call.side_effect = ( - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - data_item.DataItem(), - ], - next_page_token='abc', - ), - dataset_service.ListDataItemsResponse( - data_items=[], - next_page_token='def', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - ], - next_page_token='ghi', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - ], - ), - RuntimeError, - ) - pages = list(client.list_data_items(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_data_items_async_pager(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - data_item.DataItem(), - ], - next_page_token='abc', - ), - dataset_service.ListDataItemsResponse( - data_items=[], - next_page_token='def', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - ], - next_page_token='ghi', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_data_items(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, data_item.DataItem) - for i in responses) - -@pytest.mark.asyncio -async def test_list_data_items_async_pages(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - data_item.DataItem(), - ], - next_page_token='abc', - ), - dataset_service.ListDataItemsResponse( - data_items=[], - next_page_token='def', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - ], - next_page_token='ghi', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_data_items(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_get_annotation_spec(transport: str = 'grpc', request_type=dataset_service.GetAnnotationSpecRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
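-    # GetAnnotationSpec is a plain unary method (no LRO, no paging), so the
-    # mocked stub returns the AnnotationSpec message directly.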
- with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = annotation_spec.AnnotationSpec( - name='name_value', - display_name='display_name_value', - etag='etag_value', - ) - response = client.get_annotation_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.GetAnnotationSpecRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, annotation_spec.AnnotationSpec) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.etag == 'etag_value' - - -def test_get_annotation_spec_from_dict(): - test_get_annotation_spec(request_type=dict) - - -def test_get_annotation_spec_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - client.get_annotation_spec() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.GetAnnotationSpecRequest() - - -@pytest.mark.asyncio -async def test_get_annotation_spec_async(transport: str = 'grpc_asyncio', request_type=dataset_service.GetAnnotationSpecRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec( - name='name_value', - display_name='display_name_value', - etag='etag_value', - )) - response = await client.get_annotation_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.GetAnnotationSpecRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, annotation_spec.AnnotationSpec) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_annotation_spec_async_from_dict(): - await test_get_annotation_spec_async(request_type=dict) - - -def test_get_annotation_spec_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.GetAnnotationSpecRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - call.return_value = annotation_spec.AnnotationSpec() - client.get_annotation_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_annotation_spec_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.GetAnnotationSpecRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec()) - await client.get_annotation_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_annotation_spec_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = annotation_spec.AnnotationSpec() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_annotation_spec( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_get_annotation_spec_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_annotation_spec( - dataset_service.GetAnnotationSpecRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_annotation_spec_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = annotation_spec.AnnotationSpec() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_annotation_spec( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_get_annotation_spec_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_annotation_spec( - dataset_service.GetAnnotationSpecRequest(), - name='name_value', - ) - - -def test_list_annotations(transport: str = 'grpc', request_type=dataset_service.ListAnnotationsRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListAnnotationsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_annotations(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListAnnotationsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListAnnotationsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_annotations_from_dict(): - test_list_annotations(request_type=dict) - - -def test_list_annotations_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - client.list_annotations() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListAnnotationsRequest() - - -@pytest.mark.asyncio -async def test_list_annotations_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ListAnnotationsRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListAnnotationsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_annotations(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListAnnotationsRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListAnnotationsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_annotations_async_from_dict(): - await test_list_annotations_async(request_type=dict) - - -def test_list_annotations_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ListAnnotationsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - call.return_value = dataset_service.ListAnnotationsResponse() - client.list_annotations(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_annotations_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ListAnnotationsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListAnnotationsResponse()) - await client.list_annotations(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_annotations_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListAnnotationsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_annotations( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -def test_list_annotations_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_annotations( - dataset_service.ListAnnotationsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_annotations_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListAnnotationsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListAnnotationsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_annotations( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -@pytest.mark.asyncio -async def test_list_annotations_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_annotations( - dataset_service.ListAnnotationsRequest(), - parent='parent_value', - ) - - -def test_list_annotations_pager(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - annotation.Annotation(), - ], - next_page_token='abc', - ), - dataset_service.ListAnnotationsResponse( - annotations=[], - next_page_token='def', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - ], - next_page_token='ghi', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_annotations(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, annotation.Annotation) - for i in results) - -def test_list_annotations_pages(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - annotation.Annotation(), - ], - next_page_token='abc', - ), - dataset_service.ListAnnotationsResponse( - annotations=[], - next_page_token='def', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - ], - next_page_token='ghi', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - ], - ), - RuntimeError, - ) - pages = list(client.list_annotations(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_annotations_async_pager(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - annotation.Annotation(), - ], - next_page_token='abc', - ), - dataset_service.ListAnnotationsResponse( - annotations=[], - next_page_token='def', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - ], - next_page_token='ghi', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_annotations(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, annotation.Annotation) - for i in responses) - -@pytest.mark.asyncio -async def test_list_annotations_async_pages(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - annotation.Annotation(), - ], - next_page_token='abc', - ), - dataset_service.ListAnnotationsResponse( - annotations=[], - next_page_token='def', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - ], - next_page_token='ghi', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_annotations(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. 
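-    # A fully-constructed transport already carries its own credentials, so
-    # supplying a second, possibly conflicting set on the client is rejected.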
- transport = transports.DatasetServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.DatasetServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = DatasetServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.DatasetServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = DatasetServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.DatasetServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = DatasetServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.DatasetServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.DatasetServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.DatasetServiceGrpcTransport, - transports.DatasetServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.DatasetServiceGrpcTransport, - ) - -def test_dataset_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.DatasetServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_dataset_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1beta1.services.dataset_service.transports.DatasetServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.DatasetServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
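-    # The base transport is an abstract interface; concrete transports such
-    # as DatasetServiceGrpcTransport override each of the methods named here.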
- methods = ( - 'create_dataset', - 'get_dataset', - 'update_dataset', - 'list_datasets', - 'delete_dataset', - 'import_data', - 'export_data', - 'list_data_items', - 'get_annotation_spec', - 'list_annotations', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -@requires_google_auth_gte_1_25_0 -def test_dataset_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.dataset_service.transports.DatasetServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.DatasetServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@requires_google_auth_lt_1_25_0 -def test_dataset_service_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.dataset_service.transports.DatasetServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.DatasetServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - ), - quota_project_id="octopus", - ) - - -def test_dataset_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.dataset_service.transports.DatasetServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.DatasetServiceTransport() - adc.assert_called_once() - - -@requires_google_auth_gte_1_25_0 -def test_dataset_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - DatasetServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@requires_google_auth_lt_1_25_0 -def test_dataset_service_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. 
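-    # (Note: Application Default Credentials are resolved via
-    # google.auth.default(), which is why these tests patch that function
-    # rather than any client-level setting.)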
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - DatasetServiceClient() - adc.assert_called_once_with( - scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.DatasetServiceGrpcTransport, - transports.DatasetServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_gte_1_25_0 -def test_dataset_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.DatasetServiceGrpcTransport, - transports.DatasetServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_dataset_service_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with(scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.DatasetServiceGrpcTransport, grpc_helpers), - (transports.DatasetServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_dataset_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.DatasetServiceGrpcTransport, transports.DatasetServiceGrpcAsyncIOTransport]) -def test_dataset_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. 
-    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
-        mock_ssl_channel_creds = mock.Mock()
-        transport_class(
-            host="squid.clam.whelk",
-            credentials=cred,
-            ssl_channel_credentials=mock_ssl_channel_creds
-        )
-        mock_create_channel.assert_called_once_with(
-            "squid.clam.whelk:443",
-            credentials=cred,
-            credentials_file=None,
-            scopes=None,
-            ssl_credentials=mock_ssl_channel_creds,
-            quota_project_id=None,
-            options=[
-                ("grpc.max_send_message_length", -1),
-                ("grpc.max_receive_message_length", -1),
-            ],
-        )
-
-    # Check that if ssl_channel_credentials is not provided, then
-    # client_cert_source_for_mtls is used.
-    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
-        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
-            transport_class(
-                credentials=cred,
-                client_cert_source_for_mtls=client_cert_source_callback
-            )
-            expected_cert, expected_key = client_cert_source_callback()
-            mock_ssl_cred.assert_called_once_with(
-                certificate_chain=expected_cert,
-                private_key=expected_key
-            )
-
-
-def test_dataset_service_host_no_port():
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
-    )
-    assert client.transport._host == 'aiplatform.googleapis.com:443'
-
-
-def test_dataset_service_host_with_port():
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
-    )
-    assert client.transport._host == 'aiplatform.googleapis.com:8000'
-
-def test_dataset_service_grpc_transport_channel():
-    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
-
-    # Check that channel is used if provided.
-    transport = transports.DatasetServiceGrpcTransport(
-        host="squid.clam.whelk",
-        channel=channel,
-    )
-    assert transport.grpc_channel == channel
-    assert transport._host == "squid.clam.whelk:443"
-    assert transport._ssl_channel_credentials is None
-
-
-def test_dataset_service_grpc_asyncio_transport_channel():
-    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
-
-    # Check that channel is used if provided.
-    transport = transports.DatasetServiceGrpcAsyncIOTransport(
-        host="squid.clam.whelk",
-        channel=channel,
-    )
-    assert transport.grpc_channel == channel
-    assert transport._host == "squid.clam.whelk:443"
-    assert transport._ssl_channel_credentials is None
-
-
-# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
-# removed from grpc/grpc_asyncio transport constructor.
-@pytest.mark.parametrize("transport_class", [transports.DatasetServiceGrpcTransport, transports.DatasetServiceGrpcAsyncIOTransport])
-def test_dataset_service_transport_channel_mtls_with_client_cert_source(
-    transport_class
-):
-    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
-        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
-            mock_ssl_cred = mock.Mock()
-            grpc_ssl_channel_cred.return_value = mock_ssl_cred
-
-            mock_grpc_channel = mock.Mock()
-            grpc_create_channel.return_value = mock_grpc_channel
-
-            cred = ga_credentials.AnonymousCredentials()
-            with pytest.warns(DeprecationWarning):
-                with mock.patch.object(google.auth, 'default') as adc:
-                    adc.return_value = (cred, None)
-                    transport = transport_class(
-                        host="squid.clam.whelk",
-                        api_mtls_endpoint="mtls.squid.clam.whelk",
-                        client_cert_source=client_cert_source_callback,
-                    )
-                    adc.assert_called_once()
-
-            grpc_ssl_channel_cred.assert_called_once_with(
-                certificate_chain=b"cert bytes", private_key=b"key bytes"
-            )
-            grpc_create_channel.assert_called_once_with(
-                "mtls.squid.clam.whelk:443",
-                credentials=cred,
-                credentials_file=None,
-                scopes=None,
-                ssl_credentials=mock_ssl_cred,
-                quota_project_id=None,
-                options=[
-                    ("grpc.max_send_message_length", -1),
-                    ("grpc.max_receive_message_length", -1),
-                ],
-            )
-            assert transport.grpc_channel == mock_grpc_channel
-            assert transport._ssl_channel_credentials == mock_ssl_cred
-
-
-# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
-# removed from grpc/grpc_asyncio transport constructor.
-@pytest.mark.parametrize("transport_class", [transports.DatasetServiceGrpcTransport, transports.DatasetServiceGrpcAsyncIOTransport])
-def test_dataset_service_transport_channel_mtls_with_adc(
-    transport_class
-):
-    mock_ssl_cred = mock.Mock()
-    with mock.patch.multiple(
-        "google.auth.transport.grpc.SslCredentials",
-        __init__=mock.Mock(return_value=None),
-        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
-    ):
-        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
-            mock_grpc_channel = mock.Mock()
-            grpc_create_channel.return_value = mock_grpc_channel
-            mock_cred = mock.Mock()
-
-            with pytest.warns(DeprecationWarning):
-                transport = transport_class(
-                    host="squid.clam.whelk",
-                    credentials=mock_cred,
-                    api_mtls_endpoint="mtls.squid.clam.whelk",
-                    client_cert_source=None,
-                )
-
-            grpc_create_channel.assert_called_once_with(
-                "mtls.squid.clam.whelk:443",
-                credentials=mock_cred,
-                credentials_file=None,
-                scopes=None,
-                ssl_credentials=mock_ssl_cred,
-                quota_project_id=None,
-                options=[
-                    ("grpc.max_send_message_length", -1),
-                    ("grpc.max_receive_message_length", -1),
-                ],
-            )
-            assert transport.grpc_channel == mock_grpc_channel
-
-
-def test_dataset_service_grpc_lro_client():
-    client = DatasetServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-    transport = client.transport
-
-    # Ensure that we have an api-core operations client.
-    assert isinstance(
-        transport.operations_client,
-        operations_v1.OperationsClient,
-    )
-
-    # Ensure that subsequent calls to the property return the exact same object.
-    assert transport.operations_client is transport.operations_client
-
-
-def test_dataset_service_grpc_lro_async_client():
-    client = DatasetServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc_asyncio',
-    )
-    transport = client.transport
-
-    # Ensure that we have an api-core operations client.
-    assert isinstance(
-        transport.operations_client,
-        operations_v1.OperationsAsyncClient,
-    )
-
-    # Ensure that subsequent calls to the property return the exact same object.
-    assert transport.operations_client is transport.operations_client
-
-
-def test_annotation_path():
-    project = "squid"
-    location = "clam"
-    dataset = "whelk"
-    data_item = "octopus"
-    annotation = "oyster"
-    expected = "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}/annotations/{annotation}".format(project=project, location=location, dataset=dataset, data_item=data_item, annotation=annotation, )
-    actual = DatasetServiceClient.annotation_path(project, location, dataset, data_item, annotation)
-    assert expected == actual
-
-
-def test_parse_annotation_path():
-    expected = {
-        "project": "nudibranch",
-        "location": "cuttlefish",
-        "dataset": "mussel",
-        "data_item": "winkle",
-        "annotation": "nautilus",
-    }
-    path = DatasetServiceClient.annotation_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = DatasetServiceClient.parse_annotation_path(path)
-    assert expected == actual
-
-def test_annotation_spec_path():
-    project = "scallop"
-    location = "abalone"
-    dataset = "squid"
-    annotation_spec = "clam"
-    expected = "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(project=project, location=location, dataset=dataset, annotation_spec=annotation_spec, )
-    actual = DatasetServiceClient.annotation_spec_path(project, location, dataset, annotation_spec)
-    assert expected == actual
-
-
-def test_parse_annotation_spec_path():
-    expected = {
-        "project": "whelk",
-        "location": "octopus",
-        "dataset": "oyster",
-        "annotation_spec": "nudibranch",
-    }
-    path = DatasetServiceClient.annotation_spec_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = DatasetServiceClient.parse_annotation_spec_path(path)
-    assert expected == actual
-
-def test_data_item_path():
-    project = "cuttlefish"
-    location = "mussel"
-    dataset = "winkle"
-    data_item = "nautilus"
-    expected = "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}".format(project=project, location=location, dataset=dataset, data_item=data_item, )
-    actual = DatasetServiceClient.data_item_path(project, location, dataset, data_item)
-    assert expected == actual
-
-
-def test_parse_data_item_path():
-    expected = {
-        "project": "scallop",
-        "location": "abalone",
-        "dataset": "squid",
-        "data_item": "clam",
-    }
-    path = DatasetServiceClient.data_item_path(**expected)
-
-    # Check that the path construction is reversible.
- actual = DatasetServiceClient.parse_data_item_path(path) - assert expected == actual - -def test_dataset_path(): - project = "whelk" - location = "octopus" - dataset = "oyster" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - actual = DatasetServiceClient.dataset_path(project, location, dataset) - assert expected == actual - - -def test_parse_dataset_path(): - expected = { - "project": "nudibranch", - "location": "cuttlefish", - "dataset": "mussel", - } - path = DatasetServiceClient.dataset_path(**expected) - - # Check that the path construction is reversible. - actual = DatasetServiceClient.parse_dataset_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "winkle" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = DatasetServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "nautilus", - } - path = DatasetServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = DatasetServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "scallop" - expected = "folders/{folder}".format(folder=folder, ) - actual = DatasetServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "abalone", - } - path = DatasetServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = DatasetServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "squid" - expected = "organizations/{organization}".format(organization=organization, ) - actual = DatasetServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "clam", - } - path = DatasetServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = DatasetServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "whelk" - expected = "projects/{project}".format(project=project, ) - actual = DatasetServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "octopus", - } - path = DatasetServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = DatasetServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "oyster" - location = "nudibranch" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = DatasetServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "cuttlefish", - "location": "mussel", - } - path = DatasetServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = DatasetServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.DatasetServiceTransport, '_prep_wrapped_messages') as prep: - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.DatasetServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = DatasetServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py deleted file mode 100644 index 6d26f8c489..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py +++ /dev/null @@ -1,2868 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.endpoint_service import EndpointServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.endpoint_service import EndpointServiceClient -from google.cloud.aiplatform_v1beta1.services.endpoint_service import pagers -from google.cloud.aiplatform_v1beta1.services.endpoint_service import transports -from google.cloud.aiplatform_v1beta1.services.endpoint_service.transports.base import _GOOGLE_AUTH_VERSION -from google.cloud.aiplatform_v1beta1.types import accelerator_type -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import endpoint -from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint -from google.cloud.aiplatform_v1beta1.types import endpoint_service -from google.cloud.aiplatform_v1beta1.types import explanation -from google.cloud.aiplatform_v1beta1.types import explanation_metadata -from google.cloud.aiplatform_v1beta1.types import machine_resources -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.longrunning import operations_pb2 
-from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). -requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert EndpointServiceClient._get_default_mtls_endpoint(None) is None - assert EndpointServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert EndpointServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert EndpointServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert EndpointServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert EndpointServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - EndpointServiceClient, - EndpointServiceAsyncClient, -]) -def test_endpoint_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("client_class", [ - EndpointServiceClient, - EndpointServiceAsyncClient, -]) -def test_endpoint_service_client_service_account_always_use_jwt(client_class): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.EndpointServiceGrpcTransport, "grpc"), - (transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_endpoint_service_client_service_account_always_use_jwt_true(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', 
create=True) as use_jwt:
-        creds = service_account.Credentials(None, None, None)
-        transport = transport_class(credentials=creds, always_use_jwt_access=True)
-        use_jwt.assert_called_once_with(True)
-
-
-@pytest.mark.parametrize("client_class", [
-    EndpointServiceClient,
-    EndpointServiceAsyncClient,
-])
-def test_endpoint_service_client_from_service_account_file(client_class):
-    creds = ga_credentials.AnonymousCredentials()
-    with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
-        factory.return_value = creds
-        client = client_class.from_service_account_file("dummy/file/path.json")
-        assert client.transport._credentials == creds
-        assert isinstance(client, client_class)
-
-        client = client_class.from_service_account_json("dummy/file/path.json")
-        assert client.transport._credentials == creds
-        assert isinstance(client, client_class)
-
-        assert client.transport._host == 'aiplatform.googleapis.com:443'
-
-
-def test_endpoint_service_client_get_transport_class():
-    transport = EndpointServiceClient.get_transport_class()
-    available_transports = [
-        transports.EndpointServiceGrpcTransport,
-    ]
-    assert transport in available_transports
-
-    transport = EndpointServiceClient.get_transport_class("grpc")
-    assert transport == transports.EndpointServiceGrpcTransport
-
-
-@pytest.mark.parametrize("client_class,transport_class,transport_name", [
-    (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"),
-    (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"),
-])
-@mock.patch.object(EndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceClient))
-@mock.patch.object(EndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceAsyncClient))
-def test_endpoint_service_client_client_options(client_class, transport_class, transport_name):
-    # Check that if a transport instance is provided, no new transport is created.
-    with mock.patch.object(EndpointServiceClient, 'get_transport_class') as gtc:
-        transport = transport_class(
-            credentials=ga_credentials.AnonymousCredentials()
-        )
-        client = client_class(transport=transport)
-        gtc.assert_not_called()
-
-    # Check that if the transport is named via a str, a new one will be created.
-    with mock.patch.object(EndpointServiceClient, 'get_transport_class') as gtc:
-        client = client_class(transport=transport_name)
-        gtc.assert_called()
-
-    # Check the case api_endpoint is provided.
-    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
-    with mock.patch.object(transport_class, '__init__') as patched:
-        patched.return_value = None
-        client = client_class(client_options=options)
-        patched.assert_called_once_with(
-            credentials=None,
-            credentials_file=None,
-            host="squid.clam.whelk",
-            scopes=None,
-            client_cert_source_for_mtls=None,
-            quota_project_id=None,
-            client_info=transports.base.DEFAULT_CLIENT_INFO,
-        )
-
-    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
-    # "never".
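-    # (Note: GOOGLE_API_USE_MTLS_ENDPOINT accepts "never", "always", and
-    # "auto"; each case below asserts which host the transport ends up
-    # receiving for that setting.)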
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc", "true"), - (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc", "false"), - (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(EndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceClient)) -@mock.patch.object(EndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_endpoint_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"), - (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_endpoint_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. 
- options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"), - (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_endpoint_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. - options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_endpoint_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.endpoint_service.transports.EndpointServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = EndpointServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_create_endpoint(transport: str = 'grpc', request_type=endpoint_service.CreateEndpointRequest): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.CreateEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_endpoint_from_dict(): - test_create_endpoint(request_type=dict) - - -def test_create_endpoint_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
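-    # (Note: patching '__call__' on type(...) intercepts invocations of the
-    # stub method cached on the transport without replacing the stub itself.)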
- with mock.patch.object( - type(client.transport.create_endpoint), - '__call__') as call: - client.create_endpoint() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.CreateEndpointRequest() - - -@pytest.mark.asyncio -async def test_create_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.CreateEndpointRequest): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.CreateEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_endpoint_async_from_dict(): - await test_create_endpoint_async(request_type=dict) - - -def test_create_endpoint_field_headers(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.CreateEndpointRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_endpoint), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_endpoint_field_headers_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.CreateEndpointRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_endpoint), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_create_endpoint_flattened():
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_endpoint),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/op')
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.create_endpoint(
-            parent='parent_value',
-            endpoint=gca_endpoint.Endpoint(name='name_value'),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-        assert args[0].endpoint == gca_endpoint.Endpoint(name='name_value')
-
-
-def test_create_endpoint_flattened_error():
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.create_endpoint(
-            endpoint_service.CreateEndpointRequest(),
-            parent='parent_value',
-            endpoint=gca_endpoint.Endpoint(name='name_value'),
-        )
-
-
-@pytest.mark.asyncio
-async def test_create_endpoint_flattened_async():
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_endpoint),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.create_endpoint(
-            parent='parent_value',
-            endpoint=gca_endpoint.Endpoint(name='name_value'),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-        assert args[0].endpoint == gca_endpoint.Endpoint(name='name_value')
-
-
-@pytest.mark.asyncio
-async def test_create_endpoint_flattened_error_async():
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.create_endpoint(
-            endpoint_service.CreateEndpointRequest(),
-            parent='parent_value',
-            endpoint=gca_endpoint.Endpoint(name='name_value'),
-        )
-
-
-def test_get_endpoint(transport: str = 'grpc', request_type=endpoint_service.GetEndpointRequest):
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_endpoint),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = endpoint.Endpoint(
-            name='name_value',
-            display_name='display_name_value',
-            description='description_value',
-            etag='etag_value',
-        )
-        response = client.get_endpoint(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == endpoint_service.GetEndpointRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, endpoint.Endpoint)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.description == 'description_value'
-    assert response.etag == 'etag_value'
-
-
-def test_get_endpoint_from_dict():
-    test_get_endpoint(request_type=dict)
-
-
-def test_get_endpoint_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_endpoint),
-            '__call__') as call:
-        client.get_endpoint()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == endpoint_service.GetEndpointRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.GetEndpointRequest):
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_endpoint),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint(
-            name='name_value',
-            display_name='display_name_value',
-            description='description_value',
-            etag='etag_value',
-        ))
-        response = await client.get_endpoint(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == endpoint_service.GetEndpointRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, endpoint.Endpoint)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.description == 'description_value'
-    assert response.etag == 'etag_value'
-
-
-@pytest.mark.asyncio
-async def test_get_endpoint_async_from_dict():
-    await test_get_endpoint_async(request_type=dict)
-
-
-def test_get_endpoint_field_headers():
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = endpoint_service.GetEndpointRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_endpoint),
-            '__call__') as call:
-        call.return_value = endpoint.Endpoint()
-        client.get_endpoint(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_get_endpoint_field_headers_async():
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = endpoint_service.GetEndpointRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_endpoint),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint())
-        await client.get_endpoint(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_get_endpoint_flattened():
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_endpoint),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = endpoint.Endpoint()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.get_endpoint(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_get_endpoint_flattened_error():
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.get_endpoint(
-            endpoint_service.GetEndpointRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_get_endpoint_flattened_async():
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_endpoint),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.get_endpoint(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_get_endpoint_flattened_error_async():
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.get_endpoint(
-            endpoint_service.GetEndpointRequest(),
-            name='name_value',
-        )
-
-
-def test_list_endpoints(transport: str = 'grpc', request_type=endpoint_service.ListEndpointsRequest):
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_endpoints),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = endpoint_service.ListEndpointsResponse(
-            next_page_token='next_page_token_value',
-        )
-        response = client.list_endpoints(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == endpoint_service.ListEndpointsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListEndpointsPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_endpoints_from_dict():
-    test_list_endpoints(request_type=dict)
-
-
-def test_list_endpoints_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_endpoints),
-            '__call__') as call:
-        client.list_endpoints()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == endpoint_service.ListEndpointsRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_endpoints_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.ListEndpointsRequest):
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_endpoints),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint_service.ListEndpointsResponse(
-            next_page_token='next_page_token_value',
-        ))
-        response = await client.list_endpoints(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == endpoint_service.ListEndpointsRequest()
-
-    # Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ListEndpointsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_endpoints_async_from_dict(): - await test_list_endpoints_async(request_type=dict) - - -def test_list_endpoints_field_headers(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.ListEndpointsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_endpoints), - '__call__') as call: - call.return_value = endpoint_service.ListEndpointsResponse() - client.list_endpoints(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_endpoints_field_headers_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.ListEndpointsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_endpoints), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint_service.ListEndpointsResponse()) - await client.list_endpoints(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_endpoints_flattened(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_endpoints), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = endpoint_service.ListEndpointsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_endpoints( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -def test_list_endpoints_flattened_error(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
-    with pytest.raises(ValueError):
-        client.list_endpoints(
-            endpoint_service.ListEndpointsRequest(),
-            parent='parent_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_list_endpoints_flattened_async():
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_endpoints),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint_service.ListEndpointsResponse())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.list_endpoints(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_endpoints_flattened_error_async():
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.list_endpoints(
-            endpoint_service.ListEndpointsRequest(),
-            parent='parent_value',
-        )
-
-
-def test_list_endpoints_pager():
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_endpoints),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[
-                    endpoint.Endpoint(),
-                    endpoint.Endpoint(),
-                    endpoint.Endpoint(),
-                ],
-                next_page_token='abc',
-            ),
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[],
-                next_page_token='def',
-            ),
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[
-                    endpoint.Endpoint(),
-                ],
-                next_page_token='ghi',
-            ),
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[
-                    endpoint.Endpoint(),
-                    endpoint.Endpoint(),
-                ],
-            ),
-            RuntimeError,
-        )
-
-        metadata = ()
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ('parent', ''),
-            )),
-        )
-        pager = client.list_endpoints(request={})
-
-        assert pager._metadata == metadata
-
-        results = [i for i in pager]
-        assert len(results) == 6
-        assert all(isinstance(i, endpoint.Endpoint)
-                   for i in results)
-
-def test_list_endpoints_pages():
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_endpoints),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[
-                    endpoint.Endpoint(),
-                    endpoint.Endpoint(),
-                    endpoint.Endpoint(),
-                ],
-                next_page_token='abc',
-            ),
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[],
-                next_page_token='def',
-            ),
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[
-                    endpoint.Endpoint(),
-                ],
-                next_page_token='ghi',
-            ),
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[
-                    endpoint.Endpoint(),
-                    endpoint.Endpoint(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = list(client.list_endpoints(request={}).pages)
-        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_endpoints_async_pager():
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_endpoints),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[
-                    endpoint.Endpoint(),
-                    endpoint.Endpoint(),
-                    endpoint.Endpoint(),
-                ],
-                next_page_token='abc',
-            ),
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[],
-                next_page_token='def',
-            ),
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[
-                    endpoint.Endpoint(),
-                ],
-                next_page_token='ghi',
-            ),
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[
-                    endpoint.Endpoint(),
-                    endpoint.Endpoint(),
-                ],
-            ),
-            RuntimeError,
-        )
-        async_pager = await client.list_endpoints(request={})
-        assert async_pager.next_page_token == 'abc'
-        responses = []
-        async for response in async_pager:
-            responses.append(response)
-
-        assert len(responses) == 6
-        assert all(isinstance(i, endpoint.Endpoint)
-                   for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_endpoints_async_pages():
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_endpoints),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[
-                    endpoint.Endpoint(),
-                    endpoint.Endpoint(),
-                    endpoint.Endpoint(),
-                ],
-                next_page_token='abc',
-            ),
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[],
-                next_page_token='def',
-            ),
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[
-                    endpoint.Endpoint(),
-                ],
-                next_page_token='ghi',
-            ),
-            endpoint_service.ListEndpointsResponse(
-                endpoints=[
-                    endpoint.Endpoint(),
-                    endpoint.Endpoint(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = []
-        async for page_ in (await client.list_endpoints(request={})).pages:
-            pages.append(page_)
-        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-def test_update_endpoint(transport: str = 'grpc', request_type=endpoint_service.UpdateEndpointRequest):
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_endpoint),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = gca_endpoint.Endpoint(
-            name='name_value',
-            display_name='display_name_value',
-            description='description_value',
-            etag='etag_value',
-        )
-        response = client.update_endpoint(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == endpoint_service.UpdateEndpointRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, gca_endpoint.Endpoint)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.description == 'description_value'
-    assert response.etag == 'etag_value'
-
-
-def test_update_endpoint_from_dict():
-    test_update_endpoint(request_type=dict)
-
-
-def test_update_endpoint_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_endpoint),
-            '__call__') as call:
-        client.update_endpoint()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == endpoint_service.UpdateEndpointRequest()
-
-
-@pytest.mark.asyncio
-async def test_update_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.UpdateEndpointRequest):
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_endpoint),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_endpoint.Endpoint(
-            name='name_value',
-            display_name='display_name_value',
-            description='description_value',
-            etag='etag_value',
-        ))
-        response = await client.update_endpoint(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == endpoint_service.UpdateEndpointRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, gca_endpoint.Endpoint)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.description == 'description_value'
-    assert response.etag == 'etag_value'
-
-
-@pytest.mark.asyncio
-async def test_update_endpoint_async_from_dict():
-    await test_update_endpoint_async(request_type=dict)
-
-
-def test_update_endpoint_field_headers():
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = endpoint_service.UpdateEndpointRequest()
-
-    request.endpoint.name = 'endpoint.name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object( - type(client.transport.update_endpoint), - '__call__') as call: - call.return_value = gca_endpoint.Endpoint() - client.update_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint.name=endpoint.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_endpoint_field_headers_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.UpdateEndpointRequest() - - request.endpoint.name = 'endpoint.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_endpoint), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_endpoint.Endpoint()) - await client.update_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint.name=endpoint.name/value', - ) in kw['metadata'] - - -def test_update_endpoint_flattened(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_endpoint.Endpoint() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_endpoint( - endpoint=gca_endpoint.Endpoint(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].endpoint == gca_endpoint.Endpoint(name='name_value') - assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) - - -def test_update_endpoint_flattened_error(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_endpoint( - endpoint_service.UpdateEndpointRequest(), - endpoint=gca_endpoint.Endpoint(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_endpoint_flattened_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. 
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_endpoint.Endpoint())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.update_endpoint(
-            endpoint=gca_endpoint.Endpoint(name='name_value'),
-            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].endpoint == gca_endpoint.Endpoint(name='name_value')
-        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
-
-
-@pytest.mark.asyncio
-async def test_update_endpoint_flattened_error_async():
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.update_endpoint(
-            endpoint_service.UpdateEndpointRequest(),
-            endpoint=gca_endpoint.Endpoint(name='name_value'),
-            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
-        )
-
-
-def test_delete_endpoint(transport: str = 'grpc', request_type=endpoint_service.DeleteEndpointRequest):
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_endpoint),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/spam')
-        response = client.delete_endpoint(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == endpoint_service.DeleteEndpointRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, future.Future)
-
-
-def test_delete_endpoint_from_dict():
-    test_delete_endpoint(request_type=dict)
-
-
-def test_delete_endpoint_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_endpoint),
-            '__call__') as call:
-        client.delete_endpoint()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == endpoint_service.DeleteEndpointRequest()
-
-
-@pytest.mark.asyncio
-async def test_delete_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.DeleteEndpointRequest):
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object( - type(client.transport.delete_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.DeleteEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_endpoint_async_from_dict(): - await test_delete_endpoint_async(request_type=dict) - - -def test_delete_endpoint_field_headers(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.DeleteEndpointRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_endpoint), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_endpoint_field_headers_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.DeleteEndpointRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_endpoint), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_endpoint_flattened(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_endpoint( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
-    assert len(call.mock_calls) == 1
-    _, args, _ = call.mock_calls[0]
-    assert args[0].name == 'name_value'
-
-
-def test_delete_endpoint_flattened_error():
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.delete_endpoint(
-            endpoint_service.DeleteEndpointRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_delete_endpoint_flattened_async():
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_endpoint),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.delete_endpoint(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_endpoint_flattened_error_async():
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.delete_endpoint(
-            endpoint_service.DeleteEndpointRequest(),
-            name='name_value',
-        )
-
-
-def test_deploy_model(transport: str = 'grpc', request_type=endpoint_service.DeployModelRequest):
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.deploy_model),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/spam')
-        response = client.deploy_model(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == endpoint_service.DeployModelRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, future.Future)
-
-
-def test_deploy_model_from_dict():
-    test_deploy_model(request_type=dict)
-
-
-def test_deploy_model_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - client.deploy_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.DeployModelRequest() - - -@pytest.mark.asyncio -async def test_deploy_model_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.DeployModelRequest): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.deploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.DeployModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_deploy_model_async_from_dict(): - await test_deploy_model_async(request_type=dict) - - -def test_deploy_model_field_headers(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.DeployModelRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.deploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_deploy_model_field_headers_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.DeployModelRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.deploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
-        _, _, kw = call.mock_calls[0]
-        assert (
-            'x-goog-request-params',
-            'endpoint=endpoint/value',
-        ) in kw['metadata']
-
-
-def test_deploy_model_flattened():
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.deploy_model),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/op')
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.deploy_model(
-            endpoint='endpoint_value',
-            deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))),
-            traffic_split={'key_value': 541},
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].endpoint == 'endpoint_value'
-        assert args[0].deployed_model == gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value')))
-        assert args[0].traffic_split == {'key_value': 541}
-
-
-def test_deploy_model_flattened_error():
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.deploy_model(
-            endpoint_service.DeployModelRequest(),
-            endpoint='endpoint_value',
-            deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))),
-            traffic_split={'key_value': 541},
-        )
-
-
-@pytest.mark.asyncio
-async def test_deploy_model_flattened_async():
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.deploy_model),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.deploy_model(
-            endpoint='endpoint_value',
-            deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))),
-            traffic_split={'key_value': 541},
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].endpoint == 'endpoint_value' - assert args[0].deployed_model == gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))) - assert args[0].traffic_split == {'key_value': 541} - - -@pytest.mark.asyncio -async def test_deploy_model_flattened_error_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.deploy_model( - endpoint_service.DeployModelRequest(), - endpoint='endpoint_value', - deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))), - traffic_split={'key_value': 541}, - ) - - -def test_undeploy_model(transport: str = 'grpc', request_type=endpoint_service.UndeployModelRequest): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.undeploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.UndeployModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_undeploy_model_from_dict(): - test_undeploy_model(request_type=dict) - - -def test_undeploy_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - client.undeploy_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.UndeployModelRequest() - - -@pytest.mark.asyncio -async def test_undeploy_model_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.UndeployModelRequest): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.undeploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.UndeployModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_undeploy_model_async_from_dict(): - await test_undeploy_model_async(request_type=dict) - - -def test_undeploy_model_field_headers(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.UndeployModelRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.undeploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_undeploy_model_field_headers_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.UndeployModelRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.undeploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -def test_undeploy_model_flattened(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.undeploy_model( - endpoint='endpoint_value', - deployed_model_id='deployed_model_id_value', - traffic_split={'key_value': 541}, - ) - - # Establish that the underlying call was made with the expected - # request object values. 
-    assert len(call.mock_calls) == 1
-    _, args, _ = call.mock_calls[0]
-    assert args[0].endpoint == 'endpoint_value'
-    assert args[0].deployed_model_id == 'deployed_model_id_value'
-    assert args[0].traffic_split == {'key_value': 541}
-
-
-def test_undeploy_model_flattened_error():
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.undeploy_model(
-            endpoint_service.UndeployModelRequest(),
-            endpoint='endpoint_value',
-            deployed_model_id='deployed_model_id_value',
-            traffic_split={'key_value': 541},
-        )
-
-
-@pytest.mark.asyncio
-async def test_undeploy_model_flattened_async():
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.undeploy_model),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.undeploy_model(
-            endpoint='endpoint_value',
-            deployed_model_id='deployed_model_id_value',
-            traffic_split={'key_value': 541},
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].endpoint == 'endpoint_value'
-        assert args[0].deployed_model_id == 'deployed_model_id_value'
-        assert args[0].traffic_split == {'key_value': 541}
-
-
-@pytest.mark.asyncio
-async def test_undeploy_model_flattened_error_async():
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.undeploy_model(
-            endpoint_service.UndeployModelRequest(),
-            endpoint='endpoint_value',
-            deployed_model_id='deployed_model_id_value',
-            traffic_split={'key_value': 541},
-        )
-
-
-def test_credentials_transport_error():
-    # It is an error to provide credentials and a transport instance.
-    transport = transports.EndpointServiceGrpcTransport(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-    with pytest.raises(ValueError):
-        client = EndpointServiceClient(
-            credentials=ga_credentials.AnonymousCredentials(),
-            transport=transport,
-        )
-
-    # It is an error to provide a credentials file and a transport instance.
-    transport = transports.EndpointServiceGrpcTransport(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-    with pytest.raises(ValueError):
-        client = EndpointServiceClient(
-            client_options={"credentials_file": "credentials.json"},
-            transport=transport,
-        )
-
-    # It is an error to provide scopes and a transport instance.
-    transport = transports.EndpointServiceGrpcTransport(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-    with pytest.raises(ValueError):
-        client = EndpointServiceClient(
-            client_options={"scopes": ["1", "2"]},
-            transport=transport,
-        )
-
-
-def test_transport_instance():
-    # A client may be instantiated with a custom transport instance.
- transport = transports.EndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = EndpointServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.EndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.EndpointServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.EndpointServiceGrpcTransport, - transports.EndpointServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.EndpointServiceGrpcTransport, - ) - -def test_endpoint_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.EndpointServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_endpoint_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1beta1.services.endpoint_service.transports.EndpointServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.EndpointServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
-    methods = (
-        'create_endpoint',
-        'get_endpoint',
-        'list_endpoints',
-        'update_endpoint',
-        'delete_endpoint',
-        'deploy_model',
-        'undeploy_model',
-    )
-    for method in methods:
-        with pytest.raises(NotImplementedError):
-            getattr(transport, method)(request=object())
-
-    # Additionally, the LRO client (a property) should
-    # also raise NotImplementedError
-    with pytest.raises(NotImplementedError):
-        transport.operations_client
-
-
-@requires_google_auth_gte_1_25_0
-def test_endpoint_service_base_transport_with_credentials_file():
-    # Instantiate the base transport with a credentials file
-    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages') as Transport:
-        Transport.return_value = None
-        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport = transports.EndpointServiceTransport(
-            credentials_file="credentials.json",
-            quota_project_id="octopus",
-        )
-        load_creds.assert_called_once_with("credentials.json",
-            scopes=None,
-            default_scopes=(
-                'https://www.googleapis.com/auth/cloud-platform',
-            ),
-            quota_project_id="octopus",
-        )
-
-
-@requires_google_auth_lt_1_25_0
-def test_endpoint_service_base_transport_with_credentials_file_old_google_auth():
-    # Instantiate the base transport with a credentials file
-    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages') as Transport:
-        Transport.return_value = None
-        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport = transports.EndpointServiceTransport(
-            credentials_file="credentials.json",
-            quota_project_id="octopus",
-        )
-        load_creds.assert_called_once_with("credentials.json",
-            scopes=(
-                'https://www.googleapis.com/auth/cloud-platform',
-            ),
-            quota_project_id="octopus",
-        )
-
-
-def test_endpoint_service_base_transport_with_adc():
-    # Test the default credentials are used if credentials and credentials_file are None.
-    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages') as Transport:
-        Transport.return_value = None
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport = transports.EndpointServiceTransport()
-        adc.assert_called_once()
-
-
-@requires_google_auth_gte_1_25_0
-def test_endpoint_service_auth_adc():
-    # If no credentials are provided, we should use ADC credentials.
-    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        EndpointServiceClient()
-        adc.assert_called_once_with(
-            scopes=None,
-            default_scopes=(
-                'https://www.googleapis.com/auth/cloud-platform',
-            ),
-            quota_project_id=None,
-        )
-
-
-@requires_google_auth_lt_1_25_0
-def test_endpoint_service_auth_adc_old_google_auth():
-    # If no credentials are provided, we should use ADC credentials.
-    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        EndpointServiceClient()
-        adc.assert_called_once_with(
-            scopes=('https://www.googleapis.com/auth/cloud-platform',),
-            quota_project_id=None,
-        )
-
-
-@pytest.mark.parametrize(
-    "transport_class",
-    [
-        transports.EndpointServiceGrpcTransport,
-        transports.EndpointServiceGrpcAsyncIOTransport,
-    ],
-)
-@requires_google_auth_gte_1_25_0
-def test_endpoint_service_transport_auth_adc(transport_class):
-    # If credentials and host are not provided, the transport class should use
-    # ADC credentials.
-    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport_class(quota_project_id="octopus", scopes=["1", "2"])
-        adc.assert_called_once_with(
-            scopes=["1", "2"],
-            default_scopes=('https://www.googleapis.com/auth/cloud-platform',),
-            quota_project_id="octopus",
-        )
-
-
-@pytest.mark.parametrize(
-    "transport_class",
-    [
-        transports.EndpointServiceGrpcTransport,
-        transports.EndpointServiceGrpcAsyncIOTransport,
-    ],
-)
-@requires_google_auth_lt_1_25_0
-def test_endpoint_service_transport_auth_adc_old_google_auth(transport_class):
-    # If credentials and host are not provided, the transport class should use
-    # ADC credentials.
-    with mock.patch.object(google.auth, "default", autospec=True) as adc:
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport_class(quota_project_id="octopus")
-        adc.assert_called_once_with(
-            scopes=('https://www.googleapis.com/auth/cloud-platform',),
-            quota_project_id="octopus",
-        )
-
-
-@pytest.mark.parametrize(
-    "transport_class,grpc_helpers",
-    [
-        (transports.EndpointServiceGrpcTransport, grpc_helpers),
-        (transports.EndpointServiceGrpcAsyncIOTransport, grpc_helpers_async)
-    ],
-)
-def test_endpoint_service_transport_create_channel(transport_class, grpc_helpers):
-    # If credentials and host are not provided, the transport class should use
-    # ADC credentials.
-    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
-        grpc_helpers, "create_channel", autospec=True
-    ) as create_channel:
-        creds = ga_credentials.AnonymousCredentials()
-        adc.return_value = (creds, None)
-        transport_class(
-            quota_project_id="octopus",
-            scopes=["1", "2"]
-        )
-
-        create_channel.assert_called_with(
-            "aiplatform.googleapis.com:443",
-            credentials=creds,
-            credentials_file=None,
-            quota_project_id="octopus",
-            default_scopes=(
-                'https://www.googleapis.com/auth/cloud-platform',
-            ),
-            scopes=["1", "2"],
-            default_host="aiplatform.googleapis.com",
-            ssl_credentials=None,
-            options=[
-                ("grpc.max_send_message_length", -1),
-                ("grpc.max_receive_message_length", -1),
-            ],
-        )
-
-
-@pytest.mark.parametrize("transport_class", [transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport])
-def test_endpoint_service_grpc_transport_client_cert_source_for_mtls(
-    transport_class
-):
-    cred = ga_credentials.AnonymousCredentials()
-
-    # Check ssl_channel_credentials is used if provided.
-    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
-        mock_ssl_channel_creds = mock.Mock()
-        transport_class(
-            host="squid.clam.whelk",
-            credentials=cred,
-            ssl_channel_credentials=mock_ssl_channel_creds
-        )
-        mock_create_channel.assert_called_once_with(
-            "squid.clam.whelk:443",
-            credentials=cred,
-            credentials_file=None,
-            scopes=None,
-            ssl_credentials=mock_ssl_channel_creds,
-            quota_project_id=None,
-            options=[
-                ("grpc.max_send_message_length", -1),
-                ("grpc.max_receive_message_length", -1),
-            ],
-        )
-
-    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
-    # is used.
-    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
-        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
-            transport_class(
-                credentials=cred,
-                client_cert_source_for_mtls=client_cert_source_callback
-            )
-            expected_cert, expected_key = client_cert_source_callback()
-            mock_ssl_cred.assert_called_once_with(
-                certificate_chain=expected_cert,
-                private_key=expected_key
-            )
-
-
-def test_endpoint_service_host_no_port():
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
-    )
-    assert client.transport._host == 'aiplatform.googleapis.com:443'
-
-
-def test_endpoint_service_host_with_port():
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
-    )
-    assert client.transport._host == 'aiplatform.googleapis.com:8000'
-
-def test_endpoint_service_grpc_transport_channel():
-    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
-
-    # Check that channel is used if provided.
-    transport = transports.EndpointServiceGrpcTransport(
-        host="squid.clam.whelk",
-        channel=channel,
-    )
-    assert transport.grpc_channel == channel
-    assert transport._host == "squid.clam.whelk:443"
-    assert transport._ssl_channel_credentials is None
-
-
-def test_endpoint_service_grpc_asyncio_transport_channel():
-    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
-
-    # Check that channel is used if provided.
-    transport = transports.EndpointServiceGrpcAsyncIOTransport(
-        host="squid.clam.whelk",
-        channel=channel,
-    )
-    assert transport.grpc_channel == channel
-    assert transport._host == "squid.clam.whelk:443"
-    assert transport._ssl_channel_credentials is None
-
-
-# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
-# removed from grpc/grpc_asyncio transport constructor.
-@pytest.mark.parametrize("transport_class", [transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport])
-def test_endpoint_service_transport_channel_mtls_with_client_cert_source(
-    transport_class
-):
-    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
-        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
-            mock_ssl_cred = mock.Mock()
-            grpc_ssl_channel_cred.return_value = mock_ssl_cred
-
-            mock_grpc_channel = mock.Mock()
-            grpc_create_channel.return_value = mock_grpc_channel
-
-            cred = ga_credentials.AnonymousCredentials()
-            with pytest.warns(DeprecationWarning):
-                with mock.patch.object(google.auth, 'default') as adc:
-                    adc.return_value = (cred, None)
-                    transport = transport_class(
-                        host="squid.clam.whelk",
-                        api_mtls_endpoint="mtls.squid.clam.whelk",
-                        client_cert_source=client_cert_source_callback,
-                    )
-                    adc.assert_called_once()
-
-            grpc_ssl_channel_cred.assert_called_once_with(
-                certificate_chain=b"cert bytes", private_key=b"key bytes"
-            )
-            grpc_create_channel.assert_called_once_with(
-                "mtls.squid.clam.whelk:443",
-                credentials=cred,
-                credentials_file=None,
-                scopes=None,
-                ssl_credentials=mock_ssl_cred,
-                quota_project_id=None,
-                options=[
-                    ("grpc.max_send_message_length", -1),
-                    ("grpc.max_receive_message_length", -1),
-                ],
-            )
-            assert transport.grpc_channel == mock_grpc_channel
-            assert transport._ssl_channel_credentials == mock_ssl_cred
-
-
-# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
-# removed from grpc/grpc_asyncio transport constructor.
-@pytest.mark.parametrize("transport_class", [transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport])
-def test_endpoint_service_transport_channel_mtls_with_adc(
-    transport_class
-):
-    mock_ssl_cred = mock.Mock()
-    with mock.patch.multiple(
-        "google.auth.transport.grpc.SslCredentials",
-        __init__=mock.Mock(return_value=None),
-        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
-    ):
-        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
-            mock_grpc_channel = mock.Mock()
-            grpc_create_channel.return_value = mock_grpc_channel
-            mock_cred = mock.Mock()
-
-            with pytest.warns(DeprecationWarning):
-                transport = transport_class(
-                    host="squid.clam.whelk",
-                    credentials=mock_cred,
-                    api_mtls_endpoint="mtls.squid.clam.whelk",
-                    client_cert_source=None,
-                )
-
-            grpc_create_channel.assert_called_once_with(
-                "mtls.squid.clam.whelk:443",
-                credentials=mock_cred,
-                credentials_file=None,
-                scopes=None,
-                ssl_credentials=mock_ssl_cred,
-                quota_project_id=None,
-                options=[
-                    ("grpc.max_send_message_length", -1),
-                    ("grpc.max_receive_message_length", -1),
-                ],
-            )
-            assert transport.grpc_channel == mock_grpc_channel
-
-
-def test_endpoint_service_grpc_lro_client():
-    client = EndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-    transport = client.transport
-
-    # Ensure that we have an api-core operations client.
-    assert isinstance(
-        transport.operations_client,
-        operations_v1.OperationsClient,
-    )
-
-    # Ensure that subsequent calls to the property send the exact same object.
-    assert transport.operations_client is transport.operations_client
-
-
-def test_endpoint_service_grpc_lro_async_client():
-    client = EndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc_asyncio',
-    )
-    transport = client.transport
-
-    # Ensure that we have an api-core operations client.
-    assert isinstance(
-        transport.operations_client,
-        operations_v1.OperationsAsyncClient,
-    )
-
-    # Ensure that subsequent calls to the property send the exact same object.
-    assert transport.operations_client is transport.operations_client
-
-
-def test_endpoint_path():
-    project = "squid"
-    location = "clam"
-    endpoint = "whelk"
-    expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, )
-    actual = EndpointServiceClient.endpoint_path(project, location, endpoint)
-    assert expected == actual
-
-
-def test_parse_endpoint_path():
-    expected = {
-        "project": "octopus",
-        "location": "oyster",
-        "endpoint": "nudibranch",
-    }
-    path = EndpointServiceClient.endpoint_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = EndpointServiceClient.parse_endpoint_path(path)
-    assert expected == actual
-
-def test_model_path():
-    project = "cuttlefish"
-    location = "mussel"
-    model = "winkle"
-    expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, )
-    actual = EndpointServiceClient.model_path(project, location, model)
-    assert expected == actual
-
-
-def test_parse_model_path():
-    expected = {
-        "project": "nautilus",
-        "location": "scallop",
-        "model": "abalone",
-    }
-    path = EndpointServiceClient.model_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = EndpointServiceClient.parse_model_path(path)
-    assert expected == actual
-
-def test_common_billing_account_path():
-    billing_account = "squid"
-    expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
-    actual = EndpointServiceClient.common_billing_account_path(billing_account)
-    assert expected == actual
-
-
-def test_parse_common_billing_account_path():
-    expected = {
-        "billing_account": "clam",
-    }
-    path = EndpointServiceClient.common_billing_account_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = EndpointServiceClient.parse_common_billing_account_path(path)
-    assert expected == actual
-
-def test_common_folder_path():
-    folder = "whelk"
-    expected = "folders/{folder}".format(folder=folder, )
-    actual = EndpointServiceClient.common_folder_path(folder)
-    assert expected == actual
-
-
-def test_parse_common_folder_path():
-    expected = {
-        "folder": "octopus",
-    }
-    path = EndpointServiceClient.common_folder_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = EndpointServiceClient.parse_common_folder_path(path)
-    assert expected == actual
-
-def test_common_organization_path():
-    organization = "oyster"
-    expected = "organizations/{organization}".format(organization=organization, )
-    actual = EndpointServiceClient.common_organization_path(organization)
-    assert expected == actual
-
-
-def test_parse_common_organization_path():
-    expected = {
-        "organization": "nudibranch",
-    }
-    path = EndpointServiceClient.common_organization_path(**expected)
-
-    # Check that the path construction is reversible.
- actual = EndpointServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "cuttlefish" - expected = "projects/{project}".format(project=project, ) - actual = EndpointServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "mussel", - } - path = EndpointServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = EndpointServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "winkle" - location = "nautilus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = EndpointServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "scallop", - "location": "abalone", - } - path = EndpointServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. - actual = EndpointServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.EndpointServiceTransport, '_prep_wrapped_messages') as prep: - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.EndpointServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = EndpointServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py deleted file mode 100644 index 323b59d601..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py +++ /dev/null @@ -1,1391 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service import FeaturestoreOnlineServingServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service import FeaturestoreOnlineServingServiceClient -from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service import transports -from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.base import _GOOGLE_AUTH_VERSION -from google.cloud.aiplatform_v1beta1.types import feature_selector -from google.cloud.aiplatform_v1beta1.types import featurestore_online_service -from google.oauth2 import service_account -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). -requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(None) is None - assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - FeaturestoreOnlineServingServiceClient, - FeaturestoreOnlineServingServiceAsyncClient, -]) -def test_featurestore_online_serving_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("client_class", [ - FeaturestoreOnlineServingServiceClient, - FeaturestoreOnlineServingServiceAsyncClient, -]) -def test_featurestore_online_serving_service_client_service_account_always_use_jwt(client_class): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"), - (transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_featurestore_online_serving_service_client_service_account_always_use_jwt_true(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - -@pytest.mark.parametrize("client_class", [ - FeaturestoreOnlineServingServiceClient, - FeaturestoreOnlineServingServiceAsyncClient, -]) -def test_featurestore_online_serving_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - 
assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_featurestore_online_serving_service_client_get_transport_class(): - transport = FeaturestoreOnlineServingServiceClient.get_transport_class() - available_transports = [ - transports.FeaturestoreOnlineServingServiceGrpcTransport, - ] - assert transport in available_transports - - transport = FeaturestoreOnlineServingServiceClient.get_transport_class("grpc") - assert transport == transports.FeaturestoreOnlineServingServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"), - (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(FeaturestoreOnlineServingServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceClient)) -@mock.patch.object(FeaturestoreOnlineServingServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceAsyncClient)) -def test_featurestore_online_serving_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(FeaturestoreOnlineServingServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(FeaturestoreOnlineServingServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc", "true"), - (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc", "false"), - (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(FeaturestoreOnlineServingServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceClient)) -@mock.patch.object(FeaturestoreOnlineServingServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_featurestore_online_serving_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"), - (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_featurestore_online_serving_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. 
- options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"), - (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_featurestore_online_serving_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. - options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_featurestore_online_serving_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = FeaturestoreOnlineServingServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_read_feature_values(transport: str = 'grpc', request_type=featurestore_online_service.ReadFeatureValuesRequest): - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_online_service.ReadFeatureValuesResponse( - ) - response = client.read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_online_service.ReadFeatureValuesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, featurestore_online_service.ReadFeatureValuesResponse) - - -def test_read_feature_values_from_dict(): - test_read_feature_values(request_type=dict) - - -def test_read_feature_values_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
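# (Editor's note) The empty-call failsafe above is one corner of the three
# calling conventions every generated GAPIC method supports: a request proto,
# a dict of the same shape, or flattened keyword fields. A minimal usage
# sketch, assuming Application Default Credentials are configured; the
# resource name is illustrative. This is an annotation, not part of the
# deleted file.

from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service import (
    FeaturestoreOnlineServingServiceClient,
)
from google.cloud.aiplatform_v1beta1.types import featurestore_online_service

client = FeaturestoreOnlineServingServiceClient()
entity_type = "projects/p/locations/us-central1/featurestores/f/entityTypes/e"

# Convention 1: an explicit request proto.
request = featurestore_online_service.ReadFeatureValuesRequest(entity_type=entity_type)
client.read_feature_values(request=request)

# Convention 2: a dict with the same field names (covered by the *_from_dict tests).
client.read_feature_values(request={"entity_type": entity_type})

# Convention 3: flattened keyword fields (covered by the *_flattened tests).
client.read_feature_values(entity_type=entity_type)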
-    client = FeaturestoreOnlineServingServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.read_feature_values),
-            '__call__') as call:
-        client.read_feature_values()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == featurestore_online_service.ReadFeatureValuesRequest()
-
-
-@pytest.mark.asyncio
-async def test_read_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_online_service.ReadFeatureValuesRequest):
-    client = FeaturestoreOnlineServingServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.read_feature_values),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_online_service.ReadFeatureValuesResponse())
-        response = await client.read_feature_values(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == featurestore_online_service.ReadFeatureValuesRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, featurestore_online_service.ReadFeatureValuesResponse)
-
-
-@pytest.mark.asyncio
-async def test_read_feature_values_async_from_dict():
-    await test_read_feature_values_async(request_type=dict)
-
-
-def test_read_feature_values_field_headers():
-    client = FeaturestoreOnlineServingServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = featurestore_online_service.ReadFeatureValuesRequest()
-
-    request.entity_type = 'entity_type/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.read_feature_values),
-            '__call__') as call:
-        call.return_value = featurestore_online_service.ReadFeatureValuesResponse()
-        client.read_feature_values(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'entity_type=entity_type/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_read_feature_values_field_headers_async():
-    client = FeaturestoreOnlineServingServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = featurestore_online_service.ReadFeatureValuesRequest()
-
-    request.entity_type = 'entity_type/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
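# (Editor's note) Both field-header tests (sync above, async below) assert on
# the 'x-goog-request-params' metadata entry that GAPIC derives from
# URI-bound request fields so the backend can route the call. A rough,
# standard-library-only sketch of that derivation; the real client builds it
# via google.api_core.gapic_v1.routing_header. Annotation only, not part of
# the deleted file.

def request_params_metadata(fields: dict) -> tuple:
    # e.g. {'entity_type': 'entity_type/value'} ->
    # ('x-goog-request-params', 'entity_type=entity_type/value')
    value = "&".join(f"{name}={val}" for name, val in fields.items())
    return ("x-goog-request-params", value)

assert request_params_metadata({"entity_type": "entity_type/value"}) == (
    "x-goog-request-params",
    "entity_type=entity_type/value",
)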
-    with mock.patch.object(
-            type(client.transport.read_feature_values),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_online_service.ReadFeatureValuesResponse())
-        await client.read_feature_values(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'entity_type=entity_type/value',
-    ) in kw['metadata']
-
-
-def test_read_feature_values_flattened():
-    client = FeaturestoreOnlineServingServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.read_feature_values),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = featurestore_online_service.ReadFeatureValuesResponse()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.read_feature_values(
-            entity_type='entity_type_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].entity_type == 'entity_type_value'
-
-
-def test_read_feature_values_flattened_error():
-    client = FeaturestoreOnlineServingServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.read_feature_values(
-            featurestore_online_service.ReadFeatureValuesRequest(),
-            entity_type='entity_type_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_read_feature_values_flattened_async():
-    client = FeaturestoreOnlineServingServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.read_feature_values),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_online_service.ReadFeatureValuesResponse())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.read_feature_values(
-            entity_type='entity_type_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].entity_type == 'entity_type_value'
-
-
-@pytest.mark.asyncio
-async def test_read_feature_values_flattened_error_async():
-    client = FeaturestoreOnlineServingServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
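# (Editor's note) The contract pinned down here and in the error tests around
# it: a request object and flattened fields are mutually exclusive, because
# the client could not tell which one should win. From the caller's
# perspective (a sketch; AnonymousCredentials keeps it constructible offline,
# and the ValueError is raised before anything reaches the transport):

import pytest
from google.auth import credentials as ga_credentials
from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service import (
    FeaturestoreOnlineServingServiceClient,
)
from google.cloud.aiplatform_v1beta1.types import featurestore_online_service

client = FeaturestoreOnlineServingServiceClient(
    credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
    client.read_feature_values(
        featurestore_online_service.ReadFeatureValuesRequest(),
        entity_type="entity_type_value",  # both styles at once: rejected
    )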
- with pytest.raises(ValueError): - await client.read_feature_values( - featurestore_online_service.ReadFeatureValuesRequest(), - entity_type='entity_type_value', - ) - - -def test_streaming_read_feature_values(transport: str = 'grpc', request_type=featurestore_online_service.StreamingReadFeatureValuesRequest): - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.streaming_read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([featurestore_online_service.ReadFeatureValuesResponse()]) - response = client.streaming_read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest() - - # Establish that the response is the type that we expect. - for message in response: - assert isinstance(message, featurestore_online_service.ReadFeatureValuesResponse) - - -def test_streaming_read_feature_values_from_dict(): - test_streaming_read_feature_values(request_type=dict) - - -def test_streaming_read_feature_values_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.streaming_read_feature_values), - '__call__') as call: - client.streaming_read_feature_values() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest() - - -@pytest.mark.asyncio -async def test_streaming_read_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_online_service.StreamingReadFeatureValuesRequest): - client = FeaturestoreOnlineServingServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.streaming_read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - call.return_value.read = mock.AsyncMock(side_effect=[featurestore_online_service.ReadFeatureValuesResponse()]) - response = await client.streaming_read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest() - - # Establish that the response is the type that we expect. 
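# (Editor's note) On the async surface a server-streaming method returns a
# grpc.aio call object rather than a plain iterator: each awaited read()
# yields the next message, as the assertion just below checks, and the call
# also supports 'async for'. A consumption sketch, assuming a ready
# FeaturestoreOnlineServingServiceAsyncClient is passed in; annotation only.

async def drain_stream(client, entity_type: str):
    # Collect every streamed ReadFeatureValuesResponse for one entity type.
    stream = await client.streaming_read_feature_values(entity_type=entity_type)
    return [message async for message in stream]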
- message = await response.read() - assert isinstance(message, featurestore_online_service.ReadFeatureValuesResponse) - - -@pytest.mark.asyncio -async def test_streaming_read_feature_values_async_from_dict(): - await test_streaming_read_feature_values_async(request_type=dict) - - -def test_streaming_read_feature_values_field_headers(): - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_online_service.StreamingReadFeatureValuesRequest() - - request.entity_type = 'entity_type/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.streaming_read_feature_values), - '__call__') as call: - call.return_value = iter([featurestore_online_service.ReadFeatureValuesResponse()]) - client.streaming_read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'entity_type=entity_type/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_streaming_read_feature_values_field_headers_async(): - client = FeaturestoreOnlineServingServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_online_service.StreamingReadFeatureValuesRequest() - - request.entity_type = 'entity_type/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.streaming_read_feature_values), - '__call__') as call: - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - call.return_value.read = mock.AsyncMock(side_effect=[featurestore_online_service.ReadFeatureValuesResponse()]) - await client.streaming_read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'entity_type=entity_type/value', - ) in kw['metadata'] - - -def test_streaming_read_feature_values_flattened(): - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.streaming_read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([featurestore_online_service.ReadFeatureValuesResponse()]) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.streaming_read_feature_values( - entity_type='entity_type_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].entity_type == 'entity_type_value'
-
-
-def test_streaming_read_feature_values_flattened_error():
-    client = FeaturestoreOnlineServingServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.streaming_read_feature_values(
-            featurestore_online_service.StreamingReadFeatureValuesRequest(),
-            entity_type='entity_type_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_streaming_read_feature_values_flattened_async():
-    client = FeaturestoreOnlineServingServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.streaming_read_feature_values),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.streaming_read_feature_values(
-            entity_type='entity_type_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].entity_type == 'entity_type_value'
-
-
-@pytest.mark.asyncio
-async def test_streaming_read_feature_values_flattened_error_async():
-    client = FeaturestoreOnlineServingServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.streaming_read_feature_values(
-            featurestore_online_service.StreamingReadFeatureValuesRequest(),
-            entity_type='entity_type_value',
-        )
-
-
-def test_credentials_transport_error():
-    # It is an error to provide credentials and a transport instance.
-    transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-    with pytest.raises(ValueError):
-        client = FeaturestoreOnlineServingServiceClient(
-            credentials=ga_credentials.AnonymousCredentials(),
-            transport=transport,
-        )
-
-    # It is an error to provide a credentials file and a transport instance.
-    transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-    with pytest.raises(ValueError):
-        client = FeaturestoreOnlineServingServiceClient(
-            client_options={"credentials_file": "credentials.json"},
-            transport=transport,
-        )
-
-    # It is an error to provide scopes and a transport instance.
-    transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-    with pytest.raises(ValueError):
-        client = FeaturestoreOnlineServingServiceClient(
-            client_options={"scopes": ["1", "2"]},
-            transport=transport,
-        )
-
-
-def test_transport_instance():
-    # A client may be instantiated with a custom transport instance.
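# (Editor's note) Passing a pre-built transport, as the test below does, is
# the supported hook for custom channel setup (interceptors, channel options,
# emulators); the trade-off checked in test_credentials_transport_error above
# is that the transport then owns the credentials outright. A construction
# sketch, annotation only:

from google.auth import credentials as ga_credentials
from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service import (
    FeaturestoreOnlineServingServiceClient,
    transports,
)

transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
    host="aiplatform.googleapis.com",
    credentials=ga_credentials.AnonymousCredentials(),
)
client = FeaturestoreOnlineServingServiceClient(transport=transport)
assert client.transport is transport  # the client adopts the instance as-is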
- transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = FeaturestoreOnlineServingServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.FeaturestoreOnlineServingServiceGrpcTransport, - transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.FeaturestoreOnlineServingServiceGrpcTransport, - ) - -def test_featurestore_online_serving_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.FeaturestoreOnlineServingServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_featurestore_online_serving_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.FeaturestoreOnlineServingServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
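# (Editor's note) The base transport is effectively an abstract interface: it
# handles credentials and method wrapping, and every RPC attribute raises
# until a concrete transport (gRPC, gRPC asyncio) overrides it, which is what
# the loop just below verifies. The shape of that pattern, reduced to a
# sketch with hypothetical names:

class BaseTransport:
    """Abstract transport: concrete subclasses must provide each RPC."""

    @property
    def read_feature_values(self):
        raise NotImplementedError()

class GrpcTransport(BaseTransport):
    @property
    def read_feature_values(self):
        # A concrete transport returns a callable bound to a gRPC stub method.
        return lambda request, **kwargs: ...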
- methods = ( - 'read_feature_values', - 'streaming_read_feature_values', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - -@requires_google_auth_gte_1_25_0 -def test_featurestore_online_serving_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.FeaturestoreOnlineServingServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@requires_google_auth_lt_1_25_0 -def test_featurestore_online_serving_service_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.FeaturestoreOnlineServingServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - ), - quota_project_id="octopus", - ) - - -def test_featurestore_online_serving_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.FeaturestoreOnlineServingServiceTransport() - adc.assert_called_once() - - -@requires_google_auth_gte_1_25_0 -def test_featurestore_online_serving_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - FeaturestoreOnlineServingServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@requires_google_auth_lt_1_25_0 -def test_featurestore_online_serving_service_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. 
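# (Editor's note) "ADC" in these tests is Application Default Credentials:
# google.auth.default() walks the standard lookup chain (the
# GOOGLE_APPLICATION_CREDENTIALS env var, gcloud user credentials, then the
# metadata server) and returns a (credentials, project) pair, which is why
# mocking that single function satisfies the whole chain. Direct usage sketch:

import google.auth

credentials, project_id = google.auth.default(
    scopes=["https://www.googleapis.com/auth/cloud-platform"],
)
print(project_id, type(credentials).__name__)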
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - FeaturestoreOnlineServingServiceClient() - adc.assert_called_once_with( - scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.FeaturestoreOnlineServingServiceGrpcTransport, - transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_gte_1_25_0 -def test_featurestore_online_serving_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.FeaturestoreOnlineServingServiceGrpcTransport, - transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_featurestore_online_serving_service_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with(scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.FeaturestoreOnlineServingServiceGrpcTransport, grpc_helpers), - (transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_featurestore_online_serving_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.FeaturestoreOnlineServingServiceGrpcTransport, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport]) -def test_featurestore_online_serving_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. 
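# (Editor's note) client_cert_source_for_mtls is a zero-argument callback
# returning (cert_bytes, key_bytes). When no ready-made ssl_channel_credentials
# object is supplied, the transport invokes the callback and hands the pair to
# grpc.ssl_channel_credentials, which is exactly what the mocks below assert.
# The equivalent direct construction, with placeholder bytes:

import grpc

def client_cert_source():
    # Placeholders; real callers load these from PEM files or a cert store.
    return b"cert bytes", b"key bytes"

cert, key = client_cert_source()
channel_credentials = grpc.ssl_channel_credentials(
    certificate_chain=cert,
    private_key=key,
)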
-    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
-        mock_ssl_channel_creds = mock.Mock()
-        transport_class(
-            host="squid.clam.whelk",
-            credentials=cred,
-            ssl_channel_credentials=mock_ssl_channel_creds
-        )
-        mock_create_channel.assert_called_once_with(
-            "squid.clam.whelk:443",
-            credentials=cred,
-            credentials_file=None,
-            scopes=None,
-            ssl_credentials=mock_ssl_channel_creds,
-            quota_project_id=None,
-            options=[
-                ("grpc.max_send_message_length", -1),
-                ("grpc.max_receive_message_length", -1),
-            ],
-        )
-
-    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
-    # is used.
-    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
-        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
-            transport_class(
-                credentials=cred,
-                client_cert_source_for_mtls=client_cert_source_callback
-            )
-            expected_cert, expected_key = client_cert_source_callback()
-            mock_ssl_cred.assert_called_once_with(
-                certificate_chain=expected_cert,
-                private_key=expected_key
-            )
-
-
-def test_featurestore_online_serving_service_host_no_port():
-    client = FeaturestoreOnlineServingServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
-    )
-    assert client.transport._host == 'aiplatform.googleapis.com:443'
-
-
-def test_featurestore_online_serving_service_host_with_port():
-    client = FeaturestoreOnlineServingServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
-    )
-    assert client.transport._host == 'aiplatform.googleapis.com:8000'
-
-def test_featurestore_online_serving_service_grpc_transport_channel():
-    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
-
-    # Check that channel is used if provided.
-    transport = transports.FeaturestoreOnlineServingServiceGrpcTransport(
-        host="squid.clam.whelk",
-        channel=channel,
-    )
-    assert transport.grpc_channel == channel
-    assert transport._host == "squid.clam.whelk:443"
-    assert transport._ssl_channel_credentials is None
-
-
-def test_featurestore_online_serving_service_grpc_asyncio_transport_channel():
-    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
-
-    # Check that channel is used if provided.
-    transport = transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport(
-        host="squid.clam.whelk",
-        channel=channel,
-    )
-    assert transport.grpc_channel == channel
-    assert transport._host == "squid.clam.whelk:443"
-    assert transport._ssl_channel_credentials is None
-
-
-# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
-# removed from grpc/grpc_asyncio transport constructor.
-@pytest.mark.parametrize("transport_class", [transports.FeaturestoreOnlineServingServiceGrpcTransport, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport]) -def test_featurestore_online_serving_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.FeaturestoreOnlineServingServiceGrpcTransport, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport]) -def test_featurestore_online_serving_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_entity_type_path(): - project = "squid" - location = "clam" - featurestore = "whelk" - entity_type = "octopus" - expected = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, ) - actual = FeaturestoreOnlineServingServiceClient.entity_type_path(project, location, featurestore, entity_type) - assert expected == actual - - -def test_parse_entity_type_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - "featurestore": "cuttlefish", - 
"entity_type": "mussel", - } - path = FeaturestoreOnlineServingServiceClient.entity_type_path(**expected) - - # Check that the path construction is reversible. - actual = FeaturestoreOnlineServingServiceClient.parse_entity_type_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "winkle" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = FeaturestoreOnlineServingServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "nautilus", - } - path = FeaturestoreOnlineServingServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = FeaturestoreOnlineServingServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "scallop" - expected = "folders/{folder}".format(folder=folder, ) - actual = FeaturestoreOnlineServingServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "abalone", - } - path = FeaturestoreOnlineServingServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = FeaturestoreOnlineServingServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "squid" - expected = "organizations/{organization}".format(organization=organization, ) - actual = FeaturestoreOnlineServingServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "clam", - } - path = FeaturestoreOnlineServingServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = FeaturestoreOnlineServingServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "whelk" - expected = "projects/{project}".format(project=project, ) - actual = FeaturestoreOnlineServingServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "octopus", - } - path = FeaturestoreOnlineServingServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = FeaturestoreOnlineServingServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "oyster" - location = "nudibranch" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = FeaturestoreOnlineServingServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "cuttlefish", - "location": "mussel", - } - path = FeaturestoreOnlineServingServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = FeaturestoreOnlineServingServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.FeaturestoreOnlineServingServiceTransport, '_prep_wrapped_messages') as prep: - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.FeaturestoreOnlineServingServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = FeaturestoreOnlineServingServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py deleted file mode 100644 index 99d37926b2..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py +++ /dev/null @@ -1,6489 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.featurestore_service import FeaturestoreServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.featurestore_service import FeaturestoreServiceClient -from google.cloud.aiplatform_v1beta1.services.featurestore_service import pagers -from google.cloud.aiplatform_v1beta1.services.featurestore_service import transports -from google.cloud.aiplatform_v1beta1.services.featurestore_service.transports.base import _GOOGLE_AUTH_VERSION -from google.cloud.aiplatform_v1beta1.types import entity_type -from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type -from google.cloud.aiplatform_v1beta1.types import feature -from google.cloud.aiplatform_v1beta1.types import feature as gca_feature -from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats -from google.cloud.aiplatform_v1beta1.types import feature_selector -from google.cloud.aiplatform_v1beta1.types import featurestore -from google.cloud.aiplatform_v1beta1.types import featurestore as gca_featurestore -from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring -from google.cloud.aiplatform_v1beta1.types import featurestore_service -from google.cloud.aiplatform_v1beta1.types import io -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). -requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert FeaturestoreServiceClient._get_default_mtls_endpoint(None) is None - assert FeaturestoreServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert FeaturestoreServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert FeaturestoreServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert FeaturestoreServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert FeaturestoreServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - FeaturestoreServiceClient, - FeaturestoreServiceAsyncClient, -]) -def test_featurestore_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("client_class", [ - FeaturestoreServiceClient, - FeaturestoreServiceAsyncClient, -]) -def test_featurestore_service_client_service_account_always_use_jwt(client_class): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.FeaturestoreServiceGrpcTransport, "grpc"), - (transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_featurestore_service_client_service_account_always_use_jwt_true(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - -@pytest.mark.parametrize("client_class", [ - FeaturestoreServiceClient, - FeaturestoreServiceAsyncClient, -]) -def test_featurestore_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_featurestore_service_client_get_transport_class(): - transport = FeaturestoreServiceClient.get_transport_class() - 
available_transports = [ - transports.FeaturestoreServiceGrpcTransport, - ] - assert transport in available_transports - - transport = FeaturestoreServiceClient.get_transport_class("grpc") - assert transport == transports.FeaturestoreServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc"), - (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(FeaturestoreServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreServiceClient)) -@mock.patch.object(FeaturestoreServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreServiceAsyncClient)) -def test_featurestore_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(FeaturestoreServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(FeaturestoreServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc", "true"), - (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc", "false"), - (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(FeaturestoreServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreServiceClient)) -@mock.patch.object(FeaturestoreServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_featurestore_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc"), - (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_featurestore_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc"), - (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_featurestore_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_featurestore_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_service.transports.FeaturestoreServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = FeaturestoreServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_create_featurestore(transport: str = 'grpc', request_type=featurestore_service.CreateFeaturestoreRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.CreateFeaturestoreRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_featurestore_from_dict(): - test_create_featurestore(request_type=dict) - - -def test_create_featurestore_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_featurestore), - '__call__') as call: - client.create_featurestore() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.CreateFeaturestoreRequest() - - -@pytest.mark.asyncio -async def test_create_featurestore_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.CreateFeaturestoreRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.CreateFeaturestoreRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_featurestore_async_from_dict(): - await test_create_featurestore_async(request_type=dict) - - -def test_create_featurestore_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.CreateFeaturestoreRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_featurestore), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_featurestore_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.CreateFeaturestoreRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_featurestore), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_featurestore_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_featurestore( - parent='parent_value', - featurestore=gca_featurestore.Featurestore(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].featurestore == gca_featurestore.Featurestore(name='name_value') - - -def test_create_featurestore_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_featurestore( - featurestore_service.CreateFeaturestoreRequest(), - parent='parent_value', - featurestore=gca_featurestore.Featurestore(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_featurestore_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_featurestore( - parent='parent_value', - featurestore=gca_featurestore.Featurestore(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].featurestore == gca_featurestore.Featurestore(name='name_value') - - -@pytest.mark.asyncio -async def test_create_featurestore_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_featurestore( - featurestore_service.CreateFeaturestoreRequest(), - parent='parent_value', - featurestore=gca_featurestore.Featurestore(name='name_value'), - ) - - -def test_get_featurestore(transport: str = 'grpc', request_type=featurestore_service.GetFeaturestoreRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore.Featurestore( - name='name_value', - etag='etag_value', - state=featurestore.Featurestore.State.STABLE, - ) - response = client.get_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.GetFeaturestoreRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, featurestore.Featurestore) - assert response.name == 'name_value' - assert response.etag == 'etag_value' - assert response.state == featurestore.Featurestore.State.STABLE - - -def test_get_featurestore_from_dict(): - test_get_featurestore(request_type=dict) - - -def test_get_featurestore_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_featurestore), - '__call__') as call: - client.get_featurestore() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.GetFeaturestoreRequest() - - -@pytest.mark.asyncio -async def test_get_featurestore_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.GetFeaturestoreRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore.Featurestore( - name='name_value', - etag='etag_value', - state=featurestore.Featurestore.State.STABLE, - )) - response = await client.get_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.GetFeaturestoreRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, featurestore.Featurestore) - assert response.name == 'name_value' - assert response.etag == 'etag_value' - assert response.state == featurestore.Featurestore.State.STABLE - - -@pytest.mark.asyncio -async def test_get_featurestore_async_from_dict(): - await test_get_featurestore_async(request_type=dict) - - -def test_get_featurestore_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.GetFeaturestoreRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_featurestore), - '__call__') as call: - call.return_value = featurestore.Featurestore() - client.get_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_featurestore_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.GetFeaturestoreRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_featurestore), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore.Featurestore()) - await client.get_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_featurestore_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore.Featurestore() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_featurestore( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_get_featurestore_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_featurestore( - featurestore_service.GetFeaturestoreRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_featurestore_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore.Featurestore() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore.Featurestore()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_featurestore( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_get_featurestore_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_featurestore( - featurestore_service.GetFeaturestoreRequest(), - name='name_value', - ) - - -def test_list_featurestores(transport: str = 'grpc', request_type=featurestore_service.ListFeaturestoresRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_service.ListFeaturestoresResponse( - next_page_token='next_page_token_value', - ) - response = client.list_featurestores(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ListFeaturestoresRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListFeaturestoresPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_featurestores_from_dict(): - test_list_featurestores(request_type=dict) - - -def test_list_featurestores_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__') as call: - client.list_featurestores() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ListFeaturestoresRequest() - - -@pytest.mark.asyncio -async def test_list_featurestores_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ListFeaturestoresRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturestoresResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_featurestores(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ListFeaturestoresRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListFeaturestoresAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_featurestores_async_from_dict(): - await test_list_featurestores_async(request_type=dict) - - -def test_list_featurestores_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.ListFeaturestoresRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__') as call: - call.return_value = featurestore_service.ListFeaturestoresResponse() - client.list_featurestores(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_featurestores_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.ListFeaturestoresRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturestoresResponse()) - await client.list_featurestores(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_featurestores_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_service.ListFeaturestoresResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_featurestores( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -def test_list_featurestores_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_featurestores( - featurestore_service.ListFeaturestoresRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_featurestores_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_service.ListFeaturestoresResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturestoresResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_featurestores( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -@pytest.mark.asyncio -async def test_list_featurestores_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_featurestores( - featurestore_service.ListFeaturestoresRequest(), - parent='parent_value', - ) - - -def test_list_featurestores_pager(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - featurestore.Featurestore(), - featurestore.Featurestore(), - ], - next_page_token='abc', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[], - next_page_token='def', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - ], - next_page_token='ghi', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - featurestore.Featurestore(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_featurestores(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, featurestore.Featurestore) - for i in results) - -def test_list_featurestores_pages(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - featurestore.Featurestore(), - featurestore.Featurestore(), - ], - next_page_token='abc', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[], - next_page_token='def', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - ], - next_page_token='ghi', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - featurestore.Featurestore(), - ], - ), - RuntimeError, - ) - pages = list(client.list_featurestores(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_featurestores_async_pager(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - featurestore.Featurestore(), - featurestore.Featurestore(), - ], - next_page_token='abc', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[], - next_page_token='def', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - ], - next_page_token='ghi', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - featurestore.Featurestore(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_featurestores(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, featurestore.Featurestore) - for i in responses) - -@pytest.mark.asyncio -async def test_list_featurestores_async_pages(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - featurestore.Featurestore(), - featurestore.Featurestore(), - ], - next_page_token='abc', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[], - next_page_token='def', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - ], - next_page_token='ghi', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - featurestore.Featurestore(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_featurestores(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_featurestore(transport: str = 'grpc', request_type=featurestore_service.UpdateFeaturestoreRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.update_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.UpdateFeaturestoreRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_update_featurestore_from_dict(): - test_update_featurestore(request_type=dict) - - -def test_update_featurestore_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_featurestore), - '__call__') as call: - client.update_featurestore() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.UpdateFeaturestoreRequest() - - -@pytest.mark.asyncio -async def test_update_featurestore_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.UpdateFeaturestoreRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.update_featurestore(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.UpdateFeaturestoreRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_update_featurestore_async_from_dict(): - await test_update_featurestore_async(request_type=dict) - - -def test_update_featurestore_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.UpdateFeaturestoreRequest() - - request.featurestore.name = 'featurestore.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_featurestore), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.update_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'featurestore.name=featurestore.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_featurestore_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.UpdateFeaturestoreRequest() - - request.featurestore.name = 'featurestore.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_featurestore), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.update_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'featurestore.name=featurestore.name/value', - ) in kw['metadata'] - - -def test_update_featurestore_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_featurestore( - featurestore=gca_featurestore.Featurestore(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].featurestore == gca_featurestore.Featurestore(name='name_value') - assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) - - -def test_update_featurestore_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_featurestore( - featurestore_service.UpdateFeaturestoreRequest(), - featurestore=gca_featurestore.Featurestore(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_featurestore_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_featurestore( - featurestore=gca_featurestore.Featurestore(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].featurestore == gca_featurestore.Featurestore(name='name_value') - assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) - - -@pytest.mark.asyncio -async def test_update_featurestore_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_featurestore( - featurestore_service.UpdateFeaturestoreRequest(), - featurestore=gca_featurestore.Featurestore(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_featurestore(transport: str = 'grpc', request_type=featurestore_service.DeleteFeaturestoreRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.DeleteFeaturestoreRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_delete_featurestore_from_dict(): - test_delete_featurestore(request_type=dict) - - -def test_delete_featurestore_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_featurestore), - '__call__') as call: - client.delete_featurestore() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.DeleteFeaturestoreRequest() - - -@pytest.mark.asyncio -async def test_delete_featurestore_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.DeleteFeaturestoreRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.DeleteFeaturestoreRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_featurestore_async_from_dict(): - await test_delete_featurestore_async(request_type=dict) - - -def test_delete_featurestore_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.DeleteFeaturestoreRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_featurestore), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_featurestore_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.DeleteFeaturestoreRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(
- type(client.transport.delete_featurestore),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
- await client.delete_featurestore(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-def test_delete_featurestore_flattened():
- client = FeaturestoreServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_featurestore),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = operations_pb2.Operation(name='operations/op')
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.delete_featurestore(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-def test_delete_featurestore_flattened_error():
- client = FeaturestoreServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.delete_featurestore(
- featurestore_service.DeleteFeaturestoreRequest(),
- name='name_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_delete_featurestore_flattened_async():
- client = FeaturestoreServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_featurestore),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- operations_pb2.Operation(name='operations/spam')
- )
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.delete_featurestore(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_featurestore_flattened_error_async():
- client = FeaturestoreServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.delete_featurestore(
- featurestore_service.DeleteFeaturestoreRequest(),
- name='name_value',
- )
-
-
-def test_create_entity_type(transport: str = 'grpc', request_type=featurestore_service.CreateEntityTypeRequest):
- client = FeaturestoreServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.CreateEntityTypeRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_entity_type_from_dict(): - test_create_entity_type(request_type=dict) - - -def test_create_entity_type_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_entity_type), - '__call__') as call: - client.create_entity_type() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.CreateEntityTypeRequest() - - -@pytest.mark.asyncio -async def test_create_entity_type_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.CreateEntityTypeRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.CreateEntityTypeRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_entity_type_async_from_dict(): - await test_create_entity_type_async(request_type=dict) - - -def test_create_entity_type_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.CreateEntityTypeRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_entity_type), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
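- # The routing header travels as gRPC metadata, e.g.
- # ('x-goog-request-params', 'parent=parent/value'), so the backend can
- # route the request before parsing the body.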
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_create_entity_type_field_headers_async():
- client = FeaturestoreServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = featurestore_service.CreateEntityTypeRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_entity_type),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
- await client.create_entity_type(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-def test_create_entity_type_flattened():
- client = FeaturestoreServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_entity_type),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = operations_pb2.Operation(name='operations/op')
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.create_entity_type(
- parent='parent_value',
- entity_type=gca_entity_type.EntityType(name='name_value'),
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
- assert args[0].entity_type == gca_entity_type.EntityType(name='name_value')
-
-
-def test_create_entity_type_flattened_error():
- client = FeaturestoreServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.create_entity_type(
- featurestore_service.CreateEntityTypeRequest(),
- parent='parent_value',
- entity_type=gca_entity_type.EntityType(name='name_value'),
- )
-
-
-@pytest.mark.asyncio
-async def test_create_entity_type_flattened_async():
- client = FeaturestoreServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_entity_type),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- operations_pb2.Operation(name='operations/spam')
- )
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.create_entity_type(
- parent='parent_value',
- entity_type=gca_entity_type.EntityType(name='name_value'),
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].entity_type == gca_entity_type.EntityType(name='name_value') - - -@pytest.mark.asyncio -async def test_create_entity_type_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_entity_type( - featurestore_service.CreateEntityTypeRequest(), - parent='parent_value', - entity_type=gca_entity_type.EntityType(name='name_value'), - ) - - -def test_get_entity_type(transport: str = 'grpc', request_type=featurestore_service.GetEntityTypeRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = entity_type.EntityType( - name='name_value', - description='description_value', - etag='etag_value', - ) - response = client.get_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.GetEntityTypeRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, entity_type.EntityType) - assert response.name == 'name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - - -def test_get_entity_type_from_dict(): - test_get_entity_type(request_type=dict) - - -def test_get_entity_type_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_entity_type), - '__call__') as call: - client.get_entity_type() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.GetEntityTypeRequest() - - -@pytest.mark.asyncio -async def test_get_entity_type_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.GetEntityTypeRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. 
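- # The async client awaits the stub call, so the canned response is
- # wrapped in grpc_helpers_async.FakeUnaryUnaryCall, an api_core test
- # helper that can be awaited like a real unary-unary RPC.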
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(entity_type.EntityType(
- name='name_value',
- description='description_value',
- etag='etag_value',
- ))
- response = await client.get_entity_type(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == featurestore_service.GetEntityTypeRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, entity_type.EntityType)
- assert response.name == 'name_value'
- assert response.description == 'description_value'
- assert response.etag == 'etag_value'
-
-
-@pytest.mark.asyncio
-async def test_get_entity_type_async_from_dict():
- await test_get_entity_type_async(request_type=dict)
-
-
-def test_get_entity_type_field_headers():
- client = FeaturestoreServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = featurestore_service.GetEntityTypeRequest()
-
- request.name = 'name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_entity_type),
- '__call__') as call:
- call.return_value = entity_type.EntityType()
- client.get_entity_type(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_get_entity_type_field_headers_async():
- client = FeaturestoreServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = featurestore_service.GetEntityTypeRequest()
-
- request.name = 'name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_entity_type),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(entity_type.EntityType())
- await client.get_entity_type(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-def test_get_entity_type_flattened():
- client = FeaturestoreServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_entity_type),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = entity_type.EntityType()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.get_entity_type(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-def test_get_entity_type_flattened_error():
- client = FeaturestoreServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.get_entity_type(
- featurestore_service.GetEntityTypeRequest(),
- name='name_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_get_entity_type_flattened_async():
- client = FeaturestoreServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_entity_type),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(entity_type.EntityType())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.get_entity_type(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_get_entity_type_flattened_error_async():
- client = FeaturestoreServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.get_entity_type(
- featurestore_service.GetEntityTypeRequest(),
- name='name_value',
- )
-
-
-def test_list_entity_types(transport: str = 'grpc', request_type=featurestore_service.ListEntityTypesRequest):
- client = FeaturestoreServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_entity_types),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = featurestore_service.ListEntityTypesResponse(
- next_page_token='next_page_token_value',
- )
- response = client.list_entity_types(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == featurestore_service.ListEntityTypesRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ListEntityTypesPager)
- assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_entity_types_from_dict():
- test_list_entity_types(request_type=dict)
-
-
-def test_list_entity_types_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = FeaturestoreServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_entity_types),
- '__call__') as call:
- client.list_entity_types()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == featurestore_service.ListEntityTypesRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_entity_types_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ListEntityTypesRequest):
- client = FeaturestoreServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_entity_types),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListEntityTypesResponse(
- next_page_token='next_page_token_value',
- ))
- response = await client.list_entity_types(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == featurestore_service.ListEntityTypesRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ListEntityTypesAsyncPager)
- assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_entity_types_async_from_dict():
- await test_list_entity_types_async(request_type=dict)
-
-
-def test_list_entity_types_field_headers():
- client = FeaturestoreServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = featurestore_service.ListEntityTypesRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_entity_types),
- '__call__') as call:
- call.return_value = featurestore_service.ListEntityTypesResponse()
- client.list_entity_types(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_entity_types_field_headers_async():
- client = FeaturestoreServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = featurestore_service.ListEntityTypesRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_entity_types),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListEntityTypesResponse())
- await client.list_entity_types(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-def test_list_entity_types_flattened():
- client = FeaturestoreServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_entity_types),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = featurestore_service.ListEntityTypesResponse()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.list_entity_types(
- parent='parent_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
-
-
-def test_list_entity_types_flattened_error():
- client = FeaturestoreServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.list_entity_types(
- featurestore_service.ListEntityTypesRequest(),
- parent='parent_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_list_entity_types_flattened_async():
- client = FeaturestoreServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_entity_types),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListEntityTypesResponse())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.list_entity_types(
- parent='parent_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_entity_types_flattened_error_async():
- client = FeaturestoreServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.list_entity_types(
- featurestore_service.ListEntityTypesRequest(),
- parent='parent_value',
- )
-
-
-def test_list_entity_types_pager():
- client = FeaturestoreServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_entity_types),
- '__call__') as call:
- # Set the response to a series of pages.
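- # Four pages (3, 0, 1, and 2 items): the first three carry page tokens,
- # the last does not, and the trailing RuntimeError would only surface
- # if the pager fetched past the final page.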
- call.side_effect = (
- featurestore_service.ListEntityTypesResponse(
- entity_types=[
- entity_type.EntityType(),
- entity_type.EntityType(),
- entity_type.EntityType(),
- ],
- next_page_token='abc',
- ),
- featurestore_service.ListEntityTypesResponse(
- entity_types=[],
- next_page_token='def',
- ),
- featurestore_service.ListEntityTypesResponse(
- entity_types=[
- entity_type.EntityType(),
- ],
- next_page_token='ghi',
- ),
- featurestore_service.ListEntityTypesResponse(
- entity_types=[
- entity_type.EntityType(),
- entity_type.EntityType(),
- ],
- ),
- RuntimeError,
- )
-
- metadata = (
- gapic_v1.routing_header.to_grpc_metadata((
- ('parent', ''),
- )),
- )
- pager = client.list_entity_types(request={})
-
- assert pager._metadata == metadata
-
- results = [i for i in pager]
- assert len(results) == 6
- assert all(isinstance(i, entity_type.EntityType)
- for i in results)
-
-def test_list_entity_types_pages():
- client = FeaturestoreServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_entity_types),
- '__call__') as call:
- # Set the response to a series of pages.
- call.side_effect = (
- featurestore_service.ListEntityTypesResponse(
- entity_types=[
- entity_type.EntityType(),
- entity_type.EntityType(),
- entity_type.EntityType(),
- ],
- next_page_token='abc',
- ),
- featurestore_service.ListEntityTypesResponse(
- entity_types=[],
- next_page_token='def',
- ),
- featurestore_service.ListEntityTypesResponse(
- entity_types=[
- entity_type.EntityType(),
- ],
- next_page_token='ghi',
- ),
- featurestore_service.ListEntityTypesResponse(
- entity_types=[
- entity_type.EntityType(),
- entity_type.EntityType(),
- ],
- ),
- RuntimeError,
- )
- pages = list(client.list_entity_types(request={}).pages)
- for page_, token in zip(pages, ['abc','def','ghi', '']):
- assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_entity_types_async_pager():
- client = FeaturestoreServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_entity_types),
- '__call__', new_callable=mock.AsyncMock) as call:
- # Set the response to a series of pages.
- call.side_effect = (
- featurestore_service.ListEntityTypesResponse(
- entity_types=[
- entity_type.EntityType(),
- entity_type.EntityType(),
- entity_type.EntityType(),
- ],
- next_page_token='abc',
- ),
- featurestore_service.ListEntityTypesResponse(
- entity_types=[],
- next_page_token='def',
- ),
- featurestore_service.ListEntityTypesResponse(
- entity_types=[
- entity_type.EntityType(),
- ],
- next_page_token='ghi',
- ),
- featurestore_service.ListEntityTypesResponse(
- entity_types=[
- entity_type.EntityType(),
- entity_type.EntityType(),
- ],
- ),
- RuntimeError,
- )
- async_pager = await client.list_entity_types(request={},)
- assert async_pager.next_page_token == 'abc'
- responses = []
- async for response in async_pager:
- responses.append(response)
-
- assert len(responses) == 6
- assert all(isinstance(i, entity_type.EntityType)
- for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_entity_types_async_pages():
- client = FeaturestoreServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object( - type(client.transport.list_entity_types), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - featurestore_service.ListEntityTypesResponse( - entity_types=[ - entity_type.EntityType(), - entity_type.EntityType(), - entity_type.EntityType(), - ], - next_page_token='abc', - ), - featurestore_service.ListEntityTypesResponse( - entity_types=[], - next_page_token='def', - ), - featurestore_service.ListEntityTypesResponse( - entity_types=[ - entity_type.EntityType(), - ], - next_page_token='ghi', - ), - featurestore_service.ListEntityTypesResponse( - entity_types=[ - entity_type.EntityType(), - entity_type.EntityType(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_entity_types(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_entity_type(transport: str = 'grpc', request_type=featurestore_service.UpdateEntityTypeRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_entity_type.EntityType( - name='name_value', - description='description_value', - etag='etag_value', - ) - response = client.update_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.UpdateEntityTypeRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_entity_type.EntityType) - assert response.name == 'name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - - -def test_update_entity_type_from_dict(): - test_update_entity_type(request_type=dict) - - -def test_update_entity_type_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_entity_type), - '__call__') as call: - client.update_entity_type() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.UpdateEntityTypeRequest() - - -@pytest.mark.asyncio -async def test_update_entity_type_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.UpdateEntityTypeRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(
- type(client.transport.update_entity_type),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_entity_type.EntityType(
- name='name_value',
- description='description_value',
- etag='etag_value',
- ))
- response = await client.update_entity_type(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == featurestore_service.UpdateEntityTypeRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, gca_entity_type.EntityType)
- assert response.name == 'name_value'
- assert response.description == 'description_value'
- assert response.etag == 'etag_value'
-
-
-@pytest.mark.asyncio
-async def test_update_entity_type_async_from_dict():
- await test_update_entity_type_async(request_type=dict)
-
-
-def test_update_entity_type_field_headers():
- client = FeaturestoreServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = featurestore_service.UpdateEntityTypeRequest()
-
- request.entity_type.name = 'entity_type.name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.update_entity_type),
- '__call__') as call:
- call.return_value = gca_entity_type.EntityType()
- client.update_entity_type(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'entity_type.name=entity_type.name/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_update_entity_type_field_headers_async():
- client = FeaturestoreServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = featurestore_service.UpdateEntityTypeRequest()
-
- request.entity_type.name = 'entity_type.name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.update_entity_type),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_entity_type.EntityType())
- await client.update_entity_type(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'entity_type.name=entity_type.name/value',
- ) in kw['metadata']
-
-
-def test_update_entity_type_flattened():
- client = FeaturestoreServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.update_entity_type),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = gca_entity_type.EntityType()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
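- # The flattened kwargs are merged into a request message by the client;
- # combining them with an explicit request object is ambiguous, which is
- # why the _flattened_error tests below expect a ValueError.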
- client.update_entity_type(
- entity_type=gca_entity_type.EntityType(name='name_value'),
- update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].entity_type == gca_entity_type.EntityType(name='name_value')
- assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
-
-
-def test_update_entity_type_flattened_error():
- client = FeaturestoreServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.update_entity_type(
- featurestore_service.UpdateEntityTypeRequest(),
- entity_type=gca_entity_type.EntityType(name='name_value'),
- update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
- )
-
-
-@pytest.mark.asyncio
-async def test_update_entity_type_flattened_async():
- client = FeaturestoreServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.update_entity_type),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_entity_type.EntityType())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.update_entity_type(
- entity_type=gca_entity_type.EntityType(name='name_value'),
- update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].entity_type == gca_entity_type.EntityType(name='name_value')
- assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
-
-
-@pytest.mark.asyncio
-async def test_update_entity_type_flattened_error_async():
- client = FeaturestoreServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.update_entity_type(
- featurestore_service.UpdateEntityTypeRequest(),
- entity_type=gca_entity_type.EntityType(name='name_value'),
- update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
- )
-
-
-def test_delete_entity_type(transport: str = 'grpc', request_type=featurestore_service.DeleteEntityTypeRequest):
- client = FeaturestoreServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_entity_type),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = operations_pb2.Operation(name='operations/spam')
- response = client.delete_entity_type(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.DeleteEntityTypeRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_entity_type_from_dict(): - test_delete_entity_type(request_type=dict) - - -def test_delete_entity_type_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_entity_type), - '__call__') as call: - client.delete_entity_type() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.DeleteEntityTypeRequest() - - -@pytest.mark.asyncio -async def test_delete_entity_type_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.DeleteEntityTypeRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.DeleteEntityTypeRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_entity_type_async_from_dict(): - await test_delete_entity_type_async(request_type=dict) - - -def test_delete_entity_type_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.DeleteEntityTypeRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_entity_type), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_entity_type_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = featurestore_service.DeleteEntityTypeRequest()
-
- request.name = 'name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_entity_type),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
- await client.delete_entity_type(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-def test_delete_entity_type_flattened():
- client = FeaturestoreServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_entity_type),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = operations_pb2.Operation(name='operations/op')
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.delete_entity_type(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-def test_delete_entity_type_flattened_error():
- client = FeaturestoreServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.delete_entity_type(
- featurestore_service.DeleteEntityTypeRequest(),
- name='name_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_delete_entity_type_flattened_async():
- client = FeaturestoreServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_entity_type),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- operations_pb2.Operation(name='operations/spam')
- )
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.delete_entity_type(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_entity_type_flattened_error_async():
- client = FeaturestoreServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError): - await client.delete_entity_type( - featurestore_service.DeleteEntityTypeRequest(), - name='name_value', - ) - - -def test_create_feature(transport: str = 'grpc', request_type=featurestore_service.CreateFeatureRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.CreateFeatureRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_feature_from_dict(): - test_create_feature(request_type=dict) - - -def test_create_feature_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_feature), - '__call__') as call: - client.create_feature() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.CreateFeatureRequest() - - -@pytest.mark.asyncio -async def test_create_feature_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.CreateFeatureRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.CreateFeatureRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_feature_async_from_dict(): - await test_create_feature_async(request_type=dict) - - -def test_create_feature_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = featurestore_service.CreateFeatureRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_feature), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_feature_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.CreateFeatureRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_feature), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_feature_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_feature( - parent='parent_value', - feature=gca_feature.Feature(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].feature == gca_feature.Feature(name='name_value') - - -def test_create_feature_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_feature( - featurestore_service.CreateFeatureRequest(), - parent='parent_value', - feature=gca_feature.Feature(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_feature_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_feature), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- operations_pb2.Operation(name='operations/spam')
- )
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.create_feature(
- parent='parent_value',
- feature=gca_feature.Feature(name='name_value'),
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
- assert args[0].feature == gca_feature.Feature(name='name_value')
-
-
-@pytest.mark.asyncio
-async def test_create_feature_flattened_error_async():
- client = FeaturestoreServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.create_feature(
- featurestore_service.CreateFeatureRequest(),
- parent='parent_value',
- feature=gca_feature.Feature(name='name_value'),
- )
-
-
-def test_batch_create_features(transport: str = 'grpc', request_type=featurestore_service.BatchCreateFeaturesRequest):
- client = FeaturestoreServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.batch_create_features),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = operations_pb2.Operation(name='operations/spam')
- response = client.batch_create_features(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == featurestore_service.BatchCreateFeaturesRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, future.Future)
-
-
-def test_batch_create_features_from_dict():
- test_batch_create_features(request_type=dict)
-
-
-def test_batch_create_features_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = FeaturestoreServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.batch_create_features),
- '__call__') as call:
- client.batch_create_features()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == featurestore_service.BatchCreateFeaturesRequest()
-
-
-@pytest.mark.asyncio
-async def test_batch_create_features_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.BatchCreateFeaturesRequest):
- client = FeaturestoreServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object( - type(client.transport.batch_create_features), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.batch_create_features(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.BatchCreateFeaturesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_batch_create_features_async_from_dict(): - await test_batch_create_features_async(request_type=dict) - - -def test_batch_create_features_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.BatchCreateFeaturesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_features), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.batch_create_features(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_batch_create_features_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.BatchCreateFeaturesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_features), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.batch_create_features(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_batch_create_features_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_features), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.batch_create_features( - parent='parent_value', - requests=[featurestore_service.CreateFeatureRequest(parent='parent_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].requests == [featurestore_service.CreateFeatureRequest(parent='parent_value')] - - -def test_batch_create_features_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.batch_create_features( - featurestore_service.BatchCreateFeaturesRequest(), - parent='parent_value', - requests=[featurestore_service.CreateFeatureRequest(parent='parent_value')], - ) - - -@pytest.mark.asyncio -async def test_batch_create_features_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_features), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.batch_create_features( - parent='parent_value', - requests=[featurestore_service.CreateFeatureRequest(parent='parent_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].requests == [featurestore_service.CreateFeatureRequest(parent='parent_value')] - - -@pytest.mark.asyncio -async def test_batch_create_features_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.batch_create_features( - featurestore_service.BatchCreateFeaturesRequest(), - parent='parent_value', - requests=[featurestore_service.CreateFeatureRequest(parent='parent_value')], - ) - - -def test_get_feature(transport: str = 'grpc', request_type=featurestore_service.GetFeatureRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = feature.Feature( - name='name_value', - description='description_value', - value_type=feature.Feature.ValueType.BOOL, - etag='etag_value', - ) - response = client.get_feature(request) - - # Establish that the underlying gRPC stub method was called. 
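-        # (Outside of this mocked transport the same call is just
-        # `response = client.get_feature(request)`, or in flattened form
-        # `client.get_feature(name='projects/.../features/my_feature')`
-        # with a hypothetical resource name; it returns the feature.Feature
-        # asserted on below.)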
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == featurestore_service.GetFeatureRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, feature.Feature)
-    assert response.name == 'name_value'
-    assert response.description == 'description_value'
-    assert response.value_type == feature.Feature.ValueType.BOOL
-    assert response.etag == 'etag_value'
-
-
-def test_get_feature_from_dict():
-    test_get_feature(request_type=dict)
-
-
-def test_get_feature_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = FeaturestoreServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_feature),
-            '__call__') as call:
-        client.get_feature()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == featurestore_service.GetFeatureRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_feature_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.GetFeatureRequest):
-    client = FeaturestoreServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_feature),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(feature.Feature(
-            name='name_value',
-            description='description_value',
-            value_type=feature.Feature.ValueType.BOOL,
-            etag='etag_value',
-        ))
-        response = await client.get_feature(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == featurestore_service.GetFeatureRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, feature.Feature)
-    assert response.name == 'name_value'
-    assert response.description == 'description_value'
-    assert response.value_type == feature.Feature.ValueType.BOOL
-    assert response.etag == 'etag_value'
-
-
-@pytest.mark.asyncio
-async def test_get_feature_async_from_dict():
-    await test_get_feature_async(request_type=dict)
-
-
-def test_get_feature_field_headers():
-    client = FeaturestoreServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = featurestore_service.GetFeatureRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_feature),
-            '__call__') as call:
-        call.return_value = feature.Feature()
-        client.get_feature(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_feature_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.GetFeatureRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_feature), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(feature.Feature()) - await client.get_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_feature_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = feature.Feature() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_feature( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_get_feature_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_feature( - featurestore_service.GetFeatureRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_feature_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = feature.Feature() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(feature.Feature()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_feature( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_get_feature_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
-    with pytest.raises(ValueError):
-        await client.get_feature(
-            featurestore_service.GetFeatureRequest(),
-            name='name_value',
-        )
-
-
-def test_list_features(transport: str = 'grpc', request_type=featurestore_service.ListFeaturesRequest):
-    client = FeaturestoreServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_features),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = featurestore_service.ListFeaturesResponse(
-            next_page_token='next_page_token_value',
-        )
-        response = client.list_features(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == featurestore_service.ListFeaturesRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListFeaturesPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_features_from_dict():
-    test_list_features(request_type=dict)
-
-
-def test_list_features_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = FeaturestoreServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_features),
-            '__call__') as call:
-        client.list_features()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == featurestore_service.ListFeaturesRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_features_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ListFeaturesRequest):
-    client = FeaturestoreServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_features),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturesResponse(
-            next_page_token='next_page_token_value',
-        ))
-        response = await client.list_features(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == featurestore_service.ListFeaturesRequest()
-
-    # Establish that the response is the type that we expect.
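-    # (In application code the async pager hides the page plumbing; a
-    # hedged sketch with a hypothetical parent resource:
-    #
-    #     async for f in await client.list_features(parent='projects/p/locations/l/featurestores/fs/entityTypes/et'):
-    #         print(f.name)
-    #
-    # Each yielded item is a feature.Feature, as asserted below.)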
- assert isinstance(response, pagers.ListFeaturesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_features_async_from_dict(): - await test_list_features_async(request_type=dict) - - -def test_list_features_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.ListFeaturesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_features), - '__call__') as call: - call.return_value = featurestore_service.ListFeaturesResponse() - client.list_features(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_features_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.ListFeaturesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_features), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturesResponse()) - await client.list_features(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_features_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_features), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_service.ListFeaturesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_features( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -def test_list_features_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
-    with pytest.raises(ValueError):
-        client.list_features(
-            featurestore_service.ListFeaturesRequest(),
-            parent='parent_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_list_features_flattened_async():
-    client = FeaturestoreServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_features),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = featurestore_service.ListFeaturesResponse()
-
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturesResponse())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.list_features(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_features_flattened_error_async():
-    client = FeaturestoreServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.list_features(
-            featurestore_service.ListFeaturesRequest(),
-            parent='parent_value',
-        )
-
-
-def test_list_features_pager():
-    client = FeaturestoreServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_features),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            featurestore_service.ListFeaturesResponse(
-                features=[
-                    feature.Feature(),
-                    feature.Feature(),
-                    feature.Feature(),
-                ],
-                next_page_token='abc',
-            ),
-            featurestore_service.ListFeaturesResponse(
-                features=[],
-                next_page_token='def',
-            ),
-            featurestore_service.ListFeaturesResponse(
-                features=[
-                    feature.Feature(),
-                ],
-                next_page_token='ghi',
-            ),
-            featurestore_service.ListFeaturesResponse(
-                features=[
-                    feature.Feature(),
-                    feature.Feature(),
-                ],
-            ),
-            RuntimeError,
-        )
-
-        metadata = ()
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ('parent', ''),
-            )),
-        )
-        pager = client.list_features(request={})
-
-        assert pager._metadata == metadata
-
-        results = [i for i in pager]
-        assert len(results) == 6
-        assert all(isinstance(i, feature.Feature)
-                   for i in results)
-
-def test_list_features_pages():
-    client = FeaturestoreServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_features),
-            '__call__') as call:
-        # Set the response to a series of pages.
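-        # (The side_effect below simulates four pages, with a trailing
-        # RuntimeError to guard against extra stub calls; real page-level
-        # access -- hypothetical parent, sketch only -- looks like:
-        #
-        #     for page in client.list_features(parent='projects/p/locations/l/featurestores/fs/entityTypes/et').pages:
-        #         token = page.raw_page.next_page_token
-        # )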
-        call.side_effect = (
-            featurestore_service.ListFeaturesResponse(
-                features=[
-                    feature.Feature(),
-                    feature.Feature(),
-                    feature.Feature(),
-                ],
-                next_page_token='abc',
-            ),
-            featurestore_service.ListFeaturesResponse(
-                features=[],
-                next_page_token='def',
-            ),
-            featurestore_service.ListFeaturesResponse(
-                features=[
-                    feature.Feature(),
-                ],
-                next_page_token='ghi',
-            ),
-            featurestore_service.ListFeaturesResponse(
-                features=[
-                    feature.Feature(),
-                    feature.Feature(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = list(client.list_features(request={}).pages)
-        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_features_async_pager():
-    client = FeaturestoreServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_features),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            featurestore_service.ListFeaturesResponse(
-                features=[
-                    feature.Feature(),
-                    feature.Feature(),
-                    feature.Feature(),
-                ],
-                next_page_token='abc',
-            ),
-            featurestore_service.ListFeaturesResponse(
-                features=[],
-                next_page_token='def',
-            ),
-            featurestore_service.ListFeaturesResponse(
-                features=[
-                    feature.Feature(),
-                ],
-                next_page_token='ghi',
-            ),
-            featurestore_service.ListFeaturesResponse(
-                features=[
-                    feature.Feature(),
-                    feature.Feature(),
-                ],
-            ),
-            RuntimeError,
-        )
-        async_pager = await client.list_features(request={},)
-        assert async_pager.next_page_token == 'abc'
-        responses = []
-        async for response in async_pager:
-            responses.append(response)
-
-        assert len(responses) == 6
-        assert all(isinstance(i, feature.Feature)
-                   for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_features_async_pages():
-    client = FeaturestoreServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_features),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            featurestore_service.ListFeaturesResponse(
-                features=[
-                    feature.Feature(),
-                    feature.Feature(),
-                    feature.Feature(),
-                ],
-                next_page_token='abc',
-            ),
-            featurestore_service.ListFeaturesResponse(
-                features=[],
-                next_page_token='def',
-            ),
-            featurestore_service.ListFeaturesResponse(
-                features=[
-                    feature.Feature(),
-                ],
-                next_page_token='ghi',
-            ),
-            featurestore_service.ListFeaturesResponse(
-                features=[
-                    feature.Feature(),
-                    feature.Feature(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = []
-        async for page_ in (await client.list_features(request={})).pages:
-            pages.append(page_)
-        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-def test_update_feature(transport: str = 'grpc', request_type=featurestore_service.UpdateFeatureRequest):
-    client = FeaturestoreServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_feature),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = gca_feature.Feature(
-            name='name_value',
-            description='description_value',
-            value_type=gca_feature.Feature.ValueType.BOOL,
-            etag='etag_value',
-        )
-        response = client.update_feature(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == featurestore_service.UpdateFeatureRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, gca_feature.Feature)
-    assert response.name == 'name_value'
-    assert response.description == 'description_value'
-    assert response.value_type == gca_feature.Feature.ValueType.BOOL
-    assert response.etag == 'etag_value'
-
-
-def test_update_feature_from_dict():
-    test_update_feature(request_type=dict)
-
-
-def test_update_feature_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = FeaturestoreServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_feature),
-            '__call__') as call:
-        client.update_feature()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == featurestore_service.UpdateFeatureRequest()
-
-
-@pytest.mark.asyncio
-async def test_update_feature_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.UpdateFeatureRequest):
-    client = FeaturestoreServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_feature),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_feature.Feature(
-            name='name_value',
-            description='description_value',
-            value_type=gca_feature.Feature.ValueType.BOOL,
-            etag='etag_value',
-        ))
-        response = await client.update_feature(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == featurestore_service.UpdateFeatureRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, gca_feature.Feature)
-    assert response.name == 'name_value'
-    assert response.description == 'description_value'
-    assert response.value_type == gca_feature.Feature.ValueType.BOOL
-    assert response.etag == 'etag_value'
-
-
-@pytest.mark.asyncio
-async def test_update_feature_async_from_dict():
-    await test_update_feature_async(request_type=dict)
-
-
-def test_update_feature_field_headers():
-    client = FeaturestoreServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
- request = featurestore_service.UpdateFeatureRequest() - - request.feature.name = 'feature.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_feature), - '__call__') as call: - call.return_value = gca_feature.Feature() - client.update_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'feature.name=feature.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_feature_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.UpdateFeatureRequest() - - request.feature.name = 'feature.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_feature), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_feature.Feature()) - await client.update_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'feature.name=feature.name/value', - ) in kw['metadata'] - - -def test_update_feature_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_feature.Feature() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_feature( - feature=gca_feature.Feature(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].feature == gca_feature.Feature(name='name_value') - assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) - - -def test_update_feature_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_feature( - featurestore_service.UpdateFeatureRequest(), - feature=gca_feature.Feature(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_feature_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_feature), - '__call__') as call: - # Designate an appropriate return value for the call. 
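-        # (In real usage the update_mask limits which Feature fields are
-        # written; a hedged sketch with hypothetical values:
-        #
-        #     updated = client.update_feature(
-        #         feature=gca_feature.Feature(name='projects/.../features/f', description='new text'),
-        #         update_mask=field_mask_pb2.FieldMask(paths=['description']),
-        #     )
-        # )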
- call.return_value = gca_feature.Feature() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_feature.Feature()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_feature( - feature=gca_feature.Feature(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].feature == gca_feature.Feature(name='name_value') - assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) - - -@pytest.mark.asyncio -async def test_update_feature_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_feature( - featurestore_service.UpdateFeatureRequest(), - feature=gca_feature.Feature(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_feature(transport: str = 'grpc', request_type=featurestore_service.DeleteFeatureRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.DeleteFeatureRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_feature_from_dict(): - test_delete_feature(request_type=dict) - - -def test_delete_feature_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_feature), - '__call__') as call: - client.delete_feature() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.DeleteFeatureRequest() - - -@pytest.mark.asyncio -async def test_delete_feature_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.DeleteFeatureRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.DeleteFeatureRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_feature_async_from_dict(): - await test_delete_feature_async(request_type=dict) - - -def test_delete_feature_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.DeleteFeatureRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_feature), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_feature_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.DeleteFeatureRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_feature), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_feature_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_feature( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_delete_feature_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_feature( - featurestore_service.DeleteFeatureRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_feature_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_feature( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_delete_feature_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_feature( - featurestore_service.DeleteFeatureRequest(), - name='name_value', - ) - - -def test_import_feature_values(transport: str = 'grpc', request_type=featurestore_service.ImportFeatureValuesRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.import_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ImportFeatureValuesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_import_feature_values_from_dict(): - test_import_feature_values(request_type=dict) - - -def test_import_feature_values_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
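-    # (import_feature_values is a long-running operation keyed on the
-    # entity_type resource; a hedged, non-test sketch -- hypothetical name,
-    # data-source fields of the request elided:
-    #
-    #     lro = client.import_feature_values(entity_type='projects/p/locations/l/featurestores/fs/entityTypes/et')
-    #     lro.result()  # blocks until the import finishes
-    # )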
- with mock.patch.object( - type(client.transport.import_feature_values), - '__call__') as call: - client.import_feature_values() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ImportFeatureValuesRequest() - - -@pytest.mark.asyncio -async def test_import_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ImportFeatureValuesRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.import_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ImportFeatureValuesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_import_feature_values_async_from_dict(): - await test_import_feature_values_async(request_type=dict) - - -def test_import_feature_values_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.ImportFeatureValuesRequest() - - request.entity_type = 'entity_type/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_feature_values), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.import_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'entity_type=entity_type/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_import_feature_values_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.ImportFeatureValuesRequest() - - request.entity_type = 'entity_type/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_feature_values), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.import_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'entity_type=entity_type/value', - ) in kw['metadata'] - - -def test_import_feature_values_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.import_feature_values( - entity_type='entity_type_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].entity_type == 'entity_type_value' - - -def test_import_feature_values_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.import_feature_values( - featurestore_service.ImportFeatureValuesRequest(), - entity_type='entity_type_value', - ) - - -@pytest.mark.asyncio -async def test_import_feature_values_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.import_feature_values( - entity_type='entity_type_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].entity_type == 'entity_type_value' - - -@pytest.mark.asyncio -async def test_import_feature_values_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.import_feature_values( - featurestore_service.ImportFeatureValuesRequest(), - entity_type='entity_type_value', - ) - - -def test_batch_read_feature_values(transport: str = 'grpc', request_type=featurestore_service.BatchReadFeatureValuesRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. 
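-        # (The Operation mocked below stands in for the long-running
-        # operation a real call returns; sketch with a hypothetical
-        # featurestore name, destination and read fields elided:
-        #
-        #     lro = client.batch_read_feature_values(featurestore='projects/p/locations/l/featurestores/fs')
-        #     lro.result()
-        # )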
- call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.batch_read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.BatchReadFeatureValuesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_batch_read_feature_values_from_dict(): - test_batch_read_feature_values(request_type=dict) - - -def test_batch_read_feature_values_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_feature_values), - '__call__') as call: - client.batch_read_feature_values() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.BatchReadFeatureValuesRequest() - - -@pytest.mark.asyncio -async def test_batch_read_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.BatchReadFeatureValuesRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.batch_read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.BatchReadFeatureValuesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_batch_read_feature_values_async_from_dict(): - await test_batch_read_feature_values_async(request_type=dict) - - -def test_batch_read_feature_values_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.BatchReadFeatureValuesRequest() - - request.featurestore = 'featurestore/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_feature_values), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.batch_read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'featurestore=featurestore/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_batch_read_feature_values_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.BatchReadFeatureValuesRequest() - - request.featurestore = 'featurestore/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_feature_values), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.batch_read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'featurestore=featurestore/value', - ) in kw['metadata'] - - -def test_batch_read_feature_values_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.batch_read_feature_values( - featurestore='featurestore_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].featurestore == 'featurestore_value' - - -def test_batch_read_feature_values_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.batch_read_feature_values( - featurestore_service.BatchReadFeatureValuesRequest(), - featurestore='featurestore_value', - ) - - -@pytest.mark.asyncio -async def test_batch_read_feature_values_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.batch_read_feature_values( - featurestore='featurestore_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].featurestore == 'featurestore_value' - - -@pytest.mark.asyncio -async def test_batch_read_feature_values_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.batch_read_feature_values( - featurestore_service.BatchReadFeatureValuesRequest(), - featurestore='featurestore_value', - ) - - -def test_export_feature_values(transport: str = 'grpc', request_type=featurestore_service.ExportFeatureValuesRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.export_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ExportFeatureValuesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_export_feature_values_from_dict(): - test_export_feature_values(request_type=dict) - - -def test_export_feature_values_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_feature_values), - '__call__') as call: - client.export_feature_values() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ExportFeatureValuesRequest() - - -@pytest.mark.asyncio -async def test_export_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ExportFeatureValuesRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.export_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ExportFeatureValuesRequest() - - # Establish that the response is the type that we expect. 
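-    # Export is a long-running operation, so the surfaced result is a
-    # future.Future wrapping the returned operations_pb2.Operation rather
-    # than a plain response message.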
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_export_feature_values_async_from_dict(): - await test_export_feature_values_async(request_type=dict) - - -def test_export_feature_values_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.ExportFeatureValuesRequest() - - request.entity_type = 'entity_type/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_feature_values), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.export_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'entity_type=entity_type/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_export_feature_values_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.ExportFeatureValuesRequest() - - request.entity_type = 'entity_type/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_feature_values), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.export_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'entity_type=entity_type/value', - ) in kw['metadata'] - - -def test_export_feature_values_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.export_feature_values( - entity_type='entity_type_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].entity_type == 'entity_type_value' - - -def test_export_feature_values_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
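-    # Mixing the two calling conventions is ambiguous, so the client is
-    # expected to raise ValueError before any RPC is attempted.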
-    with pytest.raises(ValueError):
-        client.export_feature_values(
-            featurestore_service.ExportFeatureValuesRequest(),
-            entity_type='entity_type_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_export_feature_values_flattened_async():
-    client = FeaturestoreServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.export_feature_values),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.export_feature_values(
-            entity_type='entity_type_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].entity_type == 'entity_type_value'
-
-
-@pytest.mark.asyncio
-async def test_export_feature_values_flattened_error_async():
-    client = FeaturestoreServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.export_feature_values(
-            featurestore_service.ExportFeatureValuesRequest(),
-            entity_type='entity_type_value',
-        )
-
-
-def test_search_features(transport: str = 'grpc', request_type=featurestore_service.SearchFeaturesRequest):
-    client = FeaturestoreServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.search_features),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = featurestore_service.SearchFeaturesResponse(
-            next_page_token='next_page_token_value',
-        )
-        response = client.search_features(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == featurestore_service.SearchFeaturesRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.SearchFeaturesPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-def test_search_features_from_dict():
-    test_search_features(request_type=dict)
-
-
-def test_search_features_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = FeaturestoreServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
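-    # Patching type(...).__call__ swaps out the bound gRPC stub method on the
-    # transport, so the client runs end to end without any network traffic.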
-    with mock.patch.object(
-            type(client.transport.search_features),
-            '__call__') as call:
-        client.search_features()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == featurestore_service.SearchFeaturesRequest()
-
-
-@pytest.mark.asyncio
-async def test_search_features_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.SearchFeaturesRequest):
-    client = FeaturestoreServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.search_features),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.SearchFeaturesResponse(
-            next_page_token='next_page_token_value',
-        ))
-        response = await client.search_features(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == featurestore_service.SearchFeaturesRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.SearchFeaturesAsyncPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_search_features_async_from_dict():
-    await test_search_features_async(request_type=dict)
-
-
-def test_search_features_field_headers():
-    client = FeaturestoreServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = featurestore_service.SearchFeaturesRequest()
-
-    request.location = 'location/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.search_features),
-            '__call__') as call:
-        call.return_value = featurestore_service.SearchFeaturesResponse()
-        client.search_features(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'location=location/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_search_features_field_headers_async():
-    client = FeaturestoreServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = featurestore_service.SearchFeaturesRequest()
-
-    request.location = 'location/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.search_features),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.SearchFeaturesResponse())
-        await client.search_features(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'location=location/value',
-    ) in kw['metadata']
-
-
-def test_search_features_flattened():
-    client = FeaturestoreServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.search_features),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = featurestore_service.SearchFeaturesResponse()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.search_features(
-            location='location_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].location == 'location_value'
-
-
-def test_search_features_flattened_error():
-    client = FeaturestoreServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.search_features(
-            featurestore_service.SearchFeaturesRequest(),
-            location='location_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_search_features_flattened_async():
-    client = FeaturestoreServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.search_features),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.SearchFeaturesResponse())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.search_features(
-            location='location_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].location == 'location_value'
-
-
-@pytest.mark.asyncio
-async def test_search_features_flattened_error_async():
-    client = FeaturestoreServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.search_features(
-            featurestore_service.SearchFeaturesRequest(),
-            location='location_value',
-        )
-
-
-def test_search_features_pager():
-    client = FeaturestoreServiceClient(
-        credentials=ga_credentials.AnonymousCredentials,
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.search_features),
-            '__call__') as call:
-        # Set the response to a series of pages.
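-        # Three pages with next_page_token values 'abc', 'def' and 'ghi',
-        # followed by a final page whose token is unset; the trailing
-        # RuntimeError fails the test if the pager fetches past the last page.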
- call.side_effect = ( - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - feature.Feature(), - ], - next_page_token='abc', - ), - featurestore_service.SearchFeaturesResponse( - features=[], - next_page_token='def', - ), - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - ], - next_page_token='ghi', - ), - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('location', ''), - )), - ) - pager = client.search_features(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, feature.Feature) - for i in results) - -def test_search_features_pages(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_features), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - feature.Feature(), - ], - next_page_token='abc', - ), - featurestore_service.SearchFeaturesResponse( - features=[], - next_page_token='def', - ), - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - ], - next_page_token='ghi', - ), - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - ], - ), - RuntimeError, - ) - pages = list(client.search_features(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_search_features_async_pager(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_features), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - feature.Feature(), - ], - next_page_token='abc', - ), - featurestore_service.SearchFeaturesResponse( - features=[], - next_page_token='def', - ), - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - ], - next_page_token='ghi', - ), - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - ], - ), - RuntimeError, - ) - async_pager = await client.search_features(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, feature.Feature) - for i in responses) - -@pytest.mark.asyncio -async def test_search_features_async_pages(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_features), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
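-        # Same page fixture as the synchronous pager tests above; here the
-        # pages are consumed with `async for` after awaiting the initial call.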
- call.side_effect = ( - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - feature.Feature(), - ], - next_page_token='abc', - ), - featurestore_service.SearchFeaturesResponse( - features=[], - next_page_token='def', - ), - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - ], - next_page_token='ghi', - ), - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.search_features(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.FeaturestoreServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.FeaturestoreServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = FeaturestoreServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.FeaturestoreServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = FeaturestoreServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.FeaturestoreServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = FeaturestoreServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.FeaturestoreServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.FeaturestoreServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.FeaturestoreServiceGrpcTransport, - transports.FeaturestoreServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. 
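-    # No transport argument is given below, so the client should fall back
-    # to FeaturestoreServiceGrpcTransport.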
- client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.FeaturestoreServiceGrpcTransport, - ) - -def test_featurestore_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.FeaturestoreServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_featurestore_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_service.transports.FeaturestoreServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.FeaturestoreServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'create_featurestore', - 'get_featurestore', - 'list_featurestores', - 'update_featurestore', - 'delete_featurestore', - 'create_entity_type', - 'get_entity_type', - 'list_entity_types', - 'update_entity_type', - 'delete_entity_type', - 'create_feature', - 'batch_create_features', - 'get_feature', - 'list_features', - 'update_feature', - 'delete_feature', - 'import_feature_values', - 'batch_read_feature_values', - 'export_feature_values', - 'search_features', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -@requires_google_auth_gte_1_25_0 -def test_featurestore_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_service.transports.FeaturestoreServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.FeaturestoreServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@requires_google_auth_lt_1_25_0 -def test_featurestore_service_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_service.transports.FeaturestoreServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.FeaturestoreServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - ), - quota_project_id="octopus", - ) - - -def test_featurestore_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and 
credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_service.transports.FeaturestoreServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.FeaturestoreServiceTransport() - adc.assert_called_once() - - -@requires_google_auth_gte_1_25_0 -def test_featurestore_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - FeaturestoreServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@requires_google_auth_lt_1_25_0 -def test_featurestore_service_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - FeaturestoreServiceClient() - adc.assert_called_once_with( - scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.FeaturestoreServiceGrpcTransport, - transports.FeaturestoreServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_gte_1_25_0 -def test_featurestore_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.FeaturestoreServiceGrpcTransport, - transports.FeaturestoreServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_featurestore_service_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with(scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.FeaturestoreServiceGrpcTransport, grpc_helpers), - (transports.FeaturestoreServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_featurestore_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
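-    # google.auth.default() implements Application Default Credentials and
-    # returns a (credentials, project_id) tuple; it is mocked here so the
-    # test never touches a real environment.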
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.FeaturestoreServiceGrpcTransport, transports.FeaturestoreServiceGrpcAsyncIOTransport]) -def test_featurestore_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_featurestore_service_host_no_port(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_featurestore_service_host_with_port(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_featurestore_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.FeaturestoreServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_featurestore_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.FeaturestoreServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.FeaturestoreServiceGrpcTransport, transports.FeaturestoreServiceGrpcAsyncIOTransport]) -def test_featurestore_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.FeaturestoreServiceGrpcTransport, transports.FeaturestoreServiceGrpcAsyncIOTransport]) -def test_featurestore_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_featurestore_service_grpc_lro_client(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
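-    # The operations client is what LRO-returning methods use to poll
-    # google.longrunning.Operations until the operation completes.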
- assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_featurestore_service_grpc_lro_async_client(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_entity_type_path(): - project = "squid" - location = "clam" - featurestore = "whelk" - entity_type = "octopus" - expected = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, ) - actual = FeaturestoreServiceClient.entity_type_path(project, location, featurestore, entity_type) - assert expected == actual - - -def test_parse_entity_type_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - "featurestore": "cuttlefish", - "entity_type": "mussel", - } - path = FeaturestoreServiceClient.entity_type_path(**expected) - - # Check that the path construction is reversible. - actual = FeaturestoreServiceClient.parse_entity_type_path(path) - assert expected == actual - -def test_feature_path(): - project = "winkle" - location = "nautilus" - featurestore = "scallop" - entity_type = "abalone" - feature = "squid" - expected = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, feature=feature, ) - actual = FeaturestoreServiceClient.feature_path(project, location, featurestore, entity_type, feature) - assert expected == actual - - -def test_parse_feature_path(): - expected = { - "project": "clam", - "location": "whelk", - "featurestore": "octopus", - "entity_type": "oyster", - "feature": "nudibranch", - } - path = FeaturestoreServiceClient.feature_path(**expected) - - # Check that the path construction is reversible. - actual = FeaturestoreServiceClient.parse_feature_path(path) - assert expected == actual - -def test_featurestore_path(): - project = "cuttlefish" - location = "mussel" - featurestore = "winkle" - expected = "projects/{project}/locations/{location}/featurestores/{featurestore}".format(project=project, location=location, featurestore=featurestore, ) - actual = FeaturestoreServiceClient.featurestore_path(project, location, featurestore) - assert expected == actual - - -def test_parse_featurestore_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "featurestore": "abalone", - } - path = FeaturestoreServiceClient.featurestore_path(**expected) - - # Check that the path construction is reversible. 
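-    # parse_featurestore_path should recover the original keyword arguments
-    # (project, location, featurestore) from the formatted resource name.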
- actual = FeaturestoreServiceClient.parse_featurestore_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "squid" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = FeaturestoreServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "clam", - } - path = FeaturestoreServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = FeaturestoreServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "whelk" - expected = "folders/{folder}".format(folder=folder, ) - actual = FeaturestoreServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "octopus", - } - path = FeaturestoreServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = FeaturestoreServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "oyster" - expected = "organizations/{organization}".format(organization=organization, ) - actual = FeaturestoreServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "nudibranch", - } - path = FeaturestoreServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = FeaturestoreServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "cuttlefish" - expected = "projects/{project}".format(project=project, ) - actual = FeaturestoreServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "mussel", - } - path = FeaturestoreServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = FeaturestoreServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "winkle" - location = "nautilus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = FeaturestoreServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "scallop", - "location": "abalone", - } - path = FeaturestoreServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = FeaturestoreServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.FeaturestoreServiceTransport, '_prep_wrapped_messages') as prep: - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.FeaturestoreServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = FeaturestoreServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py deleted file mode 100644 index d5d3870fb6..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py +++ /dev/null @@ -1,2859 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import IndexEndpointServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import IndexEndpointServiceClient -from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import pagers -from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import transports -from google.cloud.aiplatform_v1beta1.services.index_endpoint_service.transports.base import _GOOGLE_AUTH_VERSION -from google.cloud.aiplatform_v1beta1.types import index_endpoint -from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint -from google.cloud.aiplatform_v1beta1.types import index_endpoint_service -from google.cloud.aiplatform_v1beta1.types import machine_resources -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). -requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert IndexEndpointServiceClient._get_default_mtls_endpoint(None) is None - assert IndexEndpointServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert IndexEndpointServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert IndexEndpointServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert IndexEndpointServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert IndexEndpointServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - IndexEndpointServiceClient, - IndexEndpointServiceAsyncClient, -]) -def test_index_endpoint_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("client_class", [ - IndexEndpointServiceClient, - IndexEndpointServiceAsyncClient, -]) -def test_index_endpoint_service_client_service_account_always_use_jwt(client_class): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.IndexEndpointServiceGrpcTransport, "grpc"), - (transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_index_endpoint_service_client_service_account_always_use_jwt_true(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - -@pytest.mark.parametrize("client_class", [ - IndexEndpointServiceClient, - IndexEndpointServiceAsyncClient, -]) -def test_index_endpoint_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_index_endpoint_service_client_get_transport_class(): - transport = 
IndexEndpointServiceClient.get_transport_class() - available_transports = [ - transports.IndexEndpointServiceGrpcTransport, - ] - assert transport in available_transports - - transport = IndexEndpointServiceClient.get_transport_class("grpc") - assert transport == transports.IndexEndpointServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc"), - (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(IndexEndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexEndpointServiceClient)) -@mock.patch.object(IndexEndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexEndpointServiceAsyncClient)) -def test_index_endpoint_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(IndexEndpointServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(IndexEndpointServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc", "true"), - (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc", "false"), - (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(IndexEndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexEndpointServiceClient)) -@mock.patch.object(IndexEndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexEndpointServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_index_endpoint_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc"), - (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_index_endpoint_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc"), - (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_index_endpoint_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_index_endpoint_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.index_endpoint_service.transports.IndexEndpointServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = IndexEndpointServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_create_index_endpoint(transport: str = 'grpc', request_type=index_endpoint_service.CreateIndexEndpointRequest): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.CreateIndexEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_index_endpoint_from_dict(): - test_create_index_endpoint(request_type=dict) - - -def test_create_index_endpoint_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index_endpoint), - '__call__') as call: - client.create_index_endpoint() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.CreateIndexEndpointRequest() - - -@pytest.mark.asyncio -async def test_create_index_endpoint_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.CreateIndexEndpointRequest): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
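-    # (In the async variants the mocked stub must return an awaitable, so
-    # responses are wrapped in grpc_helpers_async.FakeUnaryUnaryCall below,
-    # mirroring the awaitable call object a real async gRPC stub returns.)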
- with mock.patch.object( - type(client.transport.create_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.CreateIndexEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_index_endpoint_async_from_dict(): - await test_create_index_endpoint_async(request_type=dict) - - -def test_create_index_endpoint_field_headers(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.CreateIndexEndpointRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index_endpoint), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_index_endpoint_field_headers_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.CreateIndexEndpointRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index_endpoint), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_index_endpoint_flattened(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
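-        # ("Flattened" fields let a caller pass parent/index_endpoint as
-        # keyword arguments instead of building a CreateIndexEndpointRequest;
-        # the *_flattened_error tests below confirm that mixing a request
-        # object with flattened keywords raises ValueError.)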
-        client.create_index_endpoint(
-            parent='parent_value',
-            index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-        assert args[0].index_endpoint == gca_index_endpoint.IndexEndpoint(name='name_value')
-
-
-def test_create_index_endpoint_flattened_error():
-    client = IndexEndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.create_index_endpoint(
-            index_endpoint_service.CreateIndexEndpointRequest(),
-            parent='parent_value',
-            index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'),
-        )
-
-
-@pytest.mark.asyncio
-async def test_create_index_endpoint_flattened_async():
-    client = IndexEndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_index_endpoint),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.create_index_endpoint(
-            parent='parent_value',
-            index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-        assert args[0].index_endpoint == gca_index_endpoint.IndexEndpoint(name='name_value')
-
-
-@pytest.mark.asyncio
-async def test_create_index_endpoint_flattened_error_async():
-    client = IndexEndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.create_index_endpoint(
-            index_endpoint_service.CreateIndexEndpointRequest(),
-            parent='parent_value',
-            index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'),
-        )
-
-
-def test_get_index_endpoint(transport: str = 'grpc', request_type=index_endpoint_service.GetIndexEndpointRequest):
-    client = IndexEndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_index_endpoint),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = index_endpoint.IndexEndpoint(
-            name='name_value',
-            display_name='display_name_value',
-            description='description_value',
-            etag='etag_value',
-            network='network_value',
-        )
-        response = client.get_index_endpoint(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == index_endpoint_service.GetIndexEndpointRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, index_endpoint.IndexEndpoint)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.description == 'description_value'
-    assert response.etag == 'etag_value'
-    assert response.network == 'network_value'
-
-
-def test_get_index_endpoint_from_dict():
-    test_get_index_endpoint(request_type=dict)
-
-
-def test_get_index_endpoint_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = IndexEndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_index_endpoint),
-            '__call__') as call:
-        client.get_index_endpoint()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == index_endpoint_service.GetIndexEndpointRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_index_endpoint_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.GetIndexEndpointRequest):
-    client = IndexEndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_index_endpoint),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint.IndexEndpoint(
-            name='name_value',
-            display_name='display_name_value',
-            description='description_value',
-            etag='etag_value',
-            network='network_value',
-        ))
-        response = await client.get_index_endpoint(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == index_endpoint_service.GetIndexEndpointRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, index_endpoint.IndexEndpoint)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.description == 'description_value'
-    assert response.etag == 'etag_value'
-    assert response.network == 'network_value'
-
-
-@pytest.mark.asyncio
-async def test_get_index_endpoint_async_from_dict():
-    await test_get_index_endpoint_async(request_type=dict)
-
-
-def test_get_index_endpoint_field_headers():
-    client = IndexEndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = index_endpoint_service.GetIndexEndpointRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_index_endpoint),
-            '__call__') as call:
-        call.return_value = index_endpoint.IndexEndpoint()
-        client.get_index_endpoint(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_get_index_endpoint_field_headers_async():
-    client = IndexEndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = index_endpoint_service.GetIndexEndpointRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_index_endpoint),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint.IndexEndpoint())
-        await client.get_index_endpoint(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_get_index_endpoint_flattened():
-    client = IndexEndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_index_endpoint),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = index_endpoint.IndexEndpoint()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.get_index_endpoint(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_get_index_endpoint_flattened_error():
-    client = IndexEndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.get_index_endpoint(
-            index_endpoint_service.GetIndexEndpointRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_get_index_endpoint_flattened_async():
-    client = IndexEndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_index_endpoint),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint.IndexEndpoint())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.get_index_endpoint(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_get_index_endpoint_flattened_error_async():
-    client = IndexEndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.get_index_endpoint(
-            index_endpoint_service.GetIndexEndpointRequest(),
-            name='name_value',
-        )
-
-
-def test_list_index_endpoints(transport: str = 'grpc', request_type=index_endpoint_service.ListIndexEndpointsRequest):
-    client = IndexEndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_index_endpoints),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = index_endpoint_service.ListIndexEndpointsResponse(
-            next_page_token='next_page_token_value',
-        )
-        response = client.list_index_endpoints(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == index_endpoint_service.ListIndexEndpointsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListIndexEndpointsPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_index_endpoints_from_dict():
-    test_list_index_endpoints(request_type=dict)
-
-
-def test_list_index_endpoints_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = IndexEndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_index_endpoints),
-            '__call__') as call:
-        client.list_index_endpoints()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == index_endpoint_service.ListIndexEndpointsRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_index_endpoints_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.ListIndexEndpointsRequest):
-    client = IndexEndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_index_endpoints),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint_service.ListIndexEndpointsResponse(
-            next_page_token='next_page_token_value',
-        ))
-        response = await client.list_index_endpoints(request)
-
-        # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.ListIndexEndpointsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListIndexEndpointsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_index_endpoints_async_from_dict(): - await test_list_index_endpoints_async(request_type=dict) - - -def test_list_index_endpoints_field_headers(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.ListIndexEndpointsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_index_endpoints), - '__call__') as call: - call.return_value = index_endpoint_service.ListIndexEndpointsResponse() - client.list_index_endpoints(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_index_endpoints_field_headers_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.ListIndexEndpointsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_index_endpoints), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint_service.ListIndexEndpointsResponse()) - await client.list_index_endpoints(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_index_endpoints_flattened(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_index_endpoints), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = index_endpoint_service.ListIndexEndpointsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_index_endpoints( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
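-        # (The client builds a ListIndexEndpointsRequest from the flattened
-        # keyword and passes it as the first positional argument to the
-        # stub, which is what args[0] inspects below.)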
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-def test_list_index_endpoints_flattened_error():
-    client = IndexEndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.list_index_endpoints(
-            index_endpoint_service.ListIndexEndpointsRequest(),
-            parent='parent_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_list_index_endpoints_flattened_async():
-    client = IndexEndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_index_endpoints),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint_service.ListIndexEndpointsResponse())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.list_index_endpoints(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_index_endpoints_flattened_error_async():
-    client = IndexEndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.list_index_endpoints(
-            index_endpoint_service.ListIndexEndpointsRequest(),
-            parent='parent_value',
-        )
-
-
-def test_list_index_endpoints_pager():
-    client = IndexEndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_index_endpoints),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            index_endpoint_service.ListIndexEndpointsResponse(
-                index_endpoints=[
-                    index_endpoint.IndexEndpoint(),
-                    index_endpoint.IndexEndpoint(),
-                    index_endpoint.IndexEndpoint(),
-                ],
-                next_page_token='abc',
-            ),
-            index_endpoint_service.ListIndexEndpointsResponse(
-                index_endpoints=[],
-                next_page_token='def',
-            ),
-            index_endpoint_service.ListIndexEndpointsResponse(
-                index_endpoints=[
-                    index_endpoint.IndexEndpoint(),
-                ],
-                next_page_token='ghi',
-            ),
-            index_endpoint_service.ListIndexEndpointsResponse(
-                index_endpoints=[
-                    index_endpoint.IndexEndpoint(),
-                    index_endpoint.IndexEndpoint(),
-                ],
-            ),
-            RuntimeError,
-        )
-
-        metadata = ()
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ('parent', ''),
-            )),
-        )
-        pager = client.list_index_endpoints(request={})
-
-        assert pager._metadata == metadata
-
-        results = [i for i in pager]
-        assert len(results) == 6
-        assert all(isinstance(i, index_endpoint.IndexEndpoint)
-                   for i in results)
-
-def test_list_index_endpoints_pages():
-    client = IndexEndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
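-    # (Unlike the pager test above, which iterates items across pages, this
-    # test iterates the .pages view and checks each page's
-    # raw_page.next_page_token; the final page reports an empty token.)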
-    with mock.patch.object(
-            type(client.transport.list_index_endpoints),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            index_endpoint_service.ListIndexEndpointsResponse(
-                index_endpoints=[
-                    index_endpoint.IndexEndpoint(),
-                    index_endpoint.IndexEndpoint(),
-                    index_endpoint.IndexEndpoint(),
-                ],
-                next_page_token='abc',
-            ),
-            index_endpoint_service.ListIndexEndpointsResponse(
-                index_endpoints=[],
-                next_page_token='def',
-            ),
-            index_endpoint_service.ListIndexEndpointsResponse(
-                index_endpoints=[
-                    index_endpoint.IndexEndpoint(),
-                ],
-                next_page_token='ghi',
-            ),
-            index_endpoint_service.ListIndexEndpointsResponse(
-                index_endpoints=[
-                    index_endpoint.IndexEndpoint(),
-                    index_endpoint.IndexEndpoint(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = list(client.list_index_endpoints(request={}).pages)
-        for page_, token in zip(pages, ['abc','def','ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_index_endpoints_async_pager():
-    client = IndexEndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_index_endpoints),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            index_endpoint_service.ListIndexEndpointsResponse(
-                index_endpoints=[
-                    index_endpoint.IndexEndpoint(),
-                    index_endpoint.IndexEndpoint(),
-                    index_endpoint.IndexEndpoint(),
-                ],
-                next_page_token='abc',
-            ),
-            index_endpoint_service.ListIndexEndpointsResponse(
-                index_endpoints=[],
-                next_page_token='def',
-            ),
-            index_endpoint_service.ListIndexEndpointsResponse(
-                index_endpoints=[
-                    index_endpoint.IndexEndpoint(),
-                ],
-                next_page_token='ghi',
-            ),
-            index_endpoint_service.ListIndexEndpointsResponse(
-                index_endpoints=[
-                    index_endpoint.IndexEndpoint(),
-                    index_endpoint.IndexEndpoint(),
-                ],
-            ),
-            RuntimeError,
-        )
-        async_pager = await client.list_index_endpoints(request={},)
-        assert async_pager.next_page_token == 'abc'
-        responses = []
-        async for response in async_pager:
-            responses.append(response)
-
-        assert len(responses) == 6
-        assert all(isinstance(i, index_endpoint.IndexEndpoint)
-                   for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_index_endpoints_async_pages():
-    client = IndexEndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_index_endpoints),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
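-        # (The trailing RuntimeError in each side_effect tuple acts as a
-        # tripwire: if the pager ever requested a page beyond the four
-        # configured responses, the mock would raise instead of looping.)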
- call.side_effect = ( - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[ - index_endpoint.IndexEndpoint(), - index_endpoint.IndexEndpoint(), - index_endpoint.IndexEndpoint(), - ], - next_page_token='abc', - ), - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[], - next_page_token='def', - ), - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[ - index_endpoint.IndexEndpoint(), - ], - next_page_token='ghi', - ), - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[ - index_endpoint.IndexEndpoint(), - index_endpoint.IndexEndpoint(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_index_endpoints(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_index_endpoint(transport: str = 'grpc', request_type=index_endpoint_service.UpdateIndexEndpointRequest): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_index_endpoint.IndexEndpoint( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - network='network_value', - ) - response = client.update_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.UpdateIndexEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_index_endpoint.IndexEndpoint) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.network == 'network_value' - - -def test_update_index_endpoint_from_dict(): - test_update_index_endpoint(request_type=dict) - - -def test_update_index_endpoint_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index_endpoint), - '__call__') as call: - client.update_index_endpoint() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.UpdateIndexEndpointRequest() - - -@pytest.mark.asyncio -async def test_update_index_endpoint_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.UpdateIndexEndpointRequest): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_index_endpoint),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_index_endpoint.IndexEndpoint(
-            name='name_value',
-            display_name='display_name_value',
-            description='description_value',
-            etag='etag_value',
-            network='network_value',
-        ))
-        response = await client.update_index_endpoint(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == index_endpoint_service.UpdateIndexEndpointRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, gca_index_endpoint.IndexEndpoint)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.description == 'description_value'
-    assert response.etag == 'etag_value'
-    assert response.network == 'network_value'
-
-
-@pytest.mark.asyncio
-async def test_update_index_endpoint_async_from_dict():
-    await test_update_index_endpoint_async(request_type=dict)
-
-
-def test_update_index_endpoint_field_headers():
-    client = IndexEndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = index_endpoint_service.UpdateIndexEndpointRequest()
-
-    request.index_endpoint.name = 'index_endpoint.name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_index_endpoint),
-            '__call__') as call:
-        call.return_value = gca_index_endpoint.IndexEndpoint()
-        client.update_index_endpoint(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'index_endpoint.name=index_endpoint.name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_update_index_endpoint_field_headers_async():
-    client = IndexEndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = index_endpoint_service.UpdateIndexEndpointRequest()
-
-    request.index_endpoint.name = 'index_endpoint.name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_index_endpoint),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_index_endpoint.IndexEndpoint())
-        await client.update_index_endpoint(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'index_endpoint.name=index_endpoint.name/value',
-    ) in kw['metadata']
-
-
-def test_update_index_endpoint_flattened():
-    client = IndexEndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_index_endpoint),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = gca_index_endpoint.IndexEndpoint()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.update_index_endpoint(
-            index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'),
-            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].index_endpoint == gca_index_endpoint.IndexEndpoint(name='name_value')
-        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
-
-
-def test_update_index_endpoint_flattened_error():
-    client = IndexEndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.update_index_endpoint(
-            index_endpoint_service.UpdateIndexEndpointRequest(),
-            index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'),
-            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
-        )
-
-
-@pytest.mark.asyncio
-async def test_update_index_endpoint_flattened_async():
-    client = IndexEndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_index_endpoint),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_index_endpoint.IndexEndpoint())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.update_index_endpoint(
-            index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'),
-            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].index_endpoint == gca_index_endpoint.IndexEndpoint(name='name_value')
-        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
-
-
-@pytest.mark.asyncio
-async def test_update_index_endpoint_flattened_error_async():
-    client = IndexEndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
- with pytest.raises(ValueError): - await client.update_index_endpoint( - index_endpoint_service.UpdateIndexEndpointRequest(), - index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_index_endpoint(transport: str = 'grpc', request_type=index_endpoint_service.DeleteIndexEndpointRequest): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.DeleteIndexEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_index_endpoint_from_dict(): - test_delete_index_endpoint(request_type=dict) - - -def test_delete_index_endpoint_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index_endpoint), - '__call__') as call: - client.delete_index_endpoint() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.DeleteIndexEndpointRequest() - - -@pytest.mark.asyncio -async def test_delete_index_endpoint_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.DeleteIndexEndpointRequest): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.DeleteIndexEndpointRequest() - - # Establish that the response is the type that we expect. 
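-    # (Long-running methods such as delete_index_endpoint surface an LRO
-    # wrapper; only the future.Future interface is asserted here because
-    # the underlying Operation is mocked.)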
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_index_endpoint_async_from_dict(): - await test_delete_index_endpoint_async(request_type=dict) - - -def test_delete_index_endpoint_field_headers(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.DeleteIndexEndpointRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index_endpoint), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_index_endpoint_field_headers_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.DeleteIndexEndpointRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index_endpoint), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_index_endpoint_flattened(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_index_endpoint( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_delete_index_endpoint_flattened_error(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
-    with pytest.raises(ValueError):
-        client.delete_index_endpoint(
-            index_endpoint_service.DeleteIndexEndpointRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_delete_index_endpoint_flattened_async():
-    client = IndexEndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_index_endpoint),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.delete_index_endpoint(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_index_endpoint_flattened_error_async():
-    client = IndexEndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.delete_index_endpoint(
-            index_endpoint_service.DeleteIndexEndpointRequest(),
-            name='name_value',
-        )
-
-
-def test_deploy_index(transport: str = 'grpc', request_type=index_endpoint_service.DeployIndexRequest):
-    client = IndexEndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.deploy_index),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/spam')
-        response = client.deploy_index(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == index_endpoint_service.DeployIndexRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, future.Future)
-
-
-def test_deploy_index_from_dict():
-    test_deploy_index(request_type=dict)
-
-
-def test_deploy_index_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = IndexEndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object( - type(client.transport.deploy_index), - '__call__') as call: - client.deploy_index() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.DeployIndexRequest() - - -@pytest.mark.asyncio -async def test_deploy_index_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.DeployIndexRequest): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.deploy_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.DeployIndexRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_deploy_index_async_from_dict(): - await test_deploy_index_async(request_type=dict) - - -def test_deploy_index_field_headers(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.DeployIndexRequest() - - request.index_endpoint = 'index_endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_index), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.deploy_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'index_endpoint=index_endpoint/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_deploy_index_field_headers_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.DeployIndexRequest() - - request.index_endpoint = 'index_endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_index), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.deploy_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'index_endpoint=index_endpoint/value',
-    ) in kw['metadata']
-
-
-def test_deploy_index_flattened():
-    client = IndexEndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.deploy_index),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/op')
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.deploy_index(
-            index_endpoint='index_endpoint_value',
-            deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].index_endpoint == 'index_endpoint_value'
-        assert args[0].deployed_index == gca_index_endpoint.DeployedIndex(id='id_value')
-
-
-def test_deploy_index_flattened_error():
-    client = IndexEndpointServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.deploy_index(
-            index_endpoint_service.DeployIndexRequest(),
-            index_endpoint='index_endpoint_value',
-            deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'),
-        )
-
-
-@pytest.mark.asyncio
-async def test_deploy_index_flattened_async():
-    client = IndexEndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.deploy_index),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.deploy_index(
-            index_endpoint='index_endpoint_value',
-            deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].index_endpoint == 'index_endpoint_value'
-        assert args[0].deployed_index == gca_index_endpoint.DeployedIndex(id='id_value')
-
-
-@pytest.mark.asyncio
-async def test_deploy_index_flattened_error_async():
-    client = IndexEndpointServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
- with pytest.raises(ValueError): - await client.deploy_index( - index_endpoint_service.DeployIndexRequest(), - index_endpoint='index_endpoint_value', - deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), - ) - - -def test_undeploy_index(transport: str = 'grpc', request_type=index_endpoint_service.UndeployIndexRequest): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.undeploy_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.UndeployIndexRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_undeploy_index_from_dict(): - test_undeploy_index(request_type=dict) - - -def test_undeploy_index_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_index), - '__call__') as call: - client.undeploy_index() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.UndeployIndexRequest() - - -@pytest.mark.asyncio -async def test_undeploy_index_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.UndeployIndexRequest): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.undeploy_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.UndeployIndexRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_undeploy_index_async_from_dict(): - await test_undeploy_index_async(request_type=dict) - - -def test_undeploy_index_field_headers(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = index_endpoint_service.UndeployIndexRequest() - - request.index_endpoint = 'index_endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_index), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.undeploy_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'index_endpoint=index_endpoint/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_undeploy_index_field_headers_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.UndeployIndexRequest() - - request.index_endpoint = 'index_endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_index), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.undeploy_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'index_endpoint=index_endpoint/value', - ) in kw['metadata'] - - -def test_undeploy_index_flattened(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.undeploy_index( - index_endpoint='index_endpoint_value', - deployed_index_id='deployed_index_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].index_endpoint == 'index_endpoint_value' - assert args[0].deployed_index_id == 'deployed_index_id_value' - - -def test_undeploy_index_flattened_error(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.undeploy_index( - index_endpoint_service.UndeployIndexRequest(), - index_endpoint='index_endpoint_value', - deployed_index_id='deployed_index_id_value', - ) - - -@pytest.mark.asyncio -async def test_undeploy_index_flattened_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_index), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.undeploy_index( - index_endpoint='index_endpoint_value', - deployed_index_id='deployed_index_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].index_endpoint == 'index_endpoint_value' - assert args[0].deployed_index_id == 'deployed_index_id_value' - - -@pytest.mark.asyncio -async def test_undeploy_index_flattened_error_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.undeploy_index( - index_endpoint_service.UndeployIndexRequest(), - index_endpoint='index_endpoint_value', - deployed_index_id='deployed_index_id_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.IndexEndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.IndexEndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = IndexEndpointServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.IndexEndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = IndexEndpointServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.IndexEndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = IndexEndpointServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.IndexEndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.IndexEndpointServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.IndexEndpointServiceGrpcTransport, - transports.IndexEndpointServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. 
- client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.IndexEndpointServiceGrpcTransport, - ) - -def test_index_endpoint_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.IndexEndpointServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_index_endpoint_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1beta1.services.index_endpoint_service.transports.IndexEndpointServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.IndexEndpointServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'create_index_endpoint', - 'get_index_endpoint', - 'list_index_endpoints', - 'update_index_endpoint', - 'delete_index_endpoint', - 'deploy_index', - 'undeploy_index', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -@requires_google_auth_gte_1_25_0 -def test_index_endpoint_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.index_endpoint_service.transports.IndexEndpointServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.IndexEndpointServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@requires_google_auth_lt_1_25_0 -def test_index_endpoint_service_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.index_endpoint_service.transports.IndexEndpointServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.IndexEndpointServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - ), - quota_project_id="octopus", - ) - - -def test_index_endpoint_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.index_endpoint_service.transports.IndexEndpointServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.IndexEndpointServiceTransport() - adc.assert_called_once() - - -@requires_google_auth_gte_1_25_0 -def test_index_endpoint_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - IndexEndpointServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@requires_google_auth_lt_1_25_0 -def test_index_endpoint_service_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - IndexEndpointServiceClient() - adc.assert_called_once_with( - scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.IndexEndpointServiceGrpcTransport, - transports.IndexEndpointServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_gte_1_25_0 -def test_index_endpoint_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.IndexEndpointServiceGrpcTransport, - transports.IndexEndpointServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_index_endpoint_service_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with(scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.IndexEndpointServiceGrpcTransport, grpc_helpers), - (transports.IndexEndpointServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_index_endpoint_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.IndexEndpointServiceGrpcTransport, transports.IndexEndpointServiceGrpcAsyncIOTransport]) -def test_index_endpoint_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_index_endpoint_service_host_no_port(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_index_endpoint_service_host_with_port(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_index_endpoint_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.IndexEndpointServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_index_endpoint_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.IndexEndpointServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.IndexEndpointServiceGrpcTransport, transports.IndexEndpointServiceGrpcAsyncIOTransport]) -def test_index_endpoint_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.IndexEndpointServiceGrpcTransport, transports.IndexEndpointServiceGrpcAsyncIOTransport]) -def test_index_endpoint_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_index_endpoint_service_grpc_lro_client(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
- assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_index_endpoint_service_grpc_lro_async_client(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_index_path(): - project = "squid" - location = "clam" - index = "whelk" - expected = "projects/{project}/locations/{location}/indexes/{index}".format(project=project, location=location, index=index, ) - actual = IndexEndpointServiceClient.index_path(project, location, index) - assert expected == actual - - -def test_parse_index_path(): - expected = { - "project": "octopus", - "location": "oyster", - "index": "nudibranch", - } - path = IndexEndpointServiceClient.index_path(**expected) - - # Check that the path construction is reversible. - actual = IndexEndpointServiceClient.parse_index_path(path) - assert expected == actual - -def test_index_endpoint_path(): - project = "cuttlefish" - location = "mussel" - index_endpoint = "winkle" - expected = "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format(project=project, location=location, index_endpoint=index_endpoint, ) - actual = IndexEndpointServiceClient.index_endpoint_path(project, location, index_endpoint) - assert expected == actual - - -def test_parse_index_endpoint_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "index_endpoint": "abalone", - } - path = IndexEndpointServiceClient.index_endpoint_path(**expected) - - # Check that the path construction is reversible. - actual = IndexEndpointServiceClient.parse_index_endpoint_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "squid" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = IndexEndpointServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "clam", - } - path = IndexEndpointServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = IndexEndpointServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "whelk" - expected = "folders/{folder}".format(folder=folder, ) - actual = IndexEndpointServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "octopus", - } - path = IndexEndpointServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. 
- actual = IndexEndpointServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "oyster" - expected = "organizations/{organization}".format(organization=organization, ) - actual = IndexEndpointServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "nudibranch", - } - path = IndexEndpointServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = IndexEndpointServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "cuttlefish" - expected = "projects/{project}".format(project=project, ) - actual = IndexEndpointServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "mussel", - } - path = IndexEndpointServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = IndexEndpointServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "winkle" - location = "nautilus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = IndexEndpointServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "scallop", - "location": "abalone", - } - path = IndexEndpointServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. - actual = IndexEndpointServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.IndexEndpointServiceTransport, '_prep_wrapped_messages') as prep: - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.IndexEndpointServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = IndexEndpointServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py deleted file mode 100644 index 18f0fe79b9..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py +++ /dev/null @@ -1,2370 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.index_service import IndexServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.index_service import IndexServiceClient -from google.cloud.aiplatform_v1beta1.services.index_service import pagers -from google.cloud.aiplatform_v1beta1.services.index_service import transports -from google.cloud.aiplatform_v1beta1.services.index_service.transports.base import _GOOGLE_AUTH_VERSION -from google.cloud.aiplatform_v1beta1.types import deployed_index_ref -from google.cloud.aiplatform_v1beta1.types import index -from google.cloud.aiplatform_v1beta1.types import index as gca_index -from google.cloud.aiplatform_v1beta1.types import index_service -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). -requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client):
-    return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
-
-
-def test__get_default_mtls_endpoint():
-    api_endpoint = "example.googleapis.com"
-    api_mtls_endpoint = "example.mtls.googleapis.com"
-    sandbox_endpoint = "example.sandbox.googleapis.com"
-    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
-    non_googleapi = "api.example.com"
-
-    assert IndexServiceClient._get_default_mtls_endpoint(None) is None
-    assert IndexServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
-    assert IndexServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
-    assert IndexServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
-    assert IndexServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
-    assert IndexServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
-
-
-@pytest.mark.parametrize("client_class", [
-    IndexServiceClient,
-    IndexServiceAsyncClient,
-])
-def test_index_service_client_from_service_account_info(client_class):
-    creds = ga_credentials.AnonymousCredentials()
-    with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
-        factory.return_value = creds
-        info = {"valid": True}
-        client = client_class.from_service_account_info(info)
-        assert client.transport._credentials == creds
-        assert isinstance(client, client_class)
-
-        assert client.transport._host == 'aiplatform.googleapis.com:443'
-
-
-@pytest.mark.parametrize("client_class", [
-    IndexServiceClient,
-    IndexServiceAsyncClient,
-])
-def test_index_service_client_service_account_always_use_jwt(client_class):
-    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
-        creds = service_account.Credentials(None, None, None)
-        client = client_class(credentials=creds)
-        use_jwt.assert_not_called()
-
-
-@pytest.mark.parametrize("transport_class,transport_name", [
-    (transports.IndexServiceGrpcTransport, "grpc"),
-    (transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio"),
-])
-def test_index_service_client_service_account_always_use_jwt_true(transport_class, transport_name):
-    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
-        creds = service_account.Credentials(None, None, None)
-        transport = transport_class(credentials=creds, always_use_jwt_access=True)
-        use_jwt.assert_called_once_with(True)
-
-
-@pytest.mark.parametrize("client_class", [
-    IndexServiceClient,
-    IndexServiceAsyncClient,
-])
-def test_index_service_client_from_service_account_file(client_class):
-    creds = ga_credentials.AnonymousCredentials()
-    with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
-        factory.return_value = creds
-        client = client_class.from_service_account_file("dummy/file/path.json")
-        assert client.transport._credentials == creds
-        assert isinstance(client, client_class)
-
-        client = client_class.from_service_account_json("dummy/file/path.json")
-        assert client.transport._credentials == creds
-        assert isinstance(client, client_class)
-
-        assert client.transport._host == 'aiplatform.googleapis.com:443'
-
-
-def test_index_service_client_get_transport_class():
-    transport = IndexServiceClient.get_transport_class()
-    available_transports = [
-        transports.IndexServiceGrpcTransport,
-    ]
-    assert transport in available_transports
-
-    transport = IndexServiceClient.get_transport_class("grpc")
-    assert transport == transports.IndexServiceGrpcTransport
-
-
-@pytest.mark.parametrize("client_class,transport_class,transport_name", [
-    (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc"),
-    (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio"),
-])
-@mock.patch.object(IndexServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceClient))
-@mock.patch.object(IndexServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceAsyncClient))
-def test_index_service_client_client_options(client_class, transport_class, transport_name):
-    # Check that if channel is provided we won't create a new one.
-    with mock.patch.object(IndexServiceClient, 'get_transport_class') as gtc:
-        transport = transport_class(
-            credentials=ga_credentials.AnonymousCredentials()
-        )
-        client = client_class(transport=transport)
-        gtc.assert_not_called()
-
-    # Check that if channel is provided via str we will create a new one.
-    with mock.patch.object(IndexServiceClient, 'get_transport_class') as gtc:
-        client = client_class(transport=transport_name)
-        gtc.assert_called()
-
-    # Check the case api_endpoint is provided.
-    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
-    with mock.patch.object(transport_class, '__init__') as patched:
-        patched.return_value = None
-        client = client_class(client_options=options)
-        patched.assert_called_once_with(
-            credentials=None,
-            credentials_file=None,
-            host="squid.clam.whelk",
-            scopes=None,
-            client_cert_source_for_mtls=None,
-            quota_project_id=None,
-            client_info=transports.base.DEFAULT_CLIENT_INFO,
-        )
-
-    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
-    # "never".
-    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
-        with mock.patch.object(transport_class, '__init__') as patched:
-            patched.return_value = None
-            client = client_class()
-            patched.assert_called_once_with(
-                credentials=None,
-                credentials_file=None,
-                host=client.DEFAULT_ENDPOINT,
-                scopes=None,
-                client_cert_source_for_mtls=None,
-                quota_project_id=None,
-                client_info=transports.base.DEFAULT_CLIENT_INFO,
-            )
-
-    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
-    # "always".
-    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
-        with mock.patch.object(transport_class, '__init__') as patched:
-            patched.return_value = None
-            client = client_class()
-            patched.assert_called_once_with(
-                credentials=None,
-                credentials_file=None,
-                host=client.DEFAULT_MTLS_ENDPOINT,
-                scopes=None,
-                client_cert_source_for_mtls=None,
-                quota_project_id=None,
-                client_info=transports.base.DEFAULT_CLIENT_INFO,
-            )
-
-    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
-    # unsupported value.
-    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
-        with pytest.raises(MutualTLSChannelError):
-            client = client_class()
-
-    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc", "true"), - (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc", "false"), - (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(IndexServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceClient)) -@mock.patch.object(IndexServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_index_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc"), - (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_index_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc"), - (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_index_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_index_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.index_service.transports.IndexServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = IndexServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_create_index(transport: str = 'grpc', request_type=index_service.CreateIndexRequest): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.CreateIndexRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_index_from_dict(): - test_create_index(request_type=dict) - - -def test_create_index_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index), - '__call__') as call: - client.create_index() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.CreateIndexRequest() - - -@pytest.mark.asyncio -async def test_create_index_async(transport: str = 'grpc_asyncio', request_type=index_service.CreateIndexRequest): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.CreateIndexRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_index_async_from_dict(): - await test_create_index_async(request_type=dict) - - -def test_create_index_field_headers(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_service.CreateIndexRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_index_field_headers_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_service.CreateIndexRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_index_flattened(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_index( - parent='parent_value', - index=gca_index.Index(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].index == gca_index.Index(name='name_value') - - -def test_create_index_flattened_error(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_index( - index_service.CreateIndexRequest(), - parent='parent_value', - index=gca_index.Index(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_index_flattened_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_index( - parent='parent_value', - index=gca_index.Index(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].index == gca_index.Index(name='name_value') - - -@pytest.mark.asyncio -async def test_create_index_flattened_error_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_index( - index_service.CreateIndexRequest(), - parent='parent_value', - index=gca_index.Index(name='name_value'), - ) - - -def test_get_index(transport: str = 'grpc', request_type=index_service.GetIndexRequest): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = index.Index( - name='name_value', - display_name='display_name_value', - description='description_value', - metadata_schema_uri='metadata_schema_uri_value', - etag='etag_value', - ) - response = client.get_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.GetIndexRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, index.Index) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.etag == 'etag_value' - - -def test_get_index_from_dict(): - test_get_index(request_type=dict) - - -def test_get_index_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index), - '__call__') as call: - client.get_index() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.GetIndexRequest() - - -@pytest.mark.asyncio -async def test_get_index_async(transport: str = 'grpc_asyncio', request_type=index_service.GetIndexRequest): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(index.Index( - name='name_value', - display_name='display_name_value', - description='description_value', - metadata_schema_uri='metadata_schema_uri_value', - etag='etag_value', - )) - response = await client.get_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.GetIndexRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, index.Index) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_index_async_from_dict(): - await test_get_index_async(request_type=dict) - - -def test_get_index_field_headers(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_service.GetIndexRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index), - '__call__') as call: - call.return_value = index.Index() - client.get_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_get_index_field_headers_async():
-    client = IndexServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = index_service.GetIndexRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_index),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index.Index())
-        await client.get_index(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_get_index_flattened():
-    client = IndexServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_index),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = index.Index()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.get_index(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_get_index_flattened_error():
-    client = IndexServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.get_index(
-            index_service.GetIndexRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_get_index_flattened_async():
-    client = IndexServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_index),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index.Index())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.get_index(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_get_index_flattened_error_async():
-    client = IndexServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
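-    # In other words, the client accepts either a populated request object,
-    #     client.get_index(index_service.GetIndexRequest(name=...)),
-    # or the flattened keyword form,
-    #     client.get_index(name=...),
-    # but never both in a single call.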
-    with pytest.raises(ValueError):
-        await client.get_index(
-            index_service.GetIndexRequest(),
-            name='name_value',
-        )
-
-
-def test_list_indexes(transport: str = 'grpc', request_type=index_service.ListIndexesRequest):
-    client = IndexServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_indexes),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = index_service.ListIndexesResponse(
-            next_page_token='next_page_token_value',
-        )
-        response = client.list_indexes(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == index_service.ListIndexesRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListIndexesPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_indexes_from_dict():
-    test_list_indexes(request_type=dict)
-
-
-def test_list_indexes_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = IndexServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_indexes),
-            '__call__') as call:
-        client.list_indexes()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == index_service.ListIndexesRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_indexes_async(transport: str = 'grpc_asyncio', request_type=index_service.ListIndexesRequest):
-    client = IndexServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_indexes),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_service.ListIndexesResponse(
-            next_page_token='next_page_token_value',
-        ))
-        response = await client.list_indexes(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == index_service.ListIndexesRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListIndexesAsyncPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_indexes_async_from_dict():
-    await test_list_indexes_async(request_type=dict)
-
-
-def test_list_indexes_field_headers():
-    client = IndexServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = index_service.ListIndexesRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_indexes),
-            '__call__') as call:
-        call.return_value = index_service.ListIndexesResponse()
-        client.list_indexes(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_indexes_field_headers_async():
-    client = IndexServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = index_service.ListIndexesRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_indexes),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_service.ListIndexesResponse())
-        await client.list_indexes(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_list_indexes_flattened():
-    client = IndexServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_indexes),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = index_service.ListIndexesResponse()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.list_indexes(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-def test_list_indexes_flattened_error():
-    client = IndexServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.list_indexes(
-            index_service.ListIndexesRequest(),
-            parent='parent_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_list_indexes_flattened_async():
-    client = IndexServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_indexes),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_service.ListIndexesResponse())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.list_indexes(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_indexes_flattened_error_async():
-    client = IndexServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.list_indexes(
-            index_service.ListIndexesRequest(),
-            parent='parent_value',
-        )
-
-
-def test_list_indexes_pager():
-    client = IndexServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_indexes),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            index_service.ListIndexesResponse(
-                indexes=[
-                    index.Index(),
-                    index.Index(),
-                    index.Index(),
-                ],
-                next_page_token='abc',
-            ),
-            index_service.ListIndexesResponse(
-                indexes=[],
-                next_page_token='def',
-            ),
-            index_service.ListIndexesResponse(
-                indexes=[
-                    index.Index(),
-                ],
-                next_page_token='ghi',
-            ),
-            index_service.ListIndexesResponse(
-                indexes=[
-                    index.Index(),
-                    index.Index(),
-                ],
-            ),
-            RuntimeError,
-        )
-
-        metadata = ()
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ('parent', ''),
-            )),
-        )
-        pager = client.list_indexes(request={})
-
-        assert pager._metadata == metadata
-
-        results = [i for i in pager]
-        assert len(results) == 6
-        assert all(isinstance(i, index.Index)
-                   for i in results)
-
-
-def test_list_indexes_pages():
-    client = IndexServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_indexes),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            index_service.ListIndexesResponse(
-                indexes=[
-                    index.Index(),
-                    index.Index(),
-                    index.Index(),
-                ],
-                next_page_token='abc',
-            ),
-            index_service.ListIndexesResponse(
-                indexes=[],
-                next_page_token='def',
-            ),
-            index_service.ListIndexesResponse(
-                indexes=[
-                    index.Index(),
-                ],
-                next_page_token='ghi',
-            ),
-            index_service.ListIndexesResponse(
-                indexes=[
-                    index.Index(),
-                    index.Index(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = list(client.list_indexes(request={}).pages)
-        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-
-@pytest.mark.asyncio
-async def test_list_indexes_async_pager():
-    client = IndexServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_indexes),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
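-        # Four pages are faked below (3 + 0 + 1 + 2 items); the pager keeps
-        # fetching while next_page_token is non-empty, so iterating it is
-        # expected to yield 6 Index messages in total, e.g.:
-        #     async for index in await client.list_indexes(parent=...):
-        #         ...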
-        call.side_effect = (
-            index_service.ListIndexesResponse(
-                indexes=[
-                    index.Index(),
-                    index.Index(),
-                    index.Index(),
-                ],
-                next_page_token='abc',
-            ),
-            index_service.ListIndexesResponse(
-                indexes=[],
-                next_page_token='def',
-            ),
-            index_service.ListIndexesResponse(
-                indexes=[
-                    index.Index(),
-                ],
-                next_page_token='ghi',
-            ),
-            index_service.ListIndexesResponse(
-                indexes=[
-                    index.Index(),
-                    index.Index(),
-                ],
-            ),
-            RuntimeError,
-        )
-        async_pager = await client.list_indexes(request={},)
-        assert async_pager.next_page_token == 'abc'
-        responses = []
-        async for response in async_pager:
-            responses.append(response)
-
-        assert len(responses) == 6
-        assert all(isinstance(i, index.Index)
-                   for i in responses)
-
-
-@pytest.mark.asyncio
-async def test_list_indexes_async_pages():
-    client = IndexServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_indexes),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            index_service.ListIndexesResponse(
-                indexes=[
-                    index.Index(),
-                    index.Index(),
-                    index.Index(),
-                ],
-                next_page_token='abc',
-            ),
-            index_service.ListIndexesResponse(
-                indexes=[],
-                next_page_token='def',
-            ),
-            index_service.ListIndexesResponse(
-                indexes=[
-                    index.Index(),
-                ],
-                next_page_token='ghi',
-            ),
-            index_service.ListIndexesResponse(
-                indexes=[
-                    index.Index(),
-                    index.Index(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = []
-        async for page_ in (await client.list_indexes(request={})).pages:
-            pages.append(page_)
-        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-
-def test_update_index(transport: str = 'grpc', request_type=index_service.UpdateIndexRequest):
-    client = IndexServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_index),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/spam')
-        response = client.update_index(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == index_service.UpdateIndexRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, future.Future)
-
-
-def test_update_index_from_dict():
-    test_update_index(request_type=dict)
-
-
-def test_update_index_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = IndexServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object( - type(client.transport.update_index), - '__call__') as call: - client.update_index() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.UpdateIndexRequest() - - -@pytest.mark.asyncio -async def test_update_index_async(transport: str = 'grpc_asyncio', request_type=index_service.UpdateIndexRequest): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.update_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.UpdateIndexRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_update_index_async_from_dict(): - await test_update_index_async(request_type=dict) - - -def test_update_index_field_headers(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_service.UpdateIndexRequest() - - request.index.name = 'index.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.update_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'index.name=index.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_index_field_headers_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_service.UpdateIndexRequest() - - request.index.name = 'index.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.update_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
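-    # The routing information travels as gRPC metadata of the form
-    #     ('x-goog-request-params', 'index.name=index.name/value'),
-    # which is exactly what the assertion below looks for.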
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'index.name=index.name/value',
-    ) in kw['metadata']
-
-
-def test_update_index_flattened():
-    client = IndexServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_index),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/op')
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.update_index(
-            index=gca_index.Index(name='name_value'),
-            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].index == gca_index.Index(name='name_value')
-        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
-
-
-def test_update_index_flattened_error():
-    client = IndexServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.update_index(
-            index_service.UpdateIndexRequest(),
-            index=gca_index.Index(name='name_value'),
-            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
-        )
-
-
-@pytest.mark.asyncio
-async def test_update_index_flattened_async():
-    client = IndexServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_index),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.update_index(
-            index=gca_index.Index(name='name_value'),
-            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].index == gca_index.Index(name='name_value')
-        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
-
-
-@pytest.mark.asyncio
-async def test_update_index_flattened_error_async():
-    client = IndexServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.update_index(
-            index_service.UpdateIndexRequest(),
-            index=gca_index.Index(name='name_value'),
-            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
-        )
-
-
-def test_delete_index(transport: str = 'grpc', request_type=index_service.DeleteIndexRequest):
-    client = IndexServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.DeleteIndexRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_index_from_dict(): - test_delete_index(request_type=dict) - - -def test_delete_index_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index), - '__call__') as call: - client.delete_index() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.DeleteIndexRequest() - - -@pytest.mark.asyncio -async def test_delete_index_async(transport: str = 'grpc_asyncio', request_type=index_service.DeleteIndexRequest): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.DeleteIndexRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_index_async_from_dict(): - await test_delete_index_async(request_type=dict) - - -def test_delete_index_field_headers(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_service.DeleteIndexRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_delete_index_field_headers_async():
-    client = IndexServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = index_service.DeleteIndexRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_index),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
-        await client.delete_index(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_delete_index_flattened():
-    client = IndexServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_index),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/op')
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.delete_index(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_delete_index_flattened_error():
-    client = IndexServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.delete_index(
-            index_service.DeleteIndexRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_delete_index_flattened_async():
-    client = IndexServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_index),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.delete_index(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_index_flattened_error_async():
-    client = IndexServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
- with pytest.raises(ValueError): - await client.delete_index( - index_service.DeleteIndexRequest(), - name='name_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.IndexServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.IndexServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = IndexServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.IndexServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = IndexServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.IndexServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = IndexServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.IndexServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.IndexServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.IndexServiceGrpcTransport, - transports.IndexServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.IndexServiceGrpcTransport, - ) - -def test_index_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.IndexServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_index_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1beta1.services.index_service.transports.IndexServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.IndexServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
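-    # (The base transport is only an abstract interface; the concrete gRPC
-    # transports override each of these methods with real stubs.)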
-    methods = (
-        'create_index',
-        'get_index',
-        'list_indexes',
-        'update_index',
-        'delete_index',
-    )
-    for method in methods:
-        with pytest.raises(NotImplementedError):
-            getattr(transport, method)(request=object())
-
-    # Additionally, the LRO client (a property) should
-    # also raise NotImplementedError
-    with pytest.raises(NotImplementedError):
-        transport.operations_client
-
-
-@requires_google_auth_gte_1_25_0
-def test_index_service_base_transport_with_credentials_file():
-    # Instantiate the base transport with a credentials file
-    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.index_service.transports.IndexServiceTransport._prep_wrapped_messages') as Transport:
-        Transport.return_value = None
-        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport = transports.IndexServiceTransport(
-            credentials_file="credentials.json",
-            quota_project_id="octopus",
-        )
-        load_creds.assert_called_once_with("credentials.json",
-            scopes=None,
-            default_scopes=(
-                'https://www.googleapis.com/auth/cloud-platform',
-            ),
-            quota_project_id="octopus",
-        )
-
-
-@requires_google_auth_lt_1_25_0
-def test_index_service_base_transport_with_credentials_file_old_google_auth():
-    # Instantiate the base transport with a credentials file
-    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.index_service.transports.IndexServiceTransport._prep_wrapped_messages') as Transport:
-        Transport.return_value = None
-        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport = transports.IndexServiceTransport(
-            credentials_file="credentials.json",
-            quota_project_id="octopus",
-        )
-        load_creds.assert_called_once_with("credentials.json",
-            scopes=(
-                'https://www.googleapis.com/auth/cloud-platform',
-            ),
-            quota_project_id="octopus",
-        )
-
-
-def test_index_service_base_transport_with_adc():
-    # Test the default credentials are used if credentials and credentials_file are None.
-    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.index_service.transports.IndexServiceTransport._prep_wrapped_messages') as Transport:
-        Transport.return_value = None
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport = transports.IndexServiceTransport()
-        adc.assert_called_once()
-
-
-@requires_google_auth_gte_1_25_0
-def test_index_service_auth_adc():
-    # If no credentials are provided, we should use ADC credentials.
-    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        IndexServiceClient()
-        adc.assert_called_once_with(
-            scopes=None,
-            default_scopes=(
-                'https://www.googleapis.com/auth/cloud-platform',
-            ),
-            quota_project_id=None,
-        )
-
-
-@requires_google_auth_lt_1_25_0
-def test_index_service_auth_adc_old_google_auth():
-    # If no credentials are provided, we should use ADC credentials.
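-    # (google-auth < 1.25.0 predates the default_scopes argument, so the
-    # cloud-platform scope is expected to arrive via scopes= instead.)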
-    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        IndexServiceClient()
-        adc.assert_called_once_with(
-            scopes=('https://www.googleapis.com/auth/cloud-platform',),
-            quota_project_id=None,
-        )
-
-
-@pytest.mark.parametrize(
-    "transport_class",
-    [
-        transports.IndexServiceGrpcTransport,
-        transports.IndexServiceGrpcAsyncIOTransport,
-    ],
-)
-@requires_google_auth_gte_1_25_0
-def test_index_service_transport_auth_adc(transport_class):
-    # If credentials and host are not provided, the transport class should use
-    # ADC credentials.
-    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport_class(quota_project_id="octopus", scopes=["1", "2"])
-        adc.assert_called_once_with(
-            scopes=["1", "2"],
-            default_scopes=('https://www.googleapis.com/auth/cloud-platform',),
-            quota_project_id="octopus",
-        )
-
-
-@pytest.mark.parametrize(
-    "transport_class",
-    [
-        transports.IndexServiceGrpcTransport,
-        transports.IndexServiceGrpcAsyncIOTransport,
-    ],
-)
-@requires_google_auth_lt_1_25_0
-def test_index_service_transport_auth_adc_old_google_auth(transport_class):
-    # If credentials and host are not provided, the transport class should use
-    # ADC credentials.
-    with mock.patch.object(google.auth, "default", autospec=True) as adc:
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport_class(quota_project_id="octopus")
-        adc.assert_called_once_with(
-            scopes=('https://www.googleapis.com/auth/cloud-platform',),
-            quota_project_id="octopus",
-        )
-
-
-@pytest.mark.parametrize(
-    "transport_class,grpc_helpers",
-    [
-        (transports.IndexServiceGrpcTransport, grpc_helpers),
-        (transports.IndexServiceGrpcAsyncIOTransport, grpc_helpers_async)
-    ],
-)
-def test_index_service_transport_create_channel(transport_class, grpc_helpers):
-    # If credentials and host are not provided, the transport class should use
-    # ADC credentials.
-    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
-        grpc_helpers, "create_channel", autospec=True
-    ) as create_channel:
-        creds = ga_credentials.AnonymousCredentials()
-        adc.return_value = (creds, None)
-        transport_class(
-            quota_project_id="octopus",
-            scopes=["1", "2"]
-        )
-
-        create_channel.assert_called_with(
-            "aiplatform.googleapis.com:443",
-            credentials=creds,
-            credentials_file=None,
-            quota_project_id="octopus",
-            default_scopes=('https://www.googleapis.com/auth/cloud-platform',),
-            scopes=["1", "2"],
-            default_host="aiplatform.googleapis.com",
-            ssl_credentials=None,
-            options=[
-                ("grpc.max_send_message_length", -1),
-                ("grpc.max_receive_message_length", -1),
-            ],
-        )
-
-
-@pytest.mark.parametrize("transport_class", [transports.IndexServiceGrpcTransport, transports.IndexServiceGrpcAsyncIOTransport])
-def test_index_service_grpc_transport_client_cert_source_for_mtls(
-    transport_class
-):
-    cred = ga_credentials.AnonymousCredentials()
-
-    # Check ssl_channel_credentials is used if provided.
-    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
-        mock_ssl_channel_creds = mock.Mock()
-        transport_class(
-            host="squid.clam.whelk",
-            credentials=cred,
-            ssl_channel_credentials=mock_ssl_channel_creds
-        )
-        mock_create_channel.assert_called_once_with(
-            "squid.clam.whelk:443",
-            credentials=cred,
-            credentials_file=None,
-            scopes=None,
-            ssl_credentials=mock_ssl_channel_creds,
-            quota_project_id=None,
-            options=[
-                ("grpc.max_send_message_length", -1),
-                ("grpc.max_receive_message_length", -1),
-            ],
-        )
-
-    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
-    # is used.
-    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
-        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
-            transport_class(
-                credentials=cred,
-                client_cert_source_for_mtls=client_cert_source_callback
-            )
-            expected_cert, expected_key = client_cert_source_callback()
-            mock_ssl_cred.assert_called_once_with(
-                certificate_chain=expected_cert,
-                private_key=expected_key
-            )
-
-
-def test_index_service_host_no_port():
-    client = IndexServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
-    )
-    assert client.transport._host == 'aiplatform.googleapis.com:443'
-
-
-def test_index_service_host_with_port():
-    client = IndexServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
-    )
-    assert client.transport._host == 'aiplatform.googleapis.com:8000'
-
-
-def test_index_service_grpc_transport_channel():
-    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
-
-    # Check that channel is used if provided.
-    transport = transports.IndexServiceGrpcTransport(
-        host="squid.clam.whelk",
-        channel=channel,
-    )
-    assert transport.grpc_channel == channel
-    assert transport._host == "squid.clam.whelk:443"
-    assert transport._ssl_channel_credentials is None
-
-
-def test_index_service_grpc_asyncio_transport_channel():
-    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
-
-    # Check that channel is used if provided.
-    transport = transports.IndexServiceGrpcAsyncIOTransport(
-        host="squid.clam.whelk",
-        channel=channel,
-    )
-    assert transport.grpc_channel == channel
-    assert transport._host == "squid.clam.whelk:443"
-    assert transport._ssl_channel_credentials is None
-
-
-# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
-# removed from grpc/grpc_asyncio transport constructor.
-@pytest.mark.parametrize("transport_class", [transports.IndexServiceGrpcTransport, transports.IndexServiceGrpcAsyncIOTransport])
-def test_index_service_transport_channel_mtls_with_client_cert_source(
-    transport_class
-):
-    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
-        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
-            mock_ssl_cred = mock.Mock()
-            grpc_ssl_channel_cred.return_value = mock_ssl_cred
-
-            mock_grpc_channel = mock.Mock()
-            grpc_create_channel.return_value = mock_grpc_channel
-
-            cred = ga_credentials.AnonymousCredentials()
-            with pytest.warns(DeprecationWarning):
-                with mock.patch.object(google.auth, 'default') as adc:
-                    adc.return_value = (cred, None)
-                    transport = transport_class(
-                        host="squid.clam.whelk",
-                        api_mtls_endpoint="mtls.squid.clam.whelk",
-                        client_cert_source=client_cert_source_callback,
-                    )
-                    adc.assert_called_once()
-
-            grpc_ssl_channel_cred.assert_called_once_with(
-                certificate_chain=b"cert bytes", private_key=b"key bytes"
-            )
-            grpc_create_channel.assert_called_once_with(
-                "mtls.squid.clam.whelk:443",
-                credentials=cred,
-                credentials_file=None,
-                scopes=None,
-                ssl_credentials=mock_ssl_cred,
-                quota_project_id=None,
-                options=[
-                    ("grpc.max_send_message_length", -1),
-                    ("grpc.max_receive_message_length", -1),
-                ],
-            )
-            assert transport.grpc_channel == mock_grpc_channel
-            assert transport._ssl_channel_credentials == mock_ssl_cred
-
-
-# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
-# removed from grpc/grpc_asyncio transport constructor.
-@pytest.mark.parametrize("transport_class", [transports.IndexServiceGrpcTransport, transports.IndexServiceGrpcAsyncIOTransport])
-def test_index_service_transport_channel_mtls_with_adc(
-    transport_class
-):
-    mock_ssl_cred = mock.Mock()
-    with mock.patch.multiple(
-        "google.auth.transport.grpc.SslCredentials",
-        __init__=mock.Mock(return_value=None),
-        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
-    ):
-        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
-            mock_grpc_channel = mock.Mock()
-            grpc_create_channel.return_value = mock_grpc_channel
-            mock_cred = mock.Mock()
-
-            with pytest.warns(DeprecationWarning):
-                transport = transport_class(
-                    host="squid.clam.whelk",
-                    credentials=mock_cred,
-                    api_mtls_endpoint="mtls.squid.clam.whelk",
-                    client_cert_source=None,
-                )
-
-            grpc_create_channel.assert_called_once_with(
-                "mtls.squid.clam.whelk:443",
-                credentials=mock_cred,
-                credentials_file=None,
-                scopes=None,
-                ssl_credentials=mock_ssl_cred,
-                quota_project_id=None,
-                options=[
-                    ("grpc.max_send_message_length", -1),
-                    ("grpc.max_receive_message_length", -1),
-                ],
-            )
-            assert transport.grpc_channel == mock_grpc_channel
-
-
-def test_index_service_grpc_lro_client():
-    client = IndexServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-    transport = client.transport
-
-    # Ensure that we have an api-core operations client.
-    assert isinstance(
-        transport.operations_client,
-        operations_v1.OperationsClient,
-    )
-
-    # Ensure that subsequent calls to the property send the exact same object.
-    assert transport.operations_client is transport.operations_client
-
-
-def test_index_service_grpc_lro_async_client():
-    client = IndexServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc_asyncio',
-    )
-    transport = client.transport
-
-    # Ensure that we have an api-core operations client.
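-    # (Long-running methods such as create_index and delete_index rely on this
-    # client to poll their google.longrunning operations.)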
- assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_index_path(): - project = "squid" - location = "clam" - index = "whelk" - expected = "projects/{project}/locations/{location}/indexes/{index}".format(project=project, location=location, index=index, ) - actual = IndexServiceClient.index_path(project, location, index) - assert expected == actual - - -def test_parse_index_path(): - expected = { - "project": "octopus", - "location": "oyster", - "index": "nudibranch", - } - path = IndexServiceClient.index_path(**expected) - - # Check that the path construction is reversible. - actual = IndexServiceClient.parse_index_path(path) - assert expected == actual - -def test_index_endpoint_path(): - project = "cuttlefish" - location = "mussel" - index_endpoint = "winkle" - expected = "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format(project=project, location=location, index_endpoint=index_endpoint, ) - actual = IndexServiceClient.index_endpoint_path(project, location, index_endpoint) - assert expected == actual - - -def test_parse_index_endpoint_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "index_endpoint": "abalone", - } - path = IndexServiceClient.index_endpoint_path(**expected) - - # Check that the path construction is reversible. - actual = IndexServiceClient.parse_index_endpoint_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "squid" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = IndexServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "clam", - } - path = IndexServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = IndexServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "whelk" - expected = "folders/{folder}".format(folder=folder, ) - actual = IndexServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "octopus", - } - path = IndexServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = IndexServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "oyster" - expected = "organizations/{organization}".format(organization=organization, ) - actual = IndexServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "nudibranch", - } - path = IndexServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. 
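-    # (Here path == 'organizations/nudibranch', so parsing should recover the
-    # expected dict above.)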
- actual = IndexServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "cuttlefish" - expected = "projects/{project}".format(project=project, ) - actual = IndexServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "mussel", - } - path = IndexServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = IndexServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "winkle" - location = "nautilus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = IndexServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "scallop", - "location": "abalone", - } - path = IndexServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. - actual = IndexServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.IndexServiceTransport, '_prep_wrapped_messages') as prep: - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.IndexServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = IndexServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py deleted file mode 100644 index d9d43b0ec5..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py +++ /dev/null @@ -1,9019 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.job_service import JobServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.job_service import JobServiceClient -from google.cloud.aiplatform_v1beta1.services.job_service import pagers -from google.cloud.aiplatform_v1beta1.services.job_service import transports -from google.cloud.aiplatform_v1beta1.services.job_service.transports.base import _GOOGLE_AUTH_VERSION -from google.cloud.aiplatform_v1beta1.types import accelerator_type -from google.cloud.aiplatform_v1beta1.types import batch_prediction_job -from google.cloud.aiplatform_v1beta1.types import batch_prediction_job as gca_batch_prediction_job -from google.cloud.aiplatform_v1beta1.types import completion_stats -from google.cloud.aiplatform_v1beta1.types import custom_job -from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job -from google.cloud.aiplatform_v1beta1.types import data_labeling_job -from google.cloud.aiplatform_v1beta1.types import data_labeling_job as gca_data_labeling_job -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import explanation -from google.cloud.aiplatform_v1beta1.types import explanation_metadata -from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job -from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job -from google.cloud.aiplatform_v1beta1.types import io -from google.cloud.aiplatform_v1beta1.types import job_service -from google.cloud.aiplatform_v1beta1.types import job_state -from google.cloud.aiplatform_v1beta1.types import machine_resources -from google.cloud.aiplatform_v1beta1.types import manual_batch_tuning_parameters -from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job -from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job -from google.cloud.aiplatform_v1beta1.types import model_monitoring -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.cloud.aiplatform_v1beta1.types import study -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import any_pb2 # type: ignore -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from google.type import money_pb2 # type: ignore -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal 
to" tests the default). -requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert JobServiceClient._get_default_mtls_endpoint(None) is None - assert JobServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert JobServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert JobServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert JobServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert JobServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - JobServiceClient, - JobServiceAsyncClient, -]) -def test_job_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("client_class", [ - JobServiceClient, - JobServiceAsyncClient, -]) -def test_job_service_client_service_account_always_use_jwt(client_class): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.JobServiceGrpcTransport, "grpc"), - (transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_job_service_client_service_account_always_use_jwt_true(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - -@pytest.mark.parametrize("client_class", [ - JobServiceClient, - JobServiceAsyncClient, -]) -def test_job_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - 
-        factory.return_value = creds
-        client = client_class.from_service_account_file("dummy/file/path.json")
-        assert client.transport._credentials == creds
-        assert isinstance(client, client_class)
-
-        client = client_class.from_service_account_json("dummy/file/path.json")
-        assert client.transport._credentials == creds
-        assert isinstance(client, client_class)
-
-        assert client.transport._host == 'aiplatform.googleapis.com:443'
-
-
-def test_job_service_client_get_transport_class():
-    transport = JobServiceClient.get_transport_class()
-    available_transports = [
-        transports.JobServiceGrpcTransport,
-    ]
-    assert transport in available_transports
-
-    transport = JobServiceClient.get_transport_class("grpc")
-    assert transport == transports.JobServiceGrpcTransport
-
-
-@pytest.mark.parametrize("client_class,transport_class,transport_name", [
-    (JobServiceClient, transports.JobServiceGrpcTransport, "grpc"),
-    (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"),
-])
-@mock.patch.object(JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient))
-@mock.patch.object(JobServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceAsyncClient))
-def test_job_service_client_client_options(client_class, transport_class, transport_name):
-    # Check that if channel is provided we won't create a new one.
-    with mock.patch.object(JobServiceClient, 'get_transport_class') as gtc:
-        transport = transport_class(
-            credentials=ga_credentials.AnonymousCredentials()
-        )
-        client = client_class(transport=transport)
-        gtc.assert_not_called()
-
-    # Check that if channel is provided via str we will create a new one.
-    with mock.patch.object(JobServiceClient, 'get_transport_class') as gtc:
-        client = client_class(transport=transport_name)
-        gtc.assert_called()
-
-    # Check the case api_endpoint is provided.
-    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
-    with mock.patch.object(transport_class, '__init__') as patched:
-        patched.return_value = None
-        client = client_class(client_options=options)
-        patched.assert_called_once_with(
-            credentials=None,
-            credentials_file=None,
-            host="squid.clam.whelk",
-            scopes=None,
-            client_cert_source_for_mtls=None,
-            quota_project_id=None,
-            client_info=transports.base.DEFAULT_CLIENT_INFO,
-        )
-
-    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
-    # "never".
-    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
-        with mock.patch.object(transport_class, '__init__') as patched:
-            patched.return_value = None
-            client = client_class()
-            patched.assert_called_once_with(
-                credentials=None,
-                credentials_file=None,
-                host=client.DEFAULT_ENDPOINT,
-                scopes=None,
-                client_cert_source_for_mtls=None,
-                quota_project_id=None,
-                client_info=transports.base.DEFAULT_CLIENT_INFO,
-            )
-
-    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
-    # "always".
-    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
-        with mock.patch.object(transport_class, '__init__') as patched:
-            patched.return_value = None
-            client = client_class()
-            patched.assert_called_once_with(
-                credentials=None,
-                credentials_file=None,
-                host=client.DEFAULT_MTLS_ENDPOINT,
-                scopes=None,
-                client_cert_source_for_mtls=None,
-                quota_project_id=None,
-                client_info=transports.base.DEFAULT_CLIENT_INFO,
-            )
-
-    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
-    # unsupported value.
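-    # The supported values, exercised above and in the next test, are "never",
-    # "auto", and "always"; anything else is expected to raise
-    # MutualTLSChannelError.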
-    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
-        with pytest.raises(MutualTLSChannelError):
-            client = client_class()
-
-    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
-    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
-        with pytest.raises(ValueError):
-            client = client_class()
-
-    # Check the case quota_project_id is provided
-    options = client_options.ClientOptions(quota_project_id="octopus")
-    with mock.patch.object(transport_class, '__init__') as patched:
-        patched.return_value = None
-        client = client_class(client_options=options)
-        patched.assert_called_once_with(
-            credentials=None,
-            credentials_file=None,
-            host=client.DEFAULT_ENDPOINT,
-            scopes=None,
-            client_cert_source_for_mtls=None,
-            quota_project_id="octopus",
-            client_info=transports.base.DEFAULT_CLIENT_INFO,
-        )
-
-@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
-    (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", "true"),
-    (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"),
-    (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", "false"),
-    (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"),
-])
-@mock.patch.object(JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient))
-@mock.patch.object(JobServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceAsyncClient))
-@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
-def test_job_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
-    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
-    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
-
-    # Check the case client_cert_source is provided. Whether client cert is used depends on
-    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
-    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
-        options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
-        with mock.patch.object(transport_class, '__init__') as patched:
-            patched.return_value = None
-            client = client_class(client_options=options)
-
-            if use_client_cert_env == "false":
-                expected_client_cert_source = None
-                expected_host = client.DEFAULT_ENDPOINT
-            else:
-                expected_client_cert_source = client_cert_source_callback
-                expected_host = client.DEFAULT_MTLS_ENDPOINT
-
-            patched.assert_called_once_with(
-                credentials=None,
-                credentials_file=None,
-                host=expected_host,
-                scopes=None,
-                client_cert_source_for_mtls=expected_client_cert_source,
-                quota_project_id=None,
-                client_info=transports.base.DEFAULT_CLIENT_INFO,
-            )
-
-    # Check the case ADC client cert is provided. Whether client cert is used depends on
-    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
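-    # Here no explicit client_cert_source is passed, so the client is expected
-    # to fall back to the application-default certificate discovered via
-    # google.auth.transport.mtls.default_client_cert_source (mocked below).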
-    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
-        with mock.patch.object(transport_class, '__init__') as patched:
-            with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
-                with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
-                    if use_client_cert_env == "false":
-                        expected_host = client.DEFAULT_ENDPOINT
-                        expected_client_cert_source = None
-                    else:
-                        expected_host = client.DEFAULT_MTLS_ENDPOINT
-                        expected_client_cert_source = client_cert_source_callback
-
-                    patched.return_value = None
-                    client = client_class()
-                    patched.assert_called_once_with(
-                        credentials=None,
-                        credentials_file=None,
-                        host=expected_host,
-                        scopes=None,
-                        client_cert_source_for_mtls=expected_client_cert_source,
-                        quota_project_id=None,
-                        client_info=transports.base.DEFAULT_CLIENT_INFO,
-                    )
-
-    # Check the case client_cert_source and ADC client cert are not provided.
-    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
-        with mock.patch.object(transport_class, '__init__') as patched:
-            with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
-                patched.return_value = None
-                client = client_class()
-                patched.assert_called_once_with(
-                    credentials=None,
-                    credentials_file=None,
-                    host=client.DEFAULT_ENDPOINT,
-                    scopes=None,
-                    client_cert_source_for_mtls=None,
-                    quota_project_id=None,
-                    client_info=transports.base.DEFAULT_CLIENT_INFO,
-                )
-
-
-@pytest.mark.parametrize("client_class,transport_class,transport_name", [
-    (JobServiceClient, transports.JobServiceGrpcTransport, "grpc"),
-    (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"),
-])
-def test_job_service_client_client_options_scopes(client_class, transport_class, transport_name):
-    # Check the case scopes are provided.
-    options = client_options.ClientOptions(
-        scopes=["1", "2"],
-    )
-    with mock.patch.object(transport_class, '__init__') as patched:
-        patched.return_value = None
-        client = client_class(client_options=options)
-        patched.assert_called_once_with(
-            credentials=None,
-            credentials_file=None,
-            host=client.DEFAULT_ENDPOINT,
-            scopes=["1", "2"],
-            client_cert_source_for_mtls=None,
-            quota_project_id=None,
-            client_info=transports.base.DEFAULT_CLIENT_INFO,
-        )
-
-@pytest.mark.parametrize("client_class,transport_class,transport_name", [
-    (JobServiceClient, transports.JobServiceGrpcTransport, "grpc"),
-    (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"),
-])
-def test_job_service_client_client_options_credentials_file(client_class, transport_class, transport_name):
-    # Check the case credentials file is provided.
-    options = client_options.ClientOptions(
-        credentials_file="credentials.json"
-    )
-    with mock.patch.object(transport_class, '__init__') as patched:
-        patched.return_value = None
-        client = client_class(client_options=options)
-        patched.assert_called_once_with(
-            credentials=None,
-            credentials_file="credentials.json",
-            host=client.DEFAULT_ENDPOINT,
-            scopes=None,
-            client_cert_source_for_mtls=None,
-            quota_project_id=None,
-            client_info=transports.base.DEFAULT_CLIENT_INFO,
-        )
-
-
-def test_job_service_client_client_options_from_dict():
-    with mock.patch('google.cloud.aiplatform_v1beta1.services.job_service.transports.JobServiceGrpcTransport.__init__') as grpc_transport:
-        grpc_transport.return_value = None
-        client = JobServiceClient(
-            client_options={'api_endpoint': 'squid.clam.whelk'}
-        )
-        grpc_transport.assert_called_once_with(
-            credentials=None,
-            credentials_file=None,
-            host="squid.clam.whelk",
-            scopes=None,
-            client_cert_source_for_mtls=None,
-            quota_project_id=None,
-            client_info=transports.base.DEFAULT_CLIENT_INFO,
-        )
-
-
-def test_create_custom_job(transport: str = 'grpc', request_type=job_service.CreateCustomJobRequest):
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_custom_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = gca_custom_job.CustomJob(
-            name='name_value',
-            display_name='display_name_value',
-            state=job_state.JobState.JOB_STATE_QUEUED,
-        )
-        response = client.create_custom_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.CreateCustomJobRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, gca_custom_job.CustomJob)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.state == job_state.JobState.JOB_STATE_QUEUED
-
-
-def test_create_custom_job_from_dict():
-    test_create_custom_job(request_type=dict)
-
-
-def test_create_custom_job_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_custom_job),
-            '__call__') as call:
-        client.create_custom_job()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.CreateCustomJobRequest()
-
-
-@pytest.mark.asyncio
-async def test_create_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateCustomJobRequest):
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
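-    # Outside of tests the request would carry real fields, e.g. (illustrative
-    # resource names only):
-    #   request = job_service.CreateCustomJobRequest(
-    #       parent="projects/my-project/locations/us-central1",
-    #       custom_job=gca_custom_job.CustomJob(display_name="my-job"),
-    #   )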
-    with mock.patch.object(
-            type(client.transport.create_custom_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_custom_job.CustomJob(
-            name='name_value',
-            display_name='display_name_value',
-            state=job_state.JobState.JOB_STATE_QUEUED,
-        ))
-        response = await client.create_custom_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.CreateCustomJobRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, gca_custom_job.CustomJob)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.state == job_state.JobState.JOB_STATE_QUEUED
-
-
-@pytest.mark.asyncio
-async def test_create_custom_job_async_from_dict():
-    await test_create_custom_job_async(request_type=dict)
-
-
-def test_create_custom_job_field_headers():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = job_service.CreateCustomJobRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_custom_job),
-            '__call__') as call:
-        call.return_value = gca_custom_job.CustomJob()
-        client.create_custom_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_create_custom_job_field_headers_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = job_service.CreateCustomJobRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_custom_job),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_custom_job.CustomJob())
-        await client.create_custom_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_create_custom_job_flattened():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_custom_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = gca_custom_job.CustomJob()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.create_custom_job(
-            parent='parent_value',
-            custom_job=gca_custom_job.CustomJob(name='name_value'),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-        assert args[0].custom_job == gca_custom_job.CustomJob(name='name_value')
-
-
-def test_create_custom_job_flattened_error():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.create_custom_job(
-            job_service.CreateCustomJobRequest(),
-            parent='parent_value',
-            custom_job=gca_custom_job.CustomJob(name='name_value'),
-        )
-
-
-@pytest.mark.asyncio
-async def test_create_custom_job_flattened_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_custom_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_custom_job.CustomJob())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.create_custom_job(
-            parent='parent_value',
-            custom_job=gca_custom_job.CustomJob(name='name_value'),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-        assert args[0].custom_job == gca_custom_job.CustomJob(name='name_value')
-
-
-@pytest.mark.asyncio
-async def test_create_custom_job_flattened_error_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.create_custom_job(
-            job_service.CreateCustomJobRequest(),
-            parent='parent_value',
-            custom_job=gca_custom_job.CustomJob(name='name_value'),
-        )
-
-
-def test_get_custom_job(transport: str = 'grpc', request_type=job_service.GetCustomJobRequest):
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_custom_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = custom_job.CustomJob(
-            name='name_value',
-            display_name='display_name_value',
-            state=job_state.JobState.JOB_STATE_QUEUED,
-        )
-        response = client.get_custom_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.GetCustomJobRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, custom_job.CustomJob)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.state == job_state.JobState.JOB_STATE_QUEUED
-
-
-def test_get_custom_job_from_dict():
-    test_get_custom_job(request_type=dict)
-
-
-def test_get_custom_job_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_custom_job),
-            '__call__') as call:
-        client.get_custom_job()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.GetCustomJobRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetCustomJobRequest):
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_custom_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(custom_job.CustomJob(
-            name='name_value',
-            display_name='display_name_value',
-            state=job_state.JobState.JOB_STATE_QUEUED,
-        ))
-        response = await client.get_custom_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.GetCustomJobRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, custom_job.CustomJob)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.state == job_state.JobState.JOB_STATE_QUEUED
-
-
-@pytest.mark.asyncio
-async def test_get_custom_job_async_from_dict():
-    await test_get_custom_job_async(request_type=dict)
-
-
-def test_get_custom_job_field_headers():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = job_service.GetCustomJobRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_custom_job),
-            '__call__') as call:
-        call.return_value = custom_job.CustomJob()
-        client.get_custom_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_get_custom_job_field_headers_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
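-    # The client is expected to mirror request.name into the
-    # x-goog-request-params metadata entry ('name=name/value' below) so the
-    # backend can route the request.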
-    request = job_service.GetCustomJobRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_custom_job),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(custom_job.CustomJob())
-        await client.get_custom_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_get_custom_job_flattened():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_custom_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = custom_job.CustomJob()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.get_custom_job(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_get_custom_job_flattened_error():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.get_custom_job(
-            job_service.GetCustomJobRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_get_custom_job_flattened_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_custom_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(custom_job.CustomJob())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.get_custom_job(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_get_custom_job_flattened_error_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.get_custom_job(
-            job_service.GetCustomJobRequest(),
-            name='name_value',
-        )
-
-
-def test_list_custom_jobs(transport: str = 'grpc', request_type=job_service.ListCustomJobsRequest):
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_custom_jobs),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = job_service.ListCustomJobsResponse(
-            next_page_token='next_page_token_value',
-        )
-        response = client.list_custom_jobs(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.ListCustomJobsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListCustomJobsPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_custom_jobs_from_dict():
-    test_list_custom_jobs(request_type=dict)
-
-
-def test_list_custom_jobs_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_custom_jobs),
-            '__call__') as call:
-        client.list_custom_jobs()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.ListCustomJobsRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_custom_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListCustomJobsRequest):
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_custom_jobs),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListCustomJobsResponse(
-            next_page_token='next_page_token_value',
-        ))
-        response = await client.list_custom_jobs(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.ListCustomJobsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListCustomJobsAsyncPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_custom_jobs_async_from_dict():
-    await test_list_custom_jobs_async(request_type=dict)
-
-
-def test_list_custom_jobs_field_headers():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = job_service.ListCustomJobsRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_custom_jobs),
-            '__call__') as call:
-        call.return_value = job_service.ListCustomJobsResponse()
-        client.list_custom_jobs(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_custom_jobs_field_headers_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = job_service.ListCustomJobsRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_custom_jobs),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListCustomJobsResponse())
-        await client.list_custom_jobs(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_list_custom_jobs_flattened():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_custom_jobs),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = job_service.ListCustomJobsResponse()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.list_custom_jobs(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-def test_list_custom_jobs_flattened_error():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.list_custom_jobs(
-            job_service.ListCustomJobsRequest(),
-            parent='parent_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_list_custom_jobs_flattened_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_custom_jobs),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListCustomJobsResponse())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.list_custom_jobs(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_custom_jobs_flattened_error_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.list_custom_jobs(
-            job_service.ListCustomJobsRequest(),
-            parent='parent_value',
-        )
-
-
-def test_list_custom_jobs_pager():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_custom_jobs),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            job_service.ListCustomJobsResponse(
-                custom_jobs=[
-                    custom_job.CustomJob(),
-                    custom_job.CustomJob(),
-                    custom_job.CustomJob(),
-                ],
-                next_page_token='abc',
-            ),
-            job_service.ListCustomJobsResponse(
-                custom_jobs=[],
-                next_page_token='def',
-            ),
-            job_service.ListCustomJobsResponse(
-                custom_jobs=[
-                    custom_job.CustomJob(),
-                ],
-                next_page_token='ghi',
-            ),
-            job_service.ListCustomJobsResponse(
-                custom_jobs=[
-                    custom_job.CustomJob(),
-                    custom_job.CustomJob(),
-                ],
-            ),
-            RuntimeError,
-        )
-
-        metadata = ()
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ('parent', ''),
-            )),
-        )
-        pager = client.list_custom_jobs(request={})
-
-        assert pager._metadata == metadata
-
-        results = [i for i in pager]
-        assert len(results) == 6
-        assert all(isinstance(i, custom_job.CustomJob)
-                   for i in results)
-
-def test_list_custom_jobs_pages():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_custom_jobs),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            job_service.ListCustomJobsResponse(
-                custom_jobs=[
-                    custom_job.CustomJob(),
-                    custom_job.CustomJob(),
-                    custom_job.CustomJob(),
-                ],
-                next_page_token='abc',
-            ),
-            job_service.ListCustomJobsResponse(
-                custom_jobs=[],
-                next_page_token='def',
-            ),
-            job_service.ListCustomJobsResponse(
-                custom_jobs=[
-                    custom_job.CustomJob(),
-                ],
-                next_page_token='ghi',
-            ),
-            job_service.ListCustomJobsResponse(
-                custom_jobs=[
-                    custom_job.CustomJob(),
-                    custom_job.CustomJob(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = list(client.list_custom_jobs(request={}).pages)
-        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_custom_jobs_async_pager():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_custom_jobs),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
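-        # Four pages are faked below; the pager should keep fetching while
-        # next_page_token is non-empty, and the trailing RuntimeError guards
-        # against any fetch beyond the final page.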
-        call.side_effect = (
-            job_service.ListCustomJobsResponse(
-                custom_jobs=[
-                    custom_job.CustomJob(),
-                    custom_job.CustomJob(),
-                    custom_job.CustomJob(),
-                ],
-                next_page_token='abc',
-            ),
-            job_service.ListCustomJobsResponse(
-                custom_jobs=[],
-                next_page_token='def',
-            ),
-            job_service.ListCustomJobsResponse(
-                custom_jobs=[
-                    custom_job.CustomJob(),
-                ],
-                next_page_token='ghi',
-            ),
-            job_service.ListCustomJobsResponse(
-                custom_jobs=[
-                    custom_job.CustomJob(),
-                    custom_job.CustomJob(),
-                ],
-            ),
-            RuntimeError,
-        )
-        async_pager = await client.list_custom_jobs(request={},)
-        assert async_pager.next_page_token == 'abc'
-        responses = []
-        async for response in async_pager:
-            responses.append(response)
-
-        assert len(responses) == 6
-        assert all(isinstance(i, custom_job.CustomJob)
-                   for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_custom_jobs_async_pages():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_custom_jobs),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            job_service.ListCustomJobsResponse(
-                custom_jobs=[
-                    custom_job.CustomJob(),
-                    custom_job.CustomJob(),
-                    custom_job.CustomJob(),
-                ],
-                next_page_token='abc',
-            ),
-            job_service.ListCustomJobsResponse(
-                custom_jobs=[],
-                next_page_token='def',
-            ),
-            job_service.ListCustomJobsResponse(
-                custom_jobs=[
-                    custom_job.CustomJob(),
-                ],
-                next_page_token='ghi',
-            ),
-            job_service.ListCustomJobsResponse(
-                custom_jobs=[
-                    custom_job.CustomJob(),
-                    custom_job.CustomJob(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = []
-        async for page_ in (await client.list_custom_jobs(request={})).pages:
-            pages.append(page_)
-        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-def test_delete_custom_job(transport: str = 'grpc', request_type=job_service.DeleteCustomJobRequest):
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_custom_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/spam')
-        response = client.delete_custom_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.DeleteCustomJobRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, future.Future)
-
-
-def test_delete_custom_job_from_dict():
-    test_delete_custom_job(request_type=dict)
-
-
-def test_delete_custom_job_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_custom_job),
-            '__call__') as call:
-        client.delete_custom_job()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.DeleteCustomJobRequest()
-
-
-@pytest.mark.asyncio
-async def test_delete_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteCustomJobRequest):
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_custom_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        response = await client.delete_custom_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.DeleteCustomJobRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, future.Future)
-
-
-@pytest.mark.asyncio
-async def test_delete_custom_job_async_from_dict():
-    await test_delete_custom_job_async(request_type=dict)
-
-
-def test_delete_custom_job_field_headers():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = job_service.DeleteCustomJobRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_custom_job),
-            '__call__') as call:
-        call.return_value = operations_pb2.Operation(name='operations/op')
-        client.delete_custom_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_delete_custom_job_field_headers_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = job_service.DeleteCustomJobRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_custom_job),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
-        await client.delete_custom_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
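-    # kw['metadata'] is the sequence of (key, value) tuples passed to gRPC;
-    # the routing header checked here is one entry in that sequence.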
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_delete_custom_job_flattened():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_custom_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/op')
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.delete_custom_job(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_delete_custom_job_flattened_error():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.delete_custom_job(
-            job_service.DeleteCustomJobRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_delete_custom_job_flattened_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_custom_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.delete_custom_job(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_custom_job_flattened_error_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.delete_custom_job(
-            job_service.DeleteCustomJobRequest(),
-            name='name_value',
-        )
-
-
-def test_cancel_custom_job(transport: str = 'grpc', request_type=job_service.CancelCustomJobRequest):
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_custom_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = None
-        response = client.cancel_custom_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.CancelCustomJobRequest()
-
-    # Establish that the response is the type that we expect.
-    assert response is None
-
-
-def test_cancel_custom_job_from_dict():
-    test_cancel_custom_job(request_type=dict)
-
-
-def test_cancel_custom_job_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_custom_job),
-            '__call__') as call:
-        client.cancel_custom_job()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.CancelCustomJobRequest()
-
-
-@pytest.mark.asyncio
-async def test_cancel_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelCustomJobRequest):
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_custom_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
-        response = await client.cancel_custom_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.CancelCustomJobRequest()
-
-    # Establish that the response is the type that we expect.
-    assert response is None
-
-
-@pytest.mark.asyncio
-async def test_cancel_custom_job_async_from_dict():
-    await test_cancel_custom_job_async(request_type=dict)
-
-
-def test_cancel_custom_job_field_headers():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = job_service.CancelCustomJobRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_custom_job),
-            '__call__') as call:
-        call.return_value = None
-        client.cancel_custom_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_cancel_custom_job_field_headers_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = job_service.CancelCustomJobRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
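-    # CancelCustomJob returns google.protobuf.Empty, which surfaces as None in
-    # Python; the async fake therefore wraps None below.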
-    with mock.patch.object(
-            type(client.transport.cancel_custom_job),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
-        await client.cancel_custom_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_cancel_custom_job_flattened():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_custom_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = None
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.cancel_custom_job(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_cancel_custom_job_flattened_error():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.cancel_custom_job(
-            job_service.CancelCustomJobRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_cancel_custom_job_flattened_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_custom_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.cancel_custom_job(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_cancel_custom_job_flattened_error_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.cancel_custom_job(
-            job_service.CancelCustomJobRequest(),
-            name='name_value',
-        )
-
-
-def test_create_data_labeling_job(transport: str = 'grpc', request_type=job_service.CreateDataLabelingJobRequest):
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_data_labeling_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = gca_data_labeling_job.DataLabelingJob(
-            name='name_value',
-            display_name='display_name_value',
-            datasets=['datasets_value'],
-            labeler_count=1375,
-            instruction_uri='instruction_uri_value',
-            inputs_schema_uri='inputs_schema_uri_value',
-            state=job_state.JobState.JOB_STATE_QUEUED,
-            labeling_progress=1810,
-            specialist_pools=['specialist_pools_value'],
-        )
-        response = client.create_data_labeling_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.CreateDataLabelingJobRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, gca_data_labeling_job.DataLabelingJob)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.datasets == ['datasets_value']
-    assert response.labeler_count == 1375
-    assert response.instruction_uri == 'instruction_uri_value'
-    assert response.inputs_schema_uri == 'inputs_schema_uri_value'
-    assert response.state == job_state.JobState.JOB_STATE_QUEUED
-    assert response.labeling_progress == 1810
-    assert response.specialist_pools == ['specialist_pools_value']
-
-
-def test_create_data_labeling_job_from_dict():
-    test_create_data_labeling_job(request_type=dict)
-
-
-def test_create_data_labeling_job_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_data_labeling_job),
-            '__call__') as call:
-        client.create_data_labeling_job()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.CreateDataLabelingJobRequest()
-
-
-@pytest.mark.asyncio
-async def test_create_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateDataLabelingJobRequest):
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_data_labeling_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_data_labeling_job.DataLabelingJob(
-            name='name_value',
-            display_name='display_name_value',
-            datasets=['datasets_value'],
-            labeler_count=1375,
-            instruction_uri='instruction_uri_value',
-            inputs_schema_uri='inputs_schema_uri_value',
-            state=job_state.JobState.JOB_STATE_QUEUED,
-            labeling_progress=1810,
-            specialist_pools=['specialist_pools_value'],
-        ))
-        response = await client.create_data_labeling_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.CreateDataLabelingJobRequest()
-
-    # Establish that the response is the type that we expect.
- assert isinstance(response, gca_data_labeling_job.DataLabelingJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.datasets == ['datasets_value'] - assert response.labeler_count == 1375 - assert response.instruction_uri == 'instruction_uri_value' - assert response.inputs_schema_uri == 'inputs_schema_uri_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - assert response.labeling_progress == 1810 - assert response.specialist_pools == ['specialist_pools_value'] - - -@pytest.mark.asyncio -async def test_create_data_labeling_job_async_from_dict(): - await test_create_data_labeling_job_async(request_type=dict) - - -def test_create_data_labeling_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CreateDataLabelingJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_data_labeling_job), - '__call__') as call: - call.return_value = gca_data_labeling_job.DataLabelingJob() - client.create_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_data_labeling_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CreateDataLabelingJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_data_labeling_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_data_labeling_job.DataLabelingJob()) - await client.create_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_data_labeling_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_data_labeling_job.DataLabelingJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_data_labeling_job( - parent='parent_value', - data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
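- # The client composes the flattened keyword arguments into a single
- # CreateDataLabelingJobRequest, so args[0] is the assembled request proto.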
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
- assert args[0].data_labeling_job == gca_data_labeling_job.DataLabelingJob(name='name_value')
-
-
-def test_create_data_labeling_job_flattened_error():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.create_data_labeling_job(
- job_service.CreateDataLabelingJobRequest(),
- parent='parent_value',
- data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'),
- )
-
-
-@pytest.mark.asyncio
-async def test_create_data_labeling_job_flattened_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_data_labeling_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_data_labeling_job.DataLabelingJob())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.create_data_labeling_job(
- parent='parent_value',
- data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'),
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
- assert args[0].data_labeling_job == gca_data_labeling_job.DataLabelingJob(name='name_value')
-
-
-@pytest.mark.asyncio
-async def test_create_data_labeling_job_flattened_error_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.create_data_labeling_job(
- job_service.CreateDataLabelingJobRequest(),
- parent='parent_value',
- data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'),
- )
-
-
-def test_get_data_labeling_job(transport: str = 'grpc', request_type=job_service.GetDataLabelingJobRequest):
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_data_labeling_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = data_labeling_job.DataLabelingJob(
- name='name_value',
- display_name='display_name_value',
- datasets=['datasets_value'],
- labeler_count=1375,
- instruction_uri='instruction_uri_value',
- inputs_schema_uri='inputs_schema_uri_value',
- state=job_state.JobState.JOB_STATE_QUEUED,
- labeling_progress=1810,
- specialist_pools=['specialist_pools_value'],
- )
- response = client.get_data_labeling_job(request)
-
- # Establish that the underlying gRPC stub method was called.
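- # Each mock_calls entry unpacks as (name, args, kwargs); args[0] is the
- # request proto that reached the gRPC stub.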
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.GetDataLabelingJobRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, data_labeling_job.DataLabelingJob)
- assert response.name == 'name_value'
- assert response.display_name == 'display_name_value'
- assert response.datasets == ['datasets_value']
- assert response.labeler_count == 1375
- assert response.instruction_uri == 'instruction_uri_value'
- assert response.inputs_schema_uri == 'inputs_schema_uri_value'
- assert response.state == job_state.JobState.JOB_STATE_QUEUED
- assert response.labeling_progress == 1810
- assert response.specialist_pools == ['specialist_pools_value']
-
-
-def test_get_data_labeling_job_from_dict():
- test_get_data_labeling_job(request_type=dict)
-
-
-def test_get_data_labeling_job_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_data_labeling_job),
- '__call__') as call:
- client.get_data_labeling_job()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.GetDataLabelingJobRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetDataLabelingJobRequest):
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_data_labeling_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(data_labeling_job.DataLabelingJob(
- name='name_value',
- display_name='display_name_value',
- datasets=['datasets_value'],
- labeler_count=1375,
- instruction_uri='instruction_uri_value',
- inputs_schema_uri='inputs_schema_uri_value',
- state=job_state.JobState.JOB_STATE_QUEUED,
- labeling_progress=1810,
- specialist_pools=['specialist_pools_value'],
- ))
- response = await client.get_data_labeling_job(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.GetDataLabelingJobRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, data_labeling_job.DataLabelingJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.datasets == ['datasets_value'] - assert response.labeler_count == 1375 - assert response.instruction_uri == 'instruction_uri_value' - assert response.inputs_schema_uri == 'inputs_schema_uri_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - assert response.labeling_progress == 1810 - assert response.specialist_pools == ['specialist_pools_value'] - - -@pytest.mark.asyncio -async def test_get_data_labeling_job_async_from_dict(): - await test_get_data_labeling_job_async(request_type=dict) - - -def test_get_data_labeling_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetDataLabelingJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_data_labeling_job), - '__call__') as call: - call.return_value = data_labeling_job.DataLabelingJob() - client.get_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_data_labeling_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetDataLabelingJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_data_labeling_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(data_labeling_job.DataLabelingJob()) - await client.get_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_data_labeling_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = data_labeling_job.DataLabelingJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_data_labeling_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-def test_get_data_labeling_job_flattened_error():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.get_data_labeling_job(
- job_service.GetDataLabelingJobRequest(),
- name='name_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_get_data_labeling_job_flattened_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_data_labeling_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(data_labeling_job.DataLabelingJob())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.get_data_labeling_job(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_get_data_labeling_job_flattened_error_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.get_data_labeling_job(
- job_service.GetDataLabelingJobRequest(),
- name='name_value',
- )
-
-
-def test_list_data_labeling_jobs(transport: str = 'grpc', request_type=job_service.ListDataLabelingJobsRequest):
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_data_labeling_jobs),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = job_service.ListDataLabelingJobsResponse(
- next_page_token='next_page_token_value',
- )
- response = client.list_data_labeling_jobs(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.ListDataLabelingJobsRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ListDataLabelingJobsPager)
- assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_data_labeling_jobs_from_dict():
- test_list_data_labeling_jobs(request_type=dict)
-
-
-def test_list_data_labeling_jobs_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
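- # Patching __call__ on the stub's type intercepts the invocation itself,
- # so no real channel traffic occurs.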
- with mock.patch.object(
- type(client.transport.list_data_labeling_jobs),
- '__call__') as call:
- client.list_data_labeling_jobs()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.ListDataLabelingJobsRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_data_labeling_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListDataLabelingJobsRequest):
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_data_labeling_jobs),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListDataLabelingJobsResponse(
- next_page_token='next_page_token_value',
- ))
- response = await client.list_data_labeling_jobs(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.ListDataLabelingJobsRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ListDataLabelingJobsAsyncPager)
- assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_data_labeling_jobs_async_from_dict():
- await test_list_data_labeling_jobs_async(request_type=dict)
-
-
-def test_list_data_labeling_jobs_field_headers():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = job_service.ListDataLabelingJobsRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_data_labeling_jobs),
- '__call__') as call:
- call.return_value = job_service.ListDataLabelingJobsResponse()
- client.list_data_labeling_jobs(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_data_labeling_jobs_field_headers_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = job_service.ListDataLabelingJobsRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_data_labeling_jobs),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListDataLabelingJobsResponse())
- await client.list_data_labeling_jobs(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
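- # kw['metadata'] is the metadata sequence handed to the stub; the routing
- # header must carry the URI-derived parent value.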
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-def test_list_data_labeling_jobs_flattened():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_data_labeling_jobs),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = job_service.ListDataLabelingJobsResponse()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.list_data_labeling_jobs(
- parent='parent_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
-
-
-def test_list_data_labeling_jobs_flattened_error():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.list_data_labeling_jobs(
- job_service.ListDataLabelingJobsRequest(),
- parent='parent_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_list_data_labeling_jobs_flattened_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_data_labeling_jobs),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListDataLabelingJobsResponse())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.list_data_labeling_jobs(
- parent='parent_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_data_labeling_jobs_flattened_error_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.list_data_labeling_jobs(
- job_service.ListDataLabelingJobsRequest(),
- parent='parent_value',
- )
-
-
-def test_list_data_labeling_jobs_pager():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_data_labeling_jobs),
- '__call__') as call:
- # Set the response to a series of pages.
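- # Each successive stub call consumes one side_effect entry; the trailing
- # RuntimeError fails the test if the pager fetches more pages than given.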
- call.side_effect = (
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[
- data_labeling_job.DataLabelingJob(),
- data_labeling_job.DataLabelingJob(),
- data_labeling_job.DataLabelingJob(),
- ],
- next_page_token='abc',
- ),
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[],
- next_page_token='def',
- ),
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[
- data_labeling_job.DataLabelingJob(),
- ],
- next_page_token='ghi',
- ),
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[
- data_labeling_job.DataLabelingJob(),
- data_labeling_job.DataLabelingJob(),
- ],
- ),
- RuntimeError,
- )
-
- metadata = ()
- metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata((
- ('parent', ''),
- )),
- )
- pager = client.list_data_labeling_jobs(request={})
-
- assert pager._metadata == metadata
-
- results = [i for i in pager]
- assert len(results) == 6
- assert all(isinstance(i, data_labeling_job.DataLabelingJob)
- for i in results)
-
-def test_list_data_labeling_jobs_pages():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_data_labeling_jobs),
- '__call__') as call:
- # Set the response to a series of pages.
- call.side_effect = (
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[
- data_labeling_job.DataLabelingJob(),
- data_labeling_job.DataLabelingJob(),
- data_labeling_job.DataLabelingJob(),
- ],
- next_page_token='abc',
- ),
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[],
- next_page_token='def',
- ),
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[
- data_labeling_job.DataLabelingJob(),
- ],
- next_page_token='ghi',
- ),
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[
- data_labeling_job.DataLabelingJob(),
- data_labeling_job.DataLabelingJob(),
- ],
- ),
- RuntimeError,
- )
- pages = list(client.list_data_labeling_jobs(request={}).pages)
- for page_, token in zip(pages, ['abc','def','ghi', '']):
- assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_data_labeling_jobs_async_pager():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_data_labeling_jobs),
- '__call__', new_callable=mock.AsyncMock) as call:
- # Set the response to a series of pages.
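- # new_callable=mock.AsyncMock makes every stub invocation awaitable, as
- # the async pager expects.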
- call.side_effect = (
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[
- data_labeling_job.DataLabelingJob(),
- data_labeling_job.DataLabelingJob(),
- data_labeling_job.DataLabelingJob(),
- ],
- next_page_token='abc',
- ),
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[],
- next_page_token='def',
- ),
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[
- data_labeling_job.DataLabelingJob(),
- ],
- next_page_token='ghi',
- ),
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[
- data_labeling_job.DataLabelingJob(),
- data_labeling_job.DataLabelingJob(),
- ],
- ),
- RuntimeError,
- )
- async_pager = await client.list_data_labeling_jobs(request={},)
- assert async_pager.next_page_token == 'abc'
- responses = []
- async for response in async_pager:
- responses.append(response)
-
- assert len(responses) == 6
- assert all(isinstance(i, data_labeling_job.DataLabelingJob)
- for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_data_labeling_jobs_async_pages():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_data_labeling_jobs),
- '__call__', new_callable=mock.AsyncMock) as call:
- # Set the response to a series of pages.
- call.side_effect = (
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[
- data_labeling_job.DataLabelingJob(),
- data_labeling_job.DataLabelingJob(),
- data_labeling_job.DataLabelingJob(),
- ],
- next_page_token='abc',
- ),
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[],
- next_page_token='def',
- ),
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[
- data_labeling_job.DataLabelingJob(),
- ],
- next_page_token='ghi',
- ),
- job_service.ListDataLabelingJobsResponse(
- data_labeling_jobs=[
- data_labeling_job.DataLabelingJob(),
- data_labeling_job.DataLabelingJob(),
- ],
- ),
- RuntimeError,
- )
- pages = []
- async for page_ in (await client.list_data_labeling_jobs(request={})).pages:
- pages.append(page_)
- for page_, token in zip(pages, ['abc','def','ghi', '']):
- assert page_.raw_page.next_page_token == token
-
-def test_delete_data_labeling_job(transport: str = 'grpc', request_type=job_service.DeleteDataLabelingJobRequest):
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_data_labeling_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = operations_pb2.Operation(name='operations/spam')
- response = client.delete_data_labeling_job(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.DeleteDataLabelingJobRequest()
-
- # Establish that the response is the type that we expect.
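- # Delete is a long-running operation: the raw operations_pb2.Operation is
- # wrapped by the client in an api_core future.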
- assert isinstance(response, future.Future) - - -def test_delete_data_labeling_job_from_dict(): - test_delete_data_labeling_job(request_type=dict) - - -def test_delete_data_labeling_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_data_labeling_job), - '__call__') as call: - client.delete_data_labeling_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteDataLabelingJobRequest() - - -@pytest.mark.asyncio -async def test_delete_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteDataLabelingJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteDataLabelingJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_data_labeling_job_async_from_dict(): - await test_delete_data_labeling_job_async(request_type=dict) - - -def test_delete_data_labeling_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteDataLabelingJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_data_labeling_job), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_data_labeling_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteDataLabelingJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(
- type(client.transport.delete_data_labeling_job),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
- await client.delete_data_labeling_job(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-def test_delete_data_labeling_job_flattened():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_data_labeling_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = operations_pb2.Operation(name='operations/op')
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.delete_data_labeling_job(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-def test_delete_data_labeling_job_flattened_error():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.delete_data_labeling_job(
- job_service.DeleteDataLabelingJobRequest(),
- name='name_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_delete_data_labeling_job_flattened_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_data_labeling_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- operations_pb2.Operation(name='operations/spam')
- )
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.delete_data_labeling_job(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_data_labeling_job_flattened_error_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.delete_data_labeling_job(
- job_service.DeleteDataLabelingJobRequest(),
- name='name_value',
- )
-
-
-def test_cancel_data_labeling_job(transport: str = 'grpc', request_type=job_service.CancelDataLabelingJobRequest):
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
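- # request_type is parametrized; the *_from_dict variant reruns this test
- # with a plain dict to cover request coercion.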
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.cancel_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelDataLabelingJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_cancel_data_labeling_job_from_dict(): - test_cancel_data_labeling_job(request_type=dict) - - -def test_cancel_data_labeling_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_data_labeling_job), - '__call__') as call: - client.cancel_data_labeling_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelDataLabelingJobRequest() - - -@pytest.mark.asyncio -async def test_cancel_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelDataLabelingJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.cancel_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelDataLabelingJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_cancel_data_labeling_job_async_from_dict(): - await test_cancel_data_labeling_job_async(request_type=dict) - - -def test_cancel_data_labeling_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CancelDataLabelingJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_data_labeling_job), - '__call__') as call: - call.return_value = None - client.cancel_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_cancel_data_labeling_job_field_headers_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = job_service.CancelDataLabelingJobRequest()
-
- request.name = 'name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.cancel_data_labeling_job),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
- await client.cancel_data_labeling_job(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-def test_cancel_data_labeling_job_flattened():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.cancel_data_labeling_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = None
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.cancel_data_labeling_job(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-def test_cancel_data_labeling_job_flattened_error():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.cancel_data_labeling_job(
- job_service.CancelDataLabelingJobRequest(),
- name='name_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_cancel_data_labeling_job_flattened_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.cancel_data_labeling_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.cancel_data_labeling_job(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_cancel_data_labeling_job_flattened_error_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
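- # The ValueError is raised client-side, before any RPC is attempted.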
- with pytest.raises(ValueError): - await client.cancel_data_labeling_job( - job_service.CancelDataLabelingJobRequest(), - name='name_value', - ) - - -def test_create_hyperparameter_tuning_job(transport: str = 'grpc', request_type=job_service.CreateHyperparameterTuningJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob( - name='name_value', - display_name='display_name_value', - max_trial_count=1609, - parallel_trial_count=2128, - max_failed_trial_count=2317, - state=job_state.JobState.JOB_STATE_QUEUED, - ) - response = client.create_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateHyperparameterTuningJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_hyperparameter_tuning_job.HyperparameterTuningJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.max_trial_count == 1609 - assert response.parallel_trial_count == 2128 - assert response.max_failed_trial_count == 2317 - assert response.state == job_state.JobState.JOB_STATE_QUEUED - - -def test_create_hyperparameter_tuning_job_from_dict(): - test_create_hyperparameter_tuning_job(request_type=dict) - - -def test_create_hyperparameter_tuning_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_hyperparameter_tuning_job), - '__call__') as call: - client.create_hyperparameter_tuning_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateHyperparameterTuningJobRequest() - - -@pytest.mark.asyncio -async def test_create_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateHyperparameterTuningJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
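- # As in the sync variant, the wrapped HyperparameterTuningJob carries
- # arbitrary sentinel values for every asserted field.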
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_hyperparameter_tuning_job.HyperparameterTuningJob(
- name='name_value',
- display_name='display_name_value',
- max_trial_count=1609,
- parallel_trial_count=2128,
- max_failed_trial_count=2317,
- state=job_state.JobState.JOB_STATE_QUEUED,
- ))
- response = await client.create_hyperparameter_tuning_job(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.CreateHyperparameterTuningJobRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, gca_hyperparameter_tuning_job.HyperparameterTuningJob)
- assert response.name == 'name_value'
- assert response.display_name == 'display_name_value'
- assert response.max_trial_count == 1609
- assert response.parallel_trial_count == 2128
- assert response.max_failed_trial_count == 2317
- assert response.state == job_state.JobState.JOB_STATE_QUEUED
-
-
-@pytest.mark.asyncio
-async def test_create_hyperparameter_tuning_job_async_from_dict():
- await test_create_hyperparameter_tuning_job_async(request_type=dict)
-
-
-def test_create_hyperparameter_tuning_job_field_headers():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = job_service.CreateHyperparameterTuningJobRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_hyperparameter_tuning_job),
- '__call__') as call:
- call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob()
- client.create_hyperparameter_tuning_job(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_create_hyperparameter_tuning_job_field_headers_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = job_service.CreateHyperparameterTuningJobRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_hyperparameter_tuning_job),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_hyperparameter_tuning_job.HyperparameterTuningJob())
- await client.create_hyperparameter_tuning_job(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-def test_create_hyperparameter_tuning_job_flattened():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_hyperparameter_tuning_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.create_hyperparameter_tuning_job(
- parent='parent_value',
- hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'),
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
- assert args[0].hyperparameter_tuning_job == gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value')
-
-
-def test_create_hyperparameter_tuning_job_flattened_error():
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.create_hyperparameter_tuning_job(
- job_service.CreateHyperparameterTuningJobRequest(),
- parent='parent_value',
- hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'),
- )
-
-
-@pytest.mark.asyncio
-async def test_create_hyperparameter_tuning_job_flattened_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_hyperparameter_tuning_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_hyperparameter_tuning_job.HyperparameterTuningJob())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.create_hyperparameter_tuning_job(
- parent='parent_value',
- hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'),
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
- assert args[0].hyperparameter_tuning_job == gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value')
-
-
-@pytest.mark.asyncio
-async def test_create_hyperparameter_tuning_job_flattened_error_async():
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.create_hyperparameter_tuning_job(
- job_service.CreateHyperparameterTuningJobRequest(),
- parent='parent_value',
- hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'),
- )
-
-
-def test_get_hyperparameter_tuning_job(transport: str = 'grpc', request_type=job_service.GetHyperparameterTuningJobRequest):
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_hyperparameter_tuning_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob(
- name='name_value',
- display_name='display_name_value',
- max_trial_count=1609,
- parallel_trial_count=2128,
- max_failed_trial_count=2317,
- state=job_state.JobState.JOB_STATE_QUEUED,
- )
- response = client.get_hyperparameter_tuning_job(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.GetHyperparameterTuningJobRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, hyperparameter_tuning_job.HyperparameterTuningJob)
- assert response.name == 'name_value'
- assert response.display_name == 'display_name_value'
- assert response.max_trial_count == 1609
- assert response.parallel_trial_count == 2128
- assert response.max_failed_trial_count == 2317
- assert response.state == job_state.JobState.JOB_STATE_QUEUED
-
-
-def test_get_hyperparameter_tuning_job_from_dict():
- test_get_hyperparameter_tuning_job(request_type=dict)
-
-
-def test_get_hyperparameter_tuning_job_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = JobServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_hyperparameter_tuning_job),
- '__call__') as call:
- client.get_hyperparameter_tuning_job()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.GetHyperparameterTuningJobRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetHyperparameterTuningJobRequest):
- client = JobServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_hyperparameter_tuning_job),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(hyperparameter_tuning_job.HyperparameterTuningJob(
- name='name_value',
- display_name='display_name_value',
- max_trial_count=1609,
- parallel_trial_count=2128,
- max_failed_trial_count=2317,
- state=job_state.JobState.JOB_STATE_QUEUED,
- ))
- response = await client.get_hyperparameter_tuning_job(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == job_service.GetHyperparameterTuningJobRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, hyperparameter_tuning_job.HyperparameterTuningJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.max_trial_count == 1609 - assert response.parallel_trial_count == 2128 - assert response.max_failed_trial_count == 2317 - assert response.state == job_state.JobState.JOB_STATE_QUEUED - - -@pytest.mark.asyncio -async def test_get_hyperparameter_tuning_job_async_from_dict(): - await test_get_hyperparameter_tuning_job_async(request_type=dict) - - -def test_get_hyperparameter_tuning_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetHyperparameterTuningJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_hyperparameter_tuning_job), - '__call__') as call: - call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob() - client.get_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_hyperparameter_tuning_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetHyperparameterTuningJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_hyperparameter_tuning_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(hyperparameter_tuning_job.HyperparameterTuningJob()) - await client.get_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_hyperparameter_tuning_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_hyperparameter_tuning_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_get_hyperparameter_tuning_job_flattened_error():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.get_hyperparameter_tuning_job(
-            job_service.GetHyperparameterTuningJobRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_get_hyperparameter_tuning_job_flattened_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_hyperparameter_tuning_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(hyperparameter_tuning_job.HyperparameterTuningJob())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.get_hyperparameter_tuning_job(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_get_hyperparameter_tuning_job_flattened_error_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.get_hyperparameter_tuning_job(
-            job_service.GetHyperparameterTuningJobRequest(),
-            name='name_value',
-        )
-
-
-def test_list_hyperparameter_tuning_jobs(transport: str = 'grpc', request_type=job_service.ListHyperparameterTuningJobsRequest):
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_hyperparameter_tuning_jobs),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = job_service.ListHyperparameterTuningJobsResponse(
-            next_page_token='next_page_token_value',
-        )
-        response = client.list_hyperparameter_tuning_jobs(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.ListHyperparameterTuningJobsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListHyperparameterTuningJobsPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_hyperparameter_tuning_jobs_from_dict():
-    test_list_hyperparameter_tuning_jobs(request_type=dict)
-
-
-def test_list_hyperparameter_tuning_jobs_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
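-    # In proto3 there are no required fields at runtime, so calling the
-    # method with no arguments sends a default (empty) request message.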
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_hyperparameter_tuning_jobs),
-            '__call__') as call:
-        client.list_hyperparameter_tuning_jobs()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.ListHyperparameterTuningJobsRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_hyperparameter_tuning_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListHyperparameterTuningJobsRequest):
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_hyperparameter_tuning_jobs),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListHyperparameterTuningJobsResponse(
-            next_page_token='next_page_token_value',
-        ))
-        response = await client.list_hyperparameter_tuning_jobs(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.ListHyperparameterTuningJobsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListHyperparameterTuningJobsAsyncPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_hyperparameter_tuning_jobs_async_from_dict():
-    await test_list_hyperparameter_tuning_jobs_async(request_type=dict)
-
-
-def test_list_hyperparameter_tuning_jobs_field_headers():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = job_service.ListHyperparameterTuningJobsRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_hyperparameter_tuning_jobs),
-            '__call__') as call:
-        call.return_value = job_service.ListHyperparameterTuningJobsResponse()
-        client.list_hyperparameter_tuning_jobs(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_hyperparameter_tuning_jobs_field_headers_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = job_service.ListHyperparameterTuningJobsRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_hyperparameter_tuning_jobs),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListHyperparameterTuningJobsResponse())
-        await client.list_hyperparameter_tuning_jobs(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_list_hyperparameter_tuning_jobs_flattened():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_hyperparameter_tuning_jobs),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = job_service.ListHyperparameterTuningJobsResponse()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.list_hyperparameter_tuning_jobs(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-def test_list_hyperparameter_tuning_jobs_flattened_error():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.list_hyperparameter_tuning_jobs(
-            job_service.ListHyperparameterTuningJobsRequest(),
-            parent='parent_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_list_hyperparameter_tuning_jobs_flattened_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_hyperparameter_tuning_jobs),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListHyperparameterTuningJobsResponse())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.list_hyperparameter_tuning_jobs(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_hyperparameter_tuning_jobs_flattened_error_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.list_hyperparameter_tuning_jobs(
-            job_service.ListHyperparameterTuningJobsRequest(),
-            parent='parent_value',
-        )
-
-
-def test_list_hyperparameter_tuning_jobs_pager():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
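-    # The pager issues one RPC per page: each entry queued on
-    # call.side_effect below is consumed by a successive stub invocation,
-    # and the trailing RuntimeError would only surface if the pager
-    # requested a page past the last response.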
-    with mock.patch.object(
-            type(client.transport.list_hyperparameter_tuning_jobs),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            job_service.ListHyperparameterTuningJobsResponse(
-                hyperparameter_tuning_jobs=[
-                    hyperparameter_tuning_job.HyperparameterTuningJob(),
-                    hyperparameter_tuning_job.HyperparameterTuningJob(),
-                    hyperparameter_tuning_job.HyperparameterTuningJob(),
-                ],
-                next_page_token='abc',
-            ),
-            job_service.ListHyperparameterTuningJobsResponse(
-                hyperparameter_tuning_jobs=[],
-                next_page_token='def',
-            ),
-            job_service.ListHyperparameterTuningJobsResponse(
-                hyperparameter_tuning_jobs=[
-                    hyperparameter_tuning_job.HyperparameterTuningJob(),
-                ],
-                next_page_token='ghi',
-            ),
-            job_service.ListHyperparameterTuningJobsResponse(
-                hyperparameter_tuning_jobs=[
-                    hyperparameter_tuning_job.HyperparameterTuningJob(),
-                    hyperparameter_tuning_job.HyperparameterTuningJob(),
-                ],
-            ),
-            RuntimeError,
-        )
-
-        metadata = ()
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ('parent', ''),
-            )),
-        )
-        pager = client.list_hyperparameter_tuning_jobs(request={})
-
-        assert pager._metadata == metadata
-
-        results = [i for i in pager]
-        assert len(results) == 6
-        assert all(isinstance(i, hyperparameter_tuning_job.HyperparameterTuningJob)
-                   for i in results)
-
-def test_list_hyperparameter_tuning_jobs_pages():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_hyperparameter_tuning_jobs),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            job_service.ListHyperparameterTuningJobsResponse(
-                hyperparameter_tuning_jobs=[
-                    hyperparameter_tuning_job.HyperparameterTuningJob(),
-                    hyperparameter_tuning_job.HyperparameterTuningJob(),
-                    hyperparameter_tuning_job.HyperparameterTuningJob(),
-                ],
-                next_page_token='abc',
-            ),
-            job_service.ListHyperparameterTuningJobsResponse(
-                hyperparameter_tuning_jobs=[],
-                next_page_token='def',
-            ),
-            job_service.ListHyperparameterTuningJobsResponse(
-                hyperparameter_tuning_jobs=[
-                    hyperparameter_tuning_job.HyperparameterTuningJob(),
-                ],
-                next_page_token='ghi',
-            ),
-            job_service.ListHyperparameterTuningJobsResponse(
-                hyperparameter_tuning_jobs=[
-                    hyperparameter_tuning_job.HyperparameterTuningJob(),
-                    hyperparameter_tuning_job.HyperparameterTuningJob(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = list(client.list_hyperparameter_tuning_jobs(request={}).pages)
-        for page_, token in zip(pages, ['abc','def','ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_hyperparameter_tuning_jobs_async_pager():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_hyperparameter_tuning_jobs),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            job_service.ListHyperparameterTuningJobsResponse(
-                hyperparameter_tuning_jobs=[
-                    hyperparameter_tuning_job.HyperparameterTuningJob(),
-                    hyperparameter_tuning_job.HyperparameterTuningJob(),
-                    hyperparameter_tuning_job.HyperparameterTuningJob(),
-                ],
-                next_page_token='abc',
-            ),
-            job_service.ListHyperparameterTuningJobsResponse(
-                hyperparameter_tuning_jobs=[],
-                next_page_token='def',
-            ),
-            job_service.ListHyperparameterTuningJobsResponse(
-                hyperparameter_tuning_jobs=[
-                    hyperparameter_tuning_job.HyperparameterTuningJob(),
-                ],
-                next_page_token='ghi',
-            ),
-            job_service.ListHyperparameterTuningJobsResponse(
-                hyperparameter_tuning_jobs=[
-                    hyperparameter_tuning_job.HyperparameterTuningJob(),
-                    hyperparameter_tuning_job.HyperparameterTuningJob(),
-                ],
-            ),
-            RuntimeError,
-        )
-        async_pager = await client.list_hyperparameter_tuning_jobs(request={},)
-        assert async_pager.next_page_token == 'abc'
-        responses = []
-        async for response in async_pager:
-            responses.append(response)
-
-        assert len(responses) == 6
-        assert all(isinstance(i, hyperparameter_tuning_job.HyperparameterTuningJob)
-                   for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_hyperparameter_tuning_jobs_async_pages():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_hyperparameter_tuning_jobs),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            job_service.ListHyperparameterTuningJobsResponse(
-                hyperparameter_tuning_jobs=[
-                    hyperparameter_tuning_job.HyperparameterTuningJob(),
-                    hyperparameter_tuning_job.HyperparameterTuningJob(),
-                    hyperparameter_tuning_job.HyperparameterTuningJob(),
-                ],
-                next_page_token='abc',
-            ),
-            job_service.ListHyperparameterTuningJobsResponse(
-                hyperparameter_tuning_jobs=[],
-                next_page_token='def',
-            ),
-            job_service.ListHyperparameterTuningJobsResponse(
-                hyperparameter_tuning_jobs=[
-                    hyperparameter_tuning_job.HyperparameterTuningJob(),
-                ],
-                next_page_token='ghi',
-            ),
-            job_service.ListHyperparameterTuningJobsResponse(
-                hyperparameter_tuning_jobs=[
-                    hyperparameter_tuning_job.HyperparameterTuningJob(),
-                    hyperparameter_tuning_job.HyperparameterTuningJob(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = []
-        async for page_ in (await client.list_hyperparameter_tuning_jobs(request={})).pages:
-            pages.append(page_)
-        for page_, token in zip(pages, ['abc','def','ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-def test_delete_hyperparameter_tuning_job(transport: str = 'grpc', request_type=job_service.DeleteHyperparameterTuningJobRequest):
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_hyperparameter_tuning_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/spam')
-        response = client.delete_hyperparameter_tuning_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
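-        # Delete is a long-running operation: the stub returns an
-        # operations_pb2.Operation, which the client wraps in a future
-        # (asserted further below).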
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.DeleteHyperparameterTuningJobRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, future.Future)
-
-
-def test_delete_hyperparameter_tuning_job_from_dict():
-    test_delete_hyperparameter_tuning_job(request_type=dict)
-
-
-def test_delete_hyperparameter_tuning_job_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_hyperparameter_tuning_job),
-            '__call__') as call:
-        client.delete_hyperparameter_tuning_job()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.DeleteHyperparameterTuningJobRequest()
-
-
-@pytest.mark.asyncio
-async def test_delete_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteHyperparameterTuningJobRequest):
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_hyperparameter_tuning_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        response = await client.delete_hyperparameter_tuning_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.DeleteHyperparameterTuningJobRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, future.Future)
-
-
-@pytest.mark.asyncio
-async def test_delete_hyperparameter_tuning_job_async_from_dict():
-    await test_delete_hyperparameter_tuning_job_async(request_type=dict)
-
-
-def test_delete_hyperparameter_tuning_job_field_headers():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = job_service.DeleteHyperparameterTuningJobRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_hyperparameter_tuning_job),
-            '__call__') as call:
-        call.return_value = operations_pb2.Operation(name='operations/op')
-        client.delete_hyperparameter_tuning_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
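-    # The x-goog-request-params header carries URI-bound fields (here the
-    # resource name) so the backend can route the request; it is recorded
-    # in the mocked call's keyword arguments under 'metadata'.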
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_delete_hyperparameter_tuning_job_field_headers_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = job_service.DeleteHyperparameterTuningJobRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_hyperparameter_tuning_job),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
-        await client.delete_hyperparameter_tuning_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_delete_hyperparameter_tuning_job_flattened():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_hyperparameter_tuning_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/op')
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.delete_hyperparameter_tuning_job(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_delete_hyperparameter_tuning_job_flattened_error():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.delete_hyperparameter_tuning_job(
-            job_service.DeleteHyperparameterTuningJobRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_delete_hyperparameter_tuning_job_flattened_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_hyperparameter_tuning_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.delete_hyperparameter_tuning_job(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_hyperparameter_tuning_job_flattened_error_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.delete_hyperparameter_tuning_job(
-            job_service.DeleteHyperparameterTuningJobRequest(),
-            name='name_value',
-        )
-
-
-def test_cancel_hyperparameter_tuning_job(transport: str = 'grpc', request_type=job_service.CancelHyperparameterTuningJobRequest):
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_hyperparameter_tuning_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = None
-        response = client.cancel_hyperparameter_tuning_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.CancelHyperparameterTuningJobRequest()
-
-    # Establish that the response is the type that we expect.
-    assert response is None
-
-
-def test_cancel_hyperparameter_tuning_job_from_dict():
-    test_cancel_hyperparameter_tuning_job(request_type=dict)
-
-
-def test_cancel_hyperparameter_tuning_job_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_hyperparameter_tuning_job),
-            '__call__') as call:
-        client.cancel_hyperparameter_tuning_job()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.CancelHyperparameterTuningJobRequest()
-
-
-@pytest.mark.asyncio
-async def test_cancel_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelHyperparameterTuningJobRequest):
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_hyperparameter_tuning_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
-        response = await client.cancel_hyperparameter_tuning_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.CancelHyperparameterTuningJobRequest()
-
-    # Establish that the response is the type that we expect.
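-    # Cancel RPCs return google.protobuf.Empty, which the client surfaces
-    # to callers as None.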
-    assert response is None
-
-
-@pytest.mark.asyncio
-async def test_cancel_hyperparameter_tuning_job_async_from_dict():
-    await test_cancel_hyperparameter_tuning_job_async(request_type=dict)
-
-
-def test_cancel_hyperparameter_tuning_job_field_headers():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = job_service.CancelHyperparameterTuningJobRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_hyperparameter_tuning_job),
-            '__call__') as call:
-        call.return_value = None
-        client.cancel_hyperparameter_tuning_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_cancel_hyperparameter_tuning_job_field_headers_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = job_service.CancelHyperparameterTuningJobRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_hyperparameter_tuning_job),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
-        await client.cancel_hyperparameter_tuning_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_cancel_hyperparameter_tuning_job_flattened():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_hyperparameter_tuning_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = None
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.cancel_hyperparameter_tuning_job(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_cancel_hyperparameter_tuning_job_flattened_error():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
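-    # Flattened keyword arguments are a convenience that builds the request
-    # object internally; combining them with an explicit request instance
-    # would be ambiguous, so the client raises ValueError instead.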
-    with pytest.raises(ValueError):
-        client.cancel_hyperparameter_tuning_job(
-            job_service.CancelHyperparameterTuningJobRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_cancel_hyperparameter_tuning_job_flattened_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_hyperparameter_tuning_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.cancel_hyperparameter_tuning_job(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_cancel_hyperparameter_tuning_job_flattened_error_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.cancel_hyperparameter_tuning_job(
-            job_service.CancelHyperparameterTuningJobRequest(),
-            name='name_value',
-        )
-
-
-def test_create_batch_prediction_job(transport: str = 'grpc', request_type=job_service.CreateBatchPredictionJobRequest):
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_batch_prediction_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = gca_batch_prediction_job.BatchPredictionJob(
-            name='name_value',
-            display_name='display_name_value',
-            model='model_value',
-            generate_explanation=True,
-            state=job_state.JobState.JOB_STATE_QUEUED,
-        )
-        response = client.create_batch_prediction_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.CreateBatchPredictionJobRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, gca_batch_prediction_job.BatchPredictionJob)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.model == 'model_value'
-    assert response.generate_explanation is True
-    assert response.state == job_state.JobState.JOB_STATE_QUEUED
-
-
-def test_create_batch_prediction_job_from_dict():
-    test_create_batch_prediction_job(request_type=dict)
-
-
-def test_create_batch_prediction_job_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_batch_prediction_job),
-            '__call__') as call:
-        client.create_batch_prediction_job()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.CreateBatchPredictionJobRequest()
-
-
-@pytest.mark.asyncio
-async def test_create_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateBatchPredictionJobRequest):
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_batch_prediction_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_batch_prediction_job.BatchPredictionJob(
-            name='name_value',
-            display_name='display_name_value',
-            model='model_value',
-            generate_explanation=True,
-            state=job_state.JobState.JOB_STATE_QUEUED,
-        ))
-        response = await client.create_batch_prediction_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.CreateBatchPredictionJobRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, gca_batch_prediction_job.BatchPredictionJob)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.model == 'model_value'
-    assert response.generate_explanation is True
-    assert response.state == job_state.JobState.JOB_STATE_QUEUED
-
-
-@pytest.mark.asyncio
-async def test_create_batch_prediction_job_async_from_dict():
-    await test_create_batch_prediction_job_async(request_type=dict)
-
-
-def test_create_batch_prediction_job_field_headers():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = job_service.CreateBatchPredictionJobRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_batch_prediction_job),
-            '__call__') as call:
-        call.return_value = gca_batch_prediction_job.BatchPredictionJob()
-        client.create_batch_prediction_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_create_batch_prediction_job_field_headers_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = job_service.CreateBatchPredictionJobRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_batch_prediction_job),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_batch_prediction_job.BatchPredictionJob())
-        await client.create_batch_prediction_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_create_batch_prediction_job_flattened():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_batch_prediction_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = gca_batch_prediction_job.BatchPredictionJob()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.create_batch_prediction_job(
-            parent='parent_value',
-            batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-        assert args[0].batch_prediction_job == gca_batch_prediction_job.BatchPredictionJob(name='name_value')
-
-
-def test_create_batch_prediction_job_flattened_error():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.create_batch_prediction_job(
-            job_service.CreateBatchPredictionJobRequest(),
-            parent='parent_value',
-            batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'),
-        )
-
-
-@pytest.mark.asyncio
-async def test_create_batch_prediction_job_flattened_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_batch_prediction_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_batch_prediction_job.BatchPredictionJob())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.create_batch_prediction_job(
-            parent='parent_value',
-            batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-        assert args[0].batch_prediction_job == gca_batch_prediction_job.BatchPredictionJob(name='name_value')
-
-
-@pytest.mark.asyncio
-async def test_create_batch_prediction_job_flattened_error_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.create_batch_prediction_job(
-            job_service.CreateBatchPredictionJobRequest(),
-            parent='parent_value',
-            batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'),
-        )
-
-
-def test_get_batch_prediction_job(transport: str = 'grpc', request_type=job_service.GetBatchPredictionJobRequest):
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_batch_prediction_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = batch_prediction_job.BatchPredictionJob(
-            name='name_value',
-            display_name='display_name_value',
-            model='model_value',
-            generate_explanation=True,
-            state=job_state.JobState.JOB_STATE_QUEUED,
-        )
-        response = client.get_batch_prediction_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.GetBatchPredictionJobRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, batch_prediction_job.BatchPredictionJob)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.model == 'model_value'
-    assert response.generate_explanation is True
-    assert response.state == job_state.JobState.JOB_STATE_QUEUED
-
-
-def test_get_batch_prediction_job_from_dict():
-    test_get_batch_prediction_job(request_type=dict)
-
-
-def test_get_batch_prediction_job_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_batch_prediction_job),
-            '__call__') as call:
-        client.get_batch_prediction_job()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.GetBatchPredictionJobRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetBatchPredictionJobRequest):
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_batch_prediction_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(batch_prediction_job.BatchPredictionJob(
-            name='name_value',
-            display_name='display_name_value',
-            model='model_value',
-            generate_explanation=True,
-            state=job_state.JobState.JOB_STATE_QUEUED,
-        ))
-        response = await client.get_batch_prediction_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
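-        # The async tests assert only that at least one call was recorded
-        # (a truthy length), rather than exactly one as in the sync tests.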
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.GetBatchPredictionJobRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, batch_prediction_job.BatchPredictionJob)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.model == 'model_value'
-    assert response.generate_explanation is True
-    assert response.state == job_state.JobState.JOB_STATE_QUEUED
-
-
-@pytest.mark.asyncio
-async def test_get_batch_prediction_job_async_from_dict():
-    await test_get_batch_prediction_job_async(request_type=dict)
-
-
-def test_get_batch_prediction_job_field_headers():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = job_service.GetBatchPredictionJobRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_batch_prediction_job),
-            '__call__') as call:
-        call.return_value = batch_prediction_job.BatchPredictionJob()
-        client.get_batch_prediction_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_get_batch_prediction_job_field_headers_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = job_service.GetBatchPredictionJobRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_batch_prediction_job),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(batch_prediction_job.BatchPredictionJob())
-        await client.get_batch_prediction_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_get_batch_prediction_job_flattened():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_batch_prediction_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = batch_prediction_job.BatchPredictionJob()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.get_batch_prediction_job(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_get_batch_prediction_job_flattened_error():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.get_batch_prediction_job(
-            job_service.GetBatchPredictionJobRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_get_batch_prediction_job_flattened_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_batch_prediction_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(batch_prediction_job.BatchPredictionJob())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.get_batch_prediction_job(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_get_batch_prediction_job_flattened_error_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.get_batch_prediction_job(
-            job_service.GetBatchPredictionJobRequest(),
-            name='name_value',
-        )
-
-
-def test_list_batch_prediction_jobs(transport: str = 'grpc', request_type=job_service.ListBatchPredictionJobsRequest):
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_batch_prediction_jobs),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = job_service.ListBatchPredictionJobsResponse(
-            next_page_token='next_page_token_value',
-        )
-        response = client.list_batch_prediction_jobs(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.ListBatchPredictionJobsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListBatchPredictionJobsPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_batch_prediction_jobs_from_dict():
-    test_list_batch_prediction_jobs(request_type=dict)
-
-
-def test_list_batch_prediction_jobs_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_batch_prediction_jobs),
-            '__call__') as call:
-        client.list_batch_prediction_jobs()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.ListBatchPredictionJobsRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_batch_prediction_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListBatchPredictionJobsRequest):
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_batch_prediction_jobs),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListBatchPredictionJobsResponse(
-            next_page_token='next_page_token_value',
-        ))
-        response = await client.list_batch_prediction_jobs(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.ListBatchPredictionJobsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListBatchPredictionJobsAsyncPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_batch_prediction_jobs_async_from_dict():
-    await test_list_batch_prediction_jobs_async(request_type=dict)
-
-
-def test_list_batch_prediction_jobs_field_headers():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = job_service.ListBatchPredictionJobsRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_batch_prediction_jobs),
-            '__call__') as call:
-        call.return_value = job_service.ListBatchPredictionJobsResponse()
-        client.list_batch_prediction_jobs(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_batch_prediction_jobs_field_headers_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = job_service.ListBatchPredictionJobsRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_batch_prediction_jobs),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListBatchPredictionJobsResponse())
-        await client.list_batch_prediction_jobs(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_list_batch_prediction_jobs_flattened():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_batch_prediction_jobs),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = job_service.ListBatchPredictionJobsResponse()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.list_batch_prediction_jobs(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-def test_list_batch_prediction_jobs_flattened_error():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.list_batch_prediction_jobs(
-            job_service.ListBatchPredictionJobsRequest(),
-            parent='parent_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_list_batch_prediction_jobs_flattened_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_batch_prediction_jobs),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListBatchPredictionJobsResponse())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.list_batch_prediction_jobs(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_batch_prediction_jobs_flattened_error_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.list_batch_prediction_jobs(
-            job_service.ListBatchPredictionJobsRequest(),
-            parent='parent_value',
-        )
-
-
-def test_list_batch_prediction_jobs_pager():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_batch_prediction_jobs),
-            '__call__') as call:
-        # Set the response to a series of pages.
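-        # Each response below corresponds to one page fetch; an empty
-        # next_page_token on the final page tells the pager to stop, and
-        # the RuntimeError guards against any fetch beyond it.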
- call.side_effect = ( - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - ], - next_page_token='abc', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[], - next_page_token='def', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - ], - next_page_token='ghi', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_batch_prediction_jobs(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, batch_prediction_job.BatchPredictionJob) - for i in results) - -def test_list_batch_prediction_jobs_pages(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_batch_prediction_jobs), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - ], - next_page_token='abc', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[], - next_page_token='def', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - ], - next_page_token='ghi', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - ], - ), - RuntimeError, - ) - pages = list(client.list_batch_prediction_jobs(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_batch_prediction_jobs_async_pager(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_batch_prediction_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages.
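# Note: the pager test above also rebuilds the metadata the pager is expected
# to re-send on every page request. gapic_v1.routing_header.to_grpc_metadata
# turns field/value pairs into the 'x-goog-request-params' entry; roughly
# (illustrative):
#
#     md = gapic_v1.routing_header.to_grpc_metadata((('parent', ''),))
#     # md == ('x-goog-request-params', 'parent=')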
- call.side_effect = ( - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - ], - next_page_token='abc', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[], - next_page_token='def', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - ], - next_page_token='ghi', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_batch_prediction_jobs(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, batch_prediction_job.BatchPredictionJob) - for i in responses) - -@pytest.mark.asyncio -async def test_list_batch_prediction_jobs_async_pages(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_batch_prediction_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - ], - next_page_token='abc', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[], - next_page_token='def', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - ], - next_page_token='ghi', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_batch_prediction_jobs(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_batch_prediction_job(transport: str = 'grpc', request_type=job_service.DeleteBatchPredictionJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteBatchPredictionJobRequest() - - # Establish that the response is the type that we expect.
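# Note: delete_batch_prediction_job is a long-running operation; the stub
# returns a raw operations_pb2.Operation and the client wraps it in a
# google.api_core future, which is what the assertion below checks. A caller
# would typically block on completion (illustrative):
#
#     lro = client.delete_batch_prediction_job(name='name_value')
#     lro.result()  # waits for the server-side operation to finish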
- assert isinstance(response, future.Future) - - -def test_delete_batch_prediction_job_from_dict(): - test_delete_batch_prediction_job(request_type=dict) - - -def test_delete_batch_prediction_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_batch_prediction_job), - '__call__') as call: - client.delete_batch_prediction_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteBatchPredictionJobRequest() - - -@pytest.mark.asyncio -async def test_delete_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteBatchPredictionJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteBatchPredictionJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_batch_prediction_job_async_from_dict(): - await test_delete_batch_prediction_job_async(request_type=dict) - - -def test_delete_batch_prediction_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteBatchPredictionJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_batch_prediction_job), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_batch_prediction_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteBatchPredictionJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
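# Note: these tests patch type(client.transport.<rpc>).__call__ rather than
# the attribute itself; the transport exposes each RPC as a callable stub, so
# (a reasonable reading of the pattern) patching __call__ on its type
# intercepts the invocation while leaving the client's request coercion and
# metadata handling in place:
#
#     with mock.patch.object(
#             type(client.transport.delete_batch_prediction_job),
#             '__call__') as call:
#         call.return_value = operations_pb2.Operation(name='operations/spam')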
- with mock.patch.object( - type(client.transport.delete_batch_prediction_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_batch_prediction_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_batch_prediction_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_delete_batch_prediction_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_batch_prediction_job( - job_service.DeleteBatchPredictionJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_batch_prediction_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_batch_prediction_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_delete_batch_prediction_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error.
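# Note: this mutual-exclusion rule comes from the generated GAPIC surface: a
# method accepts either a prebuilt request object or flattened keyword
# fields, never both, and raises ValueError before any RPC is attempted.
# Shape of the failing call:
#
#     client.delete_batch_prediction_job(
#         job_service.DeleteBatchPredictionJobRequest(),  # request object
#         name='name_value',                              # plus a flattened field
#     )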
- with pytest.raises(ValueError): - await client.delete_batch_prediction_job( - job_service.DeleteBatchPredictionJobRequest(), - name='name_value', - ) - - -def test_cancel_batch_prediction_job(transport: str = 'grpc', request_type=job_service.CancelBatchPredictionJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.cancel_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelBatchPredictionJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_cancel_batch_prediction_job_from_dict(): - test_cancel_batch_prediction_job(request_type=dict) - - -def test_cancel_batch_prediction_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_batch_prediction_job), - '__call__') as call: - client.cancel_batch_prediction_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelBatchPredictionJobRequest() - - -@pytest.mark.asyncio -async def test_cancel_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelBatchPredictionJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.cancel_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelBatchPredictionJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_cancel_batch_prediction_job_async_from_dict(): - await test_cancel_batch_prediction_job_async(request_type=dict) - - -def test_cancel_batch_prediction_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
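# Note: "field header" here means the x-goog-request-params metadata entry
# that lets the backend route the request by resource name without parsing
# the body. The assertion pattern these tests use:
#
#     _, _, kw = call.mock_calls[0]
#     assert ('x-goog-request-params', 'name=name/value') in kw['metadata']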
- request = job_service.CancelBatchPredictionJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_batch_prediction_job), - '__call__') as call: - call.return_value = None - client.cancel_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_cancel_batch_prediction_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CancelBatchPredictionJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_batch_prediction_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.cancel_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_cancel_batch_prediction_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.cancel_batch_prediction_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_cancel_batch_prediction_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.cancel_batch_prediction_job( - job_service.CancelBatchPredictionJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_cancel_batch_prediction_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method.
- response = await client.cancel_batch_prediction_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_cancel_batch_prediction_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.cancel_batch_prediction_job( - job_service.CancelBatchPredictionJobRequest(), - name='name_value', - ) - - -def test_create_model_deployment_monitoring_job(transport: str = 'grpc', request_type=job_service.CreateModelDeploymentMonitoringJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob( - name='name_value', - display_name='display_name_value', - endpoint='endpoint_value', - state=job_state.JobState.JOB_STATE_QUEUED, - schedule_state=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, - predict_instance_schema_uri='predict_instance_schema_uri_value', - analysis_instance_schema_uri='analysis_instance_schema_uri_value', - ) - response = client.create_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.endpoint == 'endpoint_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - assert response.schedule_state == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING - assert response.predict_instance_schema_uri == 'predict_instance_schema_uri_value' - assert response.analysis_instance_schema_uri == 'analysis_instance_schema_uri_value' - - -def test_create_model_deployment_monitoring_job_from_dict(): - test_create_model_deployment_monitoring_job(request_type=dict) - - -def test_create_model_deployment_monitoring_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
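# Note: the *_empty_call tests invoke the method with no arguments at all and
# only verify that the transport received the default request type; they are
# a coverage failsafe for the "request is None and no flattened fields" code
# path rather than a behavioral test:
#
#     client.create_model_deployment_monitoring_job()
#     _, args, _ = call.mock_calls[0]
#     assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest()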
- with mock.patch.object( - type(client.transport.create_model_deployment_monitoring_job), - '__call__') as call: - client.create_model_deployment_monitoring_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest() - - -@pytest.mark.asyncio -async def test_create_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateModelDeploymentMonitoringJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob( - name='name_value', - display_name='display_name_value', - endpoint='endpoint_value', - state=job_state.JobState.JOB_STATE_QUEUED, - schedule_state=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, - predict_instance_schema_uri='predict_instance_schema_uri_value', - analysis_instance_schema_uri='analysis_instance_schema_uri_value', - )) - response = await client.create_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.endpoint == 'endpoint_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - assert response.schedule_state == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING - assert response.predict_instance_schema_uri == 'predict_instance_schema_uri_value' - assert response.analysis_instance_schema_uri == 'analysis_instance_schema_uri_value' - - -@pytest.mark.asyncio -async def test_create_model_deployment_monitoring_job_async_from_dict(): - await test_create_model_deployment_monitoring_job_async(request_type=dict) - - -def test_create_model_deployment_monitoring_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CreateModelDeploymentMonitoringJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob() - client.create_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_model_deployment_monitoring_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CreateModelDeploymentMonitoringJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()) - await client.create_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_model_deployment_monitoring_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_model_deployment_monitoring_job( - parent='parent_value', - model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].model_deployment_monitoring_job == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value') - - -def test_create_model_deployment_monitoring_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_model_deployment_monitoring_job( - job_service.CreateModelDeploymentMonitoringJobRequest(), - parent='parent_value', - model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_model_deployment_monitoring_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
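# Note: in the flattened tests, keyword arguments such as parent= and
# model_deployment_monitoring_job= are folded into a single request proto
# before the transport is invoked, which is why the assertions read fields
# off args[0] rather than inspecting keyword arguments:
#
#     _, args, _ = call.mock_calls[0]
#     assert args[0].parent == 'parent_value'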
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_model_deployment_monitoring_job( - parent='parent_value', - model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].model_deployment_monitoring_job == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value') - - -@pytest.mark.asyncio -async def test_create_model_deployment_monitoring_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_model_deployment_monitoring_job( - job_service.CreateModelDeploymentMonitoringJobRequest(), - parent='parent_value', - model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), - ) - - -def test_search_model_deployment_monitoring_stats_anomalies(transport: str = 'grpc', request_type=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - next_page_token='next_page_token_value', - ) - response = client.search_model_deployment_monitoring_stats_anomalies(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_search_model_deployment_monitoring_stats_anomalies_from_dict(): - test_search_model_deployment_monitoring_stats_anomalies(request_type=dict) - - -def test_search_model_deployment_monitoring_stats_anomalies_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__') as call: - client.search_model_deployment_monitoring_stats_anomalies() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest() - - -@pytest.mark.asyncio -async def test_search_model_deployment_monitoring_stats_anomalies_async(transport: str = 'grpc_asyncio', request_type=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - next_page_token='next_page_token_value', - )) - response = await client.search_model_deployment_monitoring_stats_anomalies(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_search_model_deployment_monitoring_stats_anomalies_async_from_dict(): - await test_search_model_deployment_monitoring_stats_anomalies_async(request_type=dict) - - -def test_search_model_deployment_monitoring_stats_anomalies_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest() - - request.model_deployment_monitoring_job = 'model_deployment_monitoring_job/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__') as call: - call.return_value = job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse() - client.search_model_deployment_monitoring_stats_anomalies(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'model_deployment_monitoring_job=model_deployment_monitoring_job/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_search_model_deployment_monitoring_stats_anomalies_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value.
- request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest() - - request.model_deployment_monitoring_job = 'model_deployment_monitoring_job/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()) - await client.search_model_deployment_monitoring_stats_anomalies(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'model_deployment_monitoring_job=model_deployment_monitoring_job/value', - ) in kw['metadata'] - - -def test_search_model_deployment_monitoring_stats_anomalies_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.search_model_deployment_monitoring_stats_anomalies( - model_deployment_monitoring_job='model_deployment_monitoring_job_value', - deployed_model_id='deployed_model_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].model_deployment_monitoring_job == 'model_deployment_monitoring_job_value' - assert args[0].deployed_model_id == 'deployed_model_id_value' - - -def test_search_model_deployment_monitoring_stats_anomalies_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.search_model_deployment_monitoring_stats_anomalies( - job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(), - model_deployment_monitoring_job='model_deployment_monitoring_job_value', - deployed_model_id='deployed_model_id_value', - ) - - -@pytest.mark.asyncio -async def test_search_model_deployment_monitoring_stats_anomalies_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method.
- response = await client.search_model_deployment_monitoring_stats_anomalies( - model_deployment_monitoring_job='model_deployment_monitoring_job_value', - deployed_model_id='deployed_model_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].model_deployment_monitoring_job == 'model_deployment_monitoring_job_value' - assert args[0].deployed_model_id == 'deployed_model_id_value' - - -@pytest.mark.asyncio -async def test_search_model_deployment_monitoring_stats_anomalies_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.search_model_deployment_monitoring_stats_anomalies( - job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(), - model_deployment_monitoring_job='model_deployment_monitoring_job_value', - deployed_model_id='deployed_model_id_value', - ) - - -def test_search_model_deployment_monitoring_stats_anomalies_pager(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - next_page_token='abc', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[], - next_page_token='def', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - next_page_token='ghi', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('model_deployment_monitoring_job', ''), - )), - ) - pager = client.search_model_deployment_monitoring_stats_anomalies(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies) - for i in results) - -def test_search_model_deployment_monitoring_stats_anomalies_pages(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__') as call: - # Set the response to a series of pages.
- call.side_effect = ( - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - next_page_token='abc', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[], - next_page_token='def', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - next_page_token='ghi', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - ), - RuntimeError, - ) - pages = list(client.search_model_deployment_monitoring_stats_anomalies(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_search_model_deployment_monitoring_stats_anomalies_async_pager(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - next_page_token='abc', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[], - next_page_token='def', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - next_page_token='ghi', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - ), - RuntimeError, - ) - async_pager = await client.search_model_deployment_monitoring_stats_anomalies(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies) - for i in responses) - -@pytest.mark.asyncio -async def test_search_model_deployment_monitoring_stats_anomalies_async_pages(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages.
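# Note: with the async client the paged call itself is awaited and returns an
# async pager; iteration then happens with `async for`, which lazily issues
# the follow-up page requests. The pattern exercised below (illustrative):
#
#     pager = await client.search_model_deployment_monitoring_stats_anomalies(
#         request={})
#     async for item in pager:
#         ...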
- call.side_effect = ( - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - next_page_token='abc', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[], - next_page_token='def', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - next_page_token='ghi', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.search_model_deployment_monitoring_stats_anomalies(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_get_model_deployment_monitoring_job(transport: str = 'grpc', request_type=job_service.GetModelDeploymentMonitoringJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob( - name='name_value', - display_name='display_name_value', - endpoint='endpoint_value', - state=job_state.JobState.JOB_STATE_QUEUED, - schedule_state=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, - predict_instance_schema_uri='predict_instance_schema_uri_value', - analysis_instance_schema_uri='analysis_instance_schema_uri_value', - ) - response = client.get_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, model_deployment_monitoring_job.ModelDeploymentMonitoringJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.endpoint == 'endpoint_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - assert response.schedule_state == model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING - assert response.predict_instance_schema_uri == 'predict_instance_schema_uri_value' - assert response.analysis_instance_schema_uri == 'analysis_instance_schema_uri_value' - - -def test_get_model_deployment_monitoring_job_from_dict(): - test_get_model_deployment_monitoring_job(request_type=dict) - - -def test_get_model_deployment_monitoring_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work.
- client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_deployment_monitoring_job), - '__call__') as call: - client.get_model_deployment_monitoring_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetModelDeploymentMonitoringJobRequest() - - -@pytest.mark.asyncio -async def test_get_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetModelDeploymentMonitoringJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_deployment_monitoring_job.ModelDeploymentMonitoringJob( - name='name_value', - display_name='display_name_value', - endpoint='endpoint_value', - state=job_state.JobState.JOB_STATE_QUEUED, - schedule_state=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, - predict_instance_schema_uri='predict_instance_schema_uri_value', - analysis_instance_schema_uri='analysis_instance_schema_uri_value', - )) - response = await client.get_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, model_deployment_monitoring_job.ModelDeploymentMonitoringJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.endpoint == 'endpoint_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - assert response.schedule_state == model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING - assert response.predict_instance_schema_uri == 'predict_instance_schema_uri_value' - assert response.analysis_instance_schema_uri == 'analysis_instance_schema_uri_value' - - -@pytest.mark.asyncio -async def test_get_model_deployment_monitoring_job_async_from_dict(): - await test_get_model_deployment_monitoring_job_async(request_type=dict) - - -def test_get_model_deployment_monitoring_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetModelDeploymentMonitoringJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object( - type(client.transport.get_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob() - client.get_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_model_deployment_monitoring_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetModelDeploymentMonitoringJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_deployment_monitoring_job.ModelDeploymentMonitoringJob()) - await client.get_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_model_deployment_monitoring_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_model_deployment_monitoring_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_get_model_deployment_monitoring_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_model_deployment_monitoring_job( - job_service.GetModelDeploymentMonitoringJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_model_deployment_monitoring_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
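# Note: elsewhere in this file the *_from_dict variants re-run each test with
# request_type=dict; proto-plus request constructors accept plain dicts, so
# these two calls are equivalent (illustrative):
#
#     client.get_model_deployment_monitoring_job(request={'name': 'name_value'})
#     client.get_model_deployment_monitoring_job(
#         request=job_service.GetModelDeploymentMonitoringJobRequest(name='name_value'))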
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_deployment_monitoring_job.ModelDeploymentMonitoringJob())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.get_model_deployment_monitoring_job(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_get_model_deployment_monitoring_job_flattened_error_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.get_model_deployment_monitoring_job(
-            job_service.GetModelDeploymentMonitoringJobRequest(),
-            name='name_value',
-        )
-
-
-def test_list_model_deployment_monitoring_jobs(transport: str = 'grpc', request_type=job_service.ListModelDeploymentMonitoringJobsRequest):
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_deployment_monitoring_jobs),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse(
-            next_page_token='next_page_token_value',
-        )
-        response = client.list_model_deployment_monitoring_jobs(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.ListModelDeploymentMonitoringJobsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListModelDeploymentMonitoringJobsPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_model_deployment_monitoring_jobs_from_dict():
-    test_list_model_deployment_monitoring_jobs(request_type=dict)
-
-
-def test_list_model_deployment_monitoring_jobs_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_deployment_monitoring_jobs),
-            '__call__') as call:
-        client.list_model_deployment_monitoring_jobs()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.ListModelDeploymentMonitoringJobsRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_model_deployment_monitoring_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListModelDeploymentMonitoringJobsRequest):
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_deployment_monitoring_jobs),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListModelDeploymentMonitoringJobsResponse(
-            next_page_token='next_page_token_value',
-        ))
-        response = await client.list_model_deployment_monitoring_jobs(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.ListModelDeploymentMonitoringJobsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListModelDeploymentMonitoringJobsAsyncPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_model_deployment_monitoring_jobs_async_from_dict():
-    await test_list_model_deployment_monitoring_jobs_async(request_type=dict)
-
-
-def test_list_model_deployment_monitoring_jobs_field_headers():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = job_service.ListModelDeploymentMonitoringJobsRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_deployment_monitoring_jobs),
-            '__call__') as call:
-        call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse()
-        client.list_model_deployment_monitoring_jobs(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_model_deployment_monitoring_jobs_field_headers_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = job_service.ListModelDeploymentMonitoringJobsRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_deployment_monitoring_jobs),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListModelDeploymentMonitoringJobsResponse())
-        await client.list_model_deployment_monitoring_jobs(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_list_model_deployment_monitoring_jobs_flattened():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_deployment_monitoring_jobs),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.list_model_deployment_monitoring_jobs(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-def test_list_model_deployment_monitoring_jobs_flattened_error():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.list_model_deployment_monitoring_jobs(
-            job_service.ListModelDeploymentMonitoringJobsRequest(),
-            parent='parent_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_list_model_deployment_monitoring_jobs_flattened_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_deployment_monitoring_jobs),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListModelDeploymentMonitoringJobsResponse())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.list_model_deployment_monitoring_jobs(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_model_deployment_monitoring_jobs_flattened_error_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
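The flattened_error tests assert the mutual-exclusion rule between a request object and flattened keyword arguments. A minimal sketch of that guard, with a hypothetical method signature rather than the generated one:

    def get_job(request=None, *, name=None):
        # Hypothetical flattened method: a request object and flattened
        # fields are mutually exclusive, mirroring the ValueError expected
        # by these tests.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')
        return request or {'name': name}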
-    with pytest.raises(ValueError):
-        await client.list_model_deployment_monitoring_jobs(
-            job_service.ListModelDeploymentMonitoringJobsRequest(),
-            parent='parent_value',
-        )
-
-
-def test_list_model_deployment_monitoring_jobs_pager():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_deployment_monitoring_jobs),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            job_service.ListModelDeploymentMonitoringJobsResponse(
-                model_deployment_monitoring_jobs=[
-                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
-                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
-                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
-                ],
-                next_page_token='abc',
-            ),
-            job_service.ListModelDeploymentMonitoringJobsResponse(
-                model_deployment_monitoring_jobs=[],
-                next_page_token='def',
-            ),
-            job_service.ListModelDeploymentMonitoringJobsResponse(
-                model_deployment_monitoring_jobs=[
-                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
-                ],
-                next_page_token='ghi',
-            ),
-            job_service.ListModelDeploymentMonitoringJobsResponse(
-                model_deployment_monitoring_jobs=[
-                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
-                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
-                ],
-            ),
-            RuntimeError,
-        )
-
-        metadata = ()
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ('parent', ''),
-            )),
-        )
-        pager = client.list_model_deployment_monitoring_jobs(request={})
-
-        assert pager._metadata == metadata
-
-        results = [i for i in pager]
-        assert len(results) == 6
-        assert all(isinstance(i, model_deployment_monitoring_job.ModelDeploymentMonitoringJob)
-                   for i in results)
-
-def test_list_model_deployment_monitoring_jobs_pages():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_deployment_monitoring_jobs),
-            '__call__') as call:
-        # Set the response to a series of pages.
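The pager test above feeds four pages through call.side_effect and expects plain iteration to yield all six jobs. A compact stand-in for that paging behaviour, assuming illustrative Page/Pager types rather than the generated ones:

    class Page:
        # Stand-in for a List*Response: items plus a token for the next page.
        def __init__(self, items, next_page_token=''):
            self.items = items
            self.next_page_token = next_page_token

    class Pager:
        # Stand-in pager: walks pages until the next_page_token is empty.
        def __init__(self, fetch_page, first_page):
            self._fetch_page = fetch_page
            self._first_page = first_page

        @property
        def pages(self):
            page = self._first_page
            while True:
                yield page
                if not page.next_page_token:
                    return
                page = self._fetch_page(page.next_page_token)

        def __iter__(self):
            for page in self.pages:
                yield from page.items

    pages = {'': Page([1, 2, 3], 'abc'), 'abc': Page([], 'def'),
             'def': Page([4], 'ghi'), 'ghi': Page([5, 6])}
    pager = Pager(lambda token: pages[token], pages[''])
    assert list(pager) == [1, 2, 3, 4, 5, 6]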
-        call.side_effect = (
-            job_service.ListModelDeploymentMonitoringJobsResponse(
-                model_deployment_monitoring_jobs=[
-                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
-                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
-                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
-                ],
-                next_page_token='abc',
-            ),
-            job_service.ListModelDeploymentMonitoringJobsResponse(
-                model_deployment_monitoring_jobs=[],
-                next_page_token='def',
-            ),
-            job_service.ListModelDeploymentMonitoringJobsResponse(
-                model_deployment_monitoring_jobs=[
-                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
-                ],
-                next_page_token='ghi',
-            ),
-            job_service.ListModelDeploymentMonitoringJobsResponse(
-                model_deployment_monitoring_jobs=[
-                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
-                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = list(client.list_model_deployment_monitoring_jobs(request={}).pages)
-        for page_, token in zip(pages, ['abc','def','ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_model_deployment_monitoring_jobs_async_pager():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_deployment_monitoring_jobs),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            job_service.ListModelDeploymentMonitoringJobsResponse(
-                model_deployment_monitoring_jobs=[
-                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
-                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
-                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
-                ],
-                next_page_token='abc',
-            ),
-            job_service.ListModelDeploymentMonitoringJobsResponse(
-                model_deployment_monitoring_jobs=[],
-                next_page_token='def',
-            ),
-            job_service.ListModelDeploymentMonitoringJobsResponse(
-                model_deployment_monitoring_jobs=[
-                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
-                ],
-                next_page_token='ghi',
-            ),
-            job_service.ListModelDeploymentMonitoringJobsResponse(
-                model_deployment_monitoring_jobs=[
-                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
-                    model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
-                ],
-            ),
-            RuntimeError,
-        )
-        async_pager = await client.list_model_deployment_monitoring_jobs(request={},)
-        assert async_pager.next_page_token == 'abc'
-        responses = []
-        async for response in async_pager:
-            responses.append(response)
-
-        assert len(responses) == 6
-        assert all(isinstance(i, model_deployment_monitoring_job.ModelDeploymentMonitoringJob)
-                   for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_model_deployment_monitoring_jobs_async_pages():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_deployment_monitoring_jobs),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
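The async pager is consumed with `async for`, while the async_pages variant walks `.pages` and checks each token. A small self-contained sketch of the async iteration side, again with stand-in types:

    import asyncio

    class AsyncPager:
        # Stand-in async pager: yields items from a fixed list of pages.
        def __init__(self, pages):
            self._pages = pages

        def __aiter__(self):
            return self._gen()

        async def _gen(self):
            for page in self._pages:
                for item in page:
                    yield item

    async def main():
        responses = []
        async for item in AsyncPager([[1, 2, 3], [], [4], [5, 6]]):
            responses.append(item)
        assert len(responses) == 6

    asyncio.run(main())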
- call.side_effect = ( - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - ], - next_page_token='abc', - ), - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[], - next_page_token='def', - ), - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - ], - next_page_token='ghi', - ), - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_model_deployment_monitoring_jobs(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_model_deployment_monitoring_job(transport: str = 'grpc', request_type=job_service.UpdateModelDeploymentMonitoringJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.update_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.UpdateModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_update_model_deployment_monitoring_job_from_dict(): - test_update_model_deployment_monitoring_job(request_type=dict) - - -def test_update_model_deployment_monitoring_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model_deployment_monitoring_job), - '__call__') as call: - client.update_model_deployment_monitoring_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.UpdateModelDeploymentMonitoringJobRequest() - - -@pytest.mark.asyncio -async def test_update_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.UpdateModelDeploymentMonitoringJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.update_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.UpdateModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_update_model_deployment_monitoring_job_async_from_dict(): - await test_update_model_deployment_monitoring_job_async(request_type=dict) - - -def test_update_model_deployment_monitoring_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.UpdateModelDeploymentMonitoringJobRequest() - - request.model_deployment_monitoring_job.name = 'model_deployment_monitoring_job.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.update_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'model_deployment_monitoring_job.name=model_deployment_monitoring_job.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_model_deployment_monitoring_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.UpdateModelDeploymentMonitoringJobRequest() - - request.model_deployment_monitoring_job.name = 'model_deployment_monitoring_job.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.update_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'model_deployment_monitoring_job.name=model_deployment_monitoring_job.name/value', - ) in kw['metadata'] - - -def test_update_model_deployment_monitoring_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
-    with mock.patch.object(
-            type(client.transport.update_model_deployment_monitoring_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/op')
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.update_model_deployment_monitoring_job(
-            model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'),
-            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].model_deployment_monitoring_job == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value')
-        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
-
-
-def test_update_model_deployment_monitoring_job_flattened_error():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.update_model_deployment_monitoring_job(
-            job_service.UpdateModelDeploymentMonitoringJobRequest(),
-            model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'),
-            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
-        )
-
-
-@pytest.mark.asyncio
-async def test_update_model_deployment_monitoring_job_flattened_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_model_deployment_monitoring_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.update_model_deployment_monitoring_job(
-            model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'),
-            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].model_deployment_monitoring_job == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value')
-        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
-
-
-@pytest.mark.asyncio
-async def test_update_model_deployment_monitoring_job_flattened_error_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
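The update tests pair the resource with a google.protobuf FieldMask so the server only touches the listed fields. A standalone example of constructing such a mask (the path name here is illustrative):

    from google.protobuf import field_mask_pb2

    # Only the listed paths would be updated by an Update* RPC.
    update_mask = field_mask_pb2.FieldMask(paths=['display_name'])
    assert list(update_mask.paths) == ['display_name']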
- with pytest.raises(ValueError): - await client.update_model_deployment_monitoring_job( - job_service.UpdateModelDeploymentMonitoringJobRequest(), - model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_model_deployment_monitoring_job(transport: str = 'grpc', request_type=job_service.DeleteModelDeploymentMonitoringJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_model_deployment_monitoring_job_from_dict(): - test_delete_model_deployment_monitoring_job(request_type=dict) - - -def test_delete_model_deployment_monitoring_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model_deployment_monitoring_job), - '__call__') as call: - client.delete_model_deployment_monitoring_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteModelDeploymentMonitoringJobRequest() - - -@pytest.mark.asyncio -async def test_delete_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteModelDeploymentMonitoringJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_model_deployment_monitoring_job_async_from_dict(): - await test_delete_model_deployment_monitoring_job_async(request_type=dict) - - -def test_delete_model_deployment_monitoring_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteModelDeploymentMonitoringJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_model_deployment_monitoring_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteModelDeploymentMonitoringJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_model_deployment_monitoring_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_model_deployment_monitoring_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_delete_model_deployment_monitoring_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
-    with pytest.raises(ValueError):
-        client.delete_model_deployment_monitoring_job(
-            job_service.DeleteModelDeploymentMonitoringJobRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_delete_model_deployment_monitoring_job_flattened_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_model_deployment_monitoring_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.delete_model_deployment_monitoring_job(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_model_deployment_monitoring_job_flattened_error_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.delete_model_deployment_monitoring_job(
-            job_service.DeleteModelDeploymentMonitoringJobRequest(),
-            name='name_value',
-        )
-
-
-def test_pause_model_deployment_monitoring_job(transport: str = 'grpc', request_type=job_service.PauseModelDeploymentMonitoringJobRequest):
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.pause_model_deployment_monitoring_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = None
-        response = client.pause_model_deployment_monitoring_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == job_service.PauseModelDeploymentMonitoringJobRequest()
-
-    # Establish that the response is the type that we expect.
-    assert response is None
-
-
-def test_pause_model_deployment_monitoring_job_from_dict():
-    test_pause_model_deployment_monitoring_job(request_type=dict)
-
-
-def test_pause_model_deployment_monitoring_job_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object( - type(client.transport.pause_model_deployment_monitoring_job), - '__call__') as call: - client.pause_model_deployment_monitoring_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.PauseModelDeploymentMonitoringJobRequest() - - -@pytest.mark.asyncio -async def test_pause_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.PauseModelDeploymentMonitoringJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.pause_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.pause_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.PauseModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_pause_model_deployment_monitoring_job_async_from_dict(): - await test_pause_model_deployment_monitoring_job_async(request_type=dict) - - -def test_pause_model_deployment_monitoring_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.PauseModelDeploymentMonitoringJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.pause_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = None - client.pause_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_pause_model_deployment_monitoring_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.PauseModelDeploymentMonitoringJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.pause_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.pause_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_pause_model_deployment_monitoring_job_flattened():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.pause_model_deployment_monitoring_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = None
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.pause_model_deployment_monitoring_job(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_pause_model_deployment_monitoring_job_flattened_error():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.pause_model_deployment_monitoring_job(
-            job_service.PauseModelDeploymentMonitoringJobRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_pause_model_deployment_monitoring_job_flattened_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.pause_model_deployment_monitoring_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.pause_model_deployment_monitoring_job(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_pause_model_deployment_monitoring_job_flattened_error_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.pause_model_deployment_monitoring_job(
-            job_service.PauseModelDeploymentMonitoringJobRequest(),
-            name='name_value',
-        )
-
-
-def test_resume_model_deployment_monitoring_job(transport: str = 'grpc', request_type=job_service.ResumeModelDeploymentMonitoringJobRequest):
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.resume_model_deployment_monitoring_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
- call.return_value = None - response = client.resume_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ResumeModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_resume_model_deployment_monitoring_job_from_dict(): - test_resume_model_deployment_monitoring_job(request_type=dict) - - -def test_resume_model_deployment_monitoring_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.resume_model_deployment_monitoring_job), - '__call__') as call: - client.resume_model_deployment_monitoring_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ResumeModelDeploymentMonitoringJobRequest() - - -@pytest.mark.asyncio -async def test_resume_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.ResumeModelDeploymentMonitoringJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.resume_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.resume_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ResumeModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_resume_model_deployment_monitoring_job_async_from_dict(): - await test_resume_model_deployment_monitoring_job_async(request_type=dict) - - -def test_resume_model_deployment_monitoring_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.ResumeModelDeploymentMonitoringJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.resume_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = None - client.resume_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_resume_model_deployment_monitoring_job_field_headers_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = job_service.ResumeModelDeploymentMonitoringJobRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.resume_model_deployment_monitoring_job),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
-        await client.resume_model_deployment_monitoring_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_resume_model_deployment_monitoring_job_flattened():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.resume_model_deployment_monitoring_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = None
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.resume_model_deployment_monitoring_job(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_resume_model_deployment_monitoring_job_flattened_error():
-    client = JobServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.resume_model_deployment_monitoring_job(
-            job_service.ResumeModelDeploymentMonitoringJobRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_resume_model_deployment_monitoring_job_flattened_async():
-    client = JobServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.resume_model_deployment_monitoring_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.resume_model_deployment_monitoring_job(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_resume_model_deployment_monitoring_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.resume_model_deployment_monitoring_job( - job_service.ResumeModelDeploymentMonitoringJobRequest(), - name='name_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.JobServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.JobServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = JobServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.JobServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = JobServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.JobServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = JobServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.JobServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.JobServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.JobServiceGrpcTransport, - transports.JobServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.JobServiceGrpcTransport, - ) - -def test_job_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.JobServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_job_service_base_transport(): - # Instantiate the base transport. 
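test_transport_adc above patches google.auth.default to confirm that transports fall back to Application Default Credentials when none are passed. The same patching pattern in isolation, using anonymous credentials as placeholders:

    from unittest import mock

    import google.auth
    from google.auth import credentials as ga_credentials

    with mock.patch.object(google.auth, 'default') as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        credentials, project_id = google.auth.default()
        adc.assert_called_once()
        assert project_id is None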
- with mock.patch('google.cloud.aiplatform_v1beta1.services.job_service.transports.JobServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.JobServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'create_custom_job', - 'get_custom_job', - 'list_custom_jobs', - 'delete_custom_job', - 'cancel_custom_job', - 'create_data_labeling_job', - 'get_data_labeling_job', - 'list_data_labeling_jobs', - 'delete_data_labeling_job', - 'cancel_data_labeling_job', - 'create_hyperparameter_tuning_job', - 'get_hyperparameter_tuning_job', - 'list_hyperparameter_tuning_jobs', - 'delete_hyperparameter_tuning_job', - 'cancel_hyperparameter_tuning_job', - 'create_batch_prediction_job', - 'get_batch_prediction_job', - 'list_batch_prediction_jobs', - 'delete_batch_prediction_job', - 'cancel_batch_prediction_job', - 'create_model_deployment_monitoring_job', - 'search_model_deployment_monitoring_stats_anomalies', - 'get_model_deployment_monitoring_job', - 'list_model_deployment_monitoring_jobs', - 'update_model_deployment_monitoring_job', - 'delete_model_deployment_monitoring_job', - 'pause_model_deployment_monitoring_job', - 'resume_model_deployment_monitoring_job', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -@requires_google_auth_gte_1_25_0 -def test_job_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.JobServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@requires_google_auth_lt_1_25_0 -def test_job_service_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.JobServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - ), - quota_project_id="octopus", - ) - - -def test_job_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. 
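The base-transport test above iterates over every RPC name and expects NotImplementedError, since only concrete transports implement the stubs. A minimal sketch of that contract with a made-up transport and a two-method list:

    import pytest

    class FakeBaseTransport:
        # Hypothetical stand-in for the generated base transport: every RPC
        # method is a stub that concrete transports must override.
        def create_custom_job(self, request):
            raise NotImplementedError()

        def get_custom_job(self, request):
            raise NotImplementedError()

    def test_fake_base_transport_methods_raise():
        transport = FakeBaseTransport()
        for method in ('create_custom_job', 'get_custom_job'):
            with pytest.raises(NotImplementedError):
                getattr(transport, method)(request=object())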
- with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.JobServiceTransport() - adc.assert_called_once() - - -@requires_google_auth_gte_1_25_0 -def test_job_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - JobServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@requires_google_auth_lt_1_25_0 -def test_job_service_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - JobServiceClient() - adc.assert_called_once_with( - scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.JobServiceGrpcTransport, - transports.JobServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_gte_1_25_0 -def test_job_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.JobServiceGrpcTransport, - transports.JobServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_job_service_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with(scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.JobServiceGrpcTransport, grpc_helpers), - (transports.JobServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_job_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
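The paired gte/lt 1.25.0 ADC tests encode how scopes are forwarded: newer google-auth receives both `scopes` and `default_scopes`, while older versions only understand `scopes`. A simplified model of the effective-scope choice (assumption: explicit user scopes take precedence):

    def resolve_scopes(user_scopes, default_scopes):
        # google-auth >= 1.25 prefers explicit user scopes and falls back
        # to the client library's defaults; a simplified model of that rule.
        return user_scopes if user_scopes is not None else default_scopes

    cloud_platform = ('https://www.googleapis.com/auth/cloud-platform',)
    assert resolve_scopes(["1", "2"], cloud_platform) == ["1", "2"]
    assert resolve_scopes(None, cloud_platform) == cloud_platform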
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport]) -def test_job_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_job_service_host_no_port(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_job_service_host_with_port(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_job_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.JobServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_job_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.JobServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport]) -def test_job_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport]) -def test_job_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_job_service_grpc_lro_client(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
- assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_job_service_grpc_lro_async_client(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_batch_prediction_job_path(): - project = "squid" - location = "clam" - batch_prediction_job = "whelk" - expected = "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format(project=project, location=location, batch_prediction_job=batch_prediction_job, ) - actual = JobServiceClient.batch_prediction_job_path(project, location, batch_prediction_job) - assert expected == actual - - -def test_parse_batch_prediction_job_path(): - expected = { - "project": "octopus", - "location": "oyster", - "batch_prediction_job": "nudibranch", - } - path = JobServiceClient.batch_prediction_job_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_batch_prediction_job_path(path) - assert expected == actual - -def test_custom_job_path(): - project = "cuttlefish" - location = "mussel" - custom_job = "winkle" - expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) - actual = JobServiceClient.custom_job_path(project, location, custom_job) - assert expected == actual - - -def test_parse_custom_job_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "custom_job": "abalone", - } - path = JobServiceClient.custom_job_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_custom_job_path(path) - assert expected == actual - -def test_data_labeling_job_path(): - project = "squid" - location = "clam" - data_labeling_job = "whelk" - expected = "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}".format(project=project, location=location, data_labeling_job=data_labeling_job, ) - actual = JobServiceClient.data_labeling_job_path(project, location, data_labeling_job) - assert expected == actual - - -def test_parse_data_labeling_job_path(): - expected = { - "project": "octopus", - "location": "oyster", - "data_labeling_job": "nudibranch", - } - path = JobServiceClient.data_labeling_job_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_data_labeling_job_path(path) - assert expected == actual - -def test_dataset_path(): - project = "cuttlefish" - location = "mussel" - dataset = "winkle" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - actual = JobServiceClient.dataset_path(project, location, dataset) - assert expected == actual - - -def test_parse_dataset_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "dataset": "abalone", - } - path = JobServiceClient.dataset_path(**expected) - - # Check that the path construction is reversible. 
- actual = JobServiceClient.parse_dataset_path(path) - assert expected == actual - -def test_endpoint_path(): - project = "squid" - location = "clam" - endpoint = "whelk" - expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - actual = JobServiceClient.endpoint_path(project, location, endpoint) - assert expected == actual - - -def test_parse_endpoint_path(): - expected = { - "project": "octopus", - "location": "oyster", - "endpoint": "nudibranch", - } - path = JobServiceClient.endpoint_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_endpoint_path(path) - assert expected == actual - -def test_hyperparameter_tuning_job_path(): - project = "cuttlefish" - location = "mussel" - hyperparameter_tuning_job = "winkle" - expected = "projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}".format(project=project, location=location, hyperparameter_tuning_job=hyperparameter_tuning_job, ) - actual = JobServiceClient.hyperparameter_tuning_job_path(project, location, hyperparameter_tuning_job) - assert expected == actual - - -def test_parse_hyperparameter_tuning_job_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "hyperparameter_tuning_job": "abalone", - } - path = JobServiceClient.hyperparameter_tuning_job_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_hyperparameter_tuning_job_path(path) - assert expected == actual - -def test_model_path(): - project = "squid" - location = "clam" - model = "whelk" - expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - actual = JobServiceClient.model_path(project, location, model) - assert expected == actual - - -def test_parse_model_path(): - expected = { - "project": "octopus", - "location": "oyster", - "model": "nudibranch", - } - path = JobServiceClient.model_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_model_path(path) - assert expected == actual - -def test_model_deployment_monitoring_job_path(): - project = "cuttlefish" - location = "mussel" - model_deployment_monitoring_job = "winkle" - expected = "projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format(project=project, location=location, model_deployment_monitoring_job=model_deployment_monitoring_job, ) - actual = JobServiceClient.model_deployment_monitoring_job_path(project, location, model_deployment_monitoring_job) - assert expected == actual - - -def test_parse_model_deployment_monitoring_job_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "model_deployment_monitoring_job": "abalone", - } - path = JobServiceClient.model_deployment_monitoring_job_path(**expected) - - # Check that the path construction is reversible. 
- actual = JobServiceClient.parse_model_deployment_monitoring_job_path(path) - assert expected == actual - -def test_network_path(): - project = "squid" - network = "clam" - expected = "projects/{project}/global/networks/{network}".format(project=project, network=network, ) - actual = JobServiceClient.network_path(project, network) - assert expected == actual - - -def test_parse_network_path(): - expected = { - "project": "whelk", - "network": "octopus", - } - path = JobServiceClient.network_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_network_path(path) - assert expected == actual - -def test_tensorboard_path(): - project = "oyster" - location = "nudibranch" - tensorboard = "cuttlefish" - expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(project=project, location=location, tensorboard=tensorboard, ) - actual = JobServiceClient.tensorboard_path(project, location, tensorboard) - assert expected == actual - - -def test_parse_tensorboard_path(): - expected = { - "project": "mussel", - "location": "winkle", - "tensorboard": "nautilus", - } - path = JobServiceClient.tensorboard_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_tensorboard_path(path) - assert expected == actual - -def test_trial_path(): - project = "scallop" - location = "abalone" - study = "squid" - trial = "clam" - expected = "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, ) - actual = JobServiceClient.trial_path(project, location, study, trial) - assert expected == actual - - -def test_parse_trial_path(): - expected = { - "project": "whelk", - "location": "octopus", - "study": "oyster", - "trial": "nudibranch", - } - path = JobServiceClient.trial_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_trial_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "cuttlefish" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = JobServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "mussel", - } - path = JobServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "winkle" - expected = "folders/{folder}".format(folder=folder, ) - actual = JobServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "nautilus", - } - path = JobServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "scallop" - expected = "organizations/{organization}".format(organization=organization, ) - actual = JobServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "abalone", - } - path = JobServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. 
- actual = JobServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "squid" - expected = "projects/{project}".format(project=project, ) - actual = JobServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "clam", - } - path = JobServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "whelk" - location = "octopus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = JobServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - } - path = JobServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.JobServiceTransport, '_prep_wrapped_messages') as prep: - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.JobServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = JobServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py deleted file mode 100644 index cb3e2ae56d..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py +++ /dev/null @@ -1,8381 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.metadata_service import MetadataServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.metadata_service import MetadataServiceClient -from google.cloud.aiplatform_v1beta1.services.metadata_service import pagers -from google.cloud.aiplatform_v1beta1.services.metadata_service import transports -from google.cloud.aiplatform_v1beta1.services.metadata_service.transports.base import _GOOGLE_AUTH_VERSION -from google.cloud.aiplatform_v1beta1.types import artifact -from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact -from google.cloud.aiplatform_v1beta1.types import context -from google.cloud.aiplatform_v1beta1.types import context as gca_context -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import event -from google.cloud.aiplatform_v1beta1.types import execution -from google.cloud.aiplatform_v1beta1.types import execution as gca_execution -from google.cloud.aiplatform_v1beta1.types import lineage_subgraph -from google.cloud.aiplatform_v1beta1.types import metadata_schema -from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema -from google.cloud.aiplatform_v1beta1.types import metadata_service -from google.cloud.aiplatform_v1beta1.types import metadata_store -from google.cloud.aiplatform_v1beta1.types import metadata_store as gca_metadata_store -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). -requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert MetadataServiceClient._get_default_mtls_endpoint(None) is None - assert MetadataServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert MetadataServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert MetadataServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert MetadataServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert MetadataServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - MetadataServiceClient, - MetadataServiceAsyncClient, -]) -def test_metadata_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("client_class", [ - MetadataServiceClient, - MetadataServiceAsyncClient, -]) -def test_metadata_service_client_service_account_always_use_jwt(client_class): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.MetadataServiceGrpcTransport, "grpc"), - (transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_metadata_service_client_service_account_always_use_jwt_true(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - -@pytest.mark.parametrize("client_class", [ - MetadataServiceClient, - MetadataServiceAsyncClient, -]) -def test_metadata_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_metadata_service_client_get_transport_class(): - transport = MetadataServiceClient.get_transport_class() - available_transports = [ - transports.MetadataServiceGrpcTransport, - ] - assert 
transport in available_transports - - transport = MetadataServiceClient.get_transport_class("grpc") - assert transport == transports.MetadataServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc"), - (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(MetadataServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MetadataServiceClient)) -@mock.patch.object(MetadataServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MetadataServiceAsyncClient)) -def test_metadata_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(MetadataServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(MetadataServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc", "true"), - (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc", "false"), - (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(MetadataServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MetadataServiceClient)) -@mock.patch.object(MetadataServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MetadataServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_metadata_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc"), - (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_metadata_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc"), - (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_metadata_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_metadata_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.metadata_service.transports.MetadataServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = MetadataServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_create_metadata_store(transport: str = 'grpc', request_type=metadata_service.CreateMetadataStoreRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateMetadataStoreRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_metadata_store_from_dict(): - test_create_metadata_store(request_type=dict) - - -def test_create_metadata_store_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_store), - '__call__') as call: - client.create_metadata_store() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateMetadataStoreRequest() - - -@pytest.mark.asyncio -async def test_create_metadata_store_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateMetadataStoreRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateMetadataStoreRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_metadata_store_async_from_dict(): - await test_create_metadata_store_async(request_type=dict) - - -def test_create_metadata_store_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.CreateMetadataStoreRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_store), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_metadata_store_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.CreateMetadataStoreRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_store), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_metadata_store_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_metadata_store( - parent='parent_value', - metadata_store=gca_metadata_store.MetadataStore(name='name_value'), - metadata_store_id='metadata_store_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].metadata_store == gca_metadata_store.MetadataStore(name='name_value') - assert args[0].metadata_store_id == 'metadata_store_id_value' - - -def test_create_metadata_store_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_metadata_store( - metadata_service.CreateMetadataStoreRequest(), - parent='parent_value', - metadata_store=gca_metadata_store.MetadataStore(name='name_value'), - metadata_store_id='metadata_store_id_value', - ) - - -@pytest.mark.asyncio -async def test_create_metadata_store_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_metadata_store( - parent='parent_value', - metadata_store=gca_metadata_store.MetadataStore(name='name_value'), - metadata_store_id='metadata_store_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].metadata_store == gca_metadata_store.MetadataStore(name='name_value') - assert args[0].metadata_store_id == 'metadata_store_id_value' - - -@pytest.mark.asyncio -async def test_create_metadata_store_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_metadata_store( - metadata_service.CreateMetadataStoreRequest(), - parent='parent_value', - metadata_store=gca_metadata_store.MetadataStore(name='name_value'), - metadata_store_id='metadata_store_id_value', - ) - - -def test_get_metadata_store(transport: str = 'grpc', request_type=metadata_service.GetMetadataStoreRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_store.MetadataStore( - name='name_value', - description='description_value', - ) - response = client.get_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetMetadataStoreRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, metadata_store.MetadataStore) - assert response.name == 'name_value' - assert response.description == 'description_value' - - -def test_get_metadata_store_from_dict(): - test_get_metadata_store(request_type=dict) - - -def test_get_metadata_store_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_store), - '__call__') as call: - client.get_metadata_store() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetMetadataStoreRequest() - - -@pytest.mark.asyncio -async def test_get_metadata_store_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetMetadataStoreRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_store.MetadataStore( - name='name_value', - description='description_value', - )) - response = await client.get_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetMetadataStoreRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, metadata_store.MetadataStore) - assert response.name == 'name_value' - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_get_metadata_store_async_from_dict(): - await test_get_metadata_store_async(request_type=dict) - - -def test_get_metadata_store_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.GetMetadataStoreRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_store), - '__call__') as call: - call.return_value = metadata_store.MetadataStore() - client.get_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_metadata_store_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.GetMetadataStoreRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_store), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_store.MetadataStore()) - await client.get_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_metadata_store_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_store.MetadataStore() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_metadata_store( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_get_metadata_store_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_metadata_store( - metadata_service.GetMetadataStoreRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_metadata_store_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_store.MetadataStore() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_store.MetadataStore()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_metadata_store( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_get_metadata_store_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_metadata_store( - metadata_service.GetMetadataStoreRequest(), - name='name_value', - ) - - -def test_list_metadata_stores(transport: str = 'grpc', request_type=metadata_service.ListMetadataStoresRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_stores), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListMetadataStoresResponse( - next_page_token='next_page_token_value', - ) - response = client.list_metadata_stores(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListMetadataStoresRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListMetadataStoresPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_metadata_stores_from_dict(): - test_list_metadata_stores(request_type=dict) - - -def test_list_metadata_stores_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_stores), - '__call__') as call: - client.list_metadata_stores() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListMetadataStoresRequest() - - -@pytest.mark.asyncio -async def test_list_metadata_stores_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListMetadataStoresRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_stores), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataStoresResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_metadata_stores(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListMetadataStoresRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListMetadataStoresAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_metadata_stores_async_from_dict(): - await test_list_metadata_stores_async(request_type=dict) - - -def test_list_metadata_stores_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.ListMetadataStoresRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_stores), - '__call__') as call: - call.return_value = metadata_service.ListMetadataStoresResponse() - client.list_metadata_stores(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_metadata_stores_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.ListMetadataStoresRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_stores), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataStoresResponse()) - await client.list_metadata_stores(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_metadata_stores_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_stores), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListMetadataStoresResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_metadata_stores( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -def test_list_metadata_stores_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError):
- client.list_metadata_stores(
- metadata_service.ListMetadataStoresRequest(),
- parent='parent_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_list_metadata_stores_flattened_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_metadata_stores),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataStoresResponse())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.list_metadata_stores(
- parent='parent_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_metadata_stores_flattened_error_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.list_metadata_stores(
- metadata_service.ListMetadataStoresRequest(),
- parent='parent_value',
- )
-
-
-def test_list_metadata_stores_pager():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_metadata_stores),
- '__call__') as call:
- # Set the response to a series of pages.
- call.side_effect = (
- metadata_service.ListMetadataStoresResponse(
- metadata_stores=[
- metadata_store.MetadataStore(),
- metadata_store.MetadataStore(),
- metadata_store.MetadataStore(),
- ],
- next_page_token='abc',
- ),
- metadata_service.ListMetadataStoresResponse(
- metadata_stores=[],
- next_page_token='def',
- ),
- metadata_service.ListMetadataStoresResponse(
- metadata_stores=[
- metadata_store.MetadataStore(),
- ],
- next_page_token='ghi',
- ),
- metadata_service.ListMetadataStoresResponse(
- metadata_stores=[
- metadata_store.MetadataStore(),
- metadata_store.MetadataStore(),
- ],
- ),
- RuntimeError,
- )
-
- metadata = (
- gapic_v1.routing_header.to_grpc_metadata((
- ('parent', ''),
- )),
- )
- pager = client.list_metadata_stores(request={})
-
- assert pager._metadata == metadata
-
- results = list(pager)
- assert len(results) == 6
- assert all(isinstance(i, metadata_store.MetadataStore)
- for i in results)
-
-def test_list_metadata_stores_pages():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_metadata_stores),
- '__call__') as call:
- # Set the response to a series of pages.
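- # (Each invocation of the mocked stub consumes the next staged response;
- # the trailing RuntimeError fails fast if the pager over-fetches.)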
- call.side_effect = (
- metadata_service.ListMetadataStoresResponse(
- metadata_stores=[
- metadata_store.MetadataStore(),
- metadata_store.MetadataStore(),
- metadata_store.MetadataStore(),
- ],
- next_page_token='abc',
- ),
- metadata_service.ListMetadataStoresResponse(
- metadata_stores=[],
- next_page_token='def',
- ),
- metadata_service.ListMetadataStoresResponse(
- metadata_stores=[
- metadata_store.MetadataStore(),
- ],
- next_page_token='ghi',
- ),
- metadata_service.ListMetadataStoresResponse(
- metadata_stores=[
- metadata_store.MetadataStore(),
- metadata_store.MetadataStore(),
- ],
- ),
- RuntimeError,
- )
- pages = list(client.list_metadata_stores(request={}).pages)
- for page_, token in zip(pages, ['abc','def','ghi', '']):
- assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_metadata_stores_async_pager():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_metadata_stores),
- '__call__', new_callable=mock.AsyncMock) as call:
- # Set the response to a series of pages.
- call.side_effect = (
- metadata_service.ListMetadataStoresResponse(
- metadata_stores=[
- metadata_store.MetadataStore(),
- metadata_store.MetadataStore(),
- metadata_store.MetadataStore(),
- ],
- next_page_token='abc',
- ),
- metadata_service.ListMetadataStoresResponse(
- metadata_stores=[],
- next_page_token='def',
- ),
- metadata_service.ListMetadataStoresResponse(
- metadata_stores=[
- metadata_store.MetadataStore(),
- ],
- next_page_token='ghi',
- ),
- metadata_service.ListMetadataStoresResponse(
- metadata_stores=[
- metadata_store.MetadataStore(),
- metadata_store.MetadataStore(),
- ],
- ),
- RuntimeError,
- )
- async_pager = await client.list_metadata_stores(request={},)
- assert async_pager.next_page_token == 'abc'
- responses = []
- async for response in async_pager:
- responses.append(response)
-
- assert len(responses) == 6
- assert all(isinstance(i, metadata_store.MetadataStore)
- for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_metadata_stores_async_pages():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_metadata_stores),
- '__call__', new_callable=mock.AsyncMock) as call:
- # Set the response to a series of pages.
- call.side_effect = ( - metadata_service.ListMetadataStoresResponse( - metadata_stores=[ - metadata_store.MetadataStore(), - metadata_store.MetadataStore(), - metadata_store.MetadataStore(), - ], - next_page_token='abc', - ), - metadata_service.ListMetadataStoresResponse( - metadata_stores=[], - next_page_token='def', - ), - metadata_service.ListMetadataStoresResponse( - metadata_stores=[ - metadata_store.MetadataStore(), - ], - next_page_token='ghi', - ), - metadata_service.ListMetadataStoresResponse( - metadata_stores=[ - metadata_store.MetadataStore(), - metadata_store.MetadataStore(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_metadata_stores(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_metadata_store(transport: str = 'grpc', request_type=metadata_service.DeleteMetadataStoreRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.DeleteMetadataStoreRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_metadata_store_from_dict(): - test_delete_metadata_store(request_type=dict) - - -def test_delete_metadata_store_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_metadata_store), - '__call__') as call: - client.delete_metadata_store() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.DeleteMetadataStoreRequest() - - -@pytest.mark.asyncio -async def test_delete_metadata_store_async(transport: str = 'grpc_asyncio', request_type=metadata_service.DeleteMetadataStoreRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.DeleteMetadataStoreRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_metadata_store_async_from_dict(): - await test_delete_metadata_store_async(request_type=dict) - - -def test_delete_metadata_store_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.DeleteMetadataStoreRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_metadata_store), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_metadata_store_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.DeleteMetadataStoreRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_metadata_store), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_metadata_store_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_metadata_store( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_delete_metadata_store_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError):
- client.delete_metadata_store(
- metadata_service.DeleteMetadataStoreRequest(),
- name='name_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_delete_metadata_store_flattened_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_metadata_store),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- operations_pb2.Operation(name='operations/spam')
- )
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.delete_metadata_store(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_metadata_store_flattened_error_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.delete_metadata_store(
- metadata_service.DeleteMetadataStoreRequest(),
- name='name_value',
- )
-
-
-def test_create_artifact(transport: str = 'grpc', request_type=metadata_service.CreateArtifactRequest):
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_artifact),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = gca_artifact.Artifact(
- name='name_value',
- display_name='display_name_value',
- uri='uri_value',
- etag='etag_value',
- state=gca_artifact.Artifact.State.PENDING,
- schema_title='schema_title_value',
- schema_version='schema_version_value',
- description='description_value',
- )
- response = client.create_artifact(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.CreateArtifactRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, gca_artifact.Artifact)
- assert response.name == 'name_value'
- assert response.display_name == 'display_name_value'
- assert response.uri == 'uri_value'
- assert response.etag == 'etag_value'
- assert response.state == gca_artifact.Artifact.State.PENDING
- assert response.schema_title == 'schema_title_value'
- assert response.schema_version == 'schema_version_value'
- assert response.description == 'description_value'
-
-
-def test_create_artifact_from_dict():
- test_create_artifact(request_type=dict)
-
-
-def test_create_artifact_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_artifact),
- '__call__') as call:
- client.create_artifact()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.CreateArtifactRequest()
-
-
-@pytest.mark.asyncio
-async def test_create_artifact_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateArtifactRequest):
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_artifact),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact(
- name='name_value',
- display_name='display_name_value',
- uri='uri_value',
- etag='etag_value',
- state=gca_artifact.Artifact.State.PENDING,
- schema_title='schema_title_value',
- schema_version='schema_version_value',
- description='description_value',
- ))
- response = await client.create_artifact(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.CreateArtifactRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, gca_artifact.Artifact)
- assert response.name == 'name_value'
- assert response.display_name == 'display_name_value'
- assert response.uri == 'uri_value'
- assert response.etag == 'etag_value'
- assert response.state == gca_artifact.Artifact.State.PENDING
- assert response.schema_title == 'schema_title_value'
- assert response.schema_version == 'schema_version_value'
- assert response.description == 'description_value'
-
-
-@pytest.mark.asyncio
-async def test_create_artifact_async_from_dict():
- await test_create_artifact_async(request_type=dict)
-
-
-def test_create_artifact_field_headers():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = metadata_service.CreateArtifactRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_artifact),
- '__call__') as call:
- call.return_value = gca_artifact.Artifact()
- client.create_artifact(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_create_artifact_field_headers_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
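- # (Routing information is propagated in the x-goog-request-params
- # metadata entry asserted further below.)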
- request = metadata_service.CreateArtifactRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_artifact),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact())
- await client.create_artifact(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-def test_create_artifact_flattened():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_artifact),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = gca_artifact.Artifact()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.create_artifact(
- parent='parent_value',
- artifact=gca_artifact.Artifact(name='name_value'),
- artifact_id='artifact_id_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
- assert args[0].artifact == gca_artifact.Artifact(name='name_value')
- assert args[0].artifact_id == 'artifact_id_value'
-
-
-def test_create_artifact_flattened_error():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.create_artifact(
- metadata_service.CreateArtifactRequest(),
- parent='parent_value',
- artifact=gca_artifact.Artifact(name='name_value'),
- artifact_id='artifact_id_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_create_artifact_flattened_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_artifact),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.create_artifact(
- parent='parent_value',
- artifact=gca_artifact.Artifact(name='name_value'),
- artifact_id='artifact_id_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
- assert args[0].artifact == gca_artifact.Artifact(name='name_value')
- assert args[0].artifact_id == 'artifact_id_value'
-
-
-@pytest.mark.asyncio
-async def test_create_artifact_flattened_error_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError): - await client.create_artifact( - metadata_service.CreateArtifactRequest(), - parent='parent_value', - artifact=gca_artifact.Artifact(name='name_value'), - artifact_id='artifact_id_value', - ) - - -def test_get_artifact(transport: str = 'grpc', request_type=metadata_service.GetArtifactRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = artifact.Artifact( - name='name_value', - display_name='display_name_value', - uri='uri_value', - etag='etag_value', - state=artifact.Artifact.State.PENDING, - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - ) - response = client.get_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetArtifactRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, artifact.Artifact) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.uri == 'uri_value' - assert response.etag == 'etag_value' - assert response.state == artifact.Artifact.State.PENDING - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -def test_get_artifact_from_dict(): - test_get_artifact(request_type=dict) - - -def test_get_artifact_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_artifact), - '__call__') as call: - client.get_artifact() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetArtifactRequest() - - -@pytest.mark.asyncio -async def test_get_artifact_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetArtifactRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. 
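- # (grpc_helpers_async.FakeUnaryUnaryCall wraps the message in an awaitable
- # fake call, so awaiting the mocked stub behaves like a real unary-unary RPC.)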
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(artifact.Artifact(
- name='name_value',
- display_name='display_name_value',
- uri='uri_value',
- etag='etag_value',
- state=artifact.Artifact.State.PENDING,
- schema_title='schema_title_value',
- schema_version='schema_version_value',
- description='description_value',
- ))
- response = await client.get_artifact(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.GetArtifactRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, artifact.Artifact)
- assert response.name == 'name_value'
- assert response.display_name == 'display_name_value'
- assert response.uri == 'uri_value'
- assert response.etag == 'etag_value'
- assert response.state == artifact.Artifact.State.PENDING
- assert response.schema_title == 'schema_title_value'
- assert response.schema_version == 'schema_version_value'
- assert response.description == 'description_value'
-
-
-@pytest.mark.asyncio
-async def test_get_artifact_async_from_dict():
- await test_get_artifact_async(request_type=dict)
-
-
-def test_get_artifact_field_headers():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = metadata_service.GetArtifactRequest()
-
- request.name = 'name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_artifact),
- '__call__') as call:
- call.return_value = artifact.Artifact()
- client.get_artifact(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_get_artifact_field_headers_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = metadata_service.GetArtifactRequest()
-
- request.name = 'name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_artifact),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(artifact.Artifact())
- await client.get_artifact(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-def test_get_artifact_flattened():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_artifact),
- '__call__') as call:
- # Designate an appropriate return value for the call.
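- # (For the sync transport the mocked stub can return the bare proto
- # message directly; no awaitable wrapper is needed.)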
- call.return_value = artifact.Artifact()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.get_artifact(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-def test_get_artifact_flattened_error():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.get_artifact(
- metadata_service.GetArtifactRequest(),
- name='name_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_get_artifact_flattened_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_artifact),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(artifact.Artifact())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.get_artifact(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_get_artifact_flattened_error_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.get_artifact(
- metadata_service.GetArtifactRequest(),
- name='name_value',
- )
-
-
-def test_list_artifacts(transport: str = 'grpc', request_type=metadata_service.ListArtifactsRequest):
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_artifacts),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = metadata_service.ListArtifactsResponse(
- next_page_token='next_page_token_value',
- )
- response = client.list_artifacts(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.ListArtifactsRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ListArtifactsPager)
- assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_artifacts_from_dict():
- test_list_artifacts(request_type=dict)
-
-
-def test_list_artifacts_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_artifacts),
- '__call__') as call:
- client.list_artifacts()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.ListArtifactsRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_artifacts_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListArtifactsRequest):
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_artifacts),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListArtifactsResponse(
- next_page_token='next_page_token_value',
- ))
- response = await client.list_artifacts(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.ListArtifactsRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ListArtifactsAsyncPager)
- assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_artifacts_async_from_dict():
- await test_list_artifacts_async(request_type=dict)
-
-
-def test_list_artifacts_field_headers():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = metadata_service.ListArtifactsRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_artifacts),
- '__call__') as call:
- call.return_value = metadata_service.ListArtifactsResponse()
- client.list_artifacts(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_artifacts_field_headers_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = metadata_service.ListArtifactsRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_artifacts),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListArtifactsResponse())
- await client.list_artifacts(request)
-
- # Establish that the underlying gRPC stub method was called.
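- # (The bare truthiness check below is the async variant's idiom for
- # "the stub was called at least once".)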
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-def test_list_artifacts_flattened():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_artifacts),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = metadata_service.ListArtifactsResponse()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.list_artifacts(
- parent='parent_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
-
-
-def test_list_artifacts_flattened_error():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.list_artifacts(
- metadata_service.ListArtifactsRequest(),
- parent='parent_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_list_artifacts_flattened_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_artifacts),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListArtifactsResponse())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.list_artifacts(
- parent='parent_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_artifacts_flattened_error_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.list_artifacts(
- metadata_service.ListArtifactsRequest(),
- parent='parent_value',
- )
-
-
-def test_list_artifacts_pager():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_artifacts),
- '__call__') as call:
- # Set the response to a series of pages.
- call.side_effect = (
- metadata_service.ListArtifactsResponse(
- artifacts=[
- artifact.Artifact(),
- artifact.Artifact(),
- artifact.Artifact(),
- ],
- next_page_token='abc',
- ),
- metadata_service.ListArtifactsResponse(
- artifacts=[],
- next_page_token='def',
- ),
- metadata_service.ListArtifactsResponse(
- artifacts=[
- artifact.Artifact(),
- ],
- next_page_token='ghi',
- ),
- metadata_service.ListArtifactsResponse(
- artifacts=[
- artifact.Artifact(),
- artifact.Artifact(),
- ],
- ),
- RuntimeError,
- )
-
- metadata = (
- gapic_v1.routing_header.to_grpc_metadata((
- ('parent', ''),
- )),
- )
- pager = client.list_artifacts(request={})
-
- assert pager._metadata == metadata
-
- results = list(pager)
- assert len(results) == 6
- assert all(isinstance(i, artifact.Artifact)
- for i in results)
-
-def test_list_artifacts_pages():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_artifacts),
- '__call__') as call:
- # Set the response to a series of pages.
- call.side_effect = (
- metadata_service.ListArtifactsResponse(
- artifacts=[
- artifact.Artifact(),
- artifact.Artifact(),
- artifact.Artifact(),
- ],
- next_page_token='abc',
- ),
- metadata_service.ListArtifactsResponse(
- artifacts=[],
- next_page_token='def',
- ),
- metadata_service.ListArtifactsResponse(
- artifacts=[
- artifact.Artifact(),
- ],
- next_page_token='ghi',
- ),
- metadata_service.ListArtifactsResponse(
- artifacts=[
- artifact.Artifact(),
- artifact.Artifact(),
- ],
- ),
- RuntimeError,
- )
- pages = list(client.list_artifacts(request={}).pages)
- for page_, token in zip(pages, ['abc','def','ghi', '']):
- assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_artifacts_async_pager():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_artifacts),
- '__call__', new_callable=mock.AsyncMock) as call:
- # Set the response to a series of pages.
- call.side_effect = (
- metadata_service.ListArtifactsResponse(
- artifacts=[
- artifact.Artifact(),
- artifact.Artifact(),
- artifact.Artifact(),
- ],
- next_page_token='abc',
- ),
- metadata_service.ListArtifactsResponse(
- artifacts=[],
- next_page_token='def',
- ),
- metadata_service.ListArtifactsResponse(
- artifacts=[
- artifact.Artifact(),
- ],
- next_page_token='ghi',
- ),
- metadata_service.ListArtifactsResponse(
- artifacts=[
- artifact.Artifact(),
- artifact.Artifact(),
- ],
- ),
- RuntimeError,
- )
- async_pager = await client.list_artifacts(request={},)
- assert async_pager.next_page_token == 'abc'
- responses = []
- async for response in async_pager:
- responses.append(response)
-
- assert len(responses) == 6
- assert all(isinstance(i, artifact.Artifact)
- for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_artifacts_async_pages():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_artifacts),
- '__call__', new_callable=mock.AsyncMock) as call:
- # Set the response to a series of pages.
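- # (With new_callable=mock.AsyncMock, each awaited stub call yields the
- # next item of side_effect, emulating successive page fetches.)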
- call.side_effect = ( - metadata_service.ListArtifactsResponse( - artifacts=[ - artifact.Artifact(), - artifact.Artifact(), - artifact.Artifact(), - ], - next_page_token='abc', - ), - metadata_service.ListArtifactsResponse( - artifacts=[], - next_page_token='def', - ), - metadata_service.ListArtifactsResponse( - artifacts=[ - artifact.Artifact(), - ], - next_page_token='ghi', - ), - metadata_service.ListArtifactsResponse( - artifacts=[ - artifact.Artifact(), - artifact.Artifact(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_artifacts(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_artifact(transport: str = 'grpc', request_type=metadata_service.UpdateArtifactRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_artifact.Artifact( - name='name_value', - display_name='display_name_value', - uri='uri_value', - etag='etag_value', - state=gca_artifact.Artifact.State.PENDING, - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - ) - response = client.update_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.UpdateArtifactRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_artifact.Artifact) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.uri == 'uri_value' - assert response.etag == 'etag_value' - assert response.state == gca_artifact.Artifact.State.PENDING - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -def test_update_artifact_from_dict(): - test_update_artifact(request_type=dict) - - -def test_update_artifact_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_artifact), - '__call__') as call: - client.update_artifact() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.UpdateArtifactRequest() - - -@pytest.mark.asyncio -async def test_update_artifact_async(transport: str = 'grpc_asyncio', request_type=metadata_service.UpdateArtifactRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.update_artifact),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact(
- name='name_value',
- display_name='display_name_value',
- uri='uri_value',
- etag='etag_value',
- state=gca_artifact.Artifact.State.PENDING,
- schema_title='schema_title_value',
- schema_version='schema_version_value',
- description='description_value',
- ))
- response = await client.update_artifact(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.UpdateArtifactRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, gca_artifact.Artifact)
- assert response.name == 'name_value'
- assert response.display_name == 'display_name_value'
- assert response.uri == 'uri_value'
- assert response.etag == 'etag_value'
- assert response.state == gca_artifact.Artifact.State.PENDING
- assert response.schema_title == 'schema_title_value'
- assert response.schema_version == 'schema_version_value'
- assert response.description == 'description_value'
-
-
-@pytest.mark.asyncio
-async def test_update_artifact_async_from_dict():
- await test_update_artifact_async(request_type=dict)
-
-
-def test_update_artifact_field_headers():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = metadata_service.UpdateArtifactRequest()
-
- request.artifact.name = 'artifact.name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.update_artifact),
- '__call__') as call:
- call.return_value = gca_artifact.Artifact()
- client.update_artifact(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'artifact.name=artifact.name/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_update_artifact_field_headers_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = metadata_service.UpdateArtifactRequest()
-
- request.artifact.name = 'artifact.name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.update_artifact),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact())
- await client.update_artifact(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
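- # (The routing header travels in the metadata kwarg recorded on the
- # mocked stub call, inspected below.)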
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'artifact.name=artifact.name/value',
- ) in kw['metadata']
-
-
-def test_update_artifact_flattened():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.update_artifact),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = gca_artifact.Artifact()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.update_artifact(
- artifact=gca_artifact.Artifact(name='name_value'),
- update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].artifact == gca_artifact.Artifact(name='name_value')
- assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
-
-
-def test_update_artifact_flattened_error():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.update_artifact(
- metadata_service.UpdateArtifactRequest(),
- artifact=gca_artifact.Artifact(name='name_value'),
- update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
- )
-
-
-@pytest.mark.asyncio
-async def test_update_artifact_flattened_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.update_artifact),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.update_artifact(
- artifact=gca_artifact.Artifact(name='name_value'),
- update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].artifact == gca_artifact.Artifact(name='name_value')
- assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
-
-
-@pytest.mark.asyncio
-async def test_update_artifact_flattened_error_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.update_artifact(
- metadata_service.UpdateArtifactRequest(),
- artifact=gca_artifact.Artifact(name='name_value'),
- update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
- )
-
-
-def test_create_context(transport: str = 'grpc', request_type=metadata_service.CreateContextRequest):
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_context),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = gca_context.Context(
- name='name_value',
- display_name='display_name_value',
- etag='etag_value',
- parent_contexts=['parent_contexts_value'],
- schema_title='schema_title_value',
- schema_version='schema_version_value',
- description='description_value',
- )
- response = client.create_context(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.CreateContextRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, gca_context.Context)
- assert response.name == 'name_value'
- assert response.display_name == 'display_name_value'
- assert response.etag == 'etag_value'
- assert response.parent_contexts == ['parent_contexts_value']
- assert response.schema_title == 'schema_title_value'
- assert response.schema_version == 'schema_version_value'
- assert response.description == 'description_value'
-
-
-def test_create_context_from_dict():
- test_create_context(request_type=dict)
-
-
-def test_create_context_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_context),
- '__call__') as call:
- client.create_context()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.CreateContextRequest()
-
-
-@pytest.mark.asyncio
-async def test_create_context_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateContextRequest):
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_context),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context(
- name='name_value',
- display_name='display_name_value',
- etag='etag_value',
- parent_contexts=['parent_contexts_value'],
- schema_title='schema_title_value',
- schema_version='schema_version_value',
- description='description_value',
- ))
- response = await client.create_context(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.CreateContextRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, gca_context.Context) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.etag == 'etag_value' - assert response.parent_contexts == ['parent_contexts_value'] - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_create_context_async_from_dict(): - await test_create_context_async(request_type=dict) - - -def test_create_context_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.CreateContextRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_context), - '__call__') as call: - call.return_value = gca_context.Context() - client.create_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_context_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.CreateContextRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_context), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context()) - await client.create_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_context_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_context.Context() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_context( - parent='parent_value', - context=gca_context.Context(name='name_value'), - context_id='context_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
- assert args[0].context == gca_context.Context(name='name_value')
- assert args[0].context_id == 'context_id_value'
-
-
-def test_create_context_flattened_error():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.create_context(
- metadata_service.CreateContextRequest(),
- parent='parent_value',
- context=gca_context.Context(name='name_value'),
- context_id='context_id_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_create_context_flattened_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_context),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.create_context(
- parent='parent_value',
- context=gca_context.Context(name='name_value'),
- context_id='context_id_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
- assert args[0].context == gca_context.Context(name='name_value')
- assert args[0].context_id == 'context_id_value'
-
-
-@pytest.mark.asyncio
-async def test_create_context_flattened_error_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.create_context(
- metadata_service.CreateContextRequest(),
- parent='parent_value',
- context=gca_context.Context(name='name_value'),
- context_id='context_id_value',
- )
-
-
-def test_get_context(transport: str = 'grpc', request_type=metadata_service.GetContextRequest):
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_context),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = context.Context(
- name='name_value',
- display_name='display_name_value',
- etag='etag_value',
- parent_contexts=['parent_contexts_value'],
- schema_title='schema_title_value',
- schema_version='schema_version_value',
- description='description_value',
- )
- response = client.get_context(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.GetContextRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, context.Context)
- assert response.name == 'name_value'
- assert response.display_name == 'display_name_value'
- assert response.etag == 'etag_value'
- assert response.parent_contexts == ['parent_contexts_value']
- assert response.schema_title == 'schema_title_value'
- assert response.schema_version == 'schema_version_value'
- assert response.description == 'description_value'
-
-
-def test_get_context_from_dict():
- test_get_context(request_type=dict)
-
-
-def test_get_context_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_context),
- '__call__') as call:
- client.get_context()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.GetContextRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_context_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetContextRequest):
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_context),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(context.Context(
- name='name_value',
- display_name='display_name_value',
- etag='etag_value',
- parent_contexts=['parent_contexts_value'],
- schema_title='schema_title_value',
- schema_version='schema_version_value',
- description='description_value',
- ))
- response = await client.get_context(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.GetContextRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, context.Context)
- assert response.name == 'name_value'
- assert response.display_name == 'display_name_value'
- assert response.etag == 'etag_value'
- assert response.parent_contexts == ['parent_contexts_value']
- assert response.schema_title == 'schema_title_value'
- assert response.schema_version == 'schema_version_value'
- assert response.description == 'description_value'
-
-
-@pytest.mark.asyncio
-async def test_get_context_async_from_dict():
- await test_get_context_async(request_type=dict)
-
-
-def test_get_context_field_headers():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = metadata_service.GetContextRequest()
-
- request.name = 'name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_context),
- '__call__') as call:
- call.return_value = context.Context()
- client.get_context(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_get_context_field_headers_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = metadata_service.GetContextRequest()
-
- request.name = 'name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_context),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(context.Context())
- await client.get_context(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-def test_get_context_flattened():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_context),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = context.Context()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.get_context(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-def test_get_context_flattened_error():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.get_context(
- metadata_service.GetContextRequest(),
- name='name_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_get_context_flattened_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_context),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(context.Context())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.get_context(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_get_context_flattened_error_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.get_context(
- metadata_service.GetContextRequest(),
- name='name_value',
- )
-
-
-def test_list_contexts(transport: str = 'grpc', request_type=metadata_service.ListContextsRequest):
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_contexts),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = metadata_service.ListContextsResponse(
- next_page_token='next_page_token_value',
- )
- response = client.list_contexts(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.ListContextsRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ListContextsPager)
- assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_contexts_from_dict():
- test_list_contexts(request_type=dict)
-
-
-def test_list_contexts_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_contexts),
- '__call__') as call:
- client.list_contexts()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.ListContextsRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_contexts_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListContextsRequest):
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_contexts),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListContextsResponse(
- next_page_token='next_page_token_value',
- ))
- response = await client.list_contexts(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.ListContextsRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ListContextsAsyncPager)
- assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_contexts_async_from_dict():
- await test_list_contexts_async(request_type=dict)
-
-
-def test_list_contexts_field_headers():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = metadata_service.ListContextsRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_contexts),
- '__call__') as call:
- call.return_value = metadata_service.ListContextsResponse()
- client.list_contexts(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_contexts_field_headers_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = metadata_service.ListContextsRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_contexts),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListContextsResponse())
- await client.list_contexts(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-def test_list_contexts_flattened():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_contexts),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = metadata_service.ListContextsResponse()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.list_contexts(
- parent='parent_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
-
-
-def test_list_contexts_flattened_error():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.list_contexts(
- metadata_service.ListContextsRequest(),
- parent='parent_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_list_contexts_flattened_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_contexts),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListContextsResponse())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.list_contexts(
- parent='parent_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_contexts_flattened_error_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.list_contexts(
- metadata_service.ListContextsRequest(),
- parent='parent_value',
- )
-
-
-def test_list_contexts_pager():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_contexts),
- '__call__') as call:
- # Set the response to a series of pages.
- call.side_effect = (
- metadata_service.ListContextsResponse(
- contexts=[
- context.Context(),
- context.Context(),
- context.Context(),
- ],
- next_page_token='abc',
- ),
- metadata_service.ListContextsResponse(
- contexts=[],
- next_page_token='def',
- ),
- metadata_service.ListContextsResponse(
- contexts=[
- context.Context(),
- ],
- next_page_token='ghi',
- ),
- metadata_service.ListContextsResponse(
- contexts=[
- context.Context(),
- context.Context(),
- ],
- ),
- RuntimeError,
- )
-
- metadata = (
- gapic_v1.routing_header.to_grpc_metadata((
- ('parent', ''),
- )),
- )
- pager = client.list_contexts(request={})
-
- assert pager._metadata == metadata
-
- results = [i for i in pager]
- assert len(results) == 6
- assert all(isinstance(i, context.Context)
- for i in results)
-
-def test_list_contexts_pages():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_contexts),
- '__call__') as call:
- # Set the response to a series of pages.
- call.side_effect = (
- metadata_service.ListContextsResponse(
- contexts=[
- context.Context(),
- context.Context(),
- context.Context(),
- ],
- next_page_token='abc',
- ),
- metadata_service.ListContextsResponse(
- contexts=[],
- next_page_token='def',
- ),
- metadata_service.ListContextsResponse(
- contexts=[
- context.Context(),
- ],
- next_page_token='ghi',
- ),
- metadata_service.ListContextsResponse(
- contexts=[
- context.Context(),
- context.Context(),
- ],
- ),
- RuntimeError,
- )
- pages = list(client.list_contexts(request={}).pages)
- for page_, token in zip(pages, ['abc','def','ghi', '']):
- assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_contexts_async_pager():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_contexts),
- '__call__', new_callable=mock.AsyncMock) as call:
- # Set the response to a series of pages.
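- # (The trailing RuntimeError in each series of pages is a sentinel: the
- # pager is expected to stop after the last fake page and never raise it.)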
- call.side_effect = (
- metadata_service.ListContextsResponse(
- contexts=[
- context.Context(),
- context.Context(),
- context.Context(),
- ],
- next_page_token='abc',
- ),
- metadata_service.ListContextsResponse(
- contexts=[],
- next_page_token='def',
- ),
- metadata_service.ListContextsResponse(
- contexts=[
- context.Context(),
- ],
- next_page_token='ghi',
- ),
- metadata_service.ListContextsResponse(
- contexts=[
- context.Context(),
- context.Context(),
- ],
- ),
- RuntimeError,
- )
- async_pager = await client.list_contexts(request={},)
- assert async_pager.next_page_token == 'abc'
- responses = []
- async for response in async_pager:
- responses.append(response)
-
- assert len(responses) == 6
- assert all(isinstance(i, context.Context)
- for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_contexts_async_pages():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_contexts),
- '__call__', new_callable=mock.AsyncMock) as call:
- # Set the response to a series of pages.
- call.side_effect = (
- metadata_service.ListContextsResponse(
- contexts=[
- context.Context(),
- context.Context(),
- context.Context(),
- ],
- next_page_token='abc',
- ),
- metadata_service.ListContextsResponse(
- contexts=[],
- next_page_token='def',
- ),
- metadata_service.ListContextsResponse(
- contexts=[
- context.Context(),
- ],
- next_page_token='ghi',
- ),
- metadata_service.ListContextsResponse(
- contexts=[
- context.Context(),
- context.Context(),
- ],
- ),
- RuntimeError,
- )
- pages = []
- async for page_ in (await client.list_contexts(request={})).pages:
- pages.append(page_)
- for page_, token in zip(pages, ['abc','def','ghi', '']):
- assert page_.raw_page.next_page_token == token
-
-def test_update_context(transport: str = 'grpc', request_type=metadata_service.UpdateContextRequest):
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.update_context),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = gca_context.Context(
- name='name_value',
- display_name='display_name_value',
- etag='etag_value',
- parent_contexts=['parent_contexts_value'],
- schema_title='schema_title_value',
- schema_version='schema_version_value',
- description='description_value',
- )
- response = client.update_context(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.UpdateContextRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, gca_context.Context)
- assert response.name == 'name_value'
- assert response.display_name == 'display_name_value'
- assert response.etag == 'etag_value'
- assert response.parent_contexts == ['parent_contexts_value']
- assert response.schema_title == 'schema_title_value'
- assert response.schema_version == 'schema_version_value'
- assert response.description == 'description_value'
-
-
-def test_update_context_from_dict():
- test_update_context(request_type=dict)
-
-
-def test_update_context_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.update_context),
- '__call__') as call:
- client.update_context()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.UpdateContextRequest()
-
-
-@pytest.mark.asyncio
-async def test_update_context_async(transport: str = 'grpc_asyncio', request_type=metadata_service.UpdateContextRequest):
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.update_context),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context(
- name='name_value',
- display_name='display_name_value',
- etag='etag_value',
- parent_contexts=['parent_contexts_value'],
- schema_title='schema_title_value',
- schema_version='schema_version_value',
- description='description_value',
- ))
- response = await client.update_context(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.UpdateContextRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, gca_context.Context)
- assert response.name == 'name_value'
- assert response.display_name == 'display_name_value'
- assert response.etag == 'etag_value'
- assert response.parent_contexts == ['parent_contexts_value']
- assert response.schema_title == 'schema_title_value'
- assert response.schema_version == 'schema_version_value'
- assert response.description == 'description_value'
-
-
-@pytest.mark.asyncio
-async def test_update_context_async_from_dict():
- await test_update_context_async(request_type=dict)
-
-
-def test_update_context_field_headers():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = metadata_service.UpdateContextRequest()
-
- request.context.name = 'context.name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object( - type(client.transport.update_context), - '__call__') as call: - call.return_value = gca_context.Context() - client.update_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'context.name=context.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_context_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.UpdateContextRequest() - - request.context.name = 'context.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_context), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context()) - await client.update_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'context.name=context.name/value', - ) in kw['metadata'] - - -def test_update_context_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_context.Context() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_context( - context=gca_context.Context(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].context == gca_context.Context(name='name_value') - assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) - - -def test_update_context_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_context( - metadata_service.UpdateContextRequest(), - context=gca_context.Context(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_context_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_context), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.update_context(
- context=gca_context.Context(name='name_value'),
- update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].context == gca_context.Context(name='name_value')
- assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
-
-
-@pytest.mark.asyncio
-async def test_update_context_flattened_error_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.update_context(
- metadata_service.UpdateContextRequest(),
- context=gca_context.Context(name='name_value'),
- update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
- )
-
-
-def test_delete_context(transport: str = 'grpc', request_type=metadata_service.DeleteContextRequest):
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_context),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = operations_pb2.Operation(name='operations/spam')
- response = client.delete_context(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.DeleteContextRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, future.Future)
-
-
-def test_delete_context_from_dict():
- test_delete_context(request_type=dict)
-
-
-def test_delete_context_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_context),
- '__call__') as call:
- client.delete_context()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.DeleteContextRequest()
-
-
-@pytest.mark.asyncio
-async def test_delete_context_async(transport: str = 'grpc_asyncio', request_type=metadata_service.DeleteContextRequest):
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
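- # delete_context returns a long-running operation: the stub yields a raw
- # operations_pb2.Operation, which the client wraps in a future.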
- with mock.patch.object( - type(client.transport.delete_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.DeleteContextRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_context_async_from_dict(): - await test_delete_context_async(request_type=dict) - - -def test_delete_context_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.DeleteContextRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_context), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_context_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.DeleteContextRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_context), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_context_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_context( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-def test_delete_context_flattened_error():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.delete_context(
- metadata_service.DeleteContextRequest(),
- name='name_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_delete_context_flattened_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_context),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- operations_pb2.Operation(name='operations/spam')
- )
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.delete_context(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_context_flattened_error_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.delete_context(
- metadata_service.DeleteContextRequest(),
- name='name_value',
- )
-
-
-def test_add_context_artifacts_and_executions(transport: str = 'grpc', request_type=metadata_service.AddContextArtifactsAndExecutionsRequest):
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.add_context_artifacts_and_executions),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse(
- )
- response = client.add_context_artifacts_and_executions(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, metadata_service.AddContextArtifactsAndExecutionsResponse)
-
-
-def test_add_context_artifacts_and_executions_from_dict():
- test_add_context_artifacts_and_executions(request_type=dict)
-
-
-def test_add_context_artifacts_and_executions_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.add_context_artifacts_and_executions),
- '__call__') as call:
- client.add_context_artifacts_and_executions()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest()
-
-
-@pytest.mark.asyncio
-async def test_add_context_artifacts_and_executions_async(transport: str = 'grpc_asyncio', request_type=metadata_service.AddContextArtifactsAndExecutionsRequest):
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.add_context_artifacts_and_executions),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextArtifactsAndExecutionsResponse(
- ))
- response = await client.add_context_artifacts_and_executions(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, metadata_service.AddContextArtifactsAndExecutionsResponse)
-
-
-@pytest.mark.asyncio
-async def test_add_context_artifacts_and_executions_async_from_dict():
- await test_add_context_artifacts_and_executions_async(request_type=dict)
-
-
-def test_add_context_artifacts_and_executions_field_headers():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = metadata_service.AddContextArtifactsAndExecutionsRequest()
-
- request.context = 'context/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.add_context_artifacts_and_executions),
- '__call__') as call:
- call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse()
- client.add_context_artifacts_and_executions(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'context=context/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_add_context_artifacts_and_executions_field_headers_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = metadata_service.AddContextArtifactsAndExecutionsRequest()
-
- request.context = 'context/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.add_context_artifacts_and_executions),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextArtifactsAndExecutionsResponse())
- await client.add_context_artifacts_and_executions(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'context=context/value',
- ) in kw['metadata']
-
-
-def test_add_context_artifacts_and_executions_flattened():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.add_context_artifacts_and_executions),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.add_context_artifacts_and_executions(
- context='context_value',
- artifacts=['artifacts_value'],
- executions=['executions_value'],
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].context == 'context_value'
- assert args[0].artifacts == ['artifacts_value']
- assert args[0].executions == ['executions_value']
-
-
-def test_add_context_artifacts_and_executions_flattened_error():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.add_context_artifacts_and_executions(
- metadata_service.AddContextArtifactsAndExecutionsRequest(),
- context='context_value',
- artifacts=['artifacts_value'],
- executions=['executions_value'],
- )
-
-
-@pytest.mark.asyncio
-async def test_add_context_artifacts_and_executions_flattened_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.add_context_artifacts_and_executions),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextArtifactsAndExecutionsResponse())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.add_context_artifacts_and_executions(
- context='context_value',
- artifacts=['artifacts_value'],
- executions=['executions_value'],
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].context == 'context_value'
- assert args[0].artifacts == ['artifacts_value']
- assert args[0].executions == ['executions_value']
-
-
-@pytest.mark.asyncio
-async def test_add_context_artifacts_and_executions_flattened_error_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.add_context_artifacts_and_executions(
- metadata_service.AddContextArtifactsAndExecutionsRequest(),
- context='context_value',
- artifacts=['artifacts_value'],
- executions=['executions_value'],
- )
-
-
-def test_add_context_children(transport: str = 'grpc', request_type=metadata_service.AddContextChildrenRequest):
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.add_context_children),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = metadata_service.AddContextChildrenResponse(
- )
- response = client.add_context_children(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.AddContextChildrenRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, metadata_service.AddContextChildrenResponse)
-
-
-def test_add_context_children_from_dict():
- test_add_context_children(request_type=dict)
-
-
-def test_add_context_children_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.add_context_children),
- '__call__') as call:
- client.add_context_children()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.AddContextChildrenRequest()
-
-
-@pytest.mark.asyncio
-async def test_add_context_children_async(transport: str = 'grpc_asyncio', request_type=metadata_service.AddContextChildrenRequest):
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.add_context_children),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextChildrenResponse(
- ))
- response = await client.add_context_children(request)
-
- # Establish that the underlying gRPC stub method was called.
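- # Unlike the sync tests, the async tests only assert that the stub was
- # called at least once rather than exactly once.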
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.AddContextChildrenRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, metadata_service.AddContextChildrenResponse) - - -@pytest.mark.asyncio -async def test_add_context_children_async_from_dict(): - await test_add_context_children_async(request_type=dict) - - -def test_add_context_children_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.AddContextChildrenRequest() - - request.context = 'context/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_context_children), - '__call__') as call: - call.return_value = metadata_service.AddContextChildrenResponse() - client.add_context_children(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'context=context/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_add_context_children_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.AddContextChildrenRequest() - - request.context = 'context/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_context_children), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextChildrenResponse()) - await client.add_context_children(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'context=context/value', - ) in kw['metadata'] - - -def test_add_context_children_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_context_children), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.AddContextChildrenResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.add_context_children( - context='context_value', - child_contexts=['child_contexts_value'], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].context == 'context_value'
- assert args[0].child_contexts == ['child_contexts_value']
-
-
-def test_add_context_children_flattened_error():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.add_context_children(
- metadata_service.AddContextChildrenRequest(),
- context='context_value',
- child_contexts=['child_contexts_value'],
- )
-
-
-@pytest.mark.asyncio
-async def test_add_context_children_flattened_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.add_context_children),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextChildrenResponse())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.add_context_children(
- context='context_value',
- child_contexts=['child_contexts_value'],
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].context == 'context_value'
- assert args[0].child_contexts == ['child_contexts_value']
-
-
-@pytest.mark.asyncio
-async def test_add_context_children_flattened_error_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.add_context_children(
- metadata_service.AddContextChildrenRequest(),
- context='context_value',
- child_contexts=['child_contexts_value'],
- )
-
-
-def test_query_context_lineage_subgraph(transport: str = 'grpc', request_type=metadata_service.QueryContextLineageSubgraphRequest):
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.query_context_lineage_subgraph),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = lineage_subgraph.LineageSubgraph(
- )
- response = client.query_context_lineage_subgraph(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.QueryContextLineageSubgraphRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, lineage_subgraph.LineageSubgraph)
-
-
-def test_query_context_lineage_subgraph_from_dict():
- test_query_context_lineage_subgraph(request_type=dict)
-
-
-def test_query_context_lineage_subgraph_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.query_context_lineage_subgraph),
- '__call__') as call:
- client.query_context_lineage_subgraph()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.QueryContextLineageSubgraphRequest()
-
-
-@pytest.mark.asyncio
-async def test_query_context_lineage_subgraph_async(transport: str = 'grpc_asyncio', request_type=metadata_service.QueryContextLineageSubgraphRequest):
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.query_context_lineage_subgraph),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph(
- ))
- response = await client.query_context_lineage_subgraph(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.QueryContextLineageSubgraphRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, lineage_subgraph.LineageSubgraph)
-
-
-@pytest.mark.asyncio
-async def test_query_context_lineage_subgraph_async_from_dict():
- await test_query_context_lineage_subgraph_async(request_type=dict)
-
-
-def test_query_context_lineage_subgraph_field_headers():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = metadata_service.QueryContextLineageSubgraphRequest()
-
- request.context = 'context/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.query_context_lineage_subgraph),
- '__call__') as call:
- call.return_value = lineage_subgraph.LineageSubgraph()
- client.query_context_lineage_subgraph(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'context=context/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_query_context_lineage_subgraph_field_headers_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = metadata_service.QueryContextLineageSubgraphRequest()
-
- request.context = 'context/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.query_context_lineage_subgraph),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph())
- await client.query_context_lineage_subgraph(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'context=context/value',
- ) in kw['metadata']
-
-
-def test_query_context_lineage_subgraph_flattened():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.query_context_lineage_subgraph),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = lineage_subgraph.LineageSubgraph()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.query_context_lineage_subgraph(
- context='context_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].context == 'context_value'
-
-
-def test_query_context_lineage_subgraph_flattened_error():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.query_context_lineage_subgraph(
- metadata_service.QueryContextLineageSubgraphRequest(),
- context='context_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_query_context_lineage_subgraph_flattened_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.query_context_lineage_subgraph),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.query_context_lineage_subgraph(
- context='context_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].context == 'context_value'
-
-
-@pytest.mark.asyncio
-async def test_query_context_lineage_subgraph_flattened_error_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError): - await client.query_context_lineage_subgraph( - metadata_service.QueryContextLineageSubgraphRequest(), - context='context_value', - ) - - -def test_create_execution(transport: str = 'grpc', request_type=metadata_service.CreateExecutionRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_execution), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_execution.Execution( - name='name_value', - display_name='display_name_value', - state=gca_execution.Execution.State.NEW, - etag='etag_value', - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - ) - response = client.create_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateExecutionRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_execution.Execution) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == gca_execution.Execution.State.NEW - assert response.etag == 'etag_value' - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -def test_create_execution_from_dict(): - test_create_execution(request_type=dict) - - -def test_create_execution_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_execution), - '__call__') as call: - client.create_execution() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateExecutionRequest() - - -@pytest.mark.asyncio -async def test_create_execution_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateExecutionRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_execution), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution(
- name='name_value',
- display_name='display_name_value',
- state=gca_execution.Execution.State.NEW,
- etag='etag_value',
- schema_title='schema_title_value',
- schema_version='schema_version_value',
- description='description_value',
- ))
- response = await client.create_execution(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.CreateExecutionRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, gca_execution.Execution)
- assert response.name == 'name_value'
- assert response.display_name == 'display_name_value'
- assert response.state == gca_execution.Execution.State.NEW
- assert response.etag == 'etag_value'
- assert response.schema_title == 'schema_title_value'
- assert response.schema_version == 'schema_version_value'
- assert response.description == 'description_value'
-
-
-@pytest.mark.asyncio
-async def test_create_execution_async_from_dict():
- await test_create_execution_async(request_type=dict)
-
-
-def test_create_execution_field_headers():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = metadata_service.CreateExecutionRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_execution),
- '__call__') as call:
- call.return_value = gca_execution.Execution()
- client.create_execution(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_create_execution_field_headers_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = metadata_service.CreateExecutionRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_execution),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution())
- await client.create_execution(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-def test_create_execution_flattened():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_execution),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = gca_execution.Execution()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.create_execution(
- parent='parent_value',
- execution=gca_execution.Execution(name='name_value'),
- execution_id='execution_id_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
- assert args[0].execution == gca_execution.Execution(name='name_value')
- assert args[0].execution_id == 'execution_id_value'
-
-
-def test_create_execution_flattened_error():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.create_execution(
- metadata_service.CreateExecutionRequest(),
- parent='parent_value',
- execution=gca_execution.Execution(name='name_value'),
- execution_id='execution_id_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_create_execution_flattened_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_execution),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.create_execution(
- parent='parent_value',
- execution=gca_execution.Execution(name='name_value'),
- execution_id='execution_id_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
- assert args[0].execution == gca_execution.Execution(name='name_value')
- assert args[0].execution_id == 'execution_id_value'
-
-
-@pytest.mark.asyncio
-async def test_create_execution_flattened_error_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.create_execution(
- metadata_service.CreateExecutionRequest(),
- parent='parent_value',
- execution=gca_execution.Execution(name='name_value'),
- execution_id='execution_id_value',
- )
-
-
-def test_get_execution(transport: str = 'grpc', request_type=metadata_service.GetExecutionRequest):
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_execution),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = execution.Execution(
- name='name_value',
- display_name='display_name_value',
- state=execution.Execution.State.NEW,
- etag='etag_value',
- schema_title='schema_title_value',
- schema_version='schema_version_value',
- description='description_value',
- )
- response = client.get_execution(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.GetExecutionRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, execution.Execution)
- assert response.name == 'name_value'
- assert response.display_name == 'display_name_value'
- assert response.state == execution.Execution.State.NEW
- assert response.etag == 'etag_value'
- assert response.schema_title == 'schema_title_value'
- assert response.schema_version == 'schema_version_value'
- assert response.description == 'description_value'
-
-
-def test_get_execution_from_dict():
- test_get_execution(request_type=dict)
-
-
-def test_get_execution_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_execution),
- '__call__') as call:
- client.get_execution()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.GetExecutionRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_execution_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetExecutionRequest):
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_execution),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(execution.Execution(
- name='name_value',
- display_name='display_name_value',
- state=execution.Execution.State.NEW,
- etag='etag_value',
- schema_title='schema_title_value',
- schema_version='schema_version_value',
- description='description_value',
- ))
- response = await client.get_execution(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.GetExecutionRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, execution.Execution) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == execution.Execution.State.NEW - assert response.etag == 'etag_value' - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_get_execution_async_from_dict(): - await test_get_execution_async(request_type=dict) - - -def test_get_execution_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.GetExecutionRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_execution), - '__call__') as call: - call.return_value = execution.Execution() - client.get_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_execution_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.GetExecutionRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_execution), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(execution.Execution()) - await client.get_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_execution_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_execution), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = execution.Execution() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_execution( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_get_execution_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError):
- client.get_execution(
- metadata_service.GetExecutionRequest(),
- name='name_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_get_execution_flattened_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_execution),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(execution.Execution())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.get_execution(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_get_execution_flattened_error_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.get_execution(
- metadata_service.GetExecutionRequest(),
- name='name_value',
- )
-
-
-def test_list_executions(transport: str = 'grpc', request_type=metadata_service.ListExecutionsRequest):
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_executions),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = metadata_service.ListExecutionsResponse(
- next_page_token='next_page_token_value',
- )
- response = client.list_executions(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.ListExecutionsRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ListExecutionsPager)
- assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_executions_from_dict():
- test_list_executions(request_type=dict)
-
-
-def test_list_executions_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_executions),
- '__call__') as call:
- client.list_executions()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.ListExecutionsRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_executions_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListExecutionsRequest):
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_executions),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListExecutionsResponse(
- next_page_token='next_page_token_value',
- ))
- response = await client.list_executions(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.ListExecutionsRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ListExecutionsAsyncPager)
- assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_executions_async_from_dict():
- await test_list_executions_async(request_type=dict)
-
-
-def test_list_executions_field_headers():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = metadata_service.ListExecutionsRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_executions),
- '__call__') as call:
- call.return_value = metadata_service.ListExecutionsResponse()
- client.list_executions(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_executions_field_headers_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = metadata_service.ListExecutionsRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_executions),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListExecutionsResponse())
- await client.list_executions(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-def test_list_executions_flattened():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_executions),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = metadata_service.ListExecutionsResponse()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.list_executions(
- parent='parent_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
-
-
-def test_list_executions_flattened_error():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.list_executions(
- metadata_service.ListExecutionsRequest(),
- parent='parent_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_list_executions_flattened_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_executions),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListExecutionsResponse())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.list_executions(
- parent='parent_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_executions_flattened_error_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.list_executions(
- metadata_service.ListExecutionsRequest(),
- parent='parent_value',
- )
-
-
-def test_list_executions_pager():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_executions),
- '__call__') as call:
- # Set the response to a series of pages.
- call.side_effect = (
- metadata_service.ListExecutionsResponse(
- executions=[
- execution.Execution(),
- execution.Execution(),
- execution.Execution(),
- ],
- next_page_token='abc',
- ),
- metadata_service.ListExecutionsResponse(
- executions=[],
- next_page_token='def',
- ),
- metadata_service.ListExecutionsResponse(
- executions=[
- execution.Execution(),
- ],
- next_page_token='ghi',
- ),
- metadata_service.ListExecutionsResponse(
- executions=[
- execution.Execution(),
- execution.Execution(),
- ],
- ),
- RuntimeError,
- )
-
- metadata = (
- gapic_v1.routing_header.to_grpc_metadata((
- ('parent', ''),
- )),
- )
- pager = client.list_executions(request={})
-
- assert pager._metadata == metadata
-
- results = [i for i in pager]
- assert len(results) == 6
- assert all(isinstance(i, execution.Execution)
- for i in results)
-
-
-def test_list_executions_pages():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_executions),
- '__call__') as call:
- # Set the response to a series of pages.
- call.side_effect = (
- metadata_service.ListExecutionsResponse(
- executions=[
- execution.Execution(),
- execution.Execution(),
- execution.Execution(),
- ],
- next_page_token='abc',
- ),
- metadata_service.ListExecutionsResponse(
- executions=[],
- next_page_token='def',
- ),
- metadata_service.ListExecutionsResponse(
- executions=[
- execution.Execution(),
- ],
- next_page_token='ghi',
- ),
- metadata_service.ListExecutionsResponse(
- executions=[
- execution.Execution(),
- execution.Execution(),
- ],
- ),
- RuntimeError,
- )
- pages = list(client.list_executions(request={}).pages)
- for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
- assert page_.raw_page.next_page_token == token
-
-
-@pytest.mark.asyncio
-async def test_list_executions_async_pager():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_executions),
- '__call__', new_callable=mock.AsyncMock) as call:
- # Set the response to a series of pages.
- call.side_effect = (
- metadata_service.ListExecutionsResponse(
- executions=[
- execution.Execution(),
- execution.Execution(),
- execution.Execution(),
- ],
- next_page_token='abc',
- ),
- metadata_service.ListExecutionsResponse(
- executions=[],
- next_page_token='def',
- ),
- metadata_service.ListExecutionsResponse(
- executions=[
- execution.Execution(),
- ],
- next_page_token='ghi',
- ),
- metadata_service.ListExecutionsResponse(
- executions=[
- execution.Execution(),
- execution.Execution(),
- ],
- ),
- RuntimeError,
- )
- async_pager = await client.list_executions(request={},)
- assert async_pager.next_page_token == 'abc'
- responses = []
- async for response in async_pager:
- responses.append(response)
-
- assert len(responses) == 6
- assert all(isinstance(i, execution.Execution)
- for i in responses)
-
-
-@pytest.mark.asyncio
-async def test_list_executions_async_pages():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_executions),
- '__call__', new_callable=mock.AsyncMock) as call:
- # Set the response to a series of pages.
- call.side_effect = (
- metadata_service.ListExecutionsResponse(
- executions=[
- execution.Execution(),
- execution.Execution(),
- execution.Execution(),
- ],
- next_page_token='abc',
- ),
- metadata_service.ListExecutionsResponse(
- executions=[],
- next_page_token='def',
- ),
- metadata_service.ListExecutionsResponse(
- executions=[
- execution.Execution(),
- ],
- next_page_token='ghi',
- ),
- metadata_service.ListExecutionsResponse(
- executions=[
- execution.Execution(),
- execution.Execution(),
- ],
- ),
- RuntimeError,
- )
- pages = []
- async for page_ in (await client.list_executions(request={})).pages:
- pages.append(page_)
- for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
- assert page_.raw_page.next_page_token == token
-
-
-def test_update_execution(transport: str = 'grpc', request_type=metadata_service.UpdateExecutionRequest):
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.update_execution),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = gca_execution.Execution(
- name='name_value',
- display_name='display_name_value',
- state=gca_execution.Execution.State.NEW,
- etag='etag_value',
- schema_title='schema_title_value',
- schema_version='schema_version_value',
- description='description_value',
- )
- response = client.update_execution(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.UpdateExecutionRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, gca_execution.Execution)
- assert response.name == 'name_value'
- assert response.display_name == 'display_name_value'
- assert response.state == gca_execution.Execution.State.NEW
- assert response.etag == 'etag_value'
- assert response.schema_title == 'schema_title_value'
- assert response.schema_version == 'schema_version_value'
- assert response.description == 'description_value'
-
-
-def test_update_execution_from_dict():
- test_update_execution(request_type=dict)
-
-
-def test_update_execution_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.update_execution),
- '__call__') as call:
- client.update_execution()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.UpdateExecutionRequest()
-
-
-@pytest.mark.asyncio
-async def test_update_execution_async(transport: str = 'grpc_asyncio', request_type=metadata_service.UpdateExecutionRequest):
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.update_execution),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution(
- name='name_value',
- display_name='display_name_value',
- state=gca_execution.Execution.State.NEW,
- etag='etag_value',
- schema_title='schema_title_value',
- schema_version='schema_version_value',
- description='description_value',
- ))
- response = await client.update_execution(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.UpdateExecutionRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, gca_execution.Execution)
- assert response.name == 'name_value'
- assert response.display_name == 'display_name_value'
- assert response.state == gca_execution.Execution.State.NEW
- assert response.etag == 'etag_value'
- assert response.schema_title == 'schema_title_value'
- assert response.schema_version == 'schema_version_value'
- assert response.description == 'description_value'
-
-
-@pytest.mark.asyncio
-async def test_update_execution_async_from_dict():
- await test_update_execution_async(request_type=dict)
-
-
-def test_update_execution_field_headers():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = metadata_service.UpdateExecutionRequest()
-
- request.execution.name = 'execution.name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.update_execution),
- '__call__') as call:
- call.return_value = gca_execution.Execution()
- client.update_execution(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'execution.name=execution.name/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_update_execution_field_headers_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = metadata_service.UpdateExecutionRequest()
-
- request.execution.name = 'execution.name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.update_execution),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution())
- await client.update_execution(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'execution.name=execution.name/value',
- ) in kw['metadata']
-
-
-def test_update_execution_flattened():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.update_execution),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = gca_execution.Execution()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.update_execution(
- execution=gca_execution.Execution(name='name_value'),
- update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].execution == gca_execution.Execution(name='name_value')
- assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
-
-
-def test_update_execution_flattened_error():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.update_execution(
- metadata_service.UpdateExecutionRequest(),
- execution=gca_execution.Execution(name='name_value'),
- update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
- )
-
-
-@pytest.mark.asyncio
-async def test_update_execution_flattened_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.update_execution),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.update_execution(
- execution=gca_execution.Execution(name='name_value'),
- update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].execution == gca_execution.Execution(name='name_value')
- assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
-
-
-@pytest.mark.asyncio
-async def test_update_execution_flattened_error_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.update_execution(
- metadata_service.UpdateExecutionRequest(),
- execution=gca_execution.Execution(name='name_value'),
- update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
- )
-
-
-def test_add_execution_events(transport: str = 'grpc', request_type=metadata_service.AddExecutionEventsRequest):
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.add_execution_events),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = metadata_service.AddExecutionEventsResponse(
- )
- response = client.add_execution_events(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.AddExecutionEventsRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, metadata_service.AddExecutionEventsResponse)
-
-
-def test_add_execution_events_from_dict():
- test_add_execution_events(request_type=dict)
-
-
-def test_add_execution_events_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.add_execution_events),
- '__call__') as call:
- client.add_execution_events()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.AddExecutionEventsRequest()
-
-
-@pytest.mark.asyncio
-async def test_add_execution_events_async(transport: str = 'grpc_asyncio', request_type=metadata_service.AddExecutionEventsRequest):
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.add_execution_events),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddExecutionEventsResponse(
- ))
- response = await client.add_execution_events(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.AddExecutionEventsRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, metadata_service.AddExecutionEventsResponse) - - -@pytest.mark.asyncio -async def test_add_execution_events_async_from_dict(): - await test_add_execution_events_async(request_type=dict) - - -def test_add_execution_events_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.AddExecutionEventsRequest() - - request.execution = 'execution/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_execution_events), - '__call__') as call: - call.return_value = metadata_service.AddExecutionEventsResponse() - client.add_execution_events(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'execution=execution/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_add_execution_events_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.AddExecutionEventsRequest() - - request.execution = 'execution/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_execution_events), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddExecutionEventsResponse()) - await client.add_execution_events(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'execution=execution/value', - ) in kw['metadata'] - - -def test_add_execution_events_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_execution_events), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.AddExecutionEventsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.add_execution_events( - execution='execution_value', - events=[event.Event(artifact='artifact_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].execution == 'execution_value' - assert args[0].events == [event.Event(artifact='artifact_value')] - - -def test_add_execution_events_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError):
- client.add_execution_events(
- metadata_service.AddExecutionEventsRequest(),
- execution='execution_value',
- events=[event.Event(artifact='artifact_value')],
- )
-
-
-@pytest.mark.asyncio
-async def test_add_execution_events_flattened_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.add_execution_events),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddExecutionEventsResponse())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.add_execution_events(
- execution='execution_value',
- events=[event.Event(artifact='artifact_value')],
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].execution == 'execution_value'
- assert args[0].events == [event.Event(artifact='artifact_value')]
-
-
-@pytest.mark.asyncio
-async def test_add_execution_events_flattened_error_async():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.add_execution_events(
- metadata_service.AddExecutionEventsRequest(),
- execution='execution_value',
- events=[event.Event(artifact='artifact_value')],
- )
-
-
-def test_query_execution_inputs_and_outputs(transport: str = 'grpc', request_type=metadata_service.QueryExecutionInputsAndOutputsRequest):
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.query_execution_inputs_and_outputs),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = lineage_subgraph.LineageSubgraph(
- )
- response = client.query_execution_inputs_and_outputs(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, lineage_subgraph.LineageSubgraph)
-
-
-def test_query_execution_inputs_and_outputs_from_dict():
- test_query_execution_inputs_and_outputs(request_type=dict)
-
-
-def test_query_execution_inputs_and_outputs_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.query_execution_inputs_and_outputs),
-            '__call__') as call:
-        client.query_execution_inputs_and_outputs()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest()
-
-
-@pytest.mark.asyncio
-async def test_query_execution_inputs_and_outputs_async(transport: str = 'grpc_asyncio', request_type=metadata_service.QueryExecutionInputsAndOutputsRequest):
-    client = MetadataServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.query_execution_inputs_and_outputs),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph())
-        response = await client.query_execution_inputs_and_outputs(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, lineage_subgraph.LineageSubgraph)
-
-
-@pytest.mark.asyncio
-async def test_query_execution_inputs_and_outputs_async_from_dict():
-    await test_query_execution_inputs_and_outputs_async(request_type=dict)
-
-
-def test_query_execution_inputs_and_outputs_field_headers():
-    client = MetadataServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = metadata_service.QueryExecutionInputsAndOutputsRequest()
-
-    request.execution = 'execution/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.query_execution_inputs_and_outputs),
-            '__call__') as call:
-        call.return_value = lineage_subgraph.LineageSubgraph()
-        client.query_execution_inputs_and_outputs(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'execution=execution/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_query_execution_inputs_and_outputs_field_headers_async():
-    client = MetadataServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = metadata_service.QueryExecutionInputsAndOutputsRequest()
-
-    request.execution = 'execution/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.query_execution_inputs_and_outputs),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph())
-        await client.query_execution_inputs_and_outputs(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'execution=execution/value',
-    ) in kw['metadata']
-
-
-def test_query_execution_inputs_and_outputs_flattened():
-    client = MetadataServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.query_execution_inputs_and_outputs),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = lineage_subgraph.LineageSubgraph()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.query_execution_inputs_and_outputs(
-            execution='execution_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].execution == 'execution_value'
-
-
-def test_query_execution_inputs_and_outputs_flattened_error():
-    client = MetadataServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.query_execution_inputs_and_outputs(
-            metadata_service.QueryExecutionInputsAndOutputsRequest(),
-            execution='execution_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_query_execution_inputs_and_outputs_flattened_async():
-    client = MetadataServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.query_execution_inputs_and_outputs),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.query_execution_inputs_and_outputs(
-            execution='execution_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].execution == 'execution_value'
-
-
-@pytest.mark.asyncio
-async def test_query_execution_inputs_and_outputs_flattened_error_async():
-    client = MetadataServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.query_execution_inputs_and_outputs(
-            metadata_service.QueryExecutionInputsAndOutputsRequest(),
-            execution='execution_value',
-        )
-
-
-def test_create_metadata_schema(transport: str = 'grpc', request_type=metadata_service.CreateMetadataSchemaRequest):
-    client = MetadataServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_metadata_schema),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = gca_metadata_schema.MetadataSchema(
-            name='name_value',
-            schema_version='schema_version_value',
-            schema='schema_value',
-            schema_type=gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE,
-            description='description_value',
-        )
-        response = client.create_metadata_schema(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == metadata_service.CreateMetadataSchemaRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, gca_metadata_schema.MetadataSchema)
-    assert response.name == 'name_value'
-    assert response.schema_version == 'schema_version_value'
-    assert response.schema == 'schema_value'
-    assert response.schema_type == gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE
-    assert response.description == 'description_value'
-
-
-def test_create_metadata_schema_from_dict():
-    test_create_metadata_schema(request_type=dict)
-
-
-def test_create_metadata_schema_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = MetadataServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_metadata_schema),
-            '__call__') as call:
-        client.create_metadata_schema()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == metadata_service.CreateMetadataSchemaRequest()
-
-
-@pytest.mark.asyncio
-async def test_create_metadata_schema_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateMetadataSchemaRequest):
-    client = MetadataServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_metadata_schema),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_metadata_schema.MetadataSchema(
-            name='name_value',
-            schema_version='schema_version_value',
-            schema='schema_value',
-            schema_type=gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE,
-            description='description_value',
-        ))
-        response = await client.create_metadata_schema(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == metadata_service.CreateMetadataSchemaRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, gca_metadata_schema.MetadataSchema)
-    assert response.name == 'name_value'
-    assert response.schema_version == 'schema_version_value'
-    assert response.schema == 'schema_value'
-    assert response.schema_type == gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE
-    assert response.description == 'description_value'
-
-
-@pytest.mark.asyncio
-async def test_create_metadata_schema_async_from_dict():
-    await test_create_metadata_schema_async(request_type=dict)
-
-
-def test_create_metadata_schema_field_headers():
-    client = MetadataServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = metadata_service.CreateMetadataSchemaRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_metadata_schema),
-            '__call__') as call:
-        call.return_value = gca_metadata_schema.MetadataSchema()
-        client.create_metadata_schema(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_create_metadata_schema_field_headers_async():
-    client = MetadataServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = metadata_service.CreateMetadataSchemaRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_metadata_schema),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_metadata_schema.MetadataSchema())
-        await client.create_metadata_schema(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_create_metadata_schema_flattened():
-    client = MetadataServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_metadata_schema),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = gca_metadata_schema.MetadataSchema()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.create_metadata_schema(
-            parent='parent_value',
-            metadata_schema=gca_metadata_schema.MetadataSchema(name='name_value'),
-            metadata_schema_id='metadata_schema_id_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-        assert args[0].metadata_schema == gca_metadata_schema.MetadataSchema(name='name_value')
-        assert args[0].metadata_schema_id == 'metadata_schema_id_value'
-
-
-def test_create_metadata_schema_flattened_error():
-    client = MetadataServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.create_metadata_schema(
-            metadata_service.CreateMetadataSchemaRequest(),
-            parent='parent_value',
-            metadata_schema=gca_metadata_schema.MetadataSchema(name='name_value'),
-            metadata_schema_id='metadata_schema_id_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_create_metadata_schema_flattened_async():
-    client = MetadataServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_metadata_schema),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_metadata_schema.MetadataSchema())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.create_metadata_schema(
-            parent='parent_value',
-            metadata_schema=gca_metadata_schema.MetadataSchema(name='name_value'),
-            metadata_schema_id='metadata_schema_id_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-        assert args[0].metadata_schema == gca_metadata_schema.MetadataSchema(name='name_value')
-        assert args[0].metadata_schema_id == 'metadata_schema_id_value'
-
-
-@pytest.mark.asyncio
-async def test_create_metadata_schema_flattened_error_async():
-    client = MetadataServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.create_metadata_schema(
-            metadata_service.CreateMetadataSchemaRequest(),
-            parent='parent_value',
-            metadata_schema=gca_metadata_schema.MetadataSchema(name='name_value'),
-            metadata_schema_id='metadata_schema_id_value',
-        )
-
-
-def test_get_metadata_schema(transport: str = 'grpc', request_type=metadata_service.GetMetadataSchemaRequest):
-    client = MetadataServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_metadata_schema),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = metadata_schema.MetadataSchema(
-            name='name_value',
-            schema_version='schema_version_value',
-            schema='schema_value',
-            schema_type=metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE,
-            description='description_value',
-        )
-        response = client.get_metadata_schema(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == metadata_service.GetMetadataSchemaRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, metadata_schema.MetadataSchema)
-    assert response.name == 'name_value'
-    assert response.schema_version == 'schema_version_value'
-    assert response.schema == 'schema_value'
-    assert response.schema_type == metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE
-    assert response.description == 'description_value'
-
-
-def test_get_metadata_schema_from_dict():
-    test_get_metadata_schema(request_type=dict)
-
-
-def test_get_metadata_schema_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = MetadataServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_metadata_schema),
-            '__call__') as call:
-        client.get_metadata_schema()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == metadata_service.GetMetadataSchemaRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_metadata_schema_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetMetadataSchemaRequest):
-    client = MetadataServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_metadata_schema),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_schema.MetadataSchema(
-            name='name_value',
-            schema_version='schema_version_value',
-            schema='schema_value',
-            schema_type=metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE,
-            description='description_value',
-        ))
-        response = await client.get_metadata_schema(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == metadata_service.GetMetadataSchemaRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, metadata_schema.MetadataSchema)
-    assert response.name == 'name_value'
-    assert response.schema_version == 'schema_version_value'
-    assert response.schema == 'schema_value'
-    assert response.schema_type == metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE
-    assert response.description == 'description_value'
-
-
-@pytest.mark.asyncio
-async def test_get_metadata_schema_async_from_dict():
-    await test_get_metadata_schema_async(request_type=dict)
-
-
-def test_get_metadata_schema_field_headers():
-    client = MetadataServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = metadata_service.GetMetadataSchemaRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_metadata_schema),
-            '__call__') as call:
-        call.return_value = metadata_schema.MetadataSchema()
-        client.get_metadata_schema(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_get_metadata_schema_field_headers_async():
-    client = MetadataServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = metadata_service.GetMetadataSchemaRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_metadata_schema),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_schema.MetadataSchema())
-        await client.get_metadata_schema(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_get_metadata_schema_flattened():
-    client = MetadataServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_metadata_schema),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = metadata_schema.MetadataSchema()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.get_metadata_schema(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_get_metadata_schema_flattened_error():
-    client = MetadataServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.get_metadata_schema(
-            metadata_service.GetMetadataSchemaRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_get_metadata_schema_flattened_async():
-    client = MetadataServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_metadata_schema),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_schema.MetadataSchema())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.get_metadata_schema(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_get_metadata_schema_flattened_error_async():
-    client = MetadataServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.get_metadata_schema(
-            metadata_service.GetMetadataSchemaRequest(),
-            name='name_value',
-        )
-
-
-def test_list_metadata_schemas(transport: str = 'grpc', request_type=metadata_service.ListMetadataSchemasRequest):
-    client = MetadataServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_metadata_schemas),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = metadata_service.ListMetadataSchemasResponse(
-            next_page_token='next_page_token_value',
-        )
-        response = client.list_metadata_schemas(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == metadata_service.ListMetadataSchemasRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListMetadataSchemasPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_metadata_schemas_from_dict():
-    test_list_metadata_schemas(request_type=dict)
-
-
-def test_list_metadata_schemas_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = MetadataServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_metadata_schemas),
-            '__call__') as call:
-        client.list_metadata_schemas()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == metadata_service.ListMetadataSchemasRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_metadata_schemas_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListMetadataSchemasRequest):
-    client = MetadataServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_metadata_schemas),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataSchemasResponse(
-            next_page_token='next_page_token_value',
-        ))
-        response = await client.list_metadata_schemas(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == metadata_service.ListMetadataSchemasRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListMetadataSchemasAsyncPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_metadata_schemas_async_from_dict():
-    await test_list_metadata_schemas_async(request_type=dict)
-
-
-def test_list_metadata_schemas_field_headers():
-    client = MetadataServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = metadata_service.ListMetadataSchemasRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_metadata_schemas),
-            '__call__') as call:
-        call.return_value = metadata_service.ListMetadataSchemasResponse()
-        client.list_metadata_schemas(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_metadata_schemas_field_headers_async():
-    client = MetadataServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = metadata_service.ListMetadataSchemasRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_metadata_schemas),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataSchemasResponse())
-        await client.list_metadata_schemas(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_list_metadata_schemas_flattened():
-    client = MetadataServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_metadata_schemas),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = metadata_service.ListMetadataSchemasResponse()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.list_metadata_schemas(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-def test_list_metadata_schemas_flattened_error():
-    client = MetadataServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.list_metadata_schemas(
-            metadata_service.ListMetadataSchemasRequest(),
-            parent='parent_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_list_metadata_schemas_flattened_async():
-    client = MetadataServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_metadata_schemas),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataSchemasResponse())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.list_metadata_schemas(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_metadata_schemas_flattened_error_async():
-    client = MetadataServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.list_metadata_schemas(
-            metadata_service.ListMetadataSchemasRequest(),
-            parent='parent_value',
-        )
-
-
-def test_list_metadata_schemas_pager():
-    client = MetadataServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_metadata_schemas),
-            '__call__') as call:
-        # Set the response to a series of pages.
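-        # A note on the mock below: mock.side_effect with an iterable yields
-        # one element per invocation, so each response simulates a single
-        # page; the trailing RuntimeError is a sentinel that makes the test
-        # fail loudly if the pager ever requests more pages than were mocked.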
-        call.side_effect = (
-            metadata_service.ListMetadataSchemasResponse(
-                metadata_schemas=[
-                    metadata_schema.MetadataSchema(),
-                    metadata_schema.MetadataSchema(),
-                    metadata_schema.MetadataSchema(),
-                ],
-                next_page_token='abc',
-            ),
-            metadata_service.ListMetadataSchemasResponse(
-                metadata_schemas=[],
-                next_page_token='def',
-            ),
-            metadata_service.ListMetadataSchemasResponse(
-                metadata_schemas=[
-                    metadata_schema.MetadataSchema(),
-                ],
-                next_page_token='ghi',
-            ),
-            metadata_service.ListMetadataSchemasResponse(
-                metadata_schemas=[
-                    metadata_schema.MetadataSchema(),
-                    metadata_schema.MetadataSchema(),
-                ],
-            ),
-            RuntimeError,
-        )
-
-        metadata = (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ('parent', ''),
-            )),
-        )
-        pager = client.list_metadata_schemas(request={})
-
-        assert pager._metadata == metadata
-
-        results = [i for i in pager]
-        assert len(results) == 6
-        assert all(isinstance(i, metadata_schema.MetadataSchema)
-                   for i in results)
-
-
-def test_list_metadata_schemas_pages():
-    client = MetadataServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_metadata_schemas),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            metadata_service.ListMetadataSchemasResponse(
-                metadata_schemas=[
-                    metadata_schema.MetadataSchema(),
-                    metadata_schema.MetadataSchema(),
-                    metadata_schema.MetadataSchema(),
-                ],
-                next_page_token='abc',
-            ),
-            metadata_service.ListMetadataSchemasResponse(
-                metadata_schemas=[],
-                next_page_token='def',
-            ),
-            metadata_service.ListMetadataSchemasResponse(
-                metadata_schemas=[
-                    metadata_schema.MetadataSchema(),
-                ],
-                next_page_token='ghi',
-            ),
-            metadata_service.ListMetadataSchemasResponse(
-                metadata_schemas=[
-                    metadata_schema.MetadataSchema(),
-                    metadata_schema.MetadataSchema(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = list(client.list_metadata_schemas(request={}).pages)
-        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-
-@pytest.mark.asyncio
-async def test_list_metadata_schemas_async_pager():
-    client = MetadataServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_metadata_schemas),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            metadata_service.ListMetadataSchemasResponse(
-                metadata_schemas=[
-                    metadata_schema.MetadataSchema(),
-                    metadata_schema.MetadataSchema(),
-                    metadata_schema.MetadataSchema(),
-                ],
-                next_page_token='abc',
-            ),
-            metadata_service.ListMetadataSchemasResponse(
-                metadata_schemas=[],
-                next_page_token='def',
-            ),
-            metadata_service.ListMetadataSchemasResponse(
-                metadata_schemas=[
-                    metadata_schema.MetadataSchema(),
-                ],
-                next_page_token='ghi',
-            ),
-            metadata_service.ListMetadataSchemasResponse(
-                metadata_schemas=[
-                    metadata_schema.MetadataSchema(),
-                    metadata_schema.MetadataSchema(),
-                ],
-            ),
-            RuntimeError,
-        )
-        async_pager = await client.list_metadata_schemas(request={})
-        assert async_pager.next_page_token == 'abc'
-        responses = []
-        async for response in async_pager:
-            responses.append(response)
-
-        assert len(responses) == 6
-        assert all(isinstance(i, metadata_schema.MetadataSchema)
-                   for i in responses)
-
-
-@pytest.mark.asyncio
-async def test_list_metadata_schemas_async_pages():
-    client = MetadataServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_metadata_schemas),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            metadata_service.ListMetadataSchemasResponse(
-                metadata_schemas=[
-                    metadata_schema.MetadataSchema(),
-                    metadata_schema.MetadataSchema(),
-                    metadata_schema.MetadataSchema(),
-                ],
-                next_page_token='abc',
-            ),
-            metadata_service.ListMetadataSchemasResponse(
-                metadata_schemas=[],
-                next_page_token='def',
-            ),
-            metadata_service.ListMetadataSchemasResponse(
-                metadata_schemas=[
-                    metadata_schema.MetadataSchema(),
-                ],
-                next_page_token='ghi',
-            ),
-            metadata_service.ListMetadataSchemasResponse(
-                metadata_schemas=[
-                    metadata_schema.MetadataSchema(),
-                    metadata_schema.MetadataSchema(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = []
-        async for page_ in (await client.list_metadata_schemas(request={})).pages:
-            pages.append(page_)
-        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-
-def test_query_artifact_lineage_subgraph(transport: str = 'grpc', request_type=metadata_service.QueryArtifactLineageSubgraphRequest):
-    client = MetadataServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.query_artifact_lineage_subgraph),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = lineage_subgraph.LineageSubgraph()
-        response = client.query_artifact_lineage_subgraph(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, lineage_subgraph.LineageSubgraph)
-
-
-def test_query_artifact_lineage_subgraph_from_dict():
-    test_query_artifact_lineage_subgraph(request_type=dict)
-
-
-def test_query_artifact_lineage_subgraph_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = MetadataServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.query_artifact_lineage_subgraph),
-            '__call__') as call:
-        client.query_artifact_lineage_subgraph()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest()
-
-
-@pytest.mark.asyncio
-async def test_query_artifact_lineage_subgraph_async(transport: str = 'grpc_asyncio', request_type=metadata_service.QueryArtifactLineageSubgraphRequest):
-    client = MetadataServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.query_artifact_lineage_subgraph),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph())
-        response = await client.query_artifact_lineage_subgraph(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, lineage_subgraph.LineageSubgraph)
-
-
-@pytest.mark.asyncio
-async def test_query_artifact_lineage_subgraph_async_from_dict():
-    await test_query_artifact_lineage_subgraph_async(request_type=dict)
-
-
-def test_query_artifact_lineage_subgraph_field_headers():
-    client = MetadataServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = metadata_service.QueryArtifactLineageSubgraphRequest()
-
-    request.artifact = 'artifact/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.query_artifact_lineage_subgraph),
-            '__call__') as call:
-        call.return_value = lineage_subgraph.LineageSubgraph()
-        client.query_artifact_lineage_subgraph(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'artifact=artifact/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_query_artifact_lineage_subgraph_field_headers_async():
-    client = MetadataServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = metadata_service.QueryArtifactLineageSubgraphRequest()
-
-    request.artifact = 'artifact/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.query_artifact_lineage_subgraph),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph())
-        await client.query_artifact_lineage_subgraph(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'artifact=artifact/value',
-    ) in kw['metadata']
-
-
-def test_query_artifact_lineage_subgraph_flattened():
-    client = MetadataServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.query_artifact_lineage_subgraph),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = lineage_subgraph.LineageSubgraph()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.query_artifact_lineage_subgraph(
-            artifact='artifact_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].artifact == 'artifact_value'
-
-
-def test_query_artifact_lineage_subgraph_flattened_error():
-    client = MetadataServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.query_artifact_lineage_subgraph(
-            metadata_service.QueryArtifactLineageSubgraphRequest(),
-            artifact='artifact_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_query_artifact_lineage_subgraph_flattened_async():
-    client = MetadataServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.query_artifact_lineage_subgraph),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.query_artifact_lineage_subgraph(
-            artifact='artifact_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].artifact == 'artifact_value'
-
-
-@pytest.mark.asyncio
-async def test_query_artifact_lineage_subgraph_flattened_error_async():
-    client = MetadataServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.query_artifact_lineage_subgraph(
-            metadata_service.QueryArtifactLineageSubgraphRequest(),
-            artifact='artifact_value',
-        )
-
-
-def test_credentials_transport_error():
-    # It is an error to provide credentials and a transport instance.
-    transport = transports.MetadataServiceGrpcTransport(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-    with pytest.raises(ValueError):
-        client = MetadataServiceClient(
-            credentials=ga_credentials.AnonymousCredentials(),
-            transport=transport,
-        )
-
-    # It is an error to provide a credentials file and a transport instance.
-    transport = transports.MetadataServiceGrpcTransport(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-    with pytest.raises(ValueError):
-        client = MetadataServiceClient(
-            client_options={"credentials_file": "credentials.json"},
-            transport=transport,
-        )
-
-    # It is an error to provide scopes and a transport instance.
-    transport = transports.MetadataServiceGrpcTransport(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-    with pytest.raises(ValueError):
-        client = MetadataServiceClient(
-            client_options={"scopes": ["1", "2"]},
-            transport=transport,
-        )
-
-
-def test_transport_instance():
-    # A client may be instantiated with a custom transport instance.
-    transport = transports.MetadataServiceGrpcTransport(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-    client = MetadataServiceClient(transport=transport)
-    assert client.transport is transport
-
-
-def test_transport_get_channel():
-    # A client may be instantiated with a custom transport instance.
-    transport = transports.MetadataServiceGrpcTransport(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-    channel = transport.grpc_channel
-    assert channel
-
-    transport = transports.MetadataServiceGrpcAsyncIOTransport(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-    channel = transport.grpc_channel
-    assert channel
-
-
-@pytest.mark.parametrize("transport_class", [
-    transports.MetadataServiceGrpcTransport,
-    transports.MetadataServiceGrpcAsyncIOTransport,
-])
-def test_transport_adc(transport_class):
-    # Test default credentials are used if not provided.
-    with mock.patch.object(google.auth, 'default') as adc:
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport_class()
-        adc.assert_called_once()
-
-
-def test_transport_grpc_default():
-    # A client should use the gRPC transport by default.
-    client = MetadataServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-    assert isinstance(
-        client.transport,
-        transports.MetadataServiceGrpcTransport,
-    )
-
-
-def test_metadata_service_base_transport_error():
-    # Passing both a credentials object and credentials_file should raise an error
-    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
-        transport = transports.MetadataServiceTransport(
-            credentials=ga_credentials.AnonymousCredentials(),
-            credentials_file="credentials.json"
-        )
-
-
-def test_metadata_service_base_transport():
-    # Instantiate the base transport.
-    with mock.patch('google.cloud.aiplatform_v1beta1.services.metadata_service.transports.MetadataServiceTransport.__init__') as Transport:
-        Transport.return_value = None
-        transport = transports.MetadataServiceTransport(
-            credentials=ga_credentials.AnonymousCredentials(),
-        )
-
-    # Every method on the transport should just blindly
-    # raise NotImplementedError.
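-    # The base transport is abstract: each RPC named in the tuple below is
-    # expected to be overridden by a concrete transport (e.g. gRPC), so
-    # invoking it on the base class should raise NotImplementedError.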
-    methods = (
-        'create_metadata_store',
-        'get_metadata_store',
-        'list_metadata_stores',
-        'delete_metadata_store',
-        'create_artifact',
-        'get_artifact',
-        'list_artifacts',
-        'update_artifact',
-        'create_context',
-        'get_context',
-        'list_contexts',
-        'update_context',
-        'delete_context',
-        'add_context_artifacts_and_executions',
-        'add_context_children',
-        'query_context_lineage_subgraph',
-        'create_execution',
-        'get_execution',
-        'list_executions',
-        'update_execution',
-        'add_execution_events',
-        'query_execution_inputs_and_outputs',
-        'create_metadata_schema',
-        'get_metadata_schema',
-        'list_metadata_schemas',
-        'query_artifact_lineage_subgraph',
-    )
-    for method in methods:
-        with pytest.raises(NotImplementedError):
-            getattr(transport, method)(request=object())
-
-    # Additionally, the LRO client (a property) should
-    # also raise NotImplementedError
-    with pytest.raises(NotImplementedError):
-        transport.operations_client
-
-
-@requires_google_auth_gte_1_25_0
-def test_metadata_service_base_transport_with_credentials_file():
-    # Instantiate the base transport with a credentials file
-    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.metadata_service.transports.MetadataServiceTransport._prep_wrapped_messages') as Transport:
-        Transport.return_value = None
-        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport = transports.MetadataServiceTransport(
-            credentials_file="credentials.json",
-            quota_project_id="octopus",
-        )
-        load_creds.assert_called_once_with("credentials.json",
-            scopes=None,
-            default_scopes=(
-                'https://www.googleapis.com/auth/cloud-platform',
-            ),
-            quota_project_id="octopus",
-        )
-
-
-@requires_google_auth_lt_1_25_0
-def test_metadata_service_base_transport_with_credentials_file_old_google_auth():
-    # Instantiate the base transport with a credentials file
-    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.metadata_service.transports.MetadataServiceTransport._prep_wrapped_messages') as Transport:
-        Transport.return_value = None
-        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport = transports.MetadataServiceTransport(
-            credentials_file="credentials.json",
-            quota_project_id="octopus",
-        )
-        load_creds.assert_called_once_with("credentials.json",
-            scopes=(
-                'https://www.googleapis.com/auth/cloud-platform',
-            ),
-            quota_project_id="octopus",
-        )
-
-
-def test_metadata_service_base_transport_with_adc():
-    # Test the default credentials are used if credentials and credentials_file are None.
-    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.metadata_service.transports.MetadataServiceTransport._prep_wrapped_messages') as Transport:
-        Transport.return_value = None
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport = transports.MetadataServiceTransport()
-        adc.assert_called_once()
-
-
-@requires_google_auth_gte_1_25_0
-def test_metadata_service_auth_adc():
-    # If no credentials are provided, we should use ADC credentials.
-    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        MetadataServiceClient()
-        adc.assert_called_once_with(
-            scopes=None,
-            default_scopes=(
-                'https://www.googleapis.com/auth/cloud-platform',
-            ),
-            quota_project_id=None,
-        )
-
-
-@requires_google_auth_lt_1_25_0
-def test_metadata_service_auth_adc_old_google_auth():
-    # If no credentials are provided, we should use ADC credentials.
-    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        MetadataServiceClient()
-        adc.assert_called_once_with(
-            scopes=('https://www.googleapis.com/auth/cloud-platform',),
-            quota_project_id=None,
-        )
-
-
-@pytest.mark.parametrize(
-    "transport_class",
-    [
-        transports.MetadataServiceGrpcTransport,
-        transports.MetadataServiceGrpcAsyncIOTransport,
-    ],
-)
-@requires_google_auth_gte_1_25_0
-def test_metadata_service_transport_auth_adc(transport_class):
-    # If credentials and host are not provided, the transport class should use
-    # ADC credentials.
-    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport_class(quota_project_id="octopus", scopes=["1", "2"])
-        adc.assert_called_once_with(
-            scopes=["1", "2"],
-            default_scopes=('https://www.googleapis.com/auth/cloud-platform',),
-            quota_project_id="octopus",
-        )
-
-
-@pytest.mark.parametrize(
-    "transport_class",
-    [
-        transports.MetadataServiceGrpcTransport,
-        transports.MetadataServiceGrpcAsyncIOTransport,
-    ],
-)
-@requires_google_auth_lt_1_25_0
-def test_metadata_service_transport_auth_adc_old_google_auth(transport_class):
-    # If credentials and host are not provided, the transport class should use
-    # ADC credentials.
-    with mock.patch.object(google.auth, "default", autospec=True) as adc:
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport_class(quota_project_id="octopus")
-        adc.assert_called_once_with(
-            scopes=(
-                'https://www.googleapis.com/auth/cloud-platform',
-            ),
-            quota_project_id="octopus",
-        )
-
-
-@pytest.mark.parametrize(
-    "transport_class,grpc_helpers",
-    [
-        (transports.MetadataServiceGrpcTransport, grpc_helpers),
-        (transports.MetadataServiceGrpcAsyncIOTransport, grpc_helpers_async)
-    ],
-)
-def test_metadata_service_transport_create_channel(transport_class, grpc_helpers):
-    # If credentials and host are not provided, the transport class should use
-    # ADC credentials.
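-    # google.auth.default() performs the Application Default Credentials
-    # lookup; patching it here lets the test verify that scopes and
-    # quota_project_id are forwarded to channel creation without ever
-    # touching real credentials.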
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.MetadataServiceGrpcTransport, transports.MetadataServiceGrpcAsyncIOTransport]) -def test_metadata_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_metadata_service_host_no_port(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_metadata_service_host_with_port(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_metadata_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.MetadataServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_metadata_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.MetadataServiceGrpcAsyncIOTransport(
- host="squid.clam.whelk",
- channel=channel,
- )
- assert transport.grpc_channel == channel
- assert transport._host == "squid.clam.whelk:443"
- assert transport._ssl_channel_credentials is None
-
-
-# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
-# removed from grpc/grpc_asyncio transport constructor.
-@pytest.mark.parametrize("transport_class", [transports.MetadataServiceGrpcTransport, transports.MetadataServiceGrpcAsyncIOTransport])
-def test_metadata_service_transport_channel_mtls_with_client_cert_source(
- transport_class
-):
- with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
- with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
- mock_ssl_cred = mock.Mock()
- grpc_ssl_channel_cred.return_value = mock_ssl_cred
-
- mock_grpc_channel = mock.Mock()
- grpc_create_channel.return_value = mock_grpc_channel
-
- cred = ga_credentials.AnonymousCredentials()
- with pytest.warns(DeprecationWarning):
- with mock.patch.object(google.auth, 'default') as adc:
- adc.return_value = (cred, None)
- transport = transport_class(
- host="squid.clam.whelk",
- api_mtls_endpoint="mtls.squid.clam.whelk",
- client_cert_source=client_cert_source_callback,
- )
- adc.assert_called_once()
-
- grpc_ssl_channel_cred.assert_called_once_with(
- certificate_chain=b"cert bytes", private_key=b"key bytes"
- )
- grpc_create_channel.assert_called_once_with(
- "mtls.squid.clam.whelk:443",
- credentials=cred,
- credentials_file=None,
- scopes=None,
- ssl_credentials=mock_ssl_cred,
- quota_project_id=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
- assert transport.grpc_channel == mock_grpc_channel
- assert transport._ssl_channel_credentials == mock_ssl_cred
-
-
-# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
-# removed from grpc/grpc_asyncio transport constructor.
-@pytest.mark.parametrize("transport_class", [transports.MetadataServiceGrpcTransport, transports.MetadataServiceGrpcAsyncIOTransport])
-def test_metadata_service_transport_channel_mtls_with_adc(
- transport_class
-):
- mock_ssl_cred = mock.Mock()
- with mock.patch.multiple(
- "google.auth.transport.grpc.SslCredentials",
- __init__=mock.Mock(return_value=None),
- ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
- ):
- with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
- mock_grpc_channel = mock.Mock()
- grpc_create_channel.return_value = mock_grpc_channel
- mock_cred = mock.Mock()
-
- with pytest.warns(DeprecationWarning):
- transport = transport_class(
- host="squid.clam.whelk",
- credentials=mock_cred,
- api_mtls_endpoint="mtls.squid.clam.whelk",
- client_cert_source=None,
- )
-
- grpc_create_channel.assert_called_once_with(
- "mtls.squid.clam.whelk:443",
- credentials=mock_cred,
- credentials_file=None,
- scopes=None,
- ssl_credentials=mock_ssl_cred,
- quota_project_id=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
- assert transport.grpc_channel == mock_grpc_channel
-
-
-def test_metadata_service_grpc_lro_client():
- client = MetadataServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
- transport = client.transport
-
- # Ensure that we have an api-core operations client.
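- # The property is created on first access and cached, which the identity
- # assertion below verifies.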
- assert isinstance(
- transport.operations_client,
- operations_v1.OperationsClient,
- )
-
- # Ensure that subsequent calls to the property send the exact same object.
- assert transport.operations_client is transport.operations_client
-
-
-def test_metadata_service_grpc_lro_async_client():
- client = MetadataServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc_asyncio',
- )
- transport = client.transport
-
- # Ensure that we have an api-core operations client.
- assert isinstance(
- transport.operations_client,
- operations_v1.OperationsAsyncClient,
- )
-
- # Ensure that subsequent calls to the property send the exact same object.
- assert transport.operations_client is transport.operations_client
-
-
-def test_artifact_path():
- project = "squid"
- location = "clam"
- metadata_store = "whelk"
- artifact = "octopus"
- expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(project=project, location=location, metadata_store=metadata_store, artifact=artifact, )
- actual = MetadataServiceClient.artifact_path(project, location, metadata_store, artifact)
- assert expected == actual
-
-
-def test_parse_artifact_path():
- expected = {
- "project": "oyster",
- "location": "nudibranch",
- "metadata_store": "cuttlefish",
- "artifact": "mussel",
- }
- path = MetadataServiceClient.artifact_path(**expected)
-
- # Check that the path construction is reversible.
- actual = MetadataServiceClient.parse_artifact_path(path)
- assert expected == actual
-
-def test_context_path():
- project = "winkle"
- location = "nautilus"
- metadata_store = "scallop"
- context = "abalone"
- expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(project=project, location=location, metadata_store=metadata_store, context=context, )
- actual = MetadataServiceClient.context_path(project, location, metadata_store, context)
- assert expected == actual
-
-
-def test_parse_context_path():
- expected = {
- "project": "squid",
- "location": "clam",
- "metadata_store": "whelk",
- "context": "octopus",
- }
- path = MetadataServiceClient.context_path(**expected)
-
- # Check that the path construction is reversible.
- actual = MetadataServiceClient.parse_context_path(path)
- assert expected == actual
-
-def test_execution_path():
- project = "oyster"
- location = "nudibranch"
- metadata_store = "cuttlefish"
- execution = "mussel"
- expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(project=project, location=location, metadata_store=metadata_store, execution=execution, )
- actual = MetadataServiceClient.execution_path(project, location, metadata_store, execution)
- assert expected == actual
-
-
-def test_parse_execution_path():
- expected = {
- "project": "winkle",
- "location": "nautilus",
- "metadata_store": "scallop",
- "execution": "abalone",
- }
- path = MetadataServiceClient.execution_path(**expected)
-
- # Check that the path construction is reversible.
- actual = MetadataServiceClient.parse_execution_path(path) - assert expected == actual - -def test_metadata_schema_path(): - project = "squid" - location = "clam" - metadata_store = "whelk" - metadata_schema = "octopus" - expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}".format(project=project, location=location, metadata_store=metadata_store, metadata_schema=metadata_schema, ) - actual = MetadataServiceClient.metadata_schema_path(project, location, metadata_store, metadata_schema) - assert expected == actual - - -def test_parse_metadata_schema_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - "metadata_store": "cuttlefish", - "metadata_schema": "mussel", - } - path = MetadataServiceClient.metadata_schema_path(**expected) - - # Check that the path construction is reversible. - actual = MetadataServiceClient.parse_metadata_schema_path(path) - assert expected == actual - -def test_metadata_store_path(): - project = "winkle" - location = "nautilus" - metadata_store = "scallop" - expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}".format(project=project, location=location, metadata_store=metadata_store, ) - actual = MetadataServiceClient.metadata_store_path(project, location, metadata_store) - assert expected == actual - - -def test_parse_metadata_store_path(): - expected = { - "project": "abalone", - "location": "squid", - "metadata_store": "clam", - } - path = MetadataServiceClient.metadata_store_path(**expected) - - # Check that the path construction is reversible. - actual = MetadataServiceClient.parse_metadata_store_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "whelk" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = MetadataServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "octopus", - } - path = MetadataServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = MetadataServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "oyster" - expected = "folders/{folder}".format(folder=folder, ) - actual = MetadataServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "nudibranch", - } - path = MetadataServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = MetadataServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "cuttlefish" - expected = "organizations/{organization}".format(organization=organization, ) - actual = MetadataServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "mussel", - } - path = MetadataServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. 
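- # e.g. parse_common_organization_path("organizations/mussel")
- # == {"organization": "mussel"}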
- actual = MetadataServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "winkle" - expected = "projects/{project}".format(project=project, ) - actual = MetadataServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "nautilus", - } - path = MetadataServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = MetadataServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "scallop" - location = "abalone" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = MetadataServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "squid", - "location": "clam", - } - path = MetadataServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. - actual = MetadataServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.MetadataServiceTransport, '_prep_wrapped_messages') as prep: - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.MetadataServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = MetadataServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py deleted file mode 100644 index c99db01922..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py +++ /dev/null @@ -1,1753 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.migration_service import MigrationServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.migration_service import MigrationServiceClient -from google.cloud.aiplatform_v1beta1.services.migration_service import pagers -from google.cloud.aiplatform_v1beta1.services.migration_service import transports -from google.cloud.aiplatform_v1beta1.services.migration_service.transports.base import _GOOGLE_AUTH_VERSION -from google.cloud.aiplatform_v1beta1.types import migratable_resource -from google.cloud.aiplatform_v1beta1.types import migration_service -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). -requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
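-# For example, a DEFAULT_ENDPOINT containing "localhost" becomes
-# "foo.googleapis.com", giving the mTLS tests a transformable endpoint.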
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert MigrationServiceClient._get_default_mtls_endpoint(None) is None - assert MigrationServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert MigrationServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert MigrationServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert MigrationServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert MigrationServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - MigrationServiceClient, - MigrationServiceAsyncClient, -]) -def test_migration_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("client_class", [ - MigrationServiceClient, - MigrationServiceAsyncClient, -]) -def test_migration_service_client_service_account_always_use_jwt(client_class): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.MigrationServiceGrpcTransport, "grpc"), - (transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_migration_service_client_service_account_always_use_jwt_true(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - -@pytest.mark.parametrize("client_class", [ - MigrationServiceClient, - MigrationServiceAsyncClient, -]) -def test_migration_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_migration_service_client_get_transport_class(): - transport = MigrationServiceClient.get_transport_class() - available_transports = [ - 
transports.MigrationServiceGrpcTransport, - ] - assert transport in available_transports - - transport = MigrationServiceClient.get_transport_class("grpc") - assert transport == transports.MigrationServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"), - (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(MigrationServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceClient)) -@mock.patch.object(MigrationServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceAsyncClient)) -def test_migration_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(MigrationServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(MigrationServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
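- # (An unsupported GOOGLE_API_USE_MTLS_ENDPOINT raises MutualTLSChannelError
- # above; an unsupported GOOGLE_API_USE_CLIENT_CERTIFICATE raises ValueError.)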
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc", "true"), - (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc", "false"), - (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(MigrationServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceClient)) -@mock.patch.object(MigrationServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_migration_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
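- # Both mTLS helpers are patched so the autoswitch logic runs without any
- # real certificate on the machine.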
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"), - (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_migration_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"), - (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_migration_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
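- # The client only forwards the file path; loading the credentials is the
- # transport's responsibility, as the patched constructor call below shows.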
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_migration_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.migration_service.transports.MigrationServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = MigrationServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_search_migratable_resources(transport: str = 'grpc', request_type=migration_service.SearchMigratableResourcesRequest): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = migration_service.SearchMigratableResourcesResponse( - next_page_token='next_page_token_value', - ) - response = client.search_migratable_resources(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == migration_service.SearchMigratableResourcesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.SearchMigratableResourcesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_search_migratable_resources_from_dict(): - test_search_migratable_resources(request_type=dict) - - -def test_search_migratable_resources_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - client.search_migratable_resources() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == migration_service.SearchMigratableResourcesRequest() - - -@pytest.mark.asyncio -async def test_search_migratable_resources_async(transport: str = 'grpc_asyncio', request_type=migration_service.SearchMigratableResourcesRequest): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
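- # This mirrors the sync test above; the stub returns an awaitable
- # FakeUnaryUnaryCall instead of a plain response object.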
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(migration_service.SearchMigratableResourcesResponse( - next_page_token='next_page_token_value', - )) - response = await client.search_migratable_resources(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == migration_service.SearchMigratableResourcesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.SearchMigratableResourcesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_search_migratable_resources_async_from_dict(): - await test_search_migratable_resources_async(request_type=dict) - - -def test_search_migratable_resources_field_headers(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = migration_service.SearchMigratableResourcesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - call.return_value = migration_service.SearchMigratableResourcesResponse() - client.search_migratable_resources(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_search_migratable_resources_field_headers_async(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = migration_service.SearchMigratableResourcesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_service.SearchMigratableResourcesResponse()) - await client.search_migratable_resources(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_search_migratable_resources_flattened(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = migration_service.SearchMigratableResourcesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.search_migratable_resources( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -def test_search_migratable_resources_flattened_error(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.search_migratable_resources( - migration_service.SearchMigratableResourcesRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_search_migratable_resources_flattened_async(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = migration_service.SearchMigratableResourcesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_service.SearchMigratableResourcesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.search_migratable_resources( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -@pytest.mark.asyncio -async def test_search_migratable_resources_flattened_error_async(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.search_migratable_resources( - migration_service.SearchMigratableResourcesRequest(), - parent='parent_value', - ) - - -def test_search_migratable_resources_pager(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - # Set the response to a series of pages. 
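- # Four pages are returned in sequence; the trailing RuntimeError ensures the
- # test fails loudly if the pager requests a page past the final, token-less
- # response.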
- call.side_effect = ( - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - ], - next_page_token='abc', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[], - next_page_token='def', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - ], - next_page_token='ghi', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.search_migratable_resources(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, migratable_resource.MigratableResource) - for i in results) - -def test_search_migratable_resources_pages(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - ], - next_page_token='abc', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[], - next_page_token='def', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - ], - next_page_token='ghi', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - ], - ), - RuntimeError, - ) - pages = list(client.search_migratable_resources(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_search_migratable_resources_async_pager(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - ], - next_page_token='abc', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[], - next_page_token='def', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - ], - next_page_token='ghi', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - ], - ), - RuntimeError, - ) - async_pager = await client.search_migratable_resources(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, migratable_resource.MigratableResource) - for i in responses) - -@pytest.mark.asyncio -async def test_search_migratable_resources_async_pages(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - ], - next_page_token='abc', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[], - next_page_token='def', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - ], - next_page_token='ghi', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.search_migratable_resources(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_batch_migrate_resources(transport: str = 'grpc', request_type=migration_service.BatchMigrateResourcesRequest): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_migrate_resources), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.batch_migrate_resources(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == migration_service.BatchMigrateResourcesRequest() - - # Establish that the response is the type that we expect. 
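- # The raw Operation proto is wrapped, so callers receive an api-core future
- # rather than the message returned by the stub.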
- assert isinstance(response, future.Future) - - -def test_batch_migrate_resources_from_dict(): - test_batch_migrate_resources(request_type=dict) - - -def test_batch_migrate_resources_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_migrate_resources), - '__call__') as call: - client.batch_migrate_resources() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == migration_service.BatchMigrateResourcesRequest() - - -@pytest.mark.asyncio -async def test_batch_migrate_resources_async(transport: str = 'grpc_asyncio', request_type=migration_service.BatchMigrateResourcesRequest): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_migrate_resources), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.batch_migrate_resources(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == migration_service.BatchMigrateResourcesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_batch_migrate_resources_async_from_dict(): - await test_batch_migrate_resources_async(request_type=dict) - - -def test_batch_migrate_resources_field_headers(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = migration_service.BatchMigrateResourcesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_migrate_resources), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.batch_migrate_resources(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_batch_migrate_resources_field_headers_async(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = migration_service.BatchMigrateResourcesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.batch_migrate_resources), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.batch_migrate_resources(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_batch_migrate_resources_flattened(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_migrate_resources), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.batch_migrate_resources( - parent='parent_value', - migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].migrate_resource_requests == [migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))] - - -def test_batch_migrate_resources_flattened_error(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.batch_migrate_resources( - migration_service.BatchMigrateResourcesRequest(), - parent='parent_value', - migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))], - ) - - -@pytest.mark.asyncio -async def test_batch_migrate_resources_flattened_async(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_migrate_resources), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.batch_migrate_resources( - parent='parent_value', - migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].migrate_resource_requests == [migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))] - - -@pytest.mark.asyncio -async def test_batch_migrate_resources_flattened_error_async(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.batch_migrate_resources( - migration_service.BatchMigrateResourcesRequest(), - parent='parent_value', - migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))], - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.MigrationServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.MigrationServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = MigrationServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.MigrationServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = MigrationServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.MigrationServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = MigrationServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.MigrationServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.MigrationServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.MigrationServiceGrpcTransport, - transports.MigrationServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. 
- client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.MigrationServiceGrpcTransport, - ) - -def test_migration_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.MigrationServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_migration_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1beta1.services.migration_service.transports.MigrationServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.MigrationServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'search_migratable_resources', - 'batch_migrate_resources', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -@requires_google_auth_gte_1_25_0 -def test_migration_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.migration_service.transports.MigrationServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.MigrationServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@requires_google_auth_lt_1_25_0 -def test_migration_service_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.migration_service.transports.MigrationServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.MigrationServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - ), - quota_project_id="octopus", - ) - - -def test_migration_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. 
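- # google.auth.default is patched, so no real ADC lookup runs during the test.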
- with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.migration_service.transports.MigrationServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.MigrationServiceTransport() - adc.assert_called_once() - - -@requires_google_auth_gte_1_25_0 -def test_migration_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - MigrationServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@requires_google_auth_lt_1_25_0 -def test_migration_service_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - MigrationServiceClient() - adc.assert_called_once_with( - scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.MigrationServiceGrpcTransport, - transports.MigrationServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_gte_1_25_0 -def test_migration_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.MigrationServiceGrpcTransport, - transports.MigrationServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_migration_service_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with(scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.MigrationServiceGrpcTransport, grpc_helpers), - (transports.MigrationServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_migration_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.MigrationServiceGrpcTransport, transports.MigrationServiceGrpcAsyncIOTransport]) -def test_migration_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_migration_service_host_no_port(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_migration_service_host_with_port(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_migration_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.MigrationServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_migration_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
-    transport = transports.MigrationServiceGrpcAsyncIOTransport(
-        host="squid.clam.whelk",
-        channel=channel,
-    )
-    assert transport.grpc_channel == channel
-    assert transport._host == "squid.clam.whelk:443"
-    assert transport._ssl_channel_credentials is None
-
-
-# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
-# removed from grpc/grpc_asyncio transport constructor.
-@pytest.mark.parametrize("transport_class", [transports.MigrationServiceGrpcTransport, transports.MigrationServiceGrpcAsyncIOTransport])
-def test_migration_service_transport_channel_mtls_with_client_cert_source(
-    transport_class
-):
-    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
-        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
-            mock_ssl_cred = mock.Mock()
-            grpc_ssl_channel_cred.return_value = mock_ssl_cred
-
-            mock_grpc_channel = mock.Mock()
-            grpc_create_channel.return_value = mock_grpc_channel
-
-            cred = ga_credentials.AnonymousCredentials()
-            with pytest.warns(DeprecationWarning):
-                with mock.patch.object(google.auth, 'default') as adc:
-                    adc.return_value = (cred, None)
-                    transport = transport_class(
-                        host="squid.clam.whelk",
-                        api_mtls_endpoint="mtls.squid.clam.whelk",
-                        client_cert_source=client_cert_source_callback,
-                    )
-                    adc.assert_called_once()
-
-            grpc_ssl_channel_cred.assert_called_once_with(
-                certificate_chain=b"cert bytes", private_key=b"key bytes"
-            )
-            grpc_create_channel.assert_called_once_with(
-                "mtls.squid.clam.whelk:443",
-                credentials=cred,
-                credentials_file=None,
-                scopes=None,
-                ssl_credentials=mock_ssl_cred,
-                quota_project_id=None,
-                options=[
-                    ("grpc.max_send_message_length", -1),
-                    ("grpc.max_receive_message_length", -1),
-                ],
-            )
-            assert transport.grpc_channel == mock_grpc_channel
-            assert transport._ssl_channel_credentials == mock_ssl_cred
-
-
-# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
-# removed from grpc/grpc_asyncio transport constructor.
-@pytest.mark.parametrize("transport_class", [transports.MigrationServiceGrpcTransport, transports.MigrationServiceGrpcAsyncIOTransport])
-def test_migration_service_transport_channel_mtls_with_adc(
-    transport_class
-):
-    mock_ssl_cred = mock.Mock()
-    with mock.patch.multiple(
-        "google.auth.transport.grpc.SslCredentials",
-        __init__=mock.Mock(return_value=None),
-        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
-    ):
-        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
-            mock_grpc_channel = mock.Mock()
-            grpc_create_channel.return_value = mock_grpc_channel
-            mock_cred = mock.Mock()
-
-            with pytest.warns(DeprecationWarning):
-                transport = transport_class(
-                    host="squid.clam.whelk",
-                    credentials=mock_cred,
-                    api_mtls_endpoint="mtls.squid.clam.whelk",
-                    client_cert_source=None,
-                )
-
-            grpc_create_channel.assert_called_once_with(
-                "mtls.squid.clam.whelk:443",
-                credentials=mock_cred,
-                credentials_file=None,
-                scopes=None,
-                ssl_credentials=mock_ssl_cred,
-                quota_project_id=None,
-                options=[
-                    ("grpc.max_send_message_length", -1),
-                    ("grpc.max_receive_message_length", -1),
-                ],
-            )
-            assert transport.grpc_channel == mock_grpc_channel
-
-
-def test_migration_service_grpc_lro_client():
-    client = MigrationServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-    transport = client.transport
-
-    # Ensure that we have an api-core operations client.
-    assert isinstance(
-        transport.operations_client,
-        operations_v1.OperationsClient,
-    )
-
-    # Ensure that subsequent calls to the property send the exact same object.
-    assert transport.operations_client is transport.operations_client
-
-
-def test_migration_service_grpc_lro_async_client():
-    client = MigrationServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc_asyncio',
-    )
-    transport = client.transport
-
-    # Ensure that we have an api-core operations client.
-    assert isinstance(
-        transport.operations_client,
-        operations_v1.OperationsAsyncClient,
-    )
-
-    # Ensure that subsequent calls to the property send the exact same object.
-    assert transport.operations_client is transport.operations_client
-
-
-def test_annotated_dataset_path():
-    project = "squid"
-    dataset = "clam"
-    annotated_dataset = "whelk"
-    expected = "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}".format(project=project, dataset=dataset, annotated_dataset=annotated_dataset, )
-    actual = MigrationServiceClient.annotated_dataset_path(project, dataset, annotated_dataset)
-    assert expected == actual
-
-
-def test_parse_annotated_dataset_path():
-    expected = {
-        "project": "octopus",
-        "dataset": "oyster",
-        "annotated_dataset": "nudibranch",
-    }
-    path = MigrationServiceClient.annotated_dataset_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = MigrationServiceClient.parse_annotated_dataset_path(path)
-    assert expected == actual
-
-def test_dataset_path():
-    project = "cuttlefish"
-    dataset = "mussel"
-    expected = "projects/{project}/datasets/{dataset}".format(project=project, dataset=dataset, )
-    actual = MigrationServiceClient.dataset_path(project, dataset)
-    assert expected == actual
-
-
-def test_parse_dataset_path():
-    expected = {
-        "project": "winkle",
-        "dataset": "nautilus",
-    }
-    path = MigrationServiceClient.dataset_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = MigrationServiceClient.parse_dataset_path(path)
-    assert expected == actual
-
-def test_dataset_path2():
-    project = "scallop"
-    location = "abalone"
-    dataset = "squid"
-    expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, )
-    actual = MigrationServiceClient.dataset_path(project, location, dataset)
-    assert expected == actual
-
-
-def test_parse_dataset_path2():
-    expected = {
-        "project": "clam",
-        "location": "whelk",
-        "dataset": "octopus",
-    }
-    path = MigrationServiceClient.dataset_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = MigrationServiceClient.parse_dataset_path(path)
-    assert expected == actual
-
-def test_dataset_path3():
-    project = "oyster"
-    location = "nudibranch"
-    dataset = "cuttlefish"
-    expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, )
-    actual = MigrationServiceClient.dataset_path(project, location, dataset)
-    assert expected == actual
-
-
-def test_parse_dataset_path3():
-    expected = {
-        "project": "mussel",
-        "location": "winkle",
-        "dataset": "nautilus",
-    }
-    path = MigrationServiceClient.dataset_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = MigrationServiceClient.parse_dataset_path(path)
-    assert expected == actual
-
-def test_model_path():
-    project = "scallop"
-    location = "abalone"
-    model = "squid"
-    expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, )
-    actual = MigrationServiceClient.model_path(project, location, model)
-    assert expected == actual
-
-
-def test_parse_model_path():
-    expected = {
-        "project": "clam",
-        "location": "whelk",
-        "model": "octopus",
-    }
-    path = MigrationServiceClient.model_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = MigrationServiceClient.parse_model_path(path)
-    assert expected == actual
-
-def test_model_path2():
-    project = "oyster"
-    location = "nudibranch"
-    model = "cuttlefish"
-    expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, )
-    actual = MigrationServiceClient.model_path(project, location, model)
-    assert expected == actual
-
-
-def test_parse_model_path2():
-    expected = {
-        "project": "mussel",
-        "location": "winkle",
-        "model": "nautilus",
-    }
-    path = MigrationServiceClient.model_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = MigrationServiceClient.parse_model_path(path)
-    assert expected == actual
-
-def test_version_path():
-    project = "scallop"
-    model = "abalone"
-    version = "squid"
-    expected = "projects/{project}/models/{model}/versions/{version}".format(project=project, model=model, version=version, )
-    actual = MigrationServiceClient.version_path(project, model, version)
-    assert expected == actual
-
-
-def test_parse_version_path():
-    expected = {
-        "project": "clam",
-        "model": "whelk",
-        "version": "octopus",
-    }
-    path = MigrationServiceClient.version_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = MigrationServiceClient.parse_version_path(path)
-    assert expected == actual
-
-def test_common_billing_account_path():
-    billing_account = "oyster"
-    expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
-    actual = MigrationServiceClient.common_billing_account_path(billing_account)
-    assert expected == actual
-
-
-def test_parse_common_billing_account_path():
-    expected = {
-        "billing_account": "nudibranch",
-    }
-    path = MigrationServiceClient.common_billing_account_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = MigrationServiceClient.parse_common_billing_account_path(path)
-    assert expected == actual
-
-def test_common_folder_path():
-    folder = "cuttlefish"
-    expected = "folders/{folder}".format(folder=folder, )
-    actual = MigrationServiceClient.common_folder_path(folder)
-    assert expected == actual
-
-
-def test_parse_common_folder_path():
-    expected = {
-        "folder": "mussel",
-    }
-    path = MigrationServiceClient.common_folder_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = MigrationServiceClient.parse_common_folder_path(path)
-    assert expected == actual
-
-def test_common_organization_path():
-    organization = "winkle"
-    expected = "organizations/{organization}".format(organization=organization, )
-    actual = MigrationServiceClient.common_organization_path(organization)
-    assert expected == actual
-
-
-def test_parse_common_organization_path():
-    expected = {
-        "organization": "nautilus",
-    }
-    path = MigrationServiceClient.common_organization_path(**expected)
-
-    # Check that the path construction is reversible.
- actual = MigrationServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "scallop" - expected = "projects/{project}".format(project=project, ) - actual = MigrationServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "abalone", - } - path = MigrationServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = MigrationServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "squid" - location = "clam" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = MigrationServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "whelk", - "location": "octopus", - } - path = MigrationServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. - actual = MigrationServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.MigrationServiceTransport, '_prep_wrapped_messages') as prep: - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.MigrationServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = MigrationServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py deleted file mode 100644 index 532ecafb91..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py +++ /dev/null @@ -1,4053 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.model_service import ModelServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.model_service import ModelServiceClient -from google.cloud.aiplatform_v1beta1.services.model_service import pagers -from google.cloud.aiplatform_v1beta1.services.model_service import transports -from google.cloud.aiplatform_v1beta1.services.model_service.transports.base import _GOOGLE_AUTH_VERSION -from google.cloud.aiplatform_v1beta1.types import deployed_model_ref -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import env_var -from google.cloud.aiplatform_v1beta1.types import explanation -from google.cloud.aiplatform_v1beta1.types import explanation_metadata -from google.cloud.aiplatform_v1beta1.types import io -from google.cloud.aiplatform_v1beta1.types import model -from google.cloud.aiplatform_v1beta1.types import model as gca_model -from google.cloud.aiplatform_v1beta1.types import model_evaluation -from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice -from google.cloud.aiplatform_v1beta1.types import model_service -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). -requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert ModelServiceClient._get_default_mtls_endpoint(None) is None - assert ModelServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert ModelServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert ModelServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert ModelServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert ModelServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - ModelServiceClient, - ModelServiceAsyncClient, -]) -def test_model_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("client_class", [ - ModelServiceClient, - ModelServiceAsyncClient, -]) -def test_model_service_client_service_account_always_use_jwt(client_class): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.ModelServiceGrpcTransport, "grpc"), - (transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_model_service_client_service_account_always_use_jwt_true(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - -@pytest.mark.parametrize("client_class", [ - ModelServiceClient, - ModelServiceAsyncClient, -]) -def test_model_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_model_service_client_get_transport_class(): - transport = ModelServiceClient.get_transport_class() - available_transports = [ - transports.ModelServiceGrpcTransport, - ] - assert transport in available_transports - - transport = 
ModelServiceClient.get_transport_class("grpc") - assert transport == transports.ModelServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc"), - (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(ModelServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceClient)) -@mock.patch.object(ModelServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceAsyncClient)) -def test_model_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(ModelServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(ModelServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc", "true"), - (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc", "false"), - (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(ModelServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceClient)) -@mock.patch.object(ModelServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_model_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc"), - (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_model_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc"), - (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_model_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_model_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.model_service.transports.ModelServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = ModelServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_upload_model(transport: str = 'grpc', request_type=model_service.UploadModelRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.upload_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.upload_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.UploadModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_upload_model_from_dict(): - test_upload_model(request_type=dict) - - -def test_upload_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.upload_model), - '__call__') as call: - client.upload_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.UploadModelRequest() - - -@pytest.mark.asyncio -async def test_upload_model_async(transport: str = 'grpc_asyncio', request_type=model_service.UploadModelRequest): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.upload_model), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.upload_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.UploadModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_upload_model_async_from_dict(): - await test_upload_model_async(request_type=dict) - - -def test_upload_model_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.UploadModelRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.upload_model), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.upload_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_upload_model_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.UploadModelRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.upload_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.upload_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_upload_model_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.upload_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.upload_model( - parent='parent_value', - model=gca_model.Model(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].model == gca_model.Model(name='name_value') - - -def test_upload_model_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.upload_model( - model_service.UploadModelRequest(), - parent='parent_value', - model=gca_model.Model(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_upload_model_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.upload_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.upload_model( - parent='parent_value', - model=gca_model.Model(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].model == gca_model.Model(name='name_value') - - -@pytest.mark.asyncio -async def test_upload_model_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.upload_model( - model_service.UploadModelRequest(), - parent='parent_value', - model=gca_model.Model(name='name_value'), - ) - - -def test_get_model(transport: str = 'grpc', request_type=model_service.GetModelRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model.Model( - name='name_value', - display_name='display_name_value', - description='description_value', - metadata_schema_uri='metadata_schema_uri_value', - training_pipeline='training_pipeline_value', - artifact_uri='artifact_uri_value', - supported_deployment_resources_types=[model.Model.DeploymentResourcesType.DEDICATED_RESOURCES], - supported_input_storage_formats=['supported_input_storage_formats_value'], - supported_output_storage_formats=['supported_output_storage_formats_value'], - etag='etag_value', - ) - response = client.get_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.GetModelRequest() - - # Establish that the response is the type that we expect. 
-    assert isinstance(response, model.Model)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.description == 'description_value'
-    assert response.metadata_schema_uri == 'metadata_schema_uri_value'
-    assert response.training_pipeline == 'training_pipeline_value'
-    assert response.artifact_uri == 'artifact_uri_value'
-    assert response.supported_deployment_resources_types == [model.Model.DeploymentResourcesType.DEDICATED_RESOURCES]
-    assert response.supported_input_storage_formats == ['supported_input_storage_formats_value']
-    assert response.supported_output_storage_formats == ['supported_output_storage_formats_value']
-    assert response.etag == 'etag_value'
-
-
-def test_get_model_from_dict():
-    test_get_model(request_type=dict)
-
-
-def test_get_model_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_model),
-            '__call__') as call:
-        client.get_model()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.GetModelRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_model_async(transport: str = 'grpc_asyncio', request_type=model_service.GetModelRequest):
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_model),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model(
-            name='name_value',
-            display_name='display_name_value',
-            description='description_value',
-            metadata_schema_uri='metadata_schema_uri_value',
-            training_pipeline='training_pipeline_value',
-            artifact_uri='artifact_uri_value',
-            supported_deployment_resources_types=[model.Model.DeploymentResourcesType.DEDICATED_RESOURCES],
-            supported_input_storage_formats=['supported_input_storage_formats_value'],
-            supported_output_storage_formats=['supported_output_storage_formats_value'],
-            etag='etag_value',
-        ))
-        response = await client.get_model(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.GetModelRequest()
-
-    # Establish that the response is the type that we expect.
- assert isinstance(response, model.Model) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.training_pipeline == 'training_pipeline_value' - assert response.artifact_uri == 'artifact_uri_value' - assert response.supported_deployment_resources_types == [model.Model.DeploymentResourcesType.DEDICATED_RESOURCES] - assert response.supported_input_storage_formats == ['supported_input_storage_formats_value'] - assert response.supported_output_storage_formats == ['supported_output_storage_formats_value'] - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_model_async_from_dict(): - await test_get_model_async(request_type=dict) - - -def test_get_model_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.GetModelRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - call.return_value = model.Model() - client.get_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_model_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.GetModelRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) - await client.get_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_model_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model.Model() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_get_model_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_model( - model_service.GetModelRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_model_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model.Model() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_get_model_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_model( - model_service.GetModelRequest(), - name='name_value', - ) - - -def test_list_models(transport: str = 'grpc', request_type=model_service.ListModelsRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_service.ListModelsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_models(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_models_from_dict(): - test_list_models(request_type=dict) - - -def test_list_models_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
-    with mock.patch.object(
-            type(client.transport.list_models),
-            '__call__') as call:
-        client.list_models()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.ListModelsRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_models_async(transport: str = 'grpc_asyncio', request_type=model_service.ListModelsRequest):
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_models),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelsResponse(
-            next_page_token='next_page_token_value',
-        ))
-        response = await client.list_models(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.ListModelsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListModelsAsyncPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_models_async_from_dict():
-    await test_list_models_async(request_type=dict)
-
-
-def test_list_models_field_headers():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = model_service.ListModelsRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_models),
-            '__call__') as call:
-        call.return_value = model_service.ListModelsResponse()
-        client.list_models(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_models_field_headers_async():
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = model_service.ListModelsRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_models),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelsResponse())
-        await client.list_models(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_list_models_flattened():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_models),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = model_service.ListModelsResponse()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.list_models(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-def test_list_models_flattened_error():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.list_models(
-            model_service.ListModelsRequest(),
-            parent='parent_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_list_models_flattened_async():
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_models),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelsResponse())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.list_models(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_models_flattened_error_async():
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.list_models(
-            model_service.ListModelsRequest(),
-            parent='parent_value',
-        )
-
-
-def test_list_models_pager():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_models),
-            '__call__') as call:
-        # Set the response to a series of pages.
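-        # The pages below are fed through mock's side_effect: each call the
-        # pager makes consumes the next response in order, and the trailing
-        # RuntimeError guards against more calls being issued than pages set.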
-        call.side_effect = (
-            model_service.ListModelsResponse(
-                models=[
-                    model.Model(),
-                    model.Model(),
-                    model.Model(),
-                ],
-                next_page_token='abc',
-            ),
-            model_service.ListModelsResponse(
-                models=[],
-                next_page_token='def',
-            ),
-            model_service.ListModelsResponse(
-                models=[
-                    model.Model(),
-                ],
-                next_page_token='ghi',
-            ),
-            model_service.ListModelsResponse(
-                models=[
-                    model.Model(),
-                    model.Model(),
-                ],
-            ),
-            RuntimeError,
-        )
-
-        metadata = ()
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ('parent', ''),
-            )),
-        )
-        pager = client.list_models(request={})
-
-        assert pager._metadata == metadata
-
-        results = [i for i in pager]
-        assert len(results) == 6
-        assert all(isinstance(i, model.Model)
-                   for i in results)
-
-def test_list_models_pages():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_models),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            model_service.ListModelsResponse(
-                models=[
-                    model.Model(),
-                    model.Model(),
-                    model.Model(),
-                ],
-                next_page_token='abc',
-            ),
-            model_service.ListModelsResponse(
-                models=[],
-                next_page_token='def',
-            ),
-            model_service.ListModelsResponse(
-                models=[
-                    model.Model(),
-                ],
-                next_page_token='ghi',
-            ),
-            model_service.ListModelsResponse(
-                models=[
-                    model.Model(),
-                    model.Model(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = list(client.list_models(request={}).pages)
-        for page_, token in zip(pages, ['abc','def','ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_models_async_pager():
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_models),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            model_service.ListModelsResponse(
-                models=[
-                    model.Model(),
-                    model.Model(),
-                    model.Model(),
-                ],
-                next_page_token='abc',
-            ),
-            model_service.ListModelsResponse(
-                models=[],
-                next_page_token='def',
-            ),
-            model_service.ListModelsResponse(
-                models=[
-                    model.Model(),
-                ],
-                next_page_token='ghi',
-            ),
-            model_service.ListModelsResponse(
-                models=[
-                    model.Model(),
-                    model.Model(),
-                ],
-            ),
-            RuntimeError,
-        )
-        async_pager = await client.list_models(request={},)
-        assert async_pager.next_page_token == 'abc'
-        responses = []
-        async for response in async_pager:
-            responses.append(response)
-
-        assert len(responses) == 6
-        assert all(isinstance(i, model.Model)
-                   for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_models_async_pages():
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_models),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            model_service.ListModelsResponse(
-                models=[
-                    model.Model(),
-                    model.Model(),
-                    model.Model(),
-                ],
-                next_page_token='abc',
-            ),
-            model_service.ListModelsResponse(
-                models=[],
-                next_page_token='def',
-            ),
-            model_service.ListModelsResponse(
-                models=[
-                    model.Model(),
-                ],
-                next_page_token='ghi',
-            ),
-            model_service.ListModelsResponse(
-                models=[
-                    model.Model(),
-                    model.Model(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = []
-        async for page_ in (await client.list_models(request={})).pages:
-            pages.append(page_)
-        for page_, token in zip(pages, ['abc','def','ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-def test_update_model(transport: str = 'grpc', request_type=model_service.UpdateModelRequest):
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_model),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = gca_model.Model(
-            name='name_value',
-            display_name='display_name_value',
-            description='description_value',
-            metadata_schema_uri='metadata_schema_uri_value',
-            training_pipeline='training_pipeline_value',
-            artifact_uri='artifact_uri_value',
-            supported_deployment_resources_types=[gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES],
-            supported_input_storage_formats=['supported_input_storage_formats_value'],
-            supported_output_storage_formats=['supported_output_storage_formats_value'],
-            etag='etag_value',
-        )
-        response = client.update_model(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.UpdateModelRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, gca_model.Model)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.description == 'description_value'
-    assert response.metadata_schema_uri == 'metadata_schema_uri_value'
-    assert response.training_pipeline == 'training_pipeline_value'
-    assert response.artifact_uri == 'artifact_uri_value'
-    assert response.supported_deployment_resources_types == [gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES]
-    assert response.supported_input_storage_formats == ['supported_input_storage_formats_value']
-    assert response.supported_output_storage_formats == ['supported_output_storage_formats_value']
-    assert response.etag == 'etag_value'
-
-
-def test_update_model_from_dict():
-    test_update_model(request_type=dict)
-
-
-def test_update_model_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_model),
-            '__call__') as call:
-        client.update_model()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.UpdateModelRequest()
-
-
-@pytest.mark.asyncio
-async def test_update_model_async(transport: str = 'grpc_asyncio', request_type=model_service.UpdateModelRequest):
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_model),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model(
-            name='name_value',
-            display_name='display_name_value',
-            description='description_value',
-            metadata_schema_uri='metadata_schema_uri_value',
-            training_pipeline='training_pipeline_value',
-            artifact_uri='artifact_uri_value',
-            supported_deployment_resources_types=[gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES],
-            supported_input_storage_formats=['supported_input_storage_formats_value'],
-            supported_output_storage_formats=['supported_output_storage_formats_value'],
-            etag='etag_value',
-        ))
-        response = await client.update_model(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.UpdateModelRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, gca_model.Model)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.description == 'description_value'
-    assert response.metadata_schema_uri == 'metadata_schema_uri_value'
-    assert response.training_pipeline == 'training_pipeline_value'
-    assert response.artifact_uri == 'artifact_uri_value'
-    assert response.supported_deployment_resources_types == [gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES]
-    assert response.supported_input_storage_formats == ['supported_input_storage_formats_value']
-    assert response.supported_output_storage_formats == ['supported_output_storage_formats_value']
-    assert response.etag == 'etag_value'
-
-
-@pytest.mark.asyncio
-async def test_update_model_async_from_dict():
-    await test_update_model_async(request_type=dict)
-
-
-def test_update_model_field_headers():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = model_service.UpdateModelRequest()
-
-    request.model.name = 'model.name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_model),
-            '__call__') as call:
-        call.return_value = gca_model.Model()
-        client.update_model(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
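-    # Illustrative sketch, not generator output: assigning to the nested
-    # 'model.name' field above auto-instantiated the request's model
-    # submessage, which is where the routing header value below comes from.
-    assert request.model.name == 'model.name/value'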
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'model.name=model.name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_update_model_field_headers_async():
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = model_service.UpdateModelRequest()
-
-    request.model.name = 'model.name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_model),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model())
-        await client.update_model(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'model.name=model.name/value',
-    ) in kw['metadata']
-
-
-def test_update_model_flattened():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_model),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = gca_model.Model()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.update_model(
-            model=gca_model.Model(name='name_value'),
-            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].model == gca_model.Model(name='name_value')
-        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
-
-
-def test_update_model_flattened_error():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.update_model(
-            model_service.UpdateModelRequest(),
-            model=gca_model.Model(name='name_value'),
-            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
-        )
-
-
-@pytest.mark.asyncio
-async def test_update_model_flattened_async():
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_model),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.update_model(
-            model=gca_model.Model(name='name_value'),
-            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].model == gca_model.Model(name='name_value')
-        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
-
-
-@pytest.mark.asyncio
-async def test_update_model_flattened_error_async():
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.update_model(
-            model_service.UpdateModelRequest(),
-            model=gca_model.Model(name='name_value'),
-            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
-        )
-
-
-def test_delete_model(transport: str = 'grpc', request_type=model_service.DeleteModelRequest):
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_model),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/spam')
-        response = client.delete_model(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.DeleteModelRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, future.Future)
-
-
-def test_delete_model_from_dict():
-    test_delete_model(request_type=dict)
-
-
-def test_delete_model_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_model),
-            '__call__') as call:
-        client.delete_model()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.DeleteModelRequest()
-
-
-@pytest.mark.asyncio
-async def test_delete_model_async(transport: str = 'grpc_asyncio', request_type=model_service.DeleteModelRequest):
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_model),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        response = await client.delete_model(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.DeleteModelRequest()
-
-    # Establish that the response is the type that we expect.
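-    # Hedged sketch (assumes api_core's public long-running-operation
-    # interface): the mocked stub returned an Operation proto named
-    # 'operations/spam', and the client-side future exposes that same proto
-    # via its .operation property.
-    assert response.operation.name == 'operations/spam'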
-    assert isinstance(response, future.Future)
-
-
-@pytest.mark.asyncio
-async def test_delete_model_async_from_dict():
-    await test_delete_model_async(request_type=dict)
-
-
-def test_delete_model_field_headers():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = model_service.DeleteModelRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_model),
-            '__call__') as call:
-        call.return_value = operations_pb2.Operation(name='operations/op')
-        client.delete_model(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_delete_model_field_headers_async():
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = model_service.DeleteModelRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_model),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
-        await client.delete_model(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_delete_model_flattened():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_model),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/op')
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.delete_model(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_delete_model_flattened_error():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.delete_model(
-            model_service.DeleteModelRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_delete_model_flattened_async():
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_model),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.delete_model(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_model_flattened_error_async():
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.delete_model(
-            model_service.DeleteModelRequest(),
-            name='name_value',
-        )
-
-
-def test_export_model(transport: str = 'grpc', request_type=model_service.ExportModelRequest):
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.export_model),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/spam')
-        response = client.export_model(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.ExportModelRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, future.Future)
-
-
-def test_export_model_from_dict():
-    test_export_model(request_type=dict)
-
-
-def test_export_model_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.export_model),
-            '__call__') as call:
-        client.export_model()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.ExportModelRequest()
-
-
-@pytest.mark.asyncio
-async def test_export_model_async(transport: str = 'grpc_asyncio', request_type=model_service.ExportModelRequest):
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.export_model),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
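-        # For the async transport the mocked '__call__' must return an
-        # awaitable; grpc_helpers_async.FakeUnaryUnaryCall wraps the plain
-        # response proto so the awaited client call can resolve it.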
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        response = await client.export_model(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.ExportModelRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, future.Future)
-
-
-@pytest.mark.asyncio
-async def test_export_model_async_from_dict():
-    await test_export_model_async(request_type=dict)
-
-
-def test_export_model_field_headers():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = model_service.ExportModelRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.export_model),
-            '__call__') as call:
-        call.return_value = operations_pb2.Operation(name='operations/op')
-        client.export_model(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_export_model_field_headers_async():
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = model_service.ExportModelRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.export_model),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
-        await client.export_model(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_export_model_flattened():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.export_model),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/op')
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.export_model(
-            name='name_value',
-            output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-        assert args[0].output_config == model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value')
-
-
-def test_export_model_flattened_error():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.export_model(
-            model_service.ExportModelRequest(),
-            name='name_value',
-            output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'),
-        )
-
-
-@pytest.mark.asyncio
-async def test_export_model_flattened_async():
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.export_model),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.export_model(
-            name='name_value',
-            output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-        assert args[0].output_config == model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value')
-
-
-@pytest.mark.asyncio
-async def test_export_model_flattened_error_async():
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.export_model(
-            model_service.ExportModelRequest(),
-            name='name_value',
-            output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'),
-        )
-
-
-def test_get_model_evaluation(transport: str = 'grpc', request_type=model_service.GetModelEvaluationRequest):
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_model_evaluation),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = model_evaluation.ModelEvaluation(
-            name='name_value',
-            metrics_schema_uri='metrics_schema_uri_value',
-            slice_dimensions=['slice_dimensions_value'],
-        )
-        response = client.get_model_evaluation(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.GetModelEvaluationRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, model_evaluation.ModelEvaluation)
-    assert response.name == 'name_value'
-    assert response.metrics_schema_uri == 'metrics_schema_uri_value'
-    assert response.slice_dimensions == ['slice_dimensions_value']
-
-
-def test_get_model_evaluation_from_dict():
-    test_get_model_evaluation(request_type=dict)
-
-
-def test_get_model_evaluation_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_model_evaluation),
-            '__call__') as call:
-        client.get_model_evaluation()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.GetModelEvaluationRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_model_evaluation_async(transport: str = 'grpc_asyncio', request_type=model_service.GetModelEvaluationRequest):
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_model_evaluation),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation(
-            name='name_value',
-            metrics_schema_uri='metrics_schema_uri_value',
-            slice_dimensions=['slice_dimensions_value'],
-        ))
-        response = await client.get_model_evaluation(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.GetModelEvaluationRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, model_evaluation.ModelEvaluation)
-    assert response.name == 'name_value'
-    assert response.metrics_schema_uri == 'metrics_schema_uri_value'
-    assert response.slice_dimensions == ['slice_dimensions_value']
-
-
-@pytest.mark.asyncio
-async def test_get_model_evaluation_async_from_dict():
-    await test_get_model_evaluation_async(request_type=dict)
-
-
-def test_get_model_evaluation_field_headers():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = model_service.GetModelEvaluationRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_model_evaluation),
-            '__call__') as call:
-        call.return_value = model_evaluation.ModelEvaluation()
-        client.get_model_evaluation(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_get_model_evaluation_field_headers_async():
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = model_service.GetModelEvaluationRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_model_evaluation),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation())
-        await client.get_model_evaluation(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_get_model_evaluation_flattened():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_model_evaluation),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = model_evaluation.ModelEvaluation()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.get_model_evaluation(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_get_model_evaluation_flattened_error():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.get_model_evaluation(
-            model_service.GetModelEvaluationRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_get_model_evaluation_flattened_async():
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_model_evaluation),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.get_model_evaluation(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_get_model_evaluation_flattened_error_async():
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
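-    # Flattened keyword arguments are sugar that the client copies into a
-    # fresh request object; combining them with a pre-built request would be
-    # ambiguous, so the client raises ValueError before any RPC is attempted.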
-    with pytest.raises(ValueError):
-        await client.get_model_evaluation(
-            model_service.GetModelEvaluationRequest(),
-            name='name_value',
-        )
-
-
-def test_list_model_evaluations(transport: str = 'grpc', request_type=model_service.ListModelEvaluationsRequest):
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_evaluations),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = model_service.ListModelEvaluationsResponse(
-            next_page_token='next_page_token_value',
-        )
-        response = client.list_model_evaluations(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.ListModelEvaluationsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListModelEvaluationsPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_model_evaluations_from_dict():
-    test_list_model_evaluations(request_type=dict)
-
-
-def test_list_model_evaluations_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_evaluations),
-            '__call__') as call:
-        client.list_model_evaluations()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.ListModelEvaluationsRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_model_evaluations_async(transport: str = 'grpc_asyncio', request_type=model_service.ListModelEvaluationsRequest):
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_evaluations),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationsResponse(
-            next_page_token='next_page_token_value',
-        ))
-        response = await client.list_model_evaluations(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.ListModelEvaluationsRequest()
-
-    # Establish that the response is the type that we expect.
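-    # The async method returns an AsyncPager that proxies attribute access
-    # to the first response (hence next_page_token below) and fetches
-    # subsequent pages lazily during async iteration.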
-    assert isinstance(response, pagers.ListModelEvaluationsAsyncPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_model_evaluations_async_from_dict():
-    await test_list_model_evaluations_async(request_type=dict)
-
-
-def test_list_model_evaluations_field_headers():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = model_service.ListModelEvaluationsRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_evaluations),
-            '__call__') as call:
-        call.return_value = model_service.ListModelEvaluationsResponse()
-        client.list_model_evaluations(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_model_evaluations_field_headers_async():
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = model_service.ListModelEvaluationsRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_evaluations),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationsResponse())
-        await client.list_model_evaluations(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_list_model_evaluations_flattened():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_evaluations),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = model_service.ListModelEvaluationsResponse()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.list_model_evaluations(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-def test_list_model_evaluations_flattened_error():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.list_model_evaluations(
-            model_service.ListModelEvaluationsRequest(),
-            parent='parent_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_list_model_evaluations_flattened_async():
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_evaluations),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationsResponse())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.list_model_evaluations(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_model_evaluations_flattened_error_async():
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.list_model_evaluations(
-            model_service.ListModelEvaluationsRequest(),
-            parent='parent_value',
-        )
-
-
-def test_list_model_evaluations_pager():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_evaluations),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            model_service.ListModelEvaluationsResponse(
-                model_evaluations=[
-                    model_evaluation.ModelEvaluation(),
-                    model_evaluation.ModelEvaluation(),
-                    model_evaluation.ModelEvaluation(),
-                ],
-                next_page_token='abc',
-            ),
-            model_service.ListModelEvaluationsResponse(
-                model_evaluations=[],
-                next_page_token='def',
-            ),
-            model_service.ListModelEvaluationsResponse(
-                model_evaluations=[
-                    model_evaluation.ModelEvaluation(),
-                ],
-                next_page_token='ghi',
-            ),
-            model_service.ListModelEvaluationsResponse(
-                model_evaluations=[
-                    model_evaluation.ModelEvaluation(),
-                    model_evaluation.ModelEvaluation(),
-                ],
-            ),
-            RuntimeError,
-        )
-
-        metadata = ()
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ('parent', ''),
-            )),
-        )
-        pager = client.list_model_evaluations(request={})
-
-        assert pager._metadata == metadata
-
-        results = [i for i in pager]
-        assert len(results) == 6
-        assert all(isinstance(i, model_evaluation.ModelEvaluation)
-                   for i in results)
-
-def test_list_model_evaluations_pages():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_evaluations),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            model_service.ListModelEvaluationsResponse(
-                model_evaluations=[
-                    model_evaluation.ModelEvaluation(),
-                    model_evaluation.ModelEvaluation(),
-                    model_evaluation.ModelEvaluation(),
-                ],
-                next_page_token='abc',
-            ),
-            model_service.ListModelEvaluationsResponse(
-                model_evaluations=[],
-                next_page_token='def',
-            ),
-            model_service.ListModelEvaluationsResponse(
-                model_evaluations=[
-                    model_evaluation.ModelEvaluation(),
-                ],
-                next_page_token='ghi',
-            ),
-            model_service.ListModelEvaluationsResponse(
-                model_evaluations=[
-                    model_evaluation.ModelEvaluation(),
-                    model_evaluation.ModelEvaluation(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = list(client.list_model_evaluations(request={}).pages)
-        for page_, token in zip(pages, ['abc','def','ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_model_evaluations_async_pager():
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_evaluations),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            model_service.ListModelEvaluationsResponse(
-                model_evaluations=[
-                    model_evaluation.ModelEvaluation(),
-                    model_evaluation.ModelEvaluation(),
-                    model_evaluation.ModelEvaluation(),
-                ],
-                next_page_token='abc',
-            ),
-            model_service.ListModelEvaluationsResponse(
-                model_evaluations=[],
-                next_page_token='def',
-            ),
-            model_service.ListModelEvaluationsResponse(
-                model_evaluations=[
-                    model_evaluation.ModelEvaluation(),
-                ],
-                next_page_token='ghi',
-            ),
-            model_service.ListModelEvaluationsResponse(
-                model_evaluations=[
-                    model_evaluation.ModelEvaluation(),
-                    model_evaluation.ModelEvaluation(),
-                ],
-            ),
-            RuntimeError,
-        )
-        async_pager = await client.list_model_evaluations(request={},)
-        assert async_pager.next_page_token == 'abc'
-        responses = []
-        async for response in async_pager:
-            responses.append(response)
-
-        assert len(responses) == 6
-        assert all(isinstance(i, model_evaluation.ModelEvaluation)
-                   for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_model_evaluations_async_pages():
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_model_evaluations),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            model_service.ListModelEvaluationsResponse(
-                model_evaluations=[
-                    model_evaluation.ModelEvaluation(),
-                    model_evaluation.ModelEvaluation(),
-                    model_evaluation.ModelEvaluation(),
-                ],
-                next_page_token='abc',
-            ),
-            model_service.ListModelEvaluationsResponse(
-                model_evaluations=[],
-                next_page_token='def',
-            ),
-            model_service.ListModelEvaluationsResponse(
-                model_evaluations=[
-                    model_evaluation.ModelEvaluation(),
-                ],
-                next_page_token='ghi',
-            ),
-            model_service.ListModelEvaluationsResponse(
-                model_evaluations=[
-                    model_evaluation.ModelEvaluation(),
-                    model_evaluation.ModelEvaluation(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = []
-        async for page_ in (await client.list_model_evaluations(request={})).pages:
-            pages.append(page_)
-        for page_, token in zip(pages, ['abc','def','ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-def test_get_model_evaluation_slice(transport: str = 'grpc', request_type=model_service.GetModelEvaluationSliceRequest):
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_model_evaluation_slice),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = model_evaluation_slice.ModelEvaluationSlice(
-            name='name_value',
-            metrics_schema_uri='metrics_schema_uri_value',
-        )
-        response = client.get_model_evaluation_slice(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.GetModelEvaluationSliceRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, model_evaluation_slice.ModelEvaluationSlice)
-    assert response.name == 'name_value'
-    assert response.metrics_schema_uri == 'metrics_schema_uri_value'
-
-
-def test_get_model_evaluation_slice_from_dict():
-    test_get_model_evaluation_slice(request_type=dict)
-
-
-def test_get_model_evaluation_slice_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_model_evaluation_slice),
-            '__call__') as call:
-        client.get_model_evaluation_slice()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.GetModelEvaluationSliceRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_model_evaluation_slice_async(transport: str = 'grpc_asyncio', request_type=model_service.GetModelEvaluationSliceRequest):
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_model_evaluation_slice),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation_slice.ModelEvaluationSlice(
-            name='name_value',
-            metrics_schema_uri='metrics_schema_uri_value',
-        ))
-        response = await client.get_model_evaluation_slice(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == model_service.GetModelEvaluationSliceRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, model_evaluation_slice.ModelEvaluationSlice)
-    assert response.name == 'name_value'
-    assert response.metrics_schema_uri == 'metrics_schema_uri_value'
-
-
-@pytest.mark.asyncio
-async def test_get_model_evaluation_slice_async_from_dict():
-    await test_get_model_evaluation_slice_async(request_type=dict)
-
-
-def test_get_model_evaluation_slice_field_headers():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = model_service.GetModelEvaluationSliceRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_model_evaluation_slice),
-            '__call__') as call:
-        call.return_value = model_evaluation_slice.ModelEvaluationSlice()
-        client.get_model_evaluation_slice(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_get_model_evaluation_slice_field_headers_async():
-    client = ModelServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = model_service.GetModelEvaluationSliceRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_model_evaluation_slice),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation_slice.ModelEvaluationSlice())
-        await client.get_model_evaluation_slice(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_get_model_evaluation_slice_flattened():
-    client = ModelServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_model_evaluation_slice),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = model_evaluation_slice.ModelEvaluationSlice()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
- client.get_model_evaluation_slice( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_get_model_evaluation_slice_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_model_evaluation_slice( - model_service.GetModelEvaluationSliceRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_model_evaluation_slice_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation_slice), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_evaluation_slice.ModelEvaluationSlice() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation_slice.ModelEvaluationSlice()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_model_evaluation_slice( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_get_model_evaluation_slice_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_model_evaluation_slice( - model_service.GetModelEvaluationSliceRequest(), - name='name_value', - ) - - -def test_list_model_evaluation_slices(transport: str = 'grpc', request_type=model_service.ListModelEvaluationSlicesRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_service.ListModelEvaluationSlicesResponse( - next_page_token='next_page_token_value', - ) - response = client.list_model_evaluation_slices(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelEvaluationSlicesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelEvaluationSlicesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_model_evaluation_slices_from_dict(): - test_list_model_evaluation_slices(request_type=dict) - - -def test_list_model_evaluation_slices_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - client.list_model_evaluation_slices() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelEvaluationSlicesRequest() - - -@pytest.mark.asyncio -async def test_list_model_evaluation_slices_async(transport: str = 'grpc_asyncio', request_type=model_service.ListModelEvaluationSlicesRequest): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationSlicesResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_model_evaluation_slices(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelEvaluationSlicesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelEvaluationSlicesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_model_evaluation_slices_async_from_dict(): - await test_list_model_evaluation_slices_async(request_type=dict) - - -def test_list_model_evaluation_slices_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.ListModelEvaluationSlicesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - call.return_value = model_service.ListModelEvaluationSlicesResponse() - client.list_model_evaluation_slices(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_model_evaluation_slices_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.ListModelEvaluationSlicesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
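[Editor's note: every test in this file stubs the transport the same way: patch the callable on the class, set a canned return value, then unpack mock_calls to inspect what the client actually sent. A self-contained illustration of that pattern; FakeTransport is a hypothetical stand-in for the generated gRPC transport.]

    from unittest import mock

    class FakeTransport:
        def list_model_evaluation_slices(self, request):
            raise RuntimeError("would hit the real service")

    transport = FakeTransport()
    with mock.patch.object(FakeTransport, "list_model_evaluation_slices") as call:
        call.return_value = {"next_page_token": "abc"}
        response = transport.list_model_evaluation_slices({"parent": "parent_value"})

    assert response == {"next_page_token": "abc"}
    _, args, _ = call.mock_calls[0]  # each entry is a (name, args, kwargs) triple
    assert args[0] == {"parent": "parent_value"}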
- with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationSlicesResponse()) - await client.list_model_evaluation_slices(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_model_evaluation_slices_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_service.ListModelEvaluationSlicesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_model_evaluation_slices( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -def test_list_model_evaluation_slices_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_model_evaluation_slices( - model_service.ListModelEvaluationSlicesRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_model_evaluation_slices_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_service.ListModelEvaluationSlicesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationSlicesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_model_evaluation_slices( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -@pytest.mark.asyncio -async def test_list_model_evaluation_slices_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_model_evaluation_slices( - model_service.ListModelEvaluationSlicesRequest(), - parent='parent_value', - ) - - -def test_list_model_evaluation_slices_pager(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
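[Editor's note: the *_flattened and *_flattened_error pairs pin down one constructor rule: a fully-formed request object and flattened keyword arguments are mutually exclusive. A condensed sketch of the guard the generated clients apply; get_slice is a hypothetical reduction, not the real method.]

    def get_slice(request=None, *, name=None):
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        return request if request is not None else {"name": name}

    assert get_slice(name="name_value") == {"name": "name_value"}
    try:
        get_slice(request={}, name="name_value")
    except ValueError:
        pass  # mixing both is rejected, as the *_flattened_error tests assert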
- with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='abc', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[], - next_page_token='def', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='ghi', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_model_evaluation_slices(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, model_evaluation_slice.ModelEvaluationSlice) - for i in results) - -def test_list_model_evaluation_slices_pages(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='abc', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[], - next_page_token='def', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='ghi', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - ), - RuntimeError, - ) - pages = list(client.list_model_evaluation_slices(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_model_evaluation_slices_async_pager(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
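[Editor's note: the side_effect tuple that follows is consumed one element per call, and the trailing RuntimeError is a tripwire: a pager that stops on the empty token never makes the extra call, so the error never fires. The same mechanism in miniature:]

    from unittest import mock

    fetch = mock.Mock(side_effect=["page-1", "page-2", RuntimeError])
    assert fetch() == "page-1"
    assert fetch() == "page-2"
    try:
        fetch()  # only an over-eager caller reaches the tripwire
    except RuntimeError:
        pass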
- call.side_effect = ( - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='abc', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[], - next_page_token='def', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='ghi', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_model_evaluation_slices(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, model_evaluation_slice.ModelEvaluationSlice) - for i in responses) - -@pytest.mark.asyncio -async def test_list_model_evaluation_slices_async_pages(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='abc', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[], - next_page_token='def', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='ghi', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_model_evaluation_slices(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.ModelServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.ModelServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = ModelServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. 
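[Editor's note: test_credentials_transport_error checks three variants of one rule: a ready-made transport instance already carries its own credential configuration, so credentials, a credentials file, and scopes may not be passed alongside it. A hypothetical condensation of that constructor check; make_client is illustrative only.]

    def make_client(credentials=None, transport=None, client_options=None):
        opts = dict(client_options or {})
        if transport is not None and (
            credentials is not None
            or opts.get("credentials_file")
            or opts.get("scopes")
        ):
            raise ValueError(
                "transport is mutually exclusive with credentials, "
                "credentials_file and scopes"
            )
        return "client"  # stand-in for a configured client

    for bad_kwargs in (
        {"credentials": object(), "transport": object()},
        {"client_options": {"credentials_file": "credentials.json"}, "transport": object()},
        {"client_options": {"scopes": ["1", "2"]}, "transport": object()},
    ):
        try:
            make_client(**bad_kwargs)
        except ValueError:
            pass  # each combination is rejected, matching the three branches above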
- transport = transports.ModelServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = ModelServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.ModelServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = ModelServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.ModelServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.ModelServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.ModelServiceGrpcTransport, - transports.ModelServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.ModelServiceGrpcTransport, - ) - -def test_model_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.ModelServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_model_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1beta1.services.model_service.transports.ModelServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.ModelServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
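[Editor's note: the loop below walks every RPC name on the abstract base transport; each one is expected to raise until a concrete transport overrides it. A minimal sketch of that contract, assuming the base exposes the RPCs as properties so even attribute access raises; FakeBaseTransport is hypothetical.]

    class FakeBaseTransport:
        @property
        def upload_model(self):
            raise NotImplementedError()

        @property
        def operations_client(self):
            raise NotImplementedError()

    transport = FakeBaseTransport()
    for method in ("upload_model", "operations_client"):
        try:
            getattr(transport, method)
        except NotImplementedError:
            pass  # abstract until a concrete gRPC transport overrides it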
- methods = ( - 'upload_model', - 'get_model', - 'list_models', - 'update_model', - 'delete_model', - 'export_model', - 'get_model_evaluation', - 'list_model_evaluations', - 'get_model_evaluation_slice', - 'list_model_evaluation_slices', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -@requires_google_auth_gte_1_25_0 -def test_model_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.model_service.transports.ModelServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.ModelServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@requires_google_auth_lt_1_25_0 -def test_model_service_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.model_service.transports.ModelServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.ModelServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - ), - quota_project_id="octopus", - ) - - -def test_model_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.model_service.transports.ModelServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.ModelServiceTransport() - adc.assert_called_once() - - -@requires_google_auth_gte_1_25_0 -def test_model_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - ModelServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@requires_google_auth_lt_1_25_0 -def test_model_service_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. 
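[Editor's note: the gte/lt 1.25.0 twins above and below exist because google-auth gained a separate default_scopes parameter in 1.25.0: newer versions receive user scopes and service defaults separately and apply the fallback themselves, while older versions must be handed the already-resolved scopes. A hypothetical helper showing the intended fallback; resolve_scope_kwargs is illustrative only.]

    def resolve_scope_kwargs(user_scopes, service_default_scopes, supports_default_scopes):
        if supports_default_scopes:  # google-auth >= 1.25.0
            return {"scopes": user_scopes, "default_scopes": service_default_scopes}
        return {"scopes": user_scopes or service_default_scopes}

    CLOUD = ("https://www.googleapis.com/auth/cloud-platform",)
    assert resolve_scope_kwargs(None, CLOUD, False) == {"scopes": CLOUD}
    assert resolve_scope_kwargs(["1", "2"], CLOUD, True) == {
        "scopes": ["1", "2"],
        "default_scopes": CLOUD,
    }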
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - ModelServiceClient() - adc.assert_called_once_with( - scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.ModelServiceGrpcTransport, - transports.ModelServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_gte_1_25_0 -def test_model_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.ModelServiceGrpcTransport, - transports.ModelServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_model_service_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with(scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.ModelServiceGrpcTransport, grpc_helpers), - (transports.ModelServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_model_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport]) -def test_model_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. 
- with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_model_service_host_no_port(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_model_service_host_with_port(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_model_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.ModelServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_model_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.ModelServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. 
-@pytest.mark.parametrize("transport_class", [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport]) -def test_model_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport]) -def test_model_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_model_service_grpc_lro_client(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_model_service_grpc_lro_async_client(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
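[Editor's note: both LRO tests assert the same two properties: the transport lazily builds an api-core operations client, and repeated access returns the identical object. A minimal sketch of that lazy caching; FakeTransport is hypothetical and object() stands in for operations_v1.OperationsClient.]

    class FakeTransport:
        def __init__(self):
            self._operations_client = None

        @property
        def operations_client(self):
            if self._operations_client is None:
                self._operations_client = object()  # built once, on first access
            return self._operations_client

    transport = FakeTransport()
    assert transport.operations_client is transport.operations_client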
- assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_endpoint_path(): - project = "squid" - location = "clam" - endpoint = "whelk" - expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - actual = ModelServiceClient.endpoint_path(project, location, endpoint) - assert expected == actual - - -def test_parse_endpoint_path(): - expected = { - "project": "octopus", - "location": "oyster", - "endpoint": "nudibranch", - } - path = ModelServiceClient.endpoint_path(**expected) - - # Check that the path construction is reversible. - actual = ModelServiceClient.parse_endpoint_path(path) - assert expected == actual - -def test_model_path(): - project = "cuttlefish" - location = "mussel" - model = "winkle" - expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - actual = ModelServiceClient.model_path(project, location, model) - assert expected == actual - - -def test_parse_model_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "model": "abalone", - } - path = ModelServiceClient.model_path(**expected) - - # Check that the path construction is reversible. - actual = ModelServiceClient.parse_model_path(path) - assert expected == actual - -def test_model_evaluation_path(): - project = "squid" - location = "clam" - model = "whelk" - evaluation = "octopus" - expected = "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}".format(project=project, location=location, model=model, evaluation=evaluation, ) - actual = ModelServiceClient.model_evaluation_path(project, location, model, evaluation) - assert expected == actual - - -def test_parse_model_evaluation_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - "model": "cuttlefish", - "evaluation": "mussel", - } - path = ModelServiceClient.model_evaluation_path(**expected) - - # Check that the path construction is reversible. - actual = ModelServiceClient.parse_model_evaluation_path(path) - assert expected == actual - -def test_model_evaluation_slice_path(): - project = "winkle" - location = "nautilus" - model = "scallop" - evaluation = "abalone" - slice = "squid" - expected = "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}".format(project=project, location=location, model=model, evaluation=evaluation, slice=slice, ) - actual = ModelServiceClient.model_evaluation_slice_path(project, location, model, evaluation, slice) - assert expected == actual - - -def test_parse_model_evaluation_slice_path(): - expected = { - "project": "clam", - "location": "whelk", - "model": "octopus", - "evaluation": "oyster", - "slice": "nudibranch", - } - path = ModelServiceClient.model_evaluation_slice_path(**expected) - - # Check that the path construction is reversible. 
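[Editor's note: each *_path/parse_*_path pair above is a format string going in and a non-greedy named regex coming out, which is what makes construction reversible. A sketch of the model-path pair, assuming the same template the assertions show.]

    import re

    _MODEL_RE = re.compile(
        r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$"
    )

    def model_path(project, location, model):
        return f"projects/{project}/locations/{location}/models/{model}"

    def parse_model_path(path):
        m = _MODEL_RE.match(path)
        return m.groupdict() if m else {}

    expected = {"project": "cuttlefish", "location": "mussel", "model": "winkle"}
    assert parse_model_path(model_path(**expected)) == expected  # round-trips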
- actual = ModelServiceClient.parse_model_evaluation_slice_path(path) - assert expected == actual - -def test_training_pipeline_path(): - project = "cuttlefish" - location = "mussel" - training_pipeline = "winkle" - expected = "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, ) - actual = ModelServiceClient.training_pipeline_path(project, location, training_pipeline) - assert expected == actual - - -def test_parse_training_pipeline_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "training_pipeline": "abalone", - } - path = ModelServiceClient.training_pipeline_path(**expected) - - # Check that the path construction is reversible. - actual = ModelServiceClient.parse_training_pipeline_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "squid" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = ModelServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "clam", - } - path = ModelServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = ModelServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "whelk" - expected = "folders/{folder}".format(folder=folder, ) - actual = ModelServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "octopus", - } - path = ModelServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = ModelServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "oyster" - expected = "organizations/{organization}".format(organization=organization, ) - actual = ModelServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "nudibranch", - } - path = ModelServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = ModelServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "cuttlefish" - expected = "projects/{project}".format(project=project, ) - actual = ModelServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "mussel", - } - path = ModelServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = ModelServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "winkle" - location = "nautilus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = ModelServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "scallop", - "location": "abalone", - } - path = ModelServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = ModelServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.ModelServiceTransport, '_prep_wrapped_messages') as prep: - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.ModelServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = ModelServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py deleted file mode 100644 index ea7e516df1..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py +++ /dev/null @@ -1,3918 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.pipeline_service import PipelineServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.pipeline_service import PipelineServiceClient -from google.cloud.aiplatform_v1beta1.services.pipeline_service import pagers -from google.cloud.aiplatform_v1beta1.services.pipeline_service import transports -from google.cloud.aiplatform_v1beta1.services.pipeline_service.transports.base import _GOOGLE_AUTH_VERSION -from google.cloud.aiplatform_v1beta1.types import artifact -from google.cloud.aiplatform_v1beta1.types import context -from google.cloud.aiplatform_v1beta1.types import deployed_model_ref -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import env_var -from google.cloud.aiplatform_v1beta1.types import execution -from google.cloud.aiplatform_v1beta1.types import explanation -from google.cloud.aiplatform_v1beta1.types import explanation_metadata -from google.cloud.aiplatform_v1beta1.types import io -from google.cloud.aiplatform_v1beta1.types import model -from google.cloud.aiplatform_v1beta1.types import 
operation as gca_operation -from google.cloud.aiplatform_v1beta1.types import pipeline_job -from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job -from google.cloud.aiplatform_v1beta1.types import pipeline_service -from google.cloud.aiplatform_v1beta1.types import pipeline_state -from google.cloud.aiplatform_v1beta1.types import training_pipeline -from google.cloud.aiplatform_v1beta1.types import training_pipeline as gca_training_pipeline -from google.cloud.aiplatform_v1beta1.types import value -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import any_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). -requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
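[Editor's note: test__get_default_mtls_endpoint, a few lines below, checks five endpoint shapes. The derivation it exercises inserts ".mtls" after the service name for *.googleapis.com hosts (sandbox included) and passes everything else through untouched. A re-implementation sketch consistent with those assertions:]

    import re

    _ENDPOINT_RE = re.compile(
        r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
    )

    def get_default_mtls_endpoint(api_endpoint):
        if not api_endpoint:
            return api_endpoint
        name, mtls, sandbox, googledomain = _ENDPOINT_RE.match(api_endpoint).groups()
        if mtls or not googledomain:
            return api_endpoint  # already mTLS, or not a googleapis.com host
        if sandbox:
            return f"{name}.mtls.sandbox.googleapis.com"
        return f"{name}.mtls.googleapis.com"

    assert get_default_mtls_endpoint("example.googleapis.com") == "example.mtls.googleapis.com"
    assert get_default_mtls_endpoint("example.sandbox.googleapis.com") == "example.mtls.sandbox.googleapis.com"
    assert get_default_mtls_endpoint("api.example.com") == "api.example.com"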
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert PipelineServiceClient._get_default_mtls_endpoint(None) is None - assert PipelineServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert PipelineServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert PipelineServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert PipelineServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert PipelineServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - PipelineServiceClient, - PipelineServiceAsyncClient, -]) -def test_pipeline_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("client_class", [ - PipelineServiceClient, - PipelineServiceAsyncClient, -]) -def test_pipeline_service_client_service_account_always_use_jwt(client_class): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.PipelineServiceGrpcTransport, "grpc"), - (transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_pipeline_service_client_service_account_always_use_jwt_true(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - -@pytest.mark.parametrize("client_class", [ - PipelineServiceClient, - PipelineServiceAsyncClient, -]) -def test_pipeline_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_pipeline_service_client_get_transport_class(): - transport = PipelineServiceClient.get_transport_class() - available_transports = [ - transports.PipelineServiceGrpcTransport, - ] - assert 
transport in available_transports - - transport = PipelineServiceClient.get_transport_class("grpc") - assert transport == transports.PipelineServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc"), - (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(PipelineServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceClient)) -@mock.patch.object(PipelineServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceAsyncClient)) -def test_pipeline_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(PipelineServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(PipelineServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
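[Editor's note: the two "Unsupported" branches above and below correspond to a validation step in the client constructor: GOOGLE_API_USE_CLIENT_CERTIFICATE admits only true/false (ValueError otherwise), and GOOGLE_API_USE_MTLS_ENDPOINT only never/auto/always (MutualTLSChannelError otherwise). A condensed sketch; the exception class here is a local stand-in for google.auth.exceptions.MutualTLSChannelError, and the message wording is illustrative.]

    import os

    class MutualTLSChannelError(Exception):
        """Local stand-in for google.auth.exceptions.MutualTLSChannelError."""

    def read_mtls_env():
        use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
        if use_client_cert not in ("true", "false"):
            raise ValueError(
                "GOOGLE_API_USE_CLIENT_CERTIFICATE must be `true` or `false`"
            )
        use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
        if use_mtls_endpoint not in ("never", "auto", "always"):
            raise MutualTLSChannelError(
                "GOOGLE_API_USE_MTLS_ENDPOINT must be `never`, `auto` or `always`"
            )
        return use_client_cert == "true", use_mtls_endpoint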
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc", "true"), - (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc", "false"), - (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(PipelineServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceClient)) -@mock.patch.object(PipelineServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_pipeline_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc"), - (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_pipeline_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc"), - (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_pipeline_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
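[Editor's note: the three mock arrangements above reduce to a small decision table for "auto" mode: the mTLS endpoint and client cert are used only when GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" AND a cert source is actually available, whether passed explicitly or discovered via ADC; otherwise the default endpoint is used with no cert. As a hypothetical predicate:]

    def pick_endpoint(use_client_cert, cert_available, default_endpoint, mtls_endpoint):
        if use_client_cert and cert_available:
            return mtls_endpoint
        return default_endpoint

    assert pick_endpoint(True, True, "default", "mtls") == "mtls"
    assert pick_endpoint(True, False, "default", "mtls") == "default"
    assert pick_endpoint(False, True, "default", "mtls") == "default"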
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_pipeline_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.pipeline_service.transports.PipelineServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = PipelineServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_create_training_pipeline(transport: str = 'grpc', request_type=pipeline_service.CreateTrainingPipelineRequest): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_training_pipeline.TrainingPipeline( - name='name_value', - display_name='display_name_value', - training_task_definition='training_task_definition_value', - state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, - ) - response = client.create_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CreateTrainingPipelineRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_training_pipeline.TrainingPipeline) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.training_task_definition == 'training_task_definition_value' - assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED - - -def test_create_training_pipeline_from_dict(): - test_create_training_pipeline(request_type=dict) - - -def test_create_training_pipeline_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
-    with mock.patch.object(
-            type(client.transport.create_training_pipeline),
-            '__call__') as call:
-        client.create_training_pipeline()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.CreateTrainingPipelineRequest()
-
-
-@pytest.mark.asyncio
-async def test_create_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CreateTrainingPipelineRequest):
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_training_pipeline),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_training_pipeline.TrainingPipeline(
-            name='name_value',
-            display_name='display_name_value',
-            training_task_definition='training_task_definition_value',
-            state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
-        ))
-        response = await client.create_training_pipeline(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.CreateTrainingPipelineRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, gca_training_pipeline.TrainingPipeline)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.training_task_definition == 'training_task_definition_value'
-    assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
-
-
-@pytest.mark.asyncio
-async def test_create_training_pipeline_async_from_dict():
-    await test_create_training_pipeline_async(request_type=dict)
-
-
-def test_create_training_pipeline_field_headers():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = pipeline_service.CreateTrainingPipelineRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_training_pipeline),
-            '__call__') as call:
-        call.return_value = gca_training_pipeline.TrainingPipeline()
-        client.create_training_pipeline(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_create_training_pipeline_field_headers_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = pipeline_service.CreateTrainingPipelineRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_training_pipeline),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_training_pipeline.TrainingPipeline())
-        await client.create_training_pipeline(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_create_training_pipeline_flattened():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_training_pipeline),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = gca_training_pipeline.TrainingPipeline()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.create_training_pipeline(
-            parent='parent_value',
-            training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-        assert args[0].training_pipeline == gca_training_pipeline.TrainingPipeline(name='name_value')
-
-
-def test_create_training_pipeline_flattened_error():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.create_training_pipeline(
-            pipeline_service.CreateTrainingPipelineRequest(),
-            parent='parent_value',
-            training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'),
-        )
-
-
-@pytest.mark.asyncio
-async def test_create_training_pipeline_flattened_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_training_pipeline),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = gca_training_pipeline.TrainingPipeline()
-
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_training_pipeline.TrainingPipeline())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.create_training_pipeline(
-            parent='parent_value',
-            training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-        assert args[0].training_pipeline == gca_training_pipeline.TrainingPipeline(name='name_value')
-
-
-@pytest.mark.asyncio
-async def test_create_training_pipeline_flattened_error_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.create_training_pipeline(
-            pipeline_service.CreateTrainingPipelineRequest(),
-            parent='parent_value',
-            training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'),
-        )
-
-
-def test_get_training_pipeline(transport: str = 'grpc', request_type=pipeline_service.GetTrainingPipelineRequest):
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_training_pipeline),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = training_pipeline.TrainingPipeline(
-            name='name_value',
-            display_name='display_name_value',
-            training_task_definition='training_task_definition_value',
-            state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
-        )
-        response = client.get_training_pipeline(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.GetTrainingPipelineRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, training_pipeline.TrainingPipeline)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.training_task_definition == 'training_task_definition_value'
-    assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
-
-
-def test_get_training_pipeline_from_dict():
-    test_get_training_pipeline(request_type=dict)
-
-
-def test_get_training_pipeline_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_training_pipeline),
-            '__call__') as call:
-        client.get_training_pipeline()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.GetTrainingPipelineRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.GetTrainingPipelineRequest):
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_training_pipeline),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(training_pipeline.TrainingPipeline(
-            name='name_value',
-            display_name='display_name_value',
-            training_task_definition='training_task_definition_value',
-            state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
-        ))
-        response = await client.get_training_pipeline(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.GetTrainingPipelineRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, training_pipeline.TrainingPipeline)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.training_task_definition == 'training_task_definition_value'
-    assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
-
-
-@pytest.mark.asyncio
-async def test_get_training_pipeline_async_from_dict():
-    await test_get_training_pipeline_async(request_type=dict)
-
-
-def test_get_training_pipeline_field_headers():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = pipeline_service.GetTrainingPipelineRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_training_pipeline),
-            '__call__') as call:
-        call.return_value = training_pipeline.TrainingPipeline()
-        client.get_training_pipeline(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_get_training_pipeline_field_headers_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = pipeline_service.GetTrainingPipelineRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_training_pipeline),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(training_pipeline.TrainingPipeline())
-        await client.get_training_pipeline(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_get_training_pipeline_flattened():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_training_pipeline),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = training_pipeline.TrainingPipeline()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.get_training_pipeline(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_get_training_pipeline_flattened_error():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.get_training_pipeline(
-            pipeline_service.GetTrainingPipelineRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_get_training_pipeline_flattened_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_training_pipeline),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = training_pipeline.TrainingPipeline()
-
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(training_pipeline.TrainingPipeline())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.get_training_pipeline(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_get_training_pipeline_flattened_error_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.get_training_pipeline(
-            pipeline_service.GetTrainingPipelineRequest(),
-            name='name_value',
-        )
-
-
-def test_list_training_pipelines(transport: str = 'grpc', request_type=pipeline_service.ListTrainingPipelinesRequest):
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_training_pipelines),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = pipeline_service.ListTrainingPipelinesResponse(
-            next_page_token='next_page_token_value',
-        )
-        response = client.list_training_pipelines(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.ListTrainingPipelinesRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListTrainingPipelinesPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_training_pipelines_from_dict():
-    test_list_training_pipelines(request_type=dict)
-
-
-def test_list_training_pipelines_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_training_pipelines),
-            '__call__') as call:
-        client.list_training_pipelines()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.ListTrainingPipelinesRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_training_pipelines_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.ListTrainingPipelinesRequest):
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_training_pipelines),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListTrainingPipelinesResponse(
-            next_page_token='next_page_token_value',
-        ))
-        response = await client.list_training_pipelines(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.ListTrainingPipelinesRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListTrainingPipelinesAsyncPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_training_pipelines_async_from_dict():
-    await test_list_training_pipelines_async(request_type=dict)
-
-
-def test_list_training_pipelines_field_headers():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = pipeline_service.ListTrainingPipelinesRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_training_pipelines),
-            '__call__') as call:
-        call.return_value = pipeline_service.ListTrainingPipelinesResponse()
-        client.list_training_pipelines(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_training_pipelines_field_headers_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = pipeline_service.ListTrainingPipelinesRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_training_pipelines),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListTrainingPipelinesResponse())
-        await client.list_training_pipelines(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_list_training_pipelines_flattened():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_training_pipelines),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = pipeline_service.ListTrainingPipelinesResponse()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.list_training_pipelines(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-def test_list_training_pipelines_flattened_error():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.list_training_pipelines(
-            pipeline_service.ListTrainingPipelinesRequest(),
-            parent='parent_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_list_training_pipelines_flattened_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_training_pipelines),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = pipeline_service.ListTrainingPipelinesResponse()
-
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListTrainingPipelinesResponse())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.list_training_pipelines(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_training_pipelines_flattened_error_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.list_training_pipelines(
-            pipeline_service.ListTrainingPipelinesRequest(),
-            parent='parent_value',
-        )
-
-
-def test_list_training_pipelines_pager():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials,
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_training_pipelines),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            pipeline_service.ListTrainingPipelinesResponse(
-                training_pipelines=[
-                    training_pipeline.TrainingPipeline(),
-                    training_pipeline.TrainingPipeline(),
-                    training_pipeline.TrainingPipeline(),
-                ],
-                next_page_token='abc',
-            ),
-            pipeline_service.ListTrainingPipelinesResponse(
-                training_pipelines=[],
-                next_page_token='def',
-            ),
-            pipeline_service.ListTrainingPipelinesResponse(
-                training_pipelines=[
-                    training_pipeline.TrainingPipeline(),
-                ],
-                next_page_token='ghi',
-            ),
-            pipeline_service.ListTrainingPipelinesResponse(
-                training_pipelines=[
-                    training_pipeline.TrainingPipeline(),
-                    training_pipeline.TrainingPipeline(),
-                ],
-            ),
-            RuntimeError,
-        )
-
-        metadata = ()
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ('parent', ''),
-            )),
-        )
-        pager = client.list_training_pipelines(request={})
-
-        assert pager._metadata == metadata
-
-        results = [i for i in pager]
-        assert len(results) == 6
-        assert all(isinstance(i, training_pipeline.TrainingPipeline)
-                   for i in results)
-
-def test_list_training_pipelines_pages():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials,
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_training_pipelines),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            pipeline_service.ListTrainingPipelinesResponse(
-                training_pipelines=[
-                    training_pipeline.TrainingPipeline(),
-                    training_pipeline.TrainingPipeline(),
-                    training_pipeline.TrainingPipeline(),
-                ],
-                next_page_token='abc',
-            ),
-            pipeline_service.ListTrainingPipelinesResponse(
-                training_pipelines=[],
-                next_page_token='def',
-            ),
-            pipeline_service.ListTrainingPipelinesResponse(
-                training_pipelines=[
-                    training_pipeline.TrainingPipeline(),
-                ],
-                next_page_token='ghi',
-            ),
-            pipeline_service.ListTrainingPipelinesResponse(
-                training_pipelines=[
-                    training_pipeline.TrainingPipeline(),
-                    training_pipeline.TrainingPipeline(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = list(client.list_training_pipelines(request={}).pages)
-        for page_, token in zip(pages, ['abc','def','ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_training_pipelines_async_pager():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials,
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_training_pipelines),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            pipeline_service.ListTrainingPipelinesResponse(
-                training_pipelines=[
-                    training_pipeline.TrainingPipeline(),
-                    training_pipeline.TrainingPipeline(),
-                    training_pipeline.TrainingPipeline(),
-                ],
-                next_page_token='abc',
-            ),
-            pipeline_service.ListTrainingPipelinesResponse(
-                training_pipelines=[],
-                next_page_token='def',
-            ),
-            pipeline_service.ListTrainingPipelinesResponse(
-                training_pipelines=[
-                    training_pipeline.TrainingPipeline(),
-                ],
-                next_page_token='ghi',
-            ),
-            pipeline_service.ListTrainingPipelinesResponse(
-                training_pipelines=[
-                    training_pipeline.TrainingPipeline(),
-                    training_pipeline.TrainingPipeline(),
-                ],
-            ),
-            RuntimeError,
-        )
-        async_pager = await client.list_training_pipelines(request={},)
-        assert async_pager.next_page_token == 'abc'
-        responses = []
-        async for response in async_pager:
-            responses.append(response)
-
-        assert len(responses) == 6
-        assert all(isinstance(i, training_pipeline.TrainingPipeline)
-                   for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_training_pipelines_async_pages():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials,
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_training_pipelines),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            pipeline_service.ListTrainingPipelinesResponse(
-                training_pipelines=[
-                    training_pipeline.TrainingPipeline(),
-                    training_pipeline.TrainingPipeline(),
-                    training_pipeline.TrainingPipeline(),
-                ],
-                next_page_token='abc',
-            ),
-            pipeline_service.ListTrainingPipelinesResponse(
-                training_pipelines=[],
-                next_page_token='def',
-            ),
-            pipeline_service.ListTrainingPipelinesResponse(
-                training_pipelines=[
-                    training_pipeline.TrainingPipeline(),
-                ],
-                next_page_token='ghi',
-            ),
-            pipeline_service.ListTrainingPipelinesResponse(
-                training_pipelines=[
-                    training_pipeline.TrainingPipeline(),
-                    training_pipeline.TrainingPipeline(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = []
-        async for page_ in (await client.list_training_pipelines(request={})).pages:
-            pages.append(page_)
-        for page_, token in zip(pages, ['abc','def','ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-def test_delete_training_pipeline(transport: str = 'grpc', request_type=pipeline_service.DeleteTrainingPipelineRequest):
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_training_pipeline),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/spam')
-        response = client.delete_training_pipeline(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.DeleteTrainingPipelineRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, future.Future)
-
-
-def test_delete_training_pipeline_from_dict():
-    test_delete_training_pipeline(request_type=dict)
-
-
-def test_delete_training_pipeline_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_training_pipeline),
-            '__call__') as call:
-        client.delete_training_pipeline()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.DeleteTrainingPipelineRequest()
-
-
-@pytest.mark.asyncio
-async def test_delete_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.DeleteTrainingPipelineRequest):
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_training_pipeline),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        response = await client.delete_training_pipeline(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.DeleteTrainingPipelineRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, future.Future)
-
-
-@pytest.mark.asyncio
-async def test_delete_training_pipeline_async_from_dict():
-    await test_delete_training_pipeline_async(request_type=dict)
-
-
-def test_delete_training_pipeline_field_headers():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = pipeline_service.DeleteTrainingPipelineRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_training_pipeline),
-            '__call__') as call:
-        call.return_value = operations_pb2.Operation(name='operations/op')
-        client.delete_training_pipeline(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_delete_training_pipeline_field_headers_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = pipeline_service.DeleteTrainingPipelineRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_training_pipeline),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
-        await client.delete_training_pipeline(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_delete_training_pipeline_flattened():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_training_pipeline),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/op')
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.delete_training_pipeline(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_delete_training_pipeline_flattened_error():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.delete_training_pipeline(
-            pipeline_service.DeleteTrainingPipelineRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_delete_training_pipeline_flattened_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_training_pipeline),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/op')
-
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.delete_training_pipeline(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_training_pipeline_flattened_error_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.delete_training_pipeline(
-            pipeline_service.DeleteTrainingPipelineRequest(),
-            name='name_value',
-        )
-
-
-def test_cancel_training_pipeline(transport: str = 'grpc', request_type=pipeline_service.CancelTrainingPipelineRequest):
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_training_pipeline),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = None
-        response = client.cancel_training_pipeline(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.CancelTrainingPipelineRequest()
-
-    # Establish that the response is the type that we expect.
-    assert response is None
-
-
-def test_cancel_training_pipeline_from_dict():
-    test_cancel_training_pipeline(request_type=dict)
-
-
-def test_cancel_training_pipeline_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_training_pipeline),
-            '__call__') as call:
-        client.cancel_training_pipeline()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.CancelTrainingPipelineRequest()
-
-
-@pytest.mark.asyncio
-async def test_cancel_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CancelTrainingPipelineRequest):
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_training_pipeline),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
-        response = await client.cancel_training_pipeline(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.CancelTrainingPipelineRequest()
-
-    # Establish that the response is the type that we expect.
-    assert response is None
-
-
-@pytest.mark.asyncio
-async def test_cancel_training_pipeline_async_from_dict():
-    await test_cancel_training_pipeline_async(request_type=dict)
-
-
-def test_cancel_training_pipeline_field_headers():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = pipeline_service.CancelTrainingPipelineRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_training_pipeline),
-            '__call__') as call:
-        call.return_value = None
-        client.cancel_training_pipeline(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_cancel_training_pipeline_field_headers_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = pipeline_service.CancelTrainingPipelineRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_training_pipeline),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
-        await client.cancel_training_pipeline(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_cancel_training_pipeline_flattened():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_training_pipeline),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = None
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.cancel_training_pipeline(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_cancel_training_pipeline_flattened_error():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.cancel_training_pipeline(
-            pipeline_service.CancelTrainingPipelineRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_cancel_training_pipeline_flattened_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_training_pipeline),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = None
-
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.cancel_training_pipeline(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_cancel_training_pipeline_flattened_error_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.cancel_training_pipeline(
-            pipeline_service.CancelTrainingPipelineRequest(),
-            name='name_value',
-        )
-
-
-def test_create_pipeline_job(transport: str = 'grpc', request_type=pipeline_service.CreatePipelineJobRequest):
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_pipeline_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = gca_pipeline_job.PipelineJob(
-            name='name_value',
-            display_name='display_name_value',
-            state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
-            service_account='service_account_value',
-            network='network_value',
-        )
-        response = client.create_pipeline_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.CreatePipelineJobRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, gca_pipeline_job.PipelineJob)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
-    assert response.service_account == 'service_account_value'
-    assert response.network == 'network_value'
-
-
-def test_create_pipeline_job_from_dict():
-    test_create_pipeline_job(request_type=dict)
-
-
-def test_create_pipeline_job_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_pipeline_job),
-            '__call__') as call:
-        client.create_pipeline_job()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.CreatePipelineJobRequest()
-
-
-@pytest.mark.asyncio
-async def test_create_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CreatePipelineJobRequest):
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_pipeline_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_pipeline_job.PipelineJob(
-            name='name_value',
-            display_name='display_name_value',
-            state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
-            service_account='service_account_value',
-            network='network_value',
-        ))
-        response = await client.create_pipeline_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.CreatePipelineJobRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, gca_pipeline_job.PipelineJob)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
-    assert response.service_account == 'service_account_value'
-    assert response.network == 'network_value'
-
-
-@pytest.mark.asyncio
-async def test_create_pipeline_job_async_from_dict():
-    await test_create_pipeline_job_async(request_type=dict)
-
-
-def test_create_pipeline_job_field_headers():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = pipeline_service.CreatePipelineJobRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_pipeline_job),
-            '__call__') as call:
-        call.return_value = gca_pipeline_job.PipelineJob()
-        client.create_pipeline_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_create_pipeline_job_field_headers_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = pipeline_service.CreatePipelineJobRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_pipeline_job),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_pipeline_job.PipelineJob())
-        await client.create_pipeline_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_create_pipeline_job_flattened():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_pipeline_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = gca_pipeline_job.PipelineJob()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.create_pipeline_job(
-            parent='parent_value',
-            pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'),
-            pipeline_job_id='pipeline_job_id_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-        assert args[0].pipeline_job == gca_pipeline_job.PipelineJob(name='name_value')
-        assert args[0].pipeline_job_id == 'pipeline_job_id_value'
-
-
-def test_create_pipeline_job_flattened_error():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.create_pipeline_job(
-            pipeline_service.CreatePipelineJobRequest(),
-            parent='parent_value',
-            pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'),
-            pipeline_job_id='pipeline_job_id_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_create_pipeline_job_flattened_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_pipeline_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = gca_pipeline_job.PipelineJob()
-
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_pipeline_job.PipelineJob())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.create_pipeline_job(
-            parent='parent_value',
-            pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'),
-            pipeline_job_id='pipeline_job_id_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-        assert args[0].pipeline_job == gca_pipeline_job.PipelineJob(name='name_value')
-        assert args[0].pipeline_job_id == 'pipeline_job_id_value'
-
-
-@pytest.mark.asyncio
-async def test_create_pipeline_job_flattened_error_async():
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.create_pipeline_job(
-            pipeline_service.CreatePipelineJobRequest(),
-            parent='parent_value',
-            pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'),
-            pipeline_job_id='pipeline_job_id_value',
-        )
-
-
-def test_get_pipeline_job(transport: str = 'grpc', request_type=pipeline_service.GetPipelineJobRequest):
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_pipeline_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = pipeline_job.PipelineJob(
-            name='name_value',
-            display_name='display_name_value',
-            state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
-            service_account='service_account_value',
-            network='network_value',
-        )
-        response = client.get_pipeline_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.GetPipelineJobRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pipeline_job.PipelineJob)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
-    assert response.service_account == 'service_account_value'
-    assert response.network == 'network_value'
-
-
-def test_get_pipeline_job_from_dict():
-    test_get_pipeline_job(request_type=dict)
-
-
-def test_get_pipeline_job_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_pipeline_job),
-            '__call__') as call:
-        client.get_pipeline_job()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.GetPipelineJobRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.GetPipelineJobRequest):
-    client = PipelineServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_pipeline_job),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(pipeline_job.PipelineJob(
-            name='name_value',
-            display_name='display_name_value',
-            state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
-            service_account='service_account_value',
-            network='network_value',
-        ))
-        response = await client.get_pipeline_job(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == pipeline_service.GetPipelineJobRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pipeline_job.PipelineJob)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED
-    assert response.service_account == 'service_account_value'
-    assert response.network == 'network_value'
-
-
-@pytest.mark.asyncio
-async def test_get_pipeline_job_async_from_dict():
-    await test_get_pipeline_job_async(request_type=dict)
-
-
-def test_get_pipeline_job_field_headers():
-    client = PipelineServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
- request = pipeline_service.GetPipelineJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_pipeline_job), - '__call__') as call: - call.return_value = pipeline_job.PipelineJob() - client.get_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_pipeline_job_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.GetPipelineJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_pipeline_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_job.PipelineJob()) - await client.get_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_pipeline_job_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = pipeline_job.PipelineJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_pipeline_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_get_pipeline_job_flattened_error(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_pipeline_job( - pipeline_service.GetPipelineJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_pipeline_job_flattened_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = pipeline_job.PipelineJob() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_job.PipelineJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
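-        # Each keyword argument below should be copied into the
-        # corresponding field of the request message that the client
-        # builds internally.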
- response = await client.get_pipeline_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_get_pipeline_job_flattened_error_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_pipeline_job( - pipeline_service.GetPipelineJobRequest(), - name='name_value', - ) - - -def test_list_pipeline_jobs(transport: str = 'grpc', request_type=pipeline_service.ListPipelineJobsRequest): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = pipeline_service.ListPipelineJobsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_pipeline_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.ListPipelineJobsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListPipelineJobsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_pipeline_jobs_from_dict(): - test_list_pipeline_jobs(request_type=dict) - - -def test_list_pipeline_jobs_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - client.list_pipeline_jobs() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.ListPipelineJobsRequest() - - -@pytest.mark.asyncio -async def test_list_pipeline_jobs_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.ListPipelineJobsRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListPipelineJobsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_pipeline_jobs(request) - - # Establish that the underlying gRPC stub method was called. 
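-        # (The async stub wraps the response, so only a truthy call
-        # count is asserted here, rather than the exact count of one
-        # checked in the synchronous variant.)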
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.ListPipelineJobsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListPipelineJobsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_pipeline_jobs_async_from_dict(): - await test_list_pipeline_jobs_async(request_type=dict) - - -def test_list_pipeline_jobs_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.ListPipelineJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - call.return_value = pipeline_service.ListPipelineJobsResponse() - client.list_pipeline_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_pipeline_jobs_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.ListPipelineJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListPipelineJobsResponse()) - await client.list_pipeline_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_pipeline_jobs_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = pipeline_service.ListPipelineJobsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_pipeline_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -def test_list_pipeline_jobs_flattened_error(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
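-    # Mixing a request object with flattened keyword arguments is
-    # ambiguous, so the client raises ValueError before any RPC is
-    # attempted.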
- with pytest.raises(ValueError): - client.list_pipeline_jobs( - pipeline_service.ListPipelineJobsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_pipeline_jobs_flattened_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = pipeline_service.ListPipelineJobsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListPipelineJobsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_pipeline_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -@pytest.mark.asyncio -async def test_list_pipeline_jobs_flattened_error_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_pipeline_jobs( - pipeline_service.ListPipelineJobsRequest(), - parent='parent_value', - ) - - -def test_list_pipeline_jobs_pager(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - ], - next_page_token='abc', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[], - next_page_token='def', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - ], - next_page_token='ghi', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_pipeline_jobs(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, pipeline_job.PipelineJob) - for i in results) - -def test_list_pipeline_jobs_pages(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - ], - next_page_token='abc', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[], - next_page_token='def', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - ], - next_page_token='ghi', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - ], - ), - RuntimeError, - ) - pages = list(client.list_pipeline_jobs(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_pipeline_jobs_async_pager(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - ], - next_page_token='abc', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[], - next_page_token='def', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - ], - next_page_token='ghi', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_pipeline_jobs(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, pipeline_job.PipelineJob) - for i in responses) - -@pytest.mark.asyncio -async def test_list_pipeline_jobs_async_pages(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - ], - next_page_token='abc', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[], - next_page_token='def', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - ], - next_page_token='ghi', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_pipeline_jobs(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_pipeline_job(transport: str = 'grpc', request_type=pipeline_service.DeletePipelineJobRequest): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.DeletePipelineJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_pipeline_job_from_dict(): - test_delete_pipeline_job(request_type=dict) - - -def test_delete_pipeline_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_pipeline_job), - '__call__') as call: - client.delete_pipeline_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.DeletePipelineJobRequest() - - -@pytest.mark.asyncio -async def test_delete_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.DeletePipelineJobRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.DeletePipelineJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_pipeline_job_async_from_dict(): - await test_delete_pipeline_job_async(request_type=dict) - - -def test_delete_pipeline_job_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.DeletePipelineJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_pipeline_job), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_pipeline_job_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.DeletePipelineJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_pipeline_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_pipeline_job_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_pipeline_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_delete_pipeline_job_flattened_error(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.delete_pipeline_job( - pipeline_service.DeletePipelineJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_pipeline_job_flattened_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_pipeline_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_delete_pipeline_job_flattened_error_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_pipeline_job( - pipeline_service.DeletePipelineJobRequest(), - name='name_value', - ) - - -def test_cancel_pipeline_job(transport: str = 'grpc', request_type=pipeline_service.CancelPipelineJobRequest): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.cancel_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CancelPipelineJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_cancel_pipeline_job_from_dict(): - test_cancel_pipeline_job(request_type=dict) - - -def test_cancel_pipeline_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
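-    # Even with no arguments, the client must still send a well-formed
-    # (all-default) CancelPipelineJobRequest.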
- with mock.patch.object( - type(client.transport.cancel_pipeline_job), - '__call__') as call: - client.cancel_pipeline_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CancelPipelineJobRequest() - - -@pytest.mark.asyncio -async def test_cancel_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CancelPipelineJobRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.cancel_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CancelPipelineJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_cancel_pipeline_job_async_from_dict(): - await test_cancel_pipeline_job_async(request_type=dict) - - -def test_cancel_pipeline_job_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.CancelPipelineJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_pipeline_job), - '__call__') as call: - call.return_value = None - client.cancel_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_cancel_pipeline_job_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.CancelPipelineJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_pipeline_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.cancel_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_cancel_pipeline_job_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.cancel_pipeline_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_cancel_pipeline_job_flattened_error(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.cancel_pipeline_job( - pipeline_service.CancelPipelineJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_cancel_pipeline_job_flattened_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.cancel_pipeline_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_cancel_pipeline_job_flattened_error_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.cancel_pipeline_job( - pipeline_service.CancelPipelineJobRequest(), - name='name_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.PipelineServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.PipelineServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = PipelineServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. 
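-    # A pre-built transport already carries its own credentials and
-    # scopes, so client options that would re-configure it are rejected.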
- transport = transports.PipelineServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = PipelineServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.PipelineServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = PipelineServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.PipelineServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.PipelineServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.PipelineServiceGrpcTransport, - transports.PipelineServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.PipelineServiceGrpcTransport, - ) - -def test_pipeline_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.PipelineServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_pipeline_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1beta1.services.pipeline_service.transports.PipelineServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.PipelineServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
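-    # The base transport is abstract; the concrete gRPC and
-    # gRPC-asyncio transports override each of these method stubs.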
- methods = ( - 'create_training_pipeline', - 'get_training_pipeline', - 'list_training_pipelines', - 'delete_training_pipeline', - 'cancel_training_pipeline', - 'create_pipeline_job', - 'get_pipeline_job', - 'list_pipeline_jobs', - 'delete_pipeline_job', - 'cancel_pipeline_job', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -@requires_google_auth_gte_1_25_0 -def test_pipeline_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.pipeline_service.transports.PipelineServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.PipelineServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@requires_google_auth_lt_1_25_0 -def test_pipeline_service_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.pipeline_service.transports.PipelineServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.PipelineServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - ), - quota_project_id="octopus", - ) - - -def test_pipeline_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.pipeline_service.transports.PipelineServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.PipelineServiceTransport() - adc.assert_called_once() - - -@requires_google_auth_gte_1_25_0 -def test_pipeline_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - PipelineServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@requires_google_auth_lt_1_25_0 -def test_pipeline_service_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. 
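-    # google.auth.default() is patched so the test never reads real
-    # Application Default Credentials from the environment.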
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - PipelineServiceClient() - adc.assert_called_once_with( - scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.PipelineServiceGrpcTransport, - transports.PipelineServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_gte_1_25_0 -def test_pipeline_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.PipelineServiceGrpcTransport, - transports.PipelineServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_pipeline_service_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with(scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.PipelineServiceGrpcTransport, grpc_helpers), - (transports.PipelineServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_pipeline_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.PipelineServiceGrpcTransport, transports.PipelineServiceGrpcAsyncIOTransport]) -def test_pipeline_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. 
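-    # The two cases below cover the supported mTLS inputs: an explicit
-    # ssl_channel_credentials object, or a client_cert_source callback
-    # from which grpc.ssl_channel_credentials() is constructed.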
- with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_pipeline_service_host_no_port(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_pipeline_service_host_with_port(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_pipeline_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.PipelineServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_pipeline_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.PipelineServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. 
-@pytest.mark.parametrize("transport_class", [transports.PipelineServiceGrpcTransport, transports.PipelineServiceGrpcAsyncIOTransport]) -def test_pipeline_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.PipelineServiceGrpcTransport, transports.PipelineServiceGrpcAsyncIOTransport]) -def test_pipeline_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_pipeline_service_grpc_lro_client(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. 
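-    # The transport caches the operations client on first access, so an
-    # identity comparison is sufficient here.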
- assert transport.operations_client is transport.operations_client - - -def test_pipeline_service_grpc_lro_async_client(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_artifact_path(): - project = "squid" - location = "clam" - metadata_store = "whelk" - artifact = "octopus" - expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(project=project, location=location, metadata_store=metadata_store, artifact=artifact, ) - actual = PipelineServiceClient.artifact_path(project, location, metadata_store, artifact) - assert expected == actual - - -def test_parse_artifact_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - "metadata_store": "cuttlefish", - "artifact": "mussel", - } - path = PipelineServiceClient.artifact_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_artifact_path(path) - assert expected == actual - -def test_context_path(): - project = "winkle" - location = "nautilus" - metadata_store = "scallop" - context = "abalone" - expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(project=project, location=location, metadata_store=metadata_store, context=context, ) - actual = PipelineServiceClient.context_path(project, location, metadata_store, context) - assert expected == actual - - -def test_parse_context_path(): - expected = { - "project": "squid", - "location": "clam", - "metadata_store": "whelk", - "context": "octopus", - } - path = PipelineServiceClient.context_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_context_path(path) - assert expected == actual - -def test_custom_job_path(): - project = "oyster" - location = "nudibranch" - custom_job = "cuttlefish" - expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) - actual = PipelineServiceClient.custom_job_path(project, location, custom_job) - assert expected == actual - - -def test_parse_custom_job_path(): - expected = { - "project": "mussel", - "location": "winkle", - "custom_job": "nautilus", - } - path = PipelineServiceClient.custom_job_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_custom_job_path(path) - assert expected == actual - -def test_endpoint_path(): - project = "scallop" - location = "abalone" - endpoint = "squid" - expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - actual = PipelineServiceClient.endpoint_path(project, location, endpoint) - assert expected == actual - - -def test_parse_endpoint_path(): - expected = { - "project": "clam", - "location": "whelk", - "endpoint": "octopus", - } - path = PipelineServiceClient.endpoint_path(**expected) - - # Check that the path construction is reversible. 
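-    # Each parse_* helper inverts the corresponding *_path template by
-    # matching the resource name against the template's pattern.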
- actual = PipelineServiceClient.parse_endpoint_path(path) - assert expected == actual - -def test_execution_path(): - project = "oyster" - location = "nudibranch" - metadata_store = "cuttlefish" - execution = "mussel" - expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(project=project, location=location, metadata_store=metadata_store, execution=execution, ) - actual = PipelineServiceClient.execution_path(project, location, metadata_store, execution) - assert expected == actual - - -def test_parse_execution_path(): - expected = { - "project": "winkle", - "location": "nautilus", - "metadata_store": "scallop", - "execution": "abalone", - } - path = PipelineServiceClient.execution_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_execution_path(path) - assert expected == actual - -def test_model_path(): - project = "squid" - location = "clam" - model = "whelk" - expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - actual = PipelineServiceClient.model_path(project, location, model) - assert expected == actual - - -def test_parse_model_path(): - expected = { - "project": "octopus", - "location": "oyster", - "model": "nudibranch", - } - path = PipelineServiceClient.model_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_model_path(path) - assert expected == actual - -def test_network_path(): - project = "cuttlefish" - network = "mussel" - expected = "projects/{project}/global/networks/{network}".format(project=project, network=network, ) - actual = PipelineServiceClient.network_path(project, network) - assert expected == actual - - -def test_parse_network_path(): - expected = { - "project": "winkle", - "network": "nautilus", - } - path = PipelineServiceClient.network_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_network_path(path) - assert expected == actual - -def test_pipeline_job_path(): - project = "scallop" - location = "abalone" - pipeline_job = "squid" - expected = "projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}".format(project=project, location=location, pipeline_job=pipeline_job, ) - actual = PipelineServiceClient.pipeline_job_path(project, location, pipeline_job) - assert expected == actual - - -def test_parse_pipeline_job_path(): - expected = { - "project": "clam", - "location": "whelk", - "pipeline_job": "octopus", - } - path = PipelineServiceClient.pipeline_job_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_pipeline_job_path(path) - assert expected == actual - -def test_training_pipeline_path(): - project = "oyster" - location = "nudibranch" - training_pipeline = "cuttlefish" - expected = "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, ) - actual = PipelineServiceClient.training_pipeline_path(project, location, training_pipeline) - assert expected == actual - - -def test_parse_training_pipeline_path(): - expected = { - "project": "mussel", - "location": "winkle", - "training_pipeline": "nautilus", - } - path = PipelineServiceClient.training_pipeline_path(**expected) - - # Check that the path construction is reversible. 
- actual = PipelineServiceClient.parse_training_pipeline_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "scallop" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = PipelineServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "abalone", - } - path = PipelineServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "squid" - expected = "folders/{folder}".format(folder=folder, ) - actual = PipelineServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "clam", - } - path = PipelineServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "whelk" - expected = "organizations/{organization}".format(organization=organization, ) - actual = PipelineServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "octopus", - } - path = PipelineServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "oyster" - expected = "projects/{project}".format(project=project, ) - actual = PipelineServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "nudibranch", - } - path = PipelineServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "cuttlefish" - location = "mussel" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = PipelineServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "winkle", - "location": "nautilus", - } - path = PipelineServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = PipelineServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.PipelineServiceTransport, '_prep_wrapped_messages') as prep: - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.PipelineServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = PipelineServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py deleted file mode 100644 index 22e6da9f92..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py +++ /dev/null @@ -1,1426 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.prediction_service import PredictionServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.prediction_service import PredictionServiceClient -from google.cloud.aiplatform_v1beta1.services.prediction_service import transports -from google.cloud.aiplatform_v1beta1.services.prediction_service.transports.base import _GOOGLE_AUTH_VERSION -from google.cloud.aiplatform_v1beta1.types import explanation -from google.cloud.aiplatform_v1beta1.types import prediction_service -from google.oauth2 import service_account -from google.protobuf import struct_pb2 # type: ignore -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). 
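
For context, the requires_google_auth_* markers defined next are the standard packaging + pytest.mark.skipif version gate. A minimal self-contained sketch of the pattern, with a hypothetical installed_version standing in for the real _GOOGLE_AUTH_VERSION constant:

    import packaging.version
    import pytest

    installed_version = "1.24.0"  # hypothetical; the deleted tests read _GOOGLE_AUTH_VERSION

    # Skip unless the installed dependency is at least 1.25.0.
    requires_gte_1_25_0 = pytest.mark.skipif(
        packaging.version.parse(installed_version) < packaging.version.parse("1.25.0"),
        reason="requires google-auth >= 1.25.0",
    )

    @requires_gte_1_25_0
    def test_new_auth_behavior():
        assert True
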
-requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert PredictionServiceClient._get_default_mtls_endpoint(None) is None - assert PredictionServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert PredictionServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert PredictionServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert PredictionServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert PredictionServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - PredictionServiceClient, - PredictionServiceAsyncClient, -]) -def test_prediction_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("client_class", [ - PredictionServiceClient, - PredictionServiceAsyncClient, -]) -def test_prediction_service_client_service_account_always_use_jwt(client_class): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.PredictionServiceGrpcTransport, "grpc"), - (transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_prediction_service_client_service_account_always_use_jwt_true(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - -@pytest.mark.parametrize("client_class", [ - PredictionServiceClient, - PredictionServiceAsyncClient, -]) -def test_prediction_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with 
mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_prediction_service_client_get_transport_class(): - transport = PredictionServiceClient.get_transport_class() - available_transports = [ - transports.PredictionServiceGrpcTransport, - ] - assert transport in available_transports - - transport = PredictionServiceClient.get_transport_class("grpc") - assert transport == transports.PredictionServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) -@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) -def test_prediction_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(PredictionServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(PredictionServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", "true"), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", "false"), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) -@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_prediction_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
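
As an aside, the endpoint autoswitch exercised here reduces to a small decision table. A minimal sketch under the semantics spelled out in the comments above, with cert_available folding in both GOOGLE_API_USE_CLIENT_CERTIFICATE and whether a client certificate actually exists (illustrative only, not the client's real implementation, which raises MutualTLSChannelError rather than ValueError):

    def select_endpoint(use_mtls_env, cert_available, default_endpoint, mtls_endpoint):
        # "never" -> plain endpoint, "always" -> mTLS endpoint,
        # "auto" -> mTLS endpoint only when a client certificate is usable.
        if use_mtls_env == "never":
            return default_endpoint
        if use_mtls_env == "always":
            return mtls_endpoint
        if use_mtls_env == "auto":
            return mtls_endpoint if cert_available else default_endpoint
        raise ValueError("unsupported GOOGLE_API_USE_MTLS_ENDPOINT value")

    assert select_endpoint(
        "auto", True, "aiplatform.googleapis.com", "aiplatform.mtls.googleapis.com"
    ) == "aiplatform.mtls.googleapis.com"
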
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_prediction_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. 
- options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_prediction_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. - options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_prediction_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.prediction_service.transports.PredictionServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = PredictionServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_predict(transport: str = 'grpc', request_type=prediction_service.PredictRequest): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = prediction_service.PredictResponse( - deployed_model_id='deployed_model_id_value', - ) - response = client.predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.PredictRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, prediction_service.PredictResponse) - assert response.deployed_model_id == 'deployed_model_id_value' - - -def test_predict_from_dict(): - test_predict(request_type=dict) - - -def test_predict_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - client.predict() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.PredictRequest() - - -@pytest.mark.asyncio -async def test_predict_async(transport: str = 'grpc_asyncio', request_type=prediction_service.PredictRequest): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse( - deployed_model_id='deployed_model_id_value', - )) - response = await client.predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.PredictRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, prediction_service.PredictResponse) - assert response.deployed_model_id == 'deployed_model_id_value' - - -@pytest.mark.asyncio -async def test_predict_async_from_dict(): - await test_predict_async(request_type=dict) - - -def test_predict_field_headers(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = prediction_service.PredictRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - call.return_value = prediction_service.PredictResponse() - client.predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_predict_field_headers_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = prediction_service.PredictRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse()) - await client.predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
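
The assertion just below shows the concrete shape of the routing header: a single metadata entry whose value is a key=value rendering of the request's resource field. A rough sketch of that convention (hypothetical helper; real GAPIC clients also URL-encode the values):

    def request_params_metadata(**fields):
        # Build the ('x-goog-request-params', 'k1=v1&k2=v2') metadata pair
        # that lets the server route the request by resource name.
        params = "&".join("{}={}".format(k, v) for k, v in fields.items())
        return ("x-goog-request-params", params)

    assert request_params_metadata(endpoint="endpoint/value") == (
        "x-goog-request-params",
        "endpoint=endpoint/value",
    )
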
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -def test_predict_flattened(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = prediction_service.PredictResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.predict( - endpoint='endpoint_value', - instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], - parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].endpoint == 'endpoint_value' - assert args[0].instances == [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)] - assert args[0].parameters == struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE) - - -def test_predict_flattened_error(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.predict( - prediction_service.PredictRequest(), - endpoint='endpoint_value', - instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], - parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), - ) - - -@pytest.mark.asyncio -async def test_predict_flattened_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = prediction_service.PredictResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.predict( - endpoint='endpoint_value', - instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], - parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].endpoint == 'endpoint_value' - assert args[0].instances == [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)] - assert args[0].parameters == struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE) - - -@pytest.mark.asyncio -async def test_predict_flattened_error_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.predict( - prediction_service.PredictRequest(), - endpoint='endpoint_value', - instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], - parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), - ) - - -def test_explain(transport: str = 'grpc', request_type=prediction_service.ExplainRequest): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.explain), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = prediction_service.ExplainResponse( - deployed_model_id='deployed_model_id_value', - ) - response = client.explain(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.ExplainRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, prediction_service.ExplainResponse) - assert response.deployed_model_id == 'deployed_model_id_value' - - -def test_explain_from_dict(): - test_explain(request_type=dict) - - -def test_explain_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.explain), - '__call__') as call: - client.explain() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.ExplainRequest() - - -@pytest.mark.asyncio -async def test_explain_async(transport: str = 'grpc_asyncio', request_type=prediction_service.ExplainRequest): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.explain), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.ExplainResponse( - deployed_model_id='deployed_model_id_value', - )) - response = await client.explain(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.ExplainRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, prediction_service.ExplainResponse) - assert response.deployed_model_id == 'deployed_model_id_value' - - -@pytest.mark.asyncio -async def test_explain_async_from_dict(): - await test_explain_async(request_type=dict) - - -def test_explain_field_headers(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = prediction_service.ExplainRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.explain), - '__call__') as call: - call.return_value = prediction_service.ExplainResponse() - client.explain(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_explain_field_headers_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = prediction_service.ExplainRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.explain), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.ExplainResponse()) - await client.explain(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -def test_explain_flattened(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.explain), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = prediction_service.ExplainResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.explain( - endpoint='endpoint_value', - instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], - parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), - deployed_model_id='deployed_model_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].endpoint == 'endpoint_value' - assert args[0].instances == [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)] - assert args[0].parameters == struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE) - assert args[0].deployed_model_id == 'deployed_model_id_value' - - -def test_explain_flattened_error(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.explain( - prediction_service.ExplainRequest(), - endpoint='endpoint_value', - instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], - parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), - deployed_model_id='deployed_model_id_value', - ) - - -@pytest.mark.asyncio -async def test_explain_flattened_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.explain), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = prediction_service.ExplainResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.ExplainResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.explain( - endpoint='endpoint_value', - instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], - parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), - deployed_model_id='deployed_model_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].endpoint == 'endpoint_value' - assert args[0].instances == [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)] - assert args[0].parameters == struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE) - assert args[0].deployed_model_id == 'deployed_model_id_value' - - -@pytest.mark.asyncio -async def test_explain_flattened_error_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.explain( - prediction_service.ExplainRequest(), - endpoint='endpoint_value', - instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], - parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), - deployed_model_id='deployed_model_id_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. 
- transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = PredictionServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = PredictionServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = PredictionServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.PredictionServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.PredictionServiceGrpcTransport, - transports.PredictionServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.PredictionServiceGrpcTransport, - ) - -def test_prediction_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.PredictionServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_prediction_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1beta1.services.prediction_service.transports.PredictionServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.PredictionServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
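
The pattern being pinned down here is an abstract transport: the base class defines the RPC surface and nothing else, so a concrete transport that forgets to override a method fails loudly. A minimal sketch (illustrative, not the generated base class):

    class BaseTransportSketch:
        # Concrete transports (gRPC, gRPC asyncio) override these with real
        # stubs; the base class refuses to perform any I/O itself.
        def predict(self, request):
            raise NotImplementedError()

        def explain(self, request):
            raise NotImplementedError()
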
- methods = ( - 'predict', - 'explain', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - -@requires_google_auth_gte_1_25_0 -def test_prediction_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.PredictionServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@requires_google_auth_lt_1_25_0 -def test_prediction_service_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.PredictionServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - ), - quota_project_id="octopus", - ) - - -def test_prediction_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.PredictionServiceTransport() - adc.assert_called_once() - - -@requires_google_auth_gte_1_25_0 -def test_prediction_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - PredictionServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@requires_google_auth_lt_1_25_0 -def test_prediction_service_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - PredictionServiceClient() - adc.assert_called_once_with( - scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.PredictionServiceGrpcTransport, - transports.PredictionServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_gte_1_25_0 -def test_prediction_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.PredictionServiceGrpcTransport, - transports.PredictionServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_prediction_service_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with(scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.PredictionServiceGrpcTransport, grpc_helpers), - (transports.PredictionServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_prediction_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) -def test_prediction_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. 
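
For reference, the mTLS plumbing under test turns a zero-argument callback returning (certificate_chain, private_key) PEM bytes, like client_cert_source_callback above, into gRPC channel credentials. A minimal sketch with a hypothetical helper name, using the public grpc API the test asserts against:

    import grpc

    def channel_creds_from_cert_source(cert_source):
        cert_bytes, key_bytes = cert_source()
        # Matches the assertion below: the cert/key pair is handed to
        # grpc.ssl_channel_credentials as certificate_chain/private_key.
        return grpc.ssl_channel_credentials(
            certificate_chain=cert_bytes,
            private_key=key_bytes,
        )
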
- with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_prediction_service_host_no_port(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_prediction_service_host_with_port(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_prediction_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.PredictionServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_prediction_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.PredictionServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. 
-@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) -def test_prediction_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) -def test_prediction_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_endpoint_path(): - project = "squid" - location = "clam" - endpoint = "whelk" - expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - actual = PredictionServiceClient.endpoint_path(project, location, endpoint) - assert expected == actual - - -def test_parse_endpoint_path(): - expected = { - "project": "octopus", - "location": "oyster", - "endpoint": "nudibranch", - } - path = PredictionServiceClient.endpoint_path(**expected) - - # Check that the path construction is reversible. 
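
Concretely, "reversible" means parse_endpoint_path inverts endpoint_path. A quick illustration using the same helpers these tests exercise:

    path = PredictionServiceClient.endpoint_path("my-project", "us-central1", "my-endpoint")
    # path == "projects/my-project/locations/us-central1/endpoints/my-endpoint"
    assert PredictionServiceClient.parse_endpoint_path(path) == {
        "project": "my-project",
        "location": "us-central1",
        "endpoint": "my-endpoint",
    }
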
- actual = PredictionServiceClient.parse_endpoint_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "cuttlefish" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = PredictionServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "mussel", - } - path = PredictionServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = PredictionServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "winkle" - expected = "folders/{folder}".format(folder=folder, ) - actual = PredictionServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "nautilus", - } - path = PredictionServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = PredictionServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "scallop" - expected = "organizations/{organization}".format(organization=organization, ) - actual = PredictionServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "abalone", - } - path = PredictionServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = PredictionServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "squid" - expected = "projects/{project}".format(project=project, ) - actual = PredictionServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "clam", - } - path = PredictionServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = PredictionServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "whelk" - location = "octopus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = PredictionServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - } - path = PredictionServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = PredictionServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.PredictionServiceTransport, '_prep_wrapped_messages') as prep: - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.PredictionServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = PredictionServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py deleted file mode 100644 index 8fcec601da..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py +++ /dev/null @@ -1,2346 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import SpecialistPoolServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import SpecialistPoolServiceClient -from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import pagers -from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import transports -from google.cloud.aiplatform_v1beta1.services.specialist_pool_service.transports.base import _GOOGLE_AUTH_VERSION -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.cloud.aiplatform_v1beta1.types import specialist_pool -from google.cloud.aiplatform_v1beta1.types import specialist_pool as gca_specialist_pool -from google.cloud.aiplatform_v1beta1.types import specialist_pool_service -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - 
Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). -requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert SpecialistPoolServiceClient._get_default_mtls_endpoint(None) is None - assert SpecialistPoolServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert SpecialistPoolServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert SpecialistPoolServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert SpecialistPoolServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert SpecialistPoolServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - SpecialistPoolServiceClient, - SpecialistPoolServiceAsyncClient, -]) -def test_specialist_pool_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("client_class", [ - SpecialistPoolServiceClient, - SpecialistPoolServiceAsyncClient, -]) -def test_specialist_pool_service_client_service_account_always_use_jwt(client_class): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.SpecialistPoolServiceGrpcTransport, "grpc"), - (transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_specialist_pool_service_client_service_account_always_use_jwt_true(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - -@pytest.mark.parametrize("client_class", [ - 
SpecialistPoolServiceClient, - SpecialistPoolServiceAsyncClient, -]) -def test_specialist_pool_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_specialist_pool_service_client_get_transport_class(): - transport = SpecialistPoolServiceClient.get_transport_class() - available_transports = [ - transports.SpecialistPoolServiceGrpcTransport, - ] - assert transport in available_transports - - transport = SpecialistPoolServiceClient.get_transport_class("grpc") - assert transport == transports.SpecialistPoolServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc"), - (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(SpecialistPoolServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceClient)) -@mock.patch.object(SpecialistPoolServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceAsyncClient)) -def test_specialist_pool_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(SpecialistPoolServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(SpecialistPoolServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". 
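# [sketch] The "never"/"always"/unsupported cases around this point all
# exercise a single endpoint-selection rule. A minimal, self-contained
# illustration of that rule as these assertions describe it -- hypothetical
# helper names, not the generated client's internals:
from google.auth.exceptions import MutualTLSChannelError

def _sketch_select_endpoint(api_endpoint, use_mtls_env, default_endpoint, default_mtls_endpoint):
    """Pick the host to dial, per GOOGLE_API_USE_MTLS_ENDPOINT semantics."""
    if api_endpoint is not None:
        return api_endpoint  # an explicit api_endpoint always wins
    if use_mtls_env == "never":
        return default_endpoint
    if use_mtls_env == "always":
        return default_mtls_endpoint
    if use_mtls_env == "auto":
        # "auto" upgrades to the mTLS endpoint only when a client
        # certificate is available; the cert check is omitted here.
        return default_endpoint
    raise MutualTLSChannelError(f"unsupported value: {use_mtls_env}")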
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc", "true"), - (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc", "false"), - (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(SpecialistPoolServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceClient)) -@mock.patch.object(SpecialistPoolServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_specialist_pool_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
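# [sketch] A minimal rendering of the "auto" branch that
# test_specialist_pool_service_client_mtls_env_auto walks through, assuming
# only what it asserts: the client certificate is honored when
# GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and a cert source exists;
# otherwise the plain endpoint is used. Hypothetical helper, illustrative only.
def _sketch_auto_mtls(use_client_cert_env, cert_source, default_endpoint, default_mtls_endpoint):
    if use_client_cert_env == "true" and cert_source is not None:
        return default_mtls_endpoint, cert_source
    return default_endpoint, None

# e.g. _sketch_auto_mtls("false", client_cert_source_callback, ep, mtls_ep)
# -> (ep, None), matching the use_client_cert_env == "false" expectations.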
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc"), - (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_specialist_pool_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. 
- options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc"), - (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_specialist_pool_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. - options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_specialist_pool_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.specialist_pool_service.transports.SpecialistPoolServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = SpecialistPoolServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_create_specialist_pool(transport: str = 'grpc', request_type=specialist_pool_service.CreateSpecialistPoolRequest): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.CreateSpecialistPoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_specialist_pool_from_dict(): - test_create_specialist_pool(request_type=dict) - - -def test_create_specialist_pool_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
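# [sketch] The *_empty_call tests rely on proto3 semantics: every field is
# optional at runtime, so request_type() with no arguments is a valid,
# fully-defaulted request. A tiny stand-in (hypothetical class, for
# illustration only; the real requests are proto-plus messages):
class _SketchRequest:
    def __init__(self, parent=''):
        self.parent = parent

    def __eq__(self, other):
        return isinstance(other, _SketchRequest) and self.parent == other.parent

assert _SketchRequest() == _SketchRequest()  # two empty requests compare equal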
- client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_specialist_pool), - '__call__') as call: - client.create_specialist_pool() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.CreateSpecialistPoolRequest() - - -@pytest.mark.asyncio -async def test_create_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.CreateSpecialistPoolRequest): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.CreateSpecialistPoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_specialist_pool_async_from_dict(): - await test_create_specialist_pool_async(request_type=dict) - - -def test_create_specialist_pool_field_headers(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.CreateSpecialistPoolRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_specialist_pool), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_specialist_pool_field_headers_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.CreateSpecialistPoolRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_specialist_pool), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. 
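# [sketch] The field-header tests assert that request fields appearing in the
# HTTP/1.1 URI are mirrored into x-goog-request-params metadata. A minimal
# sketch of how such a header value could be assembled -- hypothetical helper;
# the real clients use gapic_v1.routing_header.to_grpc_metadata:
def _sketch_routing_metadata(**fields):
    params = "&".join("{}={}".format(k, v) for k, v in fields.items())
    return (("x-goog-request-params", params),)

# _sketch_routing_metadata(parent="parent/value")
# -> (('x-goog-request-params', 'parent=parent/value'),)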
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_specialist_pool_flattened(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_specialist_pool( - parent='parent_value', - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].specialist_pool == gca_specialist_pool.SpecialistPool(name='name_value') - - -def test_create_specialist_pool_flattened_error(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_specialist_pool( - specialist_pool_service.CreateSpecialistPoolRequest(), - parent='parent_value', - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_specialist_pool_flattened_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_specialist_pool( - parent='parent_value', - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].specialist_pool == gca_specialist_pool.SpecialistPool(name='name_value') - - -@pytest.mark.asyncio -async def test_create_specialist_pool_flattened_error_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_specialist_pool( - specialist_pool_service.CreateSpecialistPoolRequest(), - parent='parent_value', - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - ) - - -def test_get_specialist_pool(transport: str = 'grpc', request_type=specialist_pool_service.GetSpecialistPoolRequest): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = specialist_pool.SpecialistPool( - name='name_value', - display_name='display_name_value', - specialist_managers_count=2662, - specialist_manager_emails=['specialist_manager_emails_value'], - pending_data_labeling_jobs=['pending_data_labeling_jobs_value'], - ) - response = client.get_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.GetSpecialistPoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, specialist_pool.SpecialistPool) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.specialist_managers_count == 2662 - assert response.specialist_manager_emails == ['specialist_manager_emails_value'] - assert response.pending_data_labeling_jobs == ['pending_data_labeling_jobs_value'] - - -def test_get_specialist_pool_from_dict(): - test_get_specialist_pool(request_type=dict) - - -def test_get_specialist_pool_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_specialist_pool), - '__call__') as call: - client.get_specialist_pool() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.GetSpecialistPoolRequest() - - -@pytest.mark.asyncio -async def test_get_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.GetSpecialistPoolRequest): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. 
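# [sketch] The *_flattened and *_flattened_error tests encode one rule:
# flattened keyword arguments are a convenience for building the request, and
# passing both a request object and flattened fields raises ValueError. A
# sketch of that rule (hypothetical helper, not the generated client's code):
def _sketch_build_request(request=None, parent=None):
    if request is not None and parent is not None:
        raise ValueError("request and flattened arguments are mutually exclusive")
    return request if request is not None else {"parent": parent}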
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool.SpecialistPool(
-            name='name_value',
-            display_name='display_name_value',
-            specialist_managers_count=2662,
-            specialist_manager_emails=['specialist_manager_emails_value'],
-            pending_data_labeling_jobs=['pending_data_labeling_jobs_value'],
-        ))
-        response = await client.get_specialist_pool(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == specialist_pool_service.GetSpecialistPoolRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, specialist_pool.SpecialistPool)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.specialist_managers_count == 2662
-    assert response.specialist_manager_emails == ['specialist_manager_emails_value']
-    assert response.pending_data_labeling_jobs == ['pending_data_labeling_jobs_value']
-
-
-@pytest.mark.asyncio
-async def test_get_specialist_pool_async_from_dict():
-    await test_get_specialist_pool_async(request_type=dict)
-
-
-def test_get_specialist_pool_field_headers():
-    client = SpecialistPoolServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = specialist_pool_service.GetSpecialistPoolRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_specialist_pool),
-            '__call__') as call:
-        call.return_value = specialist_pool.SpecialistPool()
-        client.get_specialist_pool(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_get_specialist_pool_field_headers_async():
-    client = SpecialistPoolServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = specialist_pool_service.GetSpecialistPoolRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_specialist_pool),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool.SpecialistPool())
-        await client.get_specialist_pool(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_get_specialist_pool_flattened():
-    client = SpecialistPoolServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_specialist_pool),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
- call.return_value = specialist_pool.SpecialistPool() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_specialist_pool( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_get_specialist_pool_flattened_error(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_specialist_pool( - specialist_pool_service.GetSpecialistPoolRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_specialist_pool_flattened_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = specialist_pool.SpecialistPool() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool.SpecialistPool()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_specialist_pool( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_get_specialist_pool_flattened_error_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_specialist_pool( - specialist_pool_service.GetSpecialistPoolRequest(), - name='name_value', - ) - - -def test_list_specialist_pools(transport: str = 'grpc', request_type=specialist_pool_service.ListSpecialistPoolsRequest): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = specialist_pool_service.ListSpecialistPoolsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_specialist_pools(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.ListSpecialistPoolsRequest() - - # Establish that the response is the type that we expect. 
-    assert isinstance(response, pagers.ListSpecialistPoolsPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_specialist_pools_from_dict():
-    test_list_specialist_pools(request_type=dict)
-
-
-def test_list_specialist_pools_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = SpecialistPoolServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_specialist_pools),
-            '__call__') as call:
-        client.list_specialist_pools()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == specialist_pool_service.ListSpecialistPoolsRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_specialist_pools_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.ListSpecialistPoolsRequest):
-    client = SpecialistPoolServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_specialist_pools),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool_service.ListSpecialistPoolsResponse(
-            next_page_token='next_page_token_value',
-        ))
-        response = await client.list_specialist_pools(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == specialist_pool_service.ListSpecialistPoolsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListSpecialistPoolsAsyncPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_specialist_pools_async_from_dict():
-    await test_list_specialist_pools_async(request_type=dict)
-
-
-def test_list_specialist_pools_field_headers():
-    client = SpecialistPoolServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = specialist_pool_service.ListSpecialistPoolsRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_specialist_pools),
-            '__call__') as call:
-        call.return_value = specialist_pool_service.ListSpecialistPoolsResponse()
-        client.list_specialist_pools(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_specialist_pools_field_headers_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.ListSpecialistPoolsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool_service.ListSpecialistPoolsResponse()) - await client.list_specialist_pools(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_specialist_pools_flattened(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = specialist_pool_service.ListSpecialistPoolsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_specialist_pools( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -def test_list_specialist_pools_flattened_error(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_specialist_pools( - specialist_pool_service.ListSpecialistPoolsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_specialist_pools_flattened_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = specialist_pool_service.ListSpecialistPoolsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool_service.ListSpecialistPoolsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_specialist_pools( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_specialist_pools_flattened_error_async():
-    client = SpecialistPoolServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.list_specialist_pools(
-            specialist_pool_service.ListSpecialistPoolsRequest(),
-            parent='parent_value',
-        )
-
-
-def test_list_specialist_pools_pager():
-    client = SpecialistPoolServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_specialist_pools),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            specialist_pool_service.ListSpecialistPoolsResponse(
-                specialist_pools=[
-                    specialist_pool.SpecialistPool(),
-                    specialist_pool.SpecialistPool(),
-                    specialist_pool.SpecialistPool(),
-                ],
-                next_page_token='abc',
-            ),
-            specialist_pool_service.ListSpecialistPoolsResponse(
-                specialist_pools=[],
-                next_page_token='def',
-            ),
-            specialist_pool_service.ListSpecialistPoolsResponse(
-                specialist_pools=[
-                    specialist_pool.SpecialistPool(),
-                ],
-                next_page_token='ghi',
-            ),
-            specialist_pool_service.ListSpecialistPoolsResponse(
-                specialist_pools=[
-                    specialist_pool.SpecialistPool(),
-                    specialist_pool.SpecialistPool(),
-                ],
-            ),
-            RuntimeError,
-        )
-
-        metadata = ()
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ('parent', ''),
-            )),
-        )
-        pager = client.list_specialist_pools(request={})
-
-        assert pager._metadata == metadata
-
-        results = [i for i in pager]
-        assert len(results) == 6
-        assert all(isinstance(i, specialist_pool.SpecialistPool)
-                   for i in results)
-
-def test_list_specialist_pools_pages():
-    client = SpecialistPoolServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_specialist_pools),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            specialist_pool_service.ListSpecialistPoolsResponse(
-                specialist_pools=[
-                    specialist_pool.SpecialistPool(),
-                    specialist_pool.SpecialistPool(),
-                    specialist_pool.SpecialistPool(),
-                ],
-                next_page_token='abc',
-            ),
-            specialist_pool_service.ListSpecialistPoolsResponse(
-                specialist_pools=[],
-                next_page_token='def',
-            ),
-            specialist_pool_service.ListSpecialistPoolsResponse(
-                specialist_pools=[
-                    specialist_pool.SpecialistPool(),
-                ],
-                next_page_token='ghi',
-            ),
-            specialist_pool_service.ListSpecialistPoolsResponse(
-                specialist_pools=[
-                    specialist_pool.SpecialistPool(),
-                    specialist_pool.SpecialistPool(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = list(client.list_specialist_pools(request={}).pages)
-        for page_, token in zip(pages, ['abc','def','ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_specialist_pools_async_pager():
-    client = SpecialistPoolServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_specialist_pools),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
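# [sketch] The pager/pages tests around this point configure a sequence of
# fake ListSpecialistPoolsResponse pages and check that iteration follows
# next_page_token until it is empty. The underlying loop, sketched under that
# assumption only (hypothetical helper, not the real pagers module):
def _sketch_list_all(fetch_page):
    """fetch_page(token) -> (items, next_page_token); '' means last page."""
    token = ''
    while True:
        items, token = fetch_page(token)
        for item in items:
            yield item
        if not token:
            return

# With the four pages configured in these tests ('abc' -> 'def' -> 'ghi' -> ''),
# the sketch yields 3 + 0 + 1 + 2 = 6 items, matching the assertions.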
-        call.side_effect = (
-            specialist_pool_service.ListSpecialistPoolsResponse(
-                specialist_pools=[
-                    specialist_pool.SpecialistPool(),
-                    specialist_pool.SpecialistPool(),
-                    specialist_pool.SpecialistPool(),
-                ],
-                next_page_token='abc',
-            ),
-            specialist_pool_service.ListSpecialistPoolsResponse(
-                specialist_pools=[],
-                next_page_token='def',
-            ),
-            specialist_pool_service.ListSpecialistPoolsResponse(
-                specialist_pools=[
-                    specialist_pool.SpecialistPool(),
-                ],
-                next_page_token='ghi',
-            ),
-            specialist_pool_service.ListSpecialistPoolsResponse(
-                specialist_pools=[
-                    specialist_pool.SpecialistPool(),
-                    specialist_pool.SpecialistPool(),
-                ],
-            ),
-            RuntimeError,
-        )
-        async_pager = await client.list_specialist_pools(request={},)
-        assert async_pager.next_page_token == 'abc'
-        responses = []
-        async for response in async_pager:
-            responses.append(response)
-
-        assert len(responses) == 6
-        assert all(isinstance(i, specialist_pool.SpecialistPool)
-                   for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_specialist_pools_async_pages():
-    client = SpecialistPoolServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_specialist_pools),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            specialist_pool_service.ListSpecialistPoolsResponse(
-                specialist_pools=[
-                    specialist_pool.SpecialistPool(),
-                    specialist_pool.SpecialistPool(),
-                    specialist_pool.SpecialistPool(),
-                ],
-                next_page_token='abc',
-            ),
-            specialist_pool_service.ListSpecialistPoolsResponse(
-                specialist_pools=[],
-                next_page_token='def',
-            ),
-            specialist_pool_service.ListSpecialistPoolsResponse(
-                specialist_pools=[
-                    specialist_pool.SpecialistPool(),
-                ],
-                next_page_token='ghi',
-            ),
-            specialist_pool_service.ListSpecialistPoolsResponse(
-                specialist_pools=[
-                    specialist_pool.SpecialistPool(),
-                    specialist_pool.SpecialistPool(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = []
-        async for page_ in (await client.list_specialist_pools(request={})).pages:
-            pages.append(page_)
-        for page_, token in zip(pages, ['abc','def','ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-def test_delete_specialist_pool(transport: str = 'grpc', request_type=specialist_pool_service.DeleteSpecialistPoolRequest):
-    client = SpecialistPoolServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_specialist_pool),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/spam')
-        response = client.delete_specialist_pool(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == specialist_pool_service.DeleteSpecialistPoolRequest()
-
-    # Establish that the response is the type that we expect.
- assert isinstance(response, future.Future) - - -def test_delete_specialist_pool_from_dict(): - test_delete_specialist_pool(request_type=dict) - - -def test_delete_specialist_pool_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_specialist_pool), - '__call__') as call: - client.delete_specialist_pool() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.DeleteSpecialistPoolRequest() - - -@pytest.mark.asyncio -async def test_delete_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.DeleteSpecialistPoolRequest): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.DeleteSpecialistPoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_specialist_pool_async_from_dict(): - await test_delete_specialist_pool_async(request_type=dict) - - -def test_delete_specialist_pool_field_headers(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.DeleteSpecialistPoolRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_specialist_pool), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_specialist_pool_field_headers_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = specialist_pool_service.DeleteSpecialistPoolRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_specialist_pool), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_specialist_pool_flattened(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_specialist_pool( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_delete_specialist_pool_flattened_error(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_specialist_pool( - specialist_pool_service.DeleteSpecialistPoolRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_specialist_pool_flattened_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_specialist_pool( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_delete_specialist_pool_flattened_error_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.delete_specialist_pool( - specialist_pool_service.DeleteSpecialistPoolRequest(), - name='name_value', - ) - - -def test_update_specialist_pool(transport: str = 'grpc', request_type=specialist_pool_service.UpdateSpecialistPoolRequest): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.update_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.UpdateSpecialistPoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_update_specialist_pool_from_dict(): - test_update_specialist_pool(request_type=dict) - - -def test_update_specialist_pool_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_specialist_pool), - '__call__') as call: - client.update_specialist_pool() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.UpdateSpecialistPoolRequest() - - -@pytest.mark.asyncio -async def test_update_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.UpdateSpecialistPoolRequest): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.update_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.UpdateSpecialistPoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_update_specialist_pool_async_from_dict(): - await test_update_specialist_pool_async(request_type=dict) - - -def test_update_specialist_pool_field_headers(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. 
Set these to a non-empty value. - request = specialist_pool_service.UpdateSpecialistPoolRequest() - - request.specialist_pool.name = 'specialist_pool.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_specialist_pool), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.update_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'specialist_pool.name=specialist_pool.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_specialist_pool_field_headers_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.UpdateSpecialistPoolRequest() - - request.specialist_pool.name = 'specialist_pool.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_specialist_pool), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.update_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'specialist_pool.name=specialist_pool.name/value', - ) in kw['metadata'] - - -def test_update_specialist_pool_flattened(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_specialist_pool( - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].specialist_pool == gca_specialist_pool.SpecialistPool(name='name_value') - assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) - - -def test_update_specialist_pool_flattened_error(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_specialist_pool( - specialist_pool_service.UpdateSpecialistPoolRequest(), - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_specialist_pool_flattened_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_specialist_pool( - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].specialist_pool == gca_specialist_pool.SpecialistPool(name='name_value') - assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) - - -@pytest.mark.asyncio -async def test_update_specialist_pool_flattened_error_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_specialist_pool( - specialist_pool_service.UpdateSpecialistPoolRequest(), - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.SpecialistPoolServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.SpecialistPoolServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = SpecialistPoolServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.SpecialistPoolServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = SpecialistPoolServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.SpecialistPoolServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = SpecialistPoolServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. 
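# [sketch] test_credentials_transport_error encodes a constructor rule: once
# a ready-made transport instance is supplied, credentials, credentials_file,
# and scopes must not also be supplied. Sketched under that assumption
# (hypothetical helper, not the generated client's code):
def _sketch_validate_client_args(transport=None, credentials=None, credentials_file=None, scopes=None):
    if transport is not None and any(
            arg is not None for arg in (credentials, credentials_file, scopes)):
        raise ValueError("transport and credential options are mutually exclusive")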
- transport = transports.SpecialistPoolServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.SpecialistPoolServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.SpecialistPoolServiceGrpcTransport, - transports.SpecialistPoolServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.SpecialistPoolServiceGrpcTransport, - ) - -def test_specialist_pool_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.SpecialistPoolServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_specialist_pool_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1beta1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.SpecialistPoolServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
- methods = ( - 'create_specialist_pool', - 'get_specialist_pool', - 'list_specialist_pools', - 'delete_specialist_pool', - 'update_specialist_pool', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -@requires_google_auth_gte_1_25_0 -def test_specialist_pool_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.SpecialistPoolServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@requires_google_auth_lt_1_25_0 -def test_specialist_pool_service_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.SpecialistPoolServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - ), - quota_project_id="octopus", - ) - - -def test_specialist_pool_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.SpecialistPoolServiceTransport() - adc.assert_called_once() - - -@requires_google_auth_gte_1_25_0 -def test_specialist_pool_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - SpecialistPoolServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@requires_google_auth_lt_1_25_0 -def test_specialist_pool_service_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - SpecialistPoolServiceClient() - adc.assert_called_once_with( - scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.SpecialistPoolServiceGrpcTransport, - transports.SpecialistPoolServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_gte_1_25_0 -def test_specialist_pool_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.SpecialistPoolServiceGrpcTransport, - transports.SpecialistPoolServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_specialist_pool_service_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with(scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.SpecialistPoolServiceGrpcTransport, grpc_helpers), - (transports.SpecialistPoolServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_specialist_pool_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.SpecialistPoolServiceGrpcTransport, transports.SpecialistPoolServiceGrpcAsyncIOTransport]) -def test_specialist_pool_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. 
- with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_specialist_pool_service_host_no_port(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_specialist_pool_service_host_with_port(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_specialist_pool_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.SpecialistPoolServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_specialist_pool_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.SpecialistPoolServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. 
-@pytest.mark.parametrize("transport_class", [transports.SpecialistPoolServiceGrpcTransport, transports.SpecialistPoolServiceGrpcAsyncIOTransport]) -def test_specialist_pool_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.SpecialistPoolServiceGrpcTransport, transports.SpecialistPoolServiceGrpcAsyncIOTransport]) -def test_specialist_pool_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_specialist_pool_service_grpc_lro_client(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. 
- assert transport.operations_client is transport.operations_client - - -def test_specialist_pool_service_grpc_lro_async_client(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_specialist_pool_path(): - project = "squid" - location = "clam" - specialist_pool = "whelk" - expected = "projects/{project}/locations/{location}/specialistPools/{specialist_pool}".format(project=project, location=location, specialist_pool=specialist_pool, ) - actual = SpecialistPoolServiceClient.specialist_pool_path(project, location, specialist_pool) - assert expected == actual - - -def test_parse_specialist_pool_path(): - expected = { - "project": "octopus", - "location": "oyster", - "specialist_pool": "nudibranch", - } - path = SpecialistPoolServiceClient.specialist_pool_path(**expected) - - # Check that the path construction is reversible. - actual = SpecialistPoolServiceClient.parse_specialist_pool_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "cuttlefish" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = SpecialistPoolServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "mussel", - } - path = SpecialistPoolServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = SpecialistPoolServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "winkle" - expected = "folders/{folder}".format(folder=folder, ) - actual = SpecialistPoolServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "nautilus", - } - path = SpecialistPoolServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = SpecialistPoolServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "scallop" - expected = "organizations/{organization}".format(organization=organization, ) - actual = SpecialistPoolServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "abalone", - } - path = SpecialistPoolServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = SpecialistPoolServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "squid" - expected = "projects/{project}".format(project=project, ) - actual = SpecialistPoolServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "clam", - } - path = SpecialistPoolServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. 
- actual = SpecialistPoolServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "whelk" - location = "octopus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = SpecialistPoolServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - } - path = SpecialistPoolServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. - actual = SpecialistPoolServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.SpecialistPoolServiceTransport, '_prep_wrapped_messages') as prep: - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.SpecialistPoolServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = SpecialistPoolServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py deleted file mode 100644 index db7bfea262..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py +++ /dev/null @@ -1,7781 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.tensorboard_service import TensorboardServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.tensorboard_service import TensorboardServiceClient -from google.cloud.aiplatform_v1beta1.services.tensorboard_service import pagers -from google.cloud.aiplatform_v1beta1.services.tensorboard_service import transports -from google.cloud.aiplatform_v1beta1.services.tensorboard_service.transports.base import _GOOGLE_AUTH_VERSION -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.cloud.aiplatform_v1beta1.types import tensorboard -from google.cloud.aiplatform_v1beta1.types import tensorboard as gca_tensorboard -from google.cloud.aiplatform_v1beta1.types import tensorboard_data -from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment -from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment as gca_tensorboard_experiment -from google.cloud.aiplatform_v1beta1.types import tensorboard_run -from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run -from google.cloud.aiplatform_v1beta1.types import tensorboard_service -from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series -from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series as gca_tensorboard_time_series -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). -requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert TensorboardServiceClient._get_default_mtls_endpoint(None) is None - assert TensorboardServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert TensorboardServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert TensorboardServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert TensorboardServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert TensorboardServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - TensorboardServiceClient, - TensorboardServiceAsyncClient, -]) -def test_tensorboard_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("client_class", [ - TensorboardServiceClient, - TensorboardServiceAsyncClient, -]) -def test_tensorboard_service_client_service_account_always_use_jwt(client_class): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.TensorboardServiceGrpcTransport, "grpc"), - (transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_tensorboard_service_client_service_account_always_use_jwt_true(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - -@pytest.mark.parametrize("client_class", [ - TensorboardServiceClient, - TensorboardServiceAsyncClient, -]) -def test_tensorboard_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_tensorboard_service_client_get_transport_class(): - transport = TensorboardServiceClient.get_transport_class() - available_transports = [ 
- transports.TensorboardServiceGrpcTransport, - ] - assert transport in available_transports - - transport = TensorboardServiceClient.get_transport_class("grpc") - assert transport == transports.TensorboardServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc"), - (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(TensorboardServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TensorboardServiceClient)) -@mock.patch.object(TensorboardServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TensorboardServiceAsyncClient)) -def test_tensorboard_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(TensorboardServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(TensorboardServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc", "true"), - (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc", "false"), - (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(TensorboardServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TensorboardServiceClient)) -@mock.patch.object(TensorboardServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TensorboardServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_tensorboard_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc"), - (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_tensorboard_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc"), - (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_tensorboard_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_tensorboard_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.tensorboard_service.transports.TensorboardServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = TensorboardServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_create_tensorboard(transport: str = 'grpc', request_type=tensorboard_service.CreateTensorboardRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_tensorboard_from_dict(): - test_create_tensorboard(request_type=dict) - - -def test_create_tensorboard_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard), - '__call__') as call: - client.create_tensorboard() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardRequest() - - -@pytest.mark.asyncio -async def test_create_tensorboard_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.CreateTensorboardRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_tensorboard_async_from_dict(): - await test_create_tensorboard_async(request_type=dict) - - -def test_create_tensorboard_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.CreateTensorboardRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_tensorboard_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.CreateTensorboardRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_tensorboard_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_tensorboard( - parent='parent_value', - tensorboard=gca_tensorboard.Tensorboard(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].tensorboard == gca_tensorboard.Tensorboard(name='name_value') - - -def test_create_tensorboard_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_tensorboard( - tensorboard_service.CreateTensorboardRequest(), - parent='parent_value', - tensorboard=gca_tensorboard.Tensorboard(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_tensorboard_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_tensorboard( - parent='parent_value', - tensorboard=gca_tensorboard.Tensorboard(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].tensorboard == gca_tensorboard.Tensorboard(name='name_value') - - -@pytest.mark.asyncio -async def test_create_tensorboard_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_tensorboard( - tensorboard_service.CreateTensorboardRequest(), - parent='parent_value', - tensorboard=gca_tensorboard.Tensorboard(name='name_value'), - ) - - -def test_get_tensorboard(transport: str = 'grpc', request_type=tensorboard_service.GetTensorboardRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard.Tensorboard( - name='name_value', - display_name='display_name_value', - description='description_value', - blob_storage_path_prefix='blob_storage_path_prefix_value', - run_count=989, - etag='etag_value', - ) - response = client.get_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, tensorboard.Tensorboard) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.blob_storage_path_prefix == 'blob_storage_path_prefix_value' - assert response.run_count == 989 - assert response.etag == 'etag_value' - - -def test_get_tensorboard_from_dict(): - test_get_tensorboard(request_type=dict) - - -def test_get_tensorboard_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard), - '__call__') as call: - client.get_tensorboard() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardRequest() - - -@pytest.mark.asyncio -async def test_get_tensorboard_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.GetTensorboardRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard.Tensorboard( - name='name_value', - display_name='display_name_value', - description='description_value', - blob_storage_path_prefix='blob_storage_path_prefix_value', - run_count=989, - etag='etag_value', - )) - response = await client.get_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard.Tensorboard) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.blob_storage_path_prefix == 'blob_storage_path_prefix_value' - assert response.run_count == 989 - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_tensorboard_async_from_dict(): - await test_get_tensorboard_async(request_type=dict) - - -def test_get_tensorboard_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.GetTensorboardRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard), - '__call__') as call: - call.return_value = tensorboard.Tensorboard() - client.get_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_tensorboard_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.GetTensorboardRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard.Tensorboard()) - await client.get_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_tensorboard_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard.Tensorboard() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_tensorboard( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_get_tensorboard_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_tensorboard( - tensorboard_service.GetTensorboardRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_tensorboard_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard.Tensorboard() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard.Tensorboard()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_tensorboard( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_get_tensorboard_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_tensorboard( - tensorboard_service.GetTensorboardRequest(), - name='name_value', - ) - - -def test_update_tensorboard(transport: str = 'grpc', request_type=tensorboard_service.UpdateTensorboardRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.update_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_update_tensorboard_from_dict(): - test_update_tensorboard(request_type=dict) - - -def test_update_tensorboard_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard), - '__call__') as call: - client.update_tensorboard() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardRequest() - - -@pytest.mark.asyncio -async def test_update_tensorboard_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.UpdateTensorboardRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.update_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_update_tensorboard_async_from_dict(): - await test_update_tensorboard_async(request_type=dict) - - -def test_update_tensorboard_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.UpdateTensorboardRequest() - - request.tensorboard.name = 'tensorboard.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.update_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard.name=tensorboard.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_tensorboard_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.UpdateTensorboardRequest() - - request.tensorboard.name = 'tensorboard.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.update_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard.name=tensorboard.name/value', - ) in kw['metadata'] - - -def test_update_tensorboard_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_tensorboard( - tensorboard=gca_tensorboard.Tensorboard(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].tensorboard == gca_tensorboard.Tensorboard(name='name_value') - assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) - - -def test_update_tensorboard_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
-    with pytest.raises(ValueError):
-        client.update_tensorboard(
-            tensorboard_service.UpdateTensorboardRequest(),
-            tensorboard=gca_tensorboard.Tensorboard(name='name_value'),
-            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
-        )
-
-
-@pytest.mark.asyncio
-async def test_update_tensorboard_flattened_async():
-    client = TensorboardServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_tensorboard),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.update_tensorboard(
-            tensorboard=gca_tensorboard.Tensorboard(name='name_value'),
-            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].tensorboard == gca_tensorboard.Tensorboard(name='name_value')
-        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
-
-
-@pytest.mark.asyncio
-async def test_update_tensorboard_flattened_error_async():
-    client = TensorboardServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.update_tensorboard(
-            tensorboard_service.UpdateTensorboardRequest(),
-            tensorboard=gca_tensorboard.Tensorboard(name='name_value'),
-            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
-        )
-
-
-def test_list_tensorboards(transport: str = 'grpc', request_type=tensorboard_service.ListTensorboardsRequest):
-    client = TensorboardServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_tensorboards),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = tensorboard_service.ListTensorboardsResponse(
-            next_page_token='next_page_token_value',
-        )
-        response = client.list_tensorboards(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == tensorboard_service.ListTensorboardsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListTensorboardsPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_tensorboards_from_dict():
-    test_list_tensorboards(request_type=dict)
-
-
-def test_list_tensorboards_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
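-    # (With neither a request nor flattened fields, the client constructs a
-    # default ListTensorboardsRequest internally, so the mocked stub still
-    # receives a well-formed request object.)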
-    client = TensorboardServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_tensorboards),
-            '__call__') as call:
-        client.list_tensorboards()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == tensorboard_service.ListTensorboardsRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_tensorboards_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ListTensorboardsRequest):
-    client = TensorboardServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_tensorboards),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardsResponse(
-            next_page_token='next_page_token_value',
-        ))
-        response = await client.list_tensorboards(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == tensorboard_service.ListTensorboardsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListTensorboardsAsyncPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_tensorboards_async_from_dict():
-    await test_list_tensorboards_async(request_type=dict)
-
-
-def test_list_tensorboards_field_headers():
-    client = TensorboardServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = tensorboard_service.ListTensorboardsRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_tensorboards),
-            '__call__') as call:
-        call.return_value = tensorboard_service.ListTensorboardsResponse()
-        client.list_tensorboards(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_tensorboards_field_headers_async():
-    client = TensorboardServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = tensorboard_service.ListTensorboardsRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_tensorboards),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardsResponse())
-        await client.list_tensorboards(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_list_tensorboards_flattened():
-    client = TensorboardServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_tensorboards),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = tensorboard_service.ListTensorboardsResponse()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.list_tensorboards(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-def test_list_tensorboards_flattened_error():
-    client = TensorboardServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.list_tensorboards(
-            tensorboard_service.ListTensorboardsRequest(),
-            parent='parent_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_list_tensorboards_flattened_async():
-    client = TensorboardServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_tensorboards),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardsResponse())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.list_tensorboards(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_tensorboards_flattened_error_async():
-    client = TensorboardServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.list_tensorboards(
-            tensorboard_service.ListTensorboardsRequest(),
-            parent='parent_value',
-        )
-
-
-def test_list_tensorboards_pager():
-    client = TensorboardServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_tensorboards),
-            '__call__') as call:
-        # Set the response to a series of pages.
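-        # ``side_effect`` yields one response per invocation; the trailing
-        # RuntimeError ensures the pager never requests more pages than the
-        # four provided here.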
-        call.side_effect = (
-            tensorboard_service.ListTensorboardsResponse(
-                tensorboards=[
-                    tensorboard.Tensorboard(),
-                    tensorboard.Tensorboard(),
-                    tensorboard.Tensorboard(),
-                ],
-                next_page_token='abc',
-            ),
-            tensorboard_service.ListTensorboardsResponse(
-                tensorboards=[],
-                next_page_token='def',
-            ),
-            tensorboard_service.ListTensorboardsResponse(
-                tensorboards=[
-                    tensorboard.Tensorboard(),
-                ],
-                next_page_token='ghi',
-            ),
-            tensorboard_service.ListTensorboardsResponse(
-                tensorboards=[
-                    tensorboard.Tensorboard(),
-                    tensorboard.Tensorboard(),
-                ],
-            ),
-            RuntimeError,
-        )
-
-        metadata = ()
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ('parent', ''),
-            )),
-        )
-        pager = client.list_tensorboards(request={})
-
-        assert pager._metadata == metadata
-
-        results = [i for i in pager]
-        assert len(results) == 6
-        assert all(isinstance(i, tensorboard.Tensorboard)
-                   for i in results)
-
-def test_list_tensorboards_pages():
-    client = TensorboardServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_tensorboards),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            tensorboard_service.ListTensorboardsResponse(
-                tensorboards=[
-                    tensorboard.Tensorboard(),
-                    tensorboard.Tensorboard(),
-                    tensorboard.Tensorboard(),
-                ],
-                next_page_token='abc',
-            ),
-            tensorboard_service.ListTensorboardsResponse(
-                tensorboards=[],
-                next_page_token='def',
-            ),
-            tensorboard_service.ListTensorboardsResponse(
-                tensorboards=[
-                    tensorboard.Tensorboard(),
-                ],
-                next_page_token='ghi',
-            ),
-            tensorboard_service.ListTensorboardsResponse(
-                tensorboards=[
-                    tensorboard.Tensorboard(),
-                    tensorboard.Tensorboard(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = list(client.list_tensorboards(request={}).pages)
-        for page_, token in zip(pages, ['abc','def','ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_tensorboards_async_pager():
-    client = TensorboardServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_tensorboards),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            tensorboard_service.ListTensorboardsResponse(
-                tensorboards=[
-                    tensorboard.Tensorboard(),
-                    tensorboard.Tensorboard(),
-                    tensorboard.Tensorboard(),
-                ],
-                next_page_token='abc',
-            ),
-            tensorboard_service.ListTensorboardsResponse(
-                tensorboards=[],
-                next_page_token='def',
-            ),
-            tensorboard_service.ListTensorboardsResponse(
-                tensorboards=[
-                    tensorboard.Tensorboard(),
-                ],
-                next_page_token='ghi',
-            ),
-            tensorboard_service.ListTensorboardsResponse(
-                tensorboards=[
-                    tensorboard.Tensorboard(),
-                    tensorboard.Tensorboard(),
-                ],
-            ),
-            RuntimeError,
-        )
-        async_pager = await client.list_tensorboards(request={},)
-        assert async_pager.next_page_token == 'abc'
-        responses = []
-        async for response in async_pager:
-            responses.append(response)
-
-        assert len(responses) == 6
-        assert all(isinstance(i, tensorboard.Tensorboard)
-                   for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_tensorboards_async_pages():
-    client = TensorboardServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object( - type(client.transport.list_tensorboards), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - tensorboard_service.ListTensorboardsResponse( - tensorboards=[ - tensorboard.Tensorboard(), - tensorboard.Tensorboard(), - tensorboard.Tensorboard(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardsResponse( - tensorboards=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardsResponse( - tensorboards=[ - tensorboard.Tensorboard(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardsResponse( - tensorboards=[ - tensorboard.Tensorboard(), - tensorboard.Tensorboard(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_tensorboards(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_tensorboard(transport: str = 'grpc', request_type=tensorboard_service.DeleteTensorboardRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_tensorboard_from_dict(): - test_delete_tensorboard(request_type=dict) - - -def test_delete_tensorboard_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard), - '__call__') as call: - client.delete_tensorboard() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardRequest() - - -@pytest.mark.asyncio -async def test_delete_tensorboard_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.DeleteTensorboardRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. 
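-        # FakeUnaryUnaryCall (from google.api_core.grpc_helpers_async) wraps
-        # the canned response in an awaitable, mimicking a real async gRPC call.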
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_tensorboard_async_from_dict(): - await test_delete_tensorboard_async(request_type=dict) - - -def test_delete_tensorboard_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.DeleteTensorboardRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_tensorboard_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.DeleteTensorboardRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_tensorboard_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_tensorboard( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
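-        # The flattened ``name`` keyword is merged into a
-        # DeleteTensorboardRequest before the transport is invoked; that
-        # request object is what args[0] is checked against below.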
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_delete_tensorboard_flattened_error():
-    client = TensorboardServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.delete_tensorboard(
-            tensorboard_service.DeleteTensorboardRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_delete_tensorboard_flattened_async():
-    client = TensorboardServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_tensorboard),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
-            operations_pb2.Operation(name='operations/spam')
-        )
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.delete_tensorboard(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_tensorboard_flattened_error_async():
-    client = TensorboardServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.delete_tensorboard(
-            tensorboard_service.DeleteTensorboardRequest(),
-            name='name_value',
-        )
-
-
-def test_create_tensorboard_experiment(transport: str = 'grpc', request_type=tensorboard_service.CreateTensorboardExperimentRequest):
-    client = TensorboardServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_tensorboard_experiment),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = gca_tensorboard_experiment.TensorboardExperiment(
-            name='name_value',
-            display_name='display_name_value',
-            description='description_value',
-            etag='etag_value',
-            source='source_value',
-        )
-        response = client.create_tensorboard_experiment(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == tensorboard_service.CreateTensorboardExperimentRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.description == 'description_value'
-    assert response.etag == 'etag_value'
-    assert response.source == 'source_value'
-
-
-def test_create_tensorboard_experiment_from_dict():
-    test_create_tensorboard_experiment(request_type=dict)
-
-
-def test_create_tensorboard_experiment_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = TensorboardServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_tensorboard_experiment),
-            '__call__') as call:
-        client.create_tensorboard_experiment()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == tensorboard_service.CreateTensorboardExperimentRequest()
-
-
-@pytest.mark.asyncio
-async def test_create_tensorboard_experiment_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.CreateTensorboardExperimentRequest):
-    client = TensorboardServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_tensorboard_experiment),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment(
-            name='name_value',
-            display_name='display_name_value',
-            description='description_value',
-            etag='etag_value',
-            source='source_value',
-        ))
-        response = await client.create_tensorboard_experiment(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == tensorboard_service.CreateTensorboardExperimentRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.description == 'description_value'
-    assert response.etag == 'etag_value'
-    assert response.source == 'source_value'
-
-
-@pytest.mark.asyncio
-async def test_create_tensorboard_experiment_async_from_dict():
-    await test_create_tensorboard_experiment_async(request_type=dict)
-
-
-def test_create_tensorboard_experiment_field_headers():
-    client = TensorboardServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = tensorboard_service.CreateTensorboardExperimentRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
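-    # The ``parent`` value set on the request above must be echoed back in
-    # the x-goog-request-params metadata so the backend can route the call.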
- with mock.patch.object( - type(client.transport.create_tensorboard_experiment), - '__call__') as call: - call.return_value = gca_tensorboard_experiment.TensorboardExperiment() - client.create_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_tensorboard_experiment_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.CreateTensorboardExperimentRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_experiment), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment()) - await client.create_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_tensorboard_experiment_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_experiment.TensorboardExperiment() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_tensorboard_experiment( - parent='parent_value', - tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), - tensorboard_experiment_id='tensorboard_experiment_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].tensorboard_experiment == gca_tensorboard_experiment.TensorboardExperiment(name='name_value') - assert args[0].tensorboard_experiment_id == 'tensorboard_experiment_id_value' - - -def test_create_tensorboard_experiment_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
-    with pytest.raises(ValueError):
-        client.create_tensorboard_experiment(
-            tensorboard_service.CreateTensorboardExperimentRequest(),
-            parent='parent_value',
-            tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'),
-            tensorboard_experiment_id='tensorboard_experiment_id_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_create_tensorboard_experiment_flattened_async():
-    client = TensorboardServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_tensorboard_experiment),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.create_tensorboard_experiment(
-            parent='parent_value',
-            tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'),
-            tensorboard_experiment_id='tensorboard_experiment_id_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-        assert args[0].tensorboard_experiment == gca_tensorboard_experiment.TensorboardExperiment(name='name_value')
-        assert args[0].tensorboard_experiment_id == 'tensorboard_experiment_id_value'
-
-
-@pytest.mark.asyncio
-async def test_create_tensorboard_experiment_flattened_error_async():
-    client = TensorboardServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.create_tensorboard_experiment(
-            tensorboard_service.CreateTensorboardExperimentRequest(),
-            parent='parent_value',
-            tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'),
-            tensorboard_experiment_id='tensorboard_experiment_id_value',
-        )
-
-
-def test_get_tensorboard_experiment(transport: str = 'grpc', request_type=tensorboard_service.GetTensorboardExperimentRequest):
-    client = TensorboardServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_tensorboard_experiment),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = tensorboard_experiment.TensorboardExperiment(
-            name='name_value',
-            display_name='display_name_value',
-            description='description_value',
-            etag='etag_value',
-            source='source_value',
-        )
-        response = client.get_tensorboard_experiment(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == tensorboard_service.GetTensorboardExperimentRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, tensorboard_experiment.TensorboardExperiment)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.description == 'description_value'
-    assert response.etag == 'etag_value'
-    assert response.source == 'source_value'
-
-
-def test_get_tensorboard_experiment_from_dict():
-    test_get_tensorboard_experiment(request_type=dict)
-
-
-def test_get_tensorboard_experiment_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = TensorboardServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_tensorboard_experiment),
-            '__call__') as call:
-        client.get_tensorboard_experiment()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == tensorboard_service.GetTensorboardExperimentRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_tensorboard_experiment_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.GetTensorboardExperimentRequest):
-    client = TensorboardServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_tensorboard_experiment),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_experiment.TensorboardExperiment(
-            name='name_value',
-            display_name='display_name_value',
-            description='description_value',
-            etag='etag_value',
-            source='source_value',
-        ))
-        response = await client.get_tensorboard_experiment(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == tensorboard_service.GetTensorboardExperimentRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, tensorboard_experiment.TensorboardExperiment)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.description == 'description_value'
-    assert response.etag == 'etag_value'
-    assert response.source == 'source_value'
-
-
-@pytest.mark.asyncio
-async def test_get_tensorboard_experiment_async_from_dict():
-    await test_get_tensorboard_experiment_async(request_type=dict)
-
-
-def test_get_tensorboard_experiment_field_headers():
-    client = TensorboardServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = tensorboard_service.GetTensorboardExperimentRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_tensorboard_experiment),
-            '__call__') as call:
-        call.return_value = tensorboard_experiment.TensorboardExperiment()
-        client.get_tensorboard_experiment(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_get_tensorboard_experiment_field_headers_async():
-    client = TensorboardServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = tensorboard_service.GetTensorboardExperimentRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_tensorboard_experiment),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_experiment.TensorboardExperiment())
-        await client.get_tensorboard_experiment(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_get_tensorboard_experiment_flattened():
-    client = TensorboardServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_tensorboard_experiment),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = tensorboard_experiment.TensorboardExperiment()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.get_tensorboard_experiment(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_get_tensorboard_experiment_flattened_error():
-    client = TensorboardServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.get_tensorboard_experiment(
-            tensorboard_service.GetTensorboardExperimentRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_get_tensorboard_experiment_flattened_async():
-    client = TensorboardServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_tensorboard_experiment),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_experiment.TensorboardExperiment())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.get_tensorboard_experiment(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_get_tensorboard_experiment_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_tensorboard_experiment( - tensorboard_service.GetTensorboardExperimentRequest(), - name='name_value', - ) - - -def test_update_tensorboard_experiment(transport: str = 'grpc', request_type=tensorboard_service.UpdateTensorboardExperimentRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_experiment.TensorboardExperiment( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - source='source_value', - ) - response = client.update_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardExperimentRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.source == 'source_value' - - -def test_update_tensorboard_experiment_from_dict(): - test_update_tensorboard_experiment(request_type=dict) - - -def test_update_tensorboard_experiment_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_experiment), - '__call__') as call: - client.update_tensorboard_experiment() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardExperimentRequest() - - -@pytest.mark.asyncio -async def test_update_tensorboard_experiment_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.UpdateTensorboardExperimentRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
-    with mock.patch.object(
-            type(client.transport.update_tensorboard_experiment),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment(
-            name='name_value',
-            display_name='display_name_value',
-            description='description_value',
-            etag='etag_value',
-            source='source_value',
-        ))
-        response = await client.update_tensorboard_experiment(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == tensorboard_service.UpdateTensorboardExperimentRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.description == 'description_value'
-    assert response.etag == 'etag_value'
-    assert response.source == 'source_value'
-
-
-@pytest.mark.asyncio
-async def test_update_tensorboard_experiment_async_from_dict():
-    await test_update_tensorboard_experiment_async(request_type=dict)
-
-
-def test_update_tensorboard_experiment_field_headers():
-    client = TensorboardServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = tensorboard_service.UpdateTensorboardExperimentRequest()
-
-    request.tensorboard_experiment.name = 'tensorboard_experiment.name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_tensorboard_experiment),
-            '__call__') as call:
-        call.return_value = gca_tensorboard_experiment.TensorboardExperiment()
-        client.update_tensorboard_experiment(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'tensorboard_experiment.name=tensorboard_experiment.name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_update_tensorboard_experiment_field_headers_async():
-    client = TensorboardServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = tensorboard_service.UpdateTensorboardExperimentRequest()
-
-    request.tensorboard_experiment.name = 'tensorboard_experiment.name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_tensorboard_experiment),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment())
-        await client.update_tensorboard_experiment(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
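-    # mock_calls records positional and keyword arguments separately; the
-    # routing header travels in the ``metadata`` keyword argument.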
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'tensorboard_experiment.name=tensorboard_experiment.name/value',
-    ) in kw['metadata']
-
-
-def test_update_tensorboard_experiment_flattened():
-    client = TensorboardServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_tensorboard_experiment),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = gca_tensorboard_experiment.TensorboardExperiment()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.update_tensorboard_experiment(
-            tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'),
-            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].tensorboard_experiment == gca_tensorboard_experiment.TensorboardExperiment(name='name_value')
-        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
-
-
-def test_update_tensorboard_experiment_flattened_error():
-    client = TensorboardServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.update_tensorboard_experiment(
-            tensorboard_service.UpdateTensorboardExperimentRequest(),
-            tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'),
-            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
-        )
-
-
-@pytest.mark.asyncio
-async def test_update_tensorboard_experiment_flattened_async():
-    client = TensorboardServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_tensorboard_experiment),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.update_tensorboard_experiment(
-            tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'),
-            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].tensorboard_experiment == gca_tensorboard_experiment.TensorboardExperiment(name='name_value')
-        assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
-
-
-@pytest.mark.asyncio
-async def test_update_tensorboard_experiment_flattened_error_async():
-    client = TensorboardServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.update_tensorboard_experiment(
-            tensorboard_service.UpdateTensorboardExperimentRequest(),
-            tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'),
-            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
-        )
-
-
-def test_list_tensorboard_experiments(transport: str = 'grpc', request_type=tensorboard_service.ListTensorboardExperimentsRequest):
-    client = TensorboardServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_tensorboard_experiments),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = tensorboard_service.ListTensorboardExperimentsResponse(
-            next_page_token='next_page_token_value',
-        )
-        response = client.list_tensorboard_experiments(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == tensorboard_service.ListTensorboardExperimentsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListTensorboardExperimentsPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_tensorboard_experiments_from_dict():
-    test_list_tensorboard_experiments(request_type=dict)
-
-
-def test_list_tensorboard_experiments_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = TensorboardServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_tensorboard_experiments),
-            '__call__') as call:
-        client.list_tensorboard_experiments()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == tensorboard_service.ListTensorboardExperimentsRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_tensorboard_experiments_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ListTensorboardExperimentsRequest):
-    client = TensorboardServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_tensorboard_experiments),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardExperimentsResponse(
-            next_page_token='next_page_token_value',
-        ))
-        response = await client.list_tensorboard_experiments(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == tensorboard_service.ListTensorboardExperimentsRequest()
-
-    # Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ListTensorboardExperimentsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_tensorboard_experiments_async_from_dict(): - await test_list_tensorboard_experiments_async(request_type=dict) - - -def test_list_tensorboard_experiments_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.ListTensorboardExperimentsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_experiments), - '__call__') as call: - call.return_value = tensorboard_service.ListTensorboardExperimentsResponse() - client.list_tensorboard_experiments(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_tensorboard_experiments_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.ListTensorboardExperimentsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_experiments), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardExperimentsResponse()) - await client.list_tensorboard_experiments(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_tensorboard_experiments_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_experiments), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ListTensorboardExperimentsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_tensorboard_experiments( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -def test_list_tensorboard_experiments_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError):
- client.list_tensorboard_experiments(
- tensorboard_service.ListTensorboardExperimentsRequest(),
- parent='parent_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_list_tensorboard_experiments_flattened_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_tensorboard_experiments),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardExperimentsResponse())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.list_tensorboard_experiments(
- parent='parent_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_tensorboard_experiments_flattened_error_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.list_tensorboard_experiments(
- tensorboard_service.ListTensorboardExperimentsRequest(),
- parent='parent_value',
- )
-
-
-def test_list_tensorboard_experiments_pager():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_tensorboard_experiments),
- '__call__') as call:
- # Set the response to a series of pages.
- call.side_effect = (
- tensorboard_service.ListTensorboardExperimentsResponse(
- tensorboard_experiments=[
- tensorboard_experiment.TensorboardExperiment(),
- tensorboard_experiment.TensorboardExperiment(),
- tensorboard_experiment.TensorboardExperiment(),
- ],
- next_page_token='abc',
- ),
- tensorboard_service.ListTensorboardExperimentsResponse(
- tensorboard_experiments=[],
- next_page_token='def',
- ),
- tensorboard_service.ListTensorboardExperimentsResponse(
- tensorboard_experiments=[
- tensorboard_experiment.TensorboardExperiment(),
- ],
- next_page_token='ghi',
- ),
- tensorboard_service.ListTensorboardExperimentsResponse(
- tensorboard_experiments=[
- tensorboard_experiment.TensorboardExperiment(),
- tensorboard_experiment.TensorboardExperiment(),
- ],
- ),
- RuntimeError,
- )
-
- metadata = ()
- metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata((
- ('parent', ''),
- )),
- )
- pager = client.list_tensorboard_experiments(request={})
-
- assert pager._metadata == metadata
-
- results = [i for i in pager]
- assert len(results) == 6
- assert all(isinstance(i, tensorboard_experiment.TensorboardExperiment)
- for i in results)
-
-def test_list_tensorboard_experiments_pages():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_tensorboard_experiments),
- '__call__') as call:
- # Set the response to a series of pages.
- call.side_effect = (
- tensorboard_service.ListTensorboardExperimentsResponse(
- tensorboard_experiments=[
- tensorboard_experiment.TensorboardExperiment(),
- tensorboard_experiment.TensorboardExperiment(),
- tensorboard_experiment.TensorboardExperiment(),
- ],
- next_page_token='abc',
- ),
- tensorboard_service.ListTensorboardExperimentsResponse(
- tensorboard_experiments=[],
- next_page_token='def',
- ),
- tensorboard_service.ListTensorboardExperimentsResponse(
- tensorboard_experiments=[
- tensorboard_experiment.TensorboardExperiment(),
- ],
- next_page_token='ghi',
- ),
- tensorboard_service.ListTensorboardExperimentsResponse(
- tensorboard_experiments=[
- tensorboard_experiment.TensorboardExperiment(),
- tensorboard_experiment.TensorboardExperiment(),
- ],
- ),
- RuntimeError,
- )
- pages = list(client.list_tensorboard_experiments(request={}).pages)
- for page_, token in zip(pages, ['abc','def','ghi', '']):
- assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_tensorboard_experiments_async_pager():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_tensorboard_experiments),
- '__call__', new_callable=mock.AsyncMock) as call:
- # Set the response to a series of pages.
- call.side_effect = (
- tensorboard_service.ListTensorboardExperimentsResponse(
- tensorboard_experiments=[
- tensorboard_experiment.TensorboardExperiment(),
- tensorboard_experiment.TensorboardExperiment(),
- tensorboard_experiment.TensorboardExperiment(),
- ],
- next_page_token='abc',
- ),
- tensorboard_service.ListTensorboardExperimentsResponse(
- tensorboard_experiments=[],
- next_page_token='def',
- ),
- tensorboard_service.ListTensorboardExperimentsResponse(
- tensorboard_experiments=[
- tensorboard_experiment.TensorboardExperiment(),
- ],
- next_page_token='ghi',
- ),
- tensorboard_service.ListTensorboardExperimentsResponse(
- tensorboard_experiments=[
- tensorboard_experiment.TensorboardExperiment(),
- tensorboard_experiment.TensorboardExperiment(),
- ],
- ),
- RuntimeError,
- )
- async_pager = await client.list_tensorboard_experiments(request={},)
- assert async_pager.next_page_token == 'abc'
- responses = []
- async for response in async_pager:
- responses.append(response)
-
- assert len(responses) == 6
- assert all(isinstance(i, tensorboard_experiment.TensorboardExperiment)
- for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_tensorboard_experiments_async_pages():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_tensorboard_experiments),
- '__call__', new_callable=mock.AsyncMock) as call:
- # Set the response to a series of pages.
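- # Each entry in side_effect is consumed by one RPC, so the pager sees
- # 3 + 0 + 1 + 2 = 6 items across four pages. The trailing RuntimeError is
- # a sentinel: the last page carries no next_page_token, so a correct
- # pager never issues a fifth request; if it did, the mock would raise
- # and the test would fail loudly. The async surface mirrors the sync
- # one: `async for` iterates items, and `.pages` iterates raw responses.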
- call.side_effect = ( - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[ - tensorboard_experiment.TensorboardExperiment(), - tensorboard_experiment.TensorboardExperiment(), - tensorboard_experiment.TensorboardExperiment(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[ - tensorboard_experiment.TensorboardExperiment(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[ - tensorboard_experiment.TensorboardExperiment(), - tensorboard_experiment.TensorboardExperiment(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_tensorboard_experiments(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_tensorboard_experiment(transport: str = 'grpc', request_type=tensorboard_service.DeleteTensorboardExperimentRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardExperimentRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_tensorboard_experiment_from_dict(): - test_delete_tensorboard_experiment(request_type=dict) - - -def test_delete_tensorboard_experiment_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_experiment), - '__call__') as call: - client.delete_tensorboard_experiment() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardExperimentRequest() - - -@pytest.mark.asyncio -async def test_delete_tensorboard_experiment_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.DeleteTensorboardExperimentRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
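- # The transport exposes each RPC as a cached callable stub; patching
- # __call__ on type(client.transport.delete_tensorboard_experiment)
- # intercepts the invocation without ever touching a real channel.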
- with mock.patch.object( - type(client.transport.delete_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardExperimentRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_tensorboard_experiment_async_from_dict(): - await test_delete_tensorboard_experiment_async(request_type=dict) - - -def test_delete_tensorboard_experiment_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.DeleteTensorboardExperimentRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_experiment), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_tensorboard_experiment_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.DeleteTensorboardExperimentRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_experiment), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_tensorboard_experiment_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
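- # Truthy values matter: proto3 scalars default to falsy values, so
- # non-empty arguments let the assertions below distinguish fields the
- # client actually copied into the request from untouched defaults.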
- client.delete_tensorboard_experiment(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-def test_delete_tensorboard_experiment_flattened_error():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.delete_tensorboard_experiment(
- tensorboard_service.DeleteTensorboardExperimentRequest(),
- name='name_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_delete_tensorboard_experiment_flattened_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_tensorboard_experiment),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- operations_pb2.Operation(name='operations/spam')
- )
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.delete_tensorboard_experiment(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_tensorboard_experiment_flattened_error_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.delete_tensorboard_experiment(
- tensorboard_service.DeleteTensorboardExperimentRequest(),
- name='name_value',
- )
-
-
-def test_create_tensorboard_run(transport: str = 'grpc', request_type=tensorboard_service.CreateTensorboardRunRequest):
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_tensorboard_run),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = gca_tensorboard_run.TensorboardRun(
- name='name_value',
- display_name='display_name_value',
- description='description_value',
- etag='etag_value',
- )
- response = client.create_tensorboard_run(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == tensorboard_service.CreateTensorboardRunRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, gca_tensorboard_run.TensorboardRun)
- assert response.name == 'name_value'
- assert response.display_name == 'display_name_value'
- assert response.description == 'description_value'
- assert response.etag == 'etag_value'
-
-
-def test_create_tensorboard_run_from_dict():
- test_create_tensorboard_run(request_type=dict)
-
-
-def test_create_tensorboard_run_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_tensorboard_run),
- '__call__') as call:
- client.create_tensorboard_run()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == tensorboard_service.CreateTensorboardRunRequest()
-
-
-@pytest.mark.asyncio
-async def test_create_tensorboard_run_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.CreateTensorboardRunRequest):
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_tensorboard_run),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun(
- name='name_value',
- display_name='display_name_value',
- description='description_value',
- etag='etag_value',
- ))
- response = await client.create_tensorboard_run(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == tensorboard_service.CreateTensorboardRunRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, gca_tensorboard_run.TensorboardRun)
- assert response.name == 'name_value'
- assert response.display_name == 'display_name_value'
- assert response.description == 'description_value'
- assert response.etag == 'etag_value'
-
-
-@pytest.mark.asyncio
-async def test_create_tensorboard_run_async_from_dict():
- await test_create_tensorboard_run_async(request_type=dict)
-
-
-def test_create_tensorboard_run_field_headers():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = tensorboard_service.CreateTensorboardRunRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_tensorboard_run),
- '__call__') as call:
- call.return_value = gca_tensorboard_run.TensorboardRun()
- client.create_tensorboard_run(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_create_tensorboard_run_field_headers_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = tensorboard_service.CreateTensorboardRunRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_tensorboard_run),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun())
- await client.create_tensorboard_run(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-def test_create_tensorboard_run_flattened():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_tensorboard_run),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = gca_tensorboard_run.TensorboardRun()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.create_tensorboard_run(
- parent='parent_value',
- tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'),
- tensorboard_run_id='tensorboard_run_id_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
- assert args[0].tensorboard_run == gca_tensorboard_run.TensorboardRun(name='name_value')
- assert args[0].tensorboard_run_id == 'tensorboard_run_id_value'
-
-
-def test_create_tensorboard_run_flattened_error():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.create_tensorboard_run(
- tensorboard_service.CreateTensorboardRunRequest(),
- parent='parent_value',
- tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'),
- tensorboard_run_id='tensorboard_run_id_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_create_tensorboard_run_flattened_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_tensorboard_run),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.create_tensorboard_run( - parent='parent_value', - tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), - tensorboard_run_id='tensorboard_run_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].tensorboard_run == gca_tensorboard_run.TensorboardRun(name='name_value') - assert args[0].tensorboard_run_id == 'tensorboard_run_id_value' - - -@pytest.mark.asyncio -async def test_create_tensorboard_run_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_tensorboard_run( - tensorboard_service.CreateTensorboardRunRequest(), - parent='parent_value', - tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), - tensorboard_run_id='tensorboard_run_id_value', - ) - - -def test_get_tensorboard_run(transport: str = 'grpc', request_type=tensorboard_service.GetTensorboardRunRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_run.TensorboardRun( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - ) - response = client.get_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardRunRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_run.TensorboardRun) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - - -def test_get_tensorboard_run_from_dict(): - test_get_tensorboard_run(request_type=dict) - - -def test_get_tensorboard_run_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
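- # With neither a request object nor flattened fields, the client should
- # fabricate a default request, so the stub is still invoked once with an
- # empty GetTensorboardRunRequest.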
- with mock.patch.object(
- type(client.transport.get_tensorboard_run),
- '__call__') as call:
- client.get_tensorboard_run()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == tensorboard_service.GetTensorboardRunRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_tensorboard_run_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.GetTensorboardRunRequest):
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_tensorboard_run),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_run.TensorboardRun(
- name='name_value',
- display_name='display_name_value',
- description='description_value',
- etag='etag_value',
- ))
- response = await client.get_tensorboard_run(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == tensorboard_service.GetTensorboardRunRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, tensorboard_run.TensorboardRun)
- assert response.name == 'name_value'
- assert response.display_name == 'display_name_value'
- assert response.description == 'description_value'
- assert response.etag == 'etag_value'
-
-
-@pytest.mark.asyncio
-async def test_get_tensorboard_run_async_from_dict():
- await test_get_tensorboard_run_async(request_type=dict)
-
-
-def test_get_tensorboard_run_field_headers():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = tensorboard_service.GetTensorboardRunRequest()
-
- request.name = 'name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_tensorboard_run),
- '__call__') as call:
- call.return_value = tensorboard_run.TensorboardRun()
- client.get_tensorboard_run(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_get_tensorboard_run_field_headers_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = tensorboard_service.GetTensorboardRunRequest()
-
- request.name = 'name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_tensorboard_run),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_run.TensorboardRun())
- await client.get_tensorboard_run(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-def test_get_tensorboard_run_flattened():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_tensorboard_run),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = tensorboard_run.TensorboardRun()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.get_tensorboard_run(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-def test_get_tensorboard_run_flattened_error():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.get_tensorboard_run(
- tensorboard_service.GetTensorboardRunRequest(),
- name='name_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_get_tensorboard_run_flattened_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_tensorboard_run),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_run.TensorboardRun())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.get_tensorboard_run(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_get_tensorboard_run_flattened_error_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.get_tensorboard_run(
- tensorboard_service.GetTensorboardRunRequest(),
- name='name_value',
- )
-
-
-def test_update_tensorboard_run(transport: str = 'grpc', request_type=tensorboard_service.UpdateTensorboardRunRequest):
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.update_tensorboard_run),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = gca_tensorboard_run.TensorboardRun(
- name='name_value',
- display_name='display_name_value',
- description='description_value',
- etag='etag_value',
- )
- response = client.update_tensorboard_run(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == tensorboard_service.UpdateTensorboardRunRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, gca_tensorboard_run.TensorboardRun)
- assert response.name == 'name_value'
- assert response.display_name == 'display_name_value'
- assert response.description == 'description_value'
- assert response.etag == 'etag_value'
-
-
-def test_update_tensorboard_run_from_dict():
- test_update_tensorboard_run(request_type=dict)
-
-
-def test_update_tensorboard_run_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.update_tensorboard_run),
- '__call__') as call:
- client.update_tensorboard_run()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == tensorboard_service.UpdateTensorboardRunRequest()
-
-
-@pytest.mark.asyncio
-async def test_update_tensorboard_run_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.UpdateTensorboardRunRequest):
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.update_tensorboard_run),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun(
- name='name_value',
- display_name='display_name_value',
- description='description_value',
- etag='etag_value',
- ))
- response = await client.update_tensorboard_run(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == tensorboard_service.UpdateTensorboardRunRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, gca_tensorboard_run.TensorboardRun)
- assert response.name == 'name_value'
- assert response.display_name == 'display_name_value'
- assert response.description == 'description_value'
- assert response.etag == 'etag_value'
-
-
-@pytest.mark.asyncio
-async def test_update_tensorboard_run_async_from_dict():
- await test_update_tensorboard_run_async(request_type=dict)
-
-
-def test_update_tensorboard_run_field_headers():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
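- # For Update RPCs the resource name lives on the nested message, so the
- # routing key is 'tensorboard_run.name' rather than a top-level field.
- # The client builds this entry the same way the pager tests do with
- # gapic_v1.routing_header.to_grpc_metadata, e.g.:
- #
- #     ('x-goog-request-params', 'tensorboard_run.name=tensorboard_run.name/value')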
- request = tensorboard_service.UpdateTensorboardRunRequest() - - request.tensorboard_run.name = 'tensorboard_run.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_run), - '__call__') as call: - call.return_value = gca_tensorboard_run.TensorboardRun() - client.update_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_run.name=tensorboard_run.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_tensorboard_run_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.UpdateTensorboardRunRequest() - - request.tensorboard_run.name = 'tensorboard_run.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_run), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun()) - await client.update_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_run.name=tensorboard_run.name/value', - ) in kw['metadata'] - - -def test_update_tensorboard_run_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_run.TensorboardRun() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_tensorboard_run( - tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].tensorboard_run == gca_tensorboard_run.TensorboardRun(name='name_value') - assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value']) - - -def test_update_tensorboard_run_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError):
- client.update_tensorboard_run(
- tensorboard_service.UpdateTensorboardRunRequest(),
- tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'),
- update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
- )
-
-
-@pytest.mark.asyncio
-async def test_update_tensorboard_run_flattened_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.update_tensorboard_run),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.update_tensorboard_run(
- tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'),
- update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].tensorboard_run == gca_tensorboard_run.TensorboardRun(name='name_value')
- assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
-
-
-@pytest.mark.asyncio
-async def test_update_tensorboard_run_flattened_error_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.update_tensorboard_run(
- tensorboard_service.UpdateTensorboardRunRequest(),
- tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'),
- update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
- )
-
-
-def test_list_tensorboard_runs(transport: str = 'grpc', request_type=tensorboard_service.ListTensorboardRunsRequest):
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_tensorboard_runs),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = tensorboard_service.ListTensorboardRunsResponse(
- next_page_token='next_page_token_value',
- )
- response = client.list_tensorboard_runs(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == tensorboard_service.ListTensorboardRunsRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ListTensorboardRunsPager)
- assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_tensorboard_runs_from_dict():
- test_list_tensorboard_runs(request_type=dict)
-
-
-def test_list_tensorboard_runs_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_tensorboard_runs),
- '__call__') as call:
- client.list_tensorboard_runs()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == tensorboard_service.ListTensorboardRunsRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_tensorboard_runs_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ListTensorboardRunsRequest):
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_tensorboard_runs),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardRunsResponse(
- next_page_token='next_page_token_value',
- ))
- response = await client.list_tensorboard_runs(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == tensorboard_service.ListTensorboardRunsRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ListTensorboardRunsAsyncPager)
- assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_tensorboard_runs_async_from_dict():
- await test_list_tensorboard_runs_async(request_type=dict)
-
-
-def test_list_tensorboard_runs_field_headers():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = tensorboard_service.ListTensorboardRunsRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_tensorboard_runs),
- '__call__') as call:
- call.return_value = tensorboard_service.ListTensorboardRunsResponse()
- client.list_tensorboard_runs(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_tensorboard_runs_field_headers_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = tensorboard_service.ListTensorboardRunsRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_tensorboard_runs),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardRunsResponse())
- await client.list_tensorboard_runs(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-def test_list_tensorboard_runs_flattened():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_tensorboard_runs),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = tensorboard_service.ListTensorboardRunsResponse()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.list_tensorboard_runs(
- parent='parent_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
-
-
-def test_list_tensorboard_runs_flattened_error():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.list_tensorboard_runs(
- tensorboard_service.ListTensorboardRunsRequest(),
- parent='parent_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_list_tensorboard_runs_flattened_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_tensorboard_runs),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardRunsResponse())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.list_tensorboard_runs(
- parent='parent_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_tensorboard_runs_flattened_error_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.list_tensorboard_runs(
- tensorboard_service.ListTensorboardRunsRequest(),
- parent='parent_value',
- )
-
-
-def test_list_tensorboard_runs_pager():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_tensorboard_runs),
- '__call__') as call:
- # Set the response to a series of pages.
- call.side_effect = (
- tensorboard_service.ListTensorboardRunsResponse(
- tensorboard_runs=[
- tensorboard_run.TensorboardRun(),
- tensorboard_run.TensorboardRun(),
- tensorboard_run.TensorboardRun(),
- ],
- next_page_token='abc',
- ),
- tensorboard_service.ListTensorboardRunsResponse(
- tensorboard_runs=[],
- next_page_token='def',
- ),
- tensorboard_service.ListTensorboardRunsResponse(
- tensorboard_runs=[
- tensorboard_run.TensorboardRun(),
- ],
- next_page_token='ghi',
- ),
- tensorboard_service.ListTensorboardRunsResponse(
- tensorboard_runs=[
- tensorboard_run.TensorboardRun(),
- tensorboard_run.TensorboardRun(),
- ],
- ),
- RuntimeError,
- )
-
- metadata = ()
- metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata((
- ('parent', ''),
- )),
- )
- pager = client.list_tensorboard_runs(request={})
-
- assert pager._metadata == metadata
-
- results = [i for i in pager]
- assert len(results) == 6
- assert all(isinstance(i, tensorboard_run.TensorboardRun)
- for i in results)
-
-def test_list_tensorboard_runs_pages():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_tensorboard_runs),
- '__call__') as call:
- # Set the response to a series of pages.
- call.side_effect = (
- tensorboard_service.ListTensorboardRunsResponse(
- tensorboard_runs=[
- tensorboard_run.TensorboardRun(),
- tensorboard_run.TensorboardRun(),
- tensorboard_run.TensorboardRun(),
- ],
- next_page_token='abc',
- ),
- tensorboard_service.ListTensorboardRunsResponse(
- tensorboard_runs=[],
- next_page_token='def',
- ),
- tensorboard_service.ListTensorboardRunsResponse(
- tensorboard_runs=[
- tensorboard_run.TensorboardRun(),
- ],
- next_page_token='ghi',
- ),
- tensorboard_service.ListTensorboardRunsResponse(
- tensorboard_runs=[
- tensorboard_run.TensorboardRun(),
- tensorboard_run.TensorboardRun(),
- ],
- ),
- RuntimeError,
- )
- pages = list(client.list_tensorboard_runs(request={}).pages)
- for page_, token in zip(pages, ['abc','def','ghi', '']):
- assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_tensorboard_runs_async_pager():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_tensorboard_runs),
- '__call__', new_callable=mock.AsyncMock) as call:
- # Set the response to a series of pages.
- call.side_effect = (
- tensorboard_service.ListTensorboardRunsResponse(
- tensorboard_runs=[
- tensorboard_run.TensorboardRun(),
- tensorboard_run.TensorboardRun(),
- tensorboard_run.TensorboardRun(),
- ],
- next_page_token='abc',
- ),
- tensorboard_service.ListTensorboardRunsResponse(
- tensorboard_runs=[],
- next_page_token='def',
- ),
- tensorboard_service.ListTensorboardRunsResponse(
- tensorboard_runs=[
- tensorboard_run.TensorboardRun(),
- ],
- next_page_token='ghi',
- ),
- tensorboard_service.ListTensorboardRunsResponse(
- tensorboard_runs=[
- tensorboard_run.TensorboardRun(),
- tensorboard_run.TensorboardRun(),
- ],
- ),
- RuntimeError,
- )
- async_pager = await client.list_tensorboard_runs(request={},)
- assert async_pager.next_page_token == 'abc'
- responses = []
- async for response in async_pager:
- responses.append(response)
-
- assert len(responses) == 6
- assert all(isinstance(i, tensorboard_run.TensorboardRun)
- for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_tensorboard_runs_async_pages():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_tensorboard_runs),
- '__call__', new_callable=mock.AsyncMock) as call:
- # Set the response to a series of pages.
- call.side_effect = (
- tensorboard_service.ListTensorboardRunsResponse(
- tensorboard_runs=[
- tensorboard_run.TensorboardRun(),
- tensorboard_run.TensorboardRun(),
- tensorboard_run.TensorboardRun(),
- ],
- next_page_token='abc',
- ),
- tensorboard_service.ListTensorboardRunsResponse(
- tensorboard_runs=[],
- next_page_token='def',
- ),
- tensorboard_service.ListTensorboardRunsResponse(
- tensorboard_runs=[
- tensorboard_run.TensorboardRun(),
- ],
- next_page_token='ghi',
- ),
- tensorboard_service.ListTensorboardRunsResponse(
- tensorboard_runs=[
- tensorboard_run.TensorboardRun(),
- tensorboard_run.TensorboardRun(),
- ],
- ),
- RuntimeError,
- )
- pages = []
- async for page_ in (await client.list_tensorboard_runs(request={})).pages:
- pages.append(page_)
- for page_, token in zip(pages, ['abc','def','ghi', '']):
- assert page_.raw_page.next_page_token == token
-
-def test_delete_tensorboard_run(transport: str = 'grpc', request_type=tensorboard_service.DeleteTensorboardRunRequest):
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_tensorboard_run),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = operations_pb2.Operation(name='operations/spam')
- response = client.delete_tensorboard_run(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == tensorboard_service.DeleteTensorboardRunRequest()
-
- # Establish that the response is the type that we expect.
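- # Delete is a long-running operation: the transport returns a raw
- # operations_pb2.Operation, which the client wraps in an api_core
- # operation future; response.result() would poll until the operation
- # completes, hence the future.Future check below.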
- assert isinstance(response, future.Future) - - -def test_delete_tensorboard_run_from_dict(): - test_delete_tensorboard_run(request_type=dict) - - -def test_delete_tensorboard_run_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_run), - '__call__') as call: - client.delete_tensorboard_run() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardRunRequest() - - -@pytest.mark.asyncio -async def test_delete_tensorboard_run_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.DeleteTensorboardRunRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardRunRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_tensorboard_run_async_from_dict(): - await test_delete_tensorboard_run_async(request_type=dict) - - -def test_delete_tensorboard_run_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.DeleteTensorboardRunRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_run), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_tensorboard_run_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.DeleteTensorboardRunRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
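- # LRO stubs are faked the same way as unary ones: the canned
- # operations_pb2.Operation below is what the awaited call resolves to
- # before the client wraps it in an async operation future.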
- with mock.patch.object(
- type(client.transport.delete_tensorboard_run),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
- await client.delete_tensorboard_run(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-def test_delete_tensorboard_run_flattened():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_tensorboard_run),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = operations_pb2.Operation(name='operations/op')
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.delete_tensorboard_run(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-def test_delete_tensorboard_run_flattened_error():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.delete_tensorboard_run(
- tensorboard_service.DeleteTensorboardRunRequest(),
- name='name_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_delete_tensorboard_run_flattened_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_tensorboard_run),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- operations_pb2.Operation(name='operations/spam')
- )
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.delete_tensorboard_run(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_tensorboard_run_flattened_error_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError): - await client.delete_tensorboard_run( - tensorboard_service.DeleteTensorboardRunRequest(), - name='name_value', - ) - - -def test_create_tensorboard_time_series(transport: str = 'grpc', request_type=tensorboard_service.CreateTensorboardTimeSeriesRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries( - name='name_value', - display_name='display_name_value', - description='description_value', - value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, - etag='etag_value', - plugin_name='plugin_name_value', - plugin_data=b'plugin_data_blob', - ) - response = client.create_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardTimeSeriesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.value_type == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR - assert response.etag == 'etag_value' - assert response.plugin_name == 'plugin_name_value' - assert response.plugin_data == b'plugin_data_blob' - - -def test_create_tensorboard_time_series_from_dict(): - test_create_tensorboard_time_series(request_type=dict) - - -def test_create_tensorboard_time_series_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_time_series), - '__call__') as call: - client.create_tensorboard_time_series() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardTimeSeriesRequest() - - -@pytest.mark.asyncio -async def test_create_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.CreateTensorboardTimeSeriesRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries(
- name='name_value',
- display_name='display_name_value',
- description='description_value',
- value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
- etag='etag_value',
- plugin_name='plugin_name_value',
- plugin_data=b'plugin_data_blob',
- ))
- response = await client.create_tensorboard_time_series(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == tensorboard_service.CreateTensorboardTimeSeriesRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries)
- assert response.name == 'name_value'
- assert response.display_name == 'display_name_value'
- assert response.description == 'description_value'
- assert response.value_type == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR
- assert response.etag == 'etag_value'
- assert response.plugin_name == 'plugin_name_value'
- assert response.plugin_data == b'plugin_data_blob'
-
-
-@pytest.mark.asyncio
-async def test_create_tensorboard_time_series_async_from_dict():
- await test_create_tensorboard_time_series_async(request_type=dict)
-
-
-def test_create_tensorboard_time_series_field_headers():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = tensorboard_service.CreateTensorboardTimeSeriesRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_tensorboard_time_series),
- '__call__') as call:
- call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries()
- client.create_tensorboard_time_series(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_create_tensorboard_time_series_field_headers_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = tensorboard_service.CreateTensorboardTimeSeriesRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_tensorboard_time_series),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries())
- await client.create_tensorboard_time_series(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-def test_create_tensorboard_time_series_flattened():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_tensorboard_time_series),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.create_tensorboard_time_series(
- parent='parent_value',
- tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'),
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
- assert args[0].tensorboard_time_series == gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value')
-
-
-def test_create_tensorboard_time_series_flattened_error():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.create_tensorboard_time_series(
- tensorboard_service.CreateTensorboardTimeSeriesRequest(),
- parent='parent_value',
- tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'),
- )
-
-
-@pytest.mark.asyncio
-async def test_create_tensorboard_time_series_flattened_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_tensorboard_time_series),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.create_tensorboard_time_series(
- parent='parent_value',
- tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'),
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
- assert args[0].tensorboard_time_series == gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value')
-
-
-@pytest.mark.asyncio
-async def test_create_tensorboard_time_series_flattened_error_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError): - await client.create_tensorboard_time_series( - tensorboard_service.CreateTensorboardTimeSeriesRequest(), - parent='parent_value', - tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), - ) - - -def test_get_tensorboard_time_series(transport: str = 'grpc', request_type=tensorboard_service.GetTensorboardTimeSeriesRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_time_series.TensorboardTimeSeries( - name='name_value', - display_name='display_name_value', - description='description_value', - value_type=tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, - etag='etag_value', - plugin_name='plugin_name_value', - plugin_data=b'plugin_data_blob', - ) - response = client.get_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardTimeSeriesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_time_series.TensorboardTimeSeries) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.value_type == tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR - assert response.etag == 'etag_value' - assert response.plugin_name == 'plugin_name_value' - assert response.plugin_data == b'plugin_data_blob' - - -def test_get_tensorboard_time_series_from_dict(): - test_get_tensorboard_time_series(request_type=dict) - - -def test_get_tensorboard_time_series_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_time_series), - '__call__') as call: - client.get_tensorboard_time_series() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardTimeSeriesRequest() - - -@pytest.mark.asyncio -async def test_get_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.GetTensorboardTimeSeriesRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_time_series.TensorboardTimeSeries(
- name='name_value',
- display_name='display_name_value',
- description='description_value',
- value_type=tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
- etag='etag_value',
- plugin_name='plugin_name_value',
- plugin_data=b'plugin_data_blob',
- ))
- response = await client.get_tensorboard_time_series(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == tensorboard_service.GetTensorboardTimeSeriesRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, tensorboard_time_series.TensorboardTimeSeries)
- assert response.name == 'name_value'
- assert response.display_name == 'display_name_value'
- assert response.description == 'description_value'
- assert response.value_type == tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR
- assert response.etag == 'etag_value'
- assert response.plugin_name == 'plugin_name_value'
- assert response.plugin_data == b'plugin_data_blob'
-
-
-@pytest.mark.asyncio
-async def test_get_tensorboard_time_series_async_from_dict():
- await test_get_tensorboard_time_series_async(request_type=dict)
-
-
-def test_get_tensorboard_time_series_field_headers():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = tensorboard_service.GetTensorboardTimeSeriesRequest()
-
- request.name = 'name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_tensorboard_time_series),
- '__call__') as call:
- call.return_value = tensorboard_time_series.TensorboardTimeSeries()
- client.get_tensorboard_time_series(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_get_tensorboard_time_series_field_headers_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = tensorboard_service.GetTensorboardTimeSeriesRequest()
-
- request.name = 'name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_tensorboard_time_series),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_time_series.TensorboardTimeSeries())
- await client.get_tensorboard_time_series(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-def test_get_tensorboard_time_series_flattened():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_tensorboard_time_series),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = tensorboard_time_series.TensorboardTimeSeries()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.get_tensorboard_time_series(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-def test_get_tensorboard_time_series_flattened_error():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.get_tensorboard_time_series(
- tensorboard_service.GetTensorboardTimeSeriesRequest(),
- name='name_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_get_tensorboard_time_series_flattened_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_tensorboard_time_series),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_time_series.TensorboardTimeSeries())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.get_tensorboard_time_series(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_get_tensorboard_time_series_flattened_error_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.get_tensorboard_time_series(
- tensorboard_service.GetTensorboardTimeSeriesRequest(),
- name='name_value',
- )
-
-
-def test_update_tensorboard_time_series(transport: str = 'grpc', request_type=tensorboard_service.UpdateTensorboardTimeSeriesRequest):
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.update_tensorboard_time_series),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries(
- name='name_value',
- display_name='display_name_value',
- description='description_value',
- value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
- etag='etag_value',
- plugin_name='plugin_name_value',
- plugin_data=b'plugin_data_blob',
- )
- response = client.update_tensorboard_time_series(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == tensorboard_service.UpdateTensorboardTimeSeriesRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries)
- assert response.name == 'name_value'
- assert response.display_name == 'display_name_value'
- assert response.description == 'description_value'
- assert response.value_type == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR
- assert response.etag == 'etag_value'
- assert response.plugin_name == 'plugin_name_value'
- assert response.plugin_data == b'plugin_data_blob'
-
-
-def test_update_tensorboard_time_series_from_dict():
- test_update_tensorboard_time_series(request_type=dict)
-
-
-def test_update_tensorboard_time_series_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.update_tensorboard_time_series),
- '__call__') as call:
- client.update_tensorboard_time_series()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == tensorboard_service.UpdateTensorboardTimeSeriesRequest()
-
-
-@pytest.mark.asyncio
-async def test_update_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.UpdateTensorboardTimeSeriesRequest):
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.update_tensorboard_time_series),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries(
- name='name_value',
- display_name='display_name_value',
- description='description_value',
- value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR,
- etag='etag_value',
- plugin_name='plugin_name_value',
- plugin_data=b'plugin_data_blob',
- ))
- response = await client.update_tensorboard_time_series(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == tensorboard_service.UpdateTensorboardTimeSeriesRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.value_type == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR - assert response.etag == 'etag_value' - assert response.plugin_name == 'plugin_name_value' - assert response.plugin_data == b'plugin_data_blob' - - -@pytest.mark.asyncio -async def test_update_tensorboard_time_series_async_from_dict(): - await test_update_tensorboard_time_series_async(request_type=dict) - - -def test_update_tensorboard_time_series_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.UpdateTensorboardTimeSeriesRequest() - - request.tensorboard_time_series.name = 'tensorboard_time_series.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_time_series), - '__call__') as call: - call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() - client.update_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_time_series.name=tensorboard_time_series.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_tensorboard_time_series_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.UpdateTensorboardTimeSeriesRequest() - - request.tensorboard_time_series.name = 'tensorboard_time_series.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_time_series), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries()) - await client.update_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_time_series.name=tensorboard_time_series.name/value', - ) in kw['metadata'] - - -def test_update_tensorboard_time_series_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.update_tensorboard_time_series(
- tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'),
- update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].tensorboard_time_series == gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value')
- assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
-
-
-def test_update_tensorboard_time_series_flattened_error():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.update_tensorboard_time_series(
- tensorboard_service.UpdateTensorboardTimeSeriesRequest(),
- tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'),
- update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
- )
-
-
-@pytest.mark.asyncio
-async def test_update_tensorboard_time_series_flattened_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.update_tensorboard_time_series),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.update_tensorboard_time_series(
- tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'),
- update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].tensorboard_time_series == gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value')
- assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
-
-
-@pytest.mark.asyncio
-async def test_update_tensorboard_time_series_flattened_error_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.update_tensorboard_time_series(
- tensorboard_service.UpdateTensorboardTimeSeriesRequest(),
- tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'),
- update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
- )
-
-
-def test_list_tensorboard_time_series(transport: str = 'grpc', request_type=tensorboard_service.ListTensorboardTimeSeriesRequest):
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_tensorboard_time_series),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse(
- next_page_token='next_page_token_value',
- )
- response = client.list_tensorboard_time_series(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == tensorboard_service.ListTensorboardTimeSeriesRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ListTensorboardTimeSeriesPager)
- assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_tensorboard_time_series_from_dict():
- test_list_tensorboard_time_series(request_type=dict)
-
-
-def test_list_tensorboard_time_series_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_tensorboard_time_series),
- '__call__') as call:
- client.list_tensorboard_time_series()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == tensorboard_service.ListTensorboardTimeSeriesRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ListTensorboardTimeSeriesRequest):
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_tensorboard_time_series),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardTimeSeriesResponse(
- next_page_token='next_page_token_value',
- ))
- response = await client.list_tensorboard_time_series(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == tensorboard_service.ListTensorboardTimeSeriesRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ListTensorboardTimeSeriesAsyncPager)
- assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_tensorboard_time_series_async_from_dict():
- await test_list_tensorboard_time_series_async(request_type=dict)
-
-
-def test_list_tensorboard_time_series_field_headers():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = tensorboard_service.ListTensorboardTimeSeriesRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object( - type(client.transport.list_tensorboard_time_series), - '__call__') as call: - call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse() - client.list_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_tensorboard_time_series_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.ListTensorboardTimeSeriesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_time_series), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardTimeSeriesResponse()) - await client.list_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_tensorboard_time_series_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_tensorboard_time_series( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -def test_list_tensorboard_time_series_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_tensorboard_time_series( - tensorboard_service.ListTensorboardTimeSeriesRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_tensorboard_time_series_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardTimeSeriesResponse())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.list_tensorboard_time_series(
- parent='parent_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_tensorboard_time_series_flattened_error_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.list_tensorboard_time_series(
- tensorboard_service.ListTensorboardTimeSeriesRequest(),
- parent='parent_value',
- )
-
-
-def test_list_tensorboard_time_series_pager():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_tensorboard_time_series),
- '__call__') as call:
- # Set the response to a series of pages.
- call.side_effect = (
- tensorboard_service.ListTensorboardTimeSeriesResponse(
- tensorboard_time_series=[
- tensorboard_time_series.TensorboardTimeSeries(),
- tensorboard_time_series.TensorboardTimeSeries(),
- tensorboard_time_series.TensorboardTimeSeries(),
- ],
- next_page_token='abc',
- ),
- tensorboard_service.ListTensorboardTimeSeriesResponse(
- tensorboard_time_series=[],
- next_page_token='def',
- ),
- tensorboard_service.ListTensorboardTimeSeriesResponse(
- tensorboard_time_series=[
- tensorboard_time_series.TensorboardTimeSeries(),
- ],
- next_page_token='ghi',
- ),
- tensorboard_service.ListTensorboardTimeSeriesResponse(
- tensorboard_time_series=[
- tensorboard_time_series.TensorboardTimeSeries(),
- tensorboard_time_series.TensorboardTimeSeries(),
- ],
- ),
- RuntimeError,
- )
-
- metadata = ()
- metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata((
- ('parent', ''),
- )),
- )
- pager = client.list_tensorboard_time_series(request={})
-
- assert pager._metadata == metadata
-
- results = [i for i in pager]
- assert len(results) == 6
- assert all(isinstance(i, tensorboard_time_series.TensorboardTimeSeries)
- for i in results)
-
-def test_list_tensorboard_time_series_pages():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_tensorboard_time_series),
- '__call__') as call:
- # Set the response to a series of pages.
- call.side_effect = (
- tensorboard_service.ListTensorboardTimeSeriesResponse(
- tensorboard_time_series=[
- tensorboard_time_series.TensorboardTimeSeries(),
- tensorboard_time_series.TensorboardTimeSeries(),
- tensorboard_time_series.TensorboardTimeSeries(),
- ],
- next_page_token='abc',
- ),
- tensorboard_service.ListTensorboardTimeSeriesResponse(
- tensorboard_time_series=[],
- next_page_token='def',
- ),
- tensorboard_service.ListTensorboardTimeSeriesResponse(
- tensorboard_time_series=[
- tensorboard_time_series.TensorboardTimeSeries(),
- ],
- next_page_token='ghi',
- ),
- tensorboard_service.ListTensorboardTimeSeriesResponse(
- tensorboard_time_series=[
- tensorboard_time_series.TensorboardTimeSeries(),
- tensorboard_time_series.TensorboardTimeSeries(),
- ],
- ),
- RuntimeError,
- )
- pages = list(client.list_tensorboard_time_series(request={}).pages)
- for page_, token in zip(pages, ['abc','def','ghi', '']):
- assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_tensorboard_time_series_async_pager():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_tensorboard_time_series),
- '__call__', new_callable=mock.AsyncMock) as call:
- # Set the response to a series of pages.
- call.side_effect = (
- tensorboard_service.ListTensorboardTimeSeriesResponse(
- tensorboard_time_series=[
- tensorboard_time_series.TensorboardTimeSeries(),
- tensorboard_time_series.TensorboardTimeSeries(),
- tensorboard_time_series.TensorboardTimeSeries(),
- ],
- next_page_token='abc',
- ),
- tensorboard_service.ListTensorboardTimeSeriesResponse(
- tensorboard_time_series=[],
- next_page_token='def',
- ),
- tensorboard_service.ListTensorboardTimeSeriesResponse(
- tensorboard_time_series=[
- tensorboard_time_series.TensorboardTimeSeries(),
- ],
- next_page_token='ghi',
- ),
- tensorboard_service.ListTensorboardTimeSeriesResponse(
- tensorboard_time_series=[
- tensorboard_time_series.TensorboardTimeSeries(),
- tensorboard_time_series.TensorboardTimeSeries(),
- ],
- ),
- RuntimeError,
- )
- async_pager = await client.list_tensorboard_time_series(request={},)
- assert async_pager.next_page_token == 'abc'
- responses = []
- async for response in async_pager:
- responses.append(response)
-
- assert len(responses) == 6
- assert all(isinstance(i, tensorboard_time_series.TensorboardTimeSeries)
- for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_tensorboard_time_series_async_pages():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_tensorboard_time_series),
- '__call__', new_callable=mock.AsyncMock) as call:
- # Set the response to a series of pages.
- call.side_effect = ( - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[ - tensorboard_time_series.TensorboardTimeSeries(), - tensorboard_time_series.TensorboardTimeSeries(), - tensorboard_time_series.TensorboardTimeSeries(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[ - tensorboard_time_series.TensorboardTimeSeries(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[ - tensorboard_time_series.TensorboardTimeSeries(), - tensorboard_time_series.TensorboardTimeSeries(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_tensorboard_time_series(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_tensorboard_time_series(transport: str = 'grpc', request_type=tensorboard_service.DeleteTensorboardTimeSeriesRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardTimeSeriesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_tensorboard_time_series_from_dict(): - test_delete_tensorboard_time_series(request_type=dict) - - -def test_delete_tensorboard_time_series_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_time_series), - '__call__') as call: - client.delete_tensorboard_time_series() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardTimeSeriesRequest() - - -@pytest.mark.asyncio -async def test_delete_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.DeleteTensorboardTimeSeriesRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardTimeSeriesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_tensorboard_time_series_async_from_dict(): - await test_delete_tensorboard_time_series_async(request_type=dict) - - -def test_delete_tensorboard_time_series_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.DeleteTensorboardTimeSeriesRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_time_series), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_tensorboard_time_series_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.DeleteTensorboardTimeSeriesRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_time_series), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_tensorboard_time_series_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.delete_tensorboard_time_series(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-def test_delete_tensorboard_time_series_flattened_error():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.delete_tensorboard_time_series(
- tensorboard_service.DeleteTensorboardTimeSeriesRequest(),
- name='name_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_delete_tensorboard_time_series_flattened_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_tensorboard_time_series),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- operations_pb2.Operation(name='operations/spam')
- )
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.delete_tensorboard_time_series(
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_tensorboard_time_series_flattened_error_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.delete_tensorboard_time_series(
- tensorboard_service.DeleteTensorboardTimeSeriesRequest(),
- name='name_value',
- )
-
-
-def test_read_tensorboard_time_series_data(transport: str = 'grpc', request_type=tensorboard_service.ReadTensorboardTimeSeriesDataRequest):
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.read_tensorboard_time_series_data),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse()
- response = client.read_tensorboard_time_series_data(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == tensorboard_service.ReadTensorboardTimeSeriesDataRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, tensorboard_service.ReadTensorboardTimeSeriesDataResponse)
-
-
-def test_read_tensorboard_time_series_data_from_dict():
- test_read_tensorboard_time_series_data(request_type=dict)
-
-
-def test_read_tensorboard_time_series_data_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.read_tensorboard_time_series_data),
- '__call__') as call:
- client.read_tensorboard_time_series_data()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == tensorboard_service.ReadTensorboardTimeSeriesDataRequest()
-
-
-@pytest.mark.asyncio
-async def test_read_tensorboard_time_series_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ReadTensorboardTimeSeriesDataRequest):
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.read_tensorboard_time_series_data),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ReadTensorboardTimeSeriesDataResponse())
- response = await client.read_tensorboard_time_series_data(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == tensorboard_service.ReadTensorboardTimeSeriesDataRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, tensorboard_service.ReadTensorboardTimeSeriesDataResponse)
-
-
-@pytest.mark.asyncio
-async def test_read_tensorboard_time_series_data_async_from_dict():
- await test_read_tensorboard_time_series_data_async(request_type=dict)
-
-
-def test_read_tensorboard_time_series_data_field_headers():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest()
-
- request.tensorboard_time_series = 'tensorboard_time_series/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.read_tensorboard_time_series_data),
- '__call__') as call:
- call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse()
- client.read_tensorboard_time_series_data(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_time_series=tensorboard_time_series/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_read_tensorboard_time_series_data_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest() - - request.tensorboard_time_series = 'tensorboard_time_series/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_time_series_data), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ReadTensorboardTimeSeriesDataResponse()) - await client.read_tensorboard_time_series_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_time_series=tensorboard_time_series/value', - ) in kw['metadata'] - - -def test_read_tensorboard_time_series_data_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_time_series_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.read_tensorboard_time_series_data( - tensorboard_time_series='tensorboard_time_series_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].tensorboard_time_series == 'tensorboard_time_series_value' - - -def test_read_tensorboard_time_series_data_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.read_tensorboard_time_series_data( - tensorboard_service.ReadTensorboardTimeSeriesDataRequest(), - tensorboard_time_series='tensorboard_time_series_value', - ) - - -@pytest.mark.asyncio -async def test_read_tensorboard_time_series_data_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_time_series_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ReadTensorboardTimeSeriesDataResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.read_tensorboard_time_series_data( - tensorboard_time_series='tensorboard_time_series_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].tensorboard_time_series == 'tensorboard_time_series_value' - - -@pytest.mark.asyncio -async def test_read_tensorboard_time_series_data_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.read_tensorboard_time_series_data( - tensorboard_service.ReadTensorboardTimeSeriesDataRequest(), - tensorboard_time_series='tensorboard_time_series_value', - ) - - -def test_read_tensorboard_blob_data(transport: str = 'grpc', request_type=tensorboard_service.ReadTensorboardBlobDataRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_blob_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([tensorboard_service.ReadTensorboardBlobDataResponse()]) - response = client.read_tensorboard_blob_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ReadTensorboardBlobDataRequest() - - # Establish that the response is the type that we expect. - for message in response: - assert isinstance(message, tensorboard_service.ReadTensorboardBlobDataResponse) - - -def test_read_tensorboard_blob_data_from_dict(): - test_read_tensorboard_blob_data(request_type=dict) - - -def test_read_tensorboard_blob_data_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_blob_data), - '__call__') as call: - client.read_tensorboard_blob_data() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ReadTensorboardBlobDataRequest() - - -@pytest.mark.asyncio -async def test_read_tensorboard_blob_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ReadTensorboardBlobDataRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_blob_data), - '__call__') as call: - # Designate an appropriate return value for the call. 
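- # (ReadTensorboardBlobData is a server-streaming RPC, so the fake call
- # object below is modeled on aio.UnaryStreamCall, with an async read()
- # that yields one response message per invocation.)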
- call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - call.return_value.read = mock.AsyncMock(side_effect=[tensorboard_service.ReadTensorboardBlobDataResponse()]) - response = await client.read_tensorboard_blob_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ReadTensorboardBlobDataRequest() - - # Establish that the response is the type that we expect. - message = await response.read() - assert isinstance(message, tensorboard_service.ReadTensorboardBlobDataResponse) - - -@pytest.mark.asyncio -async def test_read_tensorboard_blob_data_async_from_dict(): - await test_read_tensorboard_blob_data_async(request_type=dict) - - -def test_read_tensorboard_blob_data_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.ReadTensorboardBlobDataRequest() - - request.time_series = 'time_series/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_blob_data), - '__call__') as call: - call.return_value = iter([tensorboard_service.ReadTensorboardBlobDataResponse()]) - client.read_tensorboard_blob_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'time_series=time_series/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_read_tensorboard_blob_data_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.ReadTensorboardBlobDataRequest() - - request.time_series = 'time_series/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_blob_data), - '__call__') as call: - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - call.return_value.read = mock.AsyncMock(side_effect=[tensorboard_service.ReadTensorboardBlobDataResponse()]) - await client.read_tensorboard_blob_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'time_series=time_series/value', - ) in kw['metadata'] - - -def test_read_tensorboard_blob_data_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_blob_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([tensorboard_service.ReadTensorboardBlobDataResponse()]) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.read_tensorboard_blob_data( - time_series='time_series_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].time_series == 'time_series_value' - - -def test_read_tensorboard_blob_data_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.read_tensorboard_blob_data( - tensorboard_service.ReadTensorboardBlobDataRequest(), - time_series='time_series_value', - ) - - -@pytest.mark.asyncio -async def test_read_tensorboard_blob_data_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_blob_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([tensorboard_service.ReadTensorboardBlobDataResponse()]) - - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.read_tensorboard_blob_data( - time_series='time_series_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].time_series == 'time_series_value' - - -@pytest.mark.asyncio -async def test_read_tensorboard_blob_data_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.read_tensorboard_blob_data( - tensorboard_service.ReadTensorboardBlobDataRequest(), - time_series='time_series_value', - ) - - -def test_write_tensorboard_run_data(transport: str = 'grpc', request_type=tensorboard_service.WriteTensorboardRunDataRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_run_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.WriteTensorboardRunDataResponse( - ) - response = client.write_tensorboard_run_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.WriteTensorboardRunDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_service.WriteTensorboardRunDataResponse) - - -def test_write_tensorboard_run_data_from_dict(): - test_write_tensorboard_run_data(request_type=dict) - - -def test_write_tensorboard_run_data_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work.
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.write_tensorboard_run_data),
- '__call__') as call:
- client.write_tensorboard_run_data()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == tensorboard_service.WriteTensorboardRunDataRequest()
-
-
-@pytest.mark.asyncio
-async def test_write_tensorboard_run_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.WriteTensorboardRunDataRequest):
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.write_tensorboard_run_data),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardRunDataResponse(
- ))
- response = await client.write_tensorboard_run_data(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == tensorboard_service.WriteTensorboardRunDataRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, tensorboard_service.WriteTensorboardRunDataResponse)
-
-
-@pytest.mark.asyncio
-async def test_write_tensorboard_run_data_async_from_dict():
- await test_write_tensorboard_run_data_async(request_type=dict)
-
-
-def test_write_tensorboard_run_data_field_headers():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = tensorboard_service.WriteTensorboardRunDataRequest()
-
- request.tensorboard_run = 'tensorboard_run/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.write_tensorboard_run_data),
- '__call__') as call:
- call.return_value = tensorboard_service.WriteTensorboardRunDataResponse()
- client.write_tensorboard_run_data(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'tensorboard_run=tensorboard_run/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_write_tensorboard_run_data_field_headers_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = tensorboard_service.WriteTensorboardRunDataRequest()
-
- request.tensorboard_run = 'tensorboard_run/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object( - type(client.transport.write_tensorboard_run_data), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardRunDataResponse()) - await client.write_tensorboard_run_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_run=tensorboard_run/value', - ) in kw['metadata'] - - -def test_write_tensorboard_run_data_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_run_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.WriteTensorboardRunDataResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.write_tensorboard_run_data( - tensorboard_run='tensorboard_run_value', - time_series_data=[tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].tensorboard_run == 'tensorboard_run_value' - assert args[0].time_series_data == [tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')] - - -def test_write_tensorboard_run_data_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.write_tensorboard_run_data( - tensorboard_service.WriteTensorboardRunDataRequest(), - tensorboard_run='tensorboard_run_value', - time_series_data=[tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')], - ) - - -@pytest.mark.asyncio -async def test_write_tensorboard_run_data_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_run_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.WriteTensorboardRunDataResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardRunDataResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.write_tensorboard_run_data( - tensorboard_run='tensorboard_run_value', - time_series_data=[tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].tensorboard_run == 'tensorboard_run_value' - assert args[0].time_series_data == [tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')] - - -@pytest.mark.asyncio -async def test_write_tensorboard_run_data_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.write_tensorboard_run_data( - tensorboard_service.WriteTensorboardRunDataRequest(), - tensorboard_run='tensorboard_run_value', - time_series_data=[tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')], - ) - - -def test_export_tensorboard_time_series_data(transport: str = 'grpc', request_type=tensorboard_service.ExportTensorboardTimeSeriesDataRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_tensorboard_time_series_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - next_page_token='next_page_token_value', - ) - response = client.export_tensorboard_time_series_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ExportTensorboardTimeSeriesDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ExportTensorboardTimeSeriesDataPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_export_tensorboard_time_series_data_from_dict(): - test_export_tensorboard_time_series_data(request_type=dict) - - -def test_export_tensorboard_time_series_data_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_tensorboard_time_series_data), - '__call__') as call: - client.export_tensorboard_time_series_data() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ExportTensorboardTimeSeriesDataRequest() - - -@pytest.mark.asyncio -async def test_export_tensorboard_time_series_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ExportTensorboardTimeSeriesDataRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
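- # (grpc_helpers_async.FakeUnaryUnaryCall wraps the response so that the
- # mocked stub returns an awaitable, mirroring a real async gRPC call.)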
- with mock.patch.object(
- type(client.transport.export_tensorboard_time_series_data),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
- next_page_token='next_page_token_value',
- ))
- response = await client.export_tensorboard_time_series_data(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == tensorboard_service.ExportTensorboardTimeSeriesDataRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ExportTensorboardTimeSeriesDataAsyncPager)
- assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_export_tensorboard_time_series_data_async_from_dict():
- await test_export_tensorboard_time_series_data_async(request_type=dict)
-
-
-def test_export_tensorboard_time_series_data_field_headers():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest()
-
- request.tensorboard_time_series = 'tensorboard_time_series/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.export_tensorboard_time_series_data),
- '__call__') as call:
- call.return_value = tensorboard_service.ExportTensorboardTimeSeriesDataResponse()
- client.export_tensorboard_time_series_data(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'tensorboard_time_series=tensorboard_time_series/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_export_tensorboard_time_series_data_field_headers_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest()
-
- request.tensorboard_time_series = 'tensorboard_time_series/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.export_tensorboard_time_series_data),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ExportTensorboardTimeSeriesDataResponse())
- await client.export_tensorboard_time_series_data(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'tensorboard_time_series=tensorboard_time_series/value',
- ) in kw['metadata']
-
-
-def test_export_tensorboard_time_series_data_flattened():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.export_tensorboard_time_series_data),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = tensorboard_service.ExportTensorboardTimeSeriesDataResponse()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.export_tensorboard_time_series_data(
- tensorboard_time_series='tensorboard_time_series_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].tensorboard_time_series == 'tensorboard_time_series_value'
-
-
-def test_export_tensorboard_time_series_data_flattened_error():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.export_tensorboard_time_series_data(
- tensorboard_service.ExportTensorboardTimeSeriesDataRequest(),
- tensorboard_time_series='tensorboard_time_series_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_export_tensorboard_time_series_data_flattened_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.export_tensorboard_time_series_data),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = tensorboard_service.ExportTensorboardTimeSeriesDataResponse()
-
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ExportTensorboardTimeSeriesDataResponse())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.export_tensorboard_time_series_data(
- tensorboard_time_series='tensorboard_time_series_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].tensorboard_time_series == 'tensorboard_time_series_value'
-
-
-@pytest.mark.asyncio
-async def test_export_tensorboard_time_series_data_flattened_error_async():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.export_tensorboard_time_series_data(
- tensorboard_service.ExportTensorboardTimeSeriesDataRequest(),
- tensorboard_time_series='tensorboard_time_series_value',
- )
-
-
-def test_export_tensorboard_time_series_data_pager():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.export_tensorboard_time_series_data),
- '__call__') as call:
- # Set the response to a series of pages.
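- # (Four pages holding 3, 0, 1 and 2 items respectively; the trailing
- # RuntimeError makes the test fail fast if the pager ever requests a
- # page beyond the last one.)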
- call.side_effect = (
- tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
- time_series_data_points=[
- tensorboard_data.TimeSeriesDataPoint(),
- tensorboard_data.TimeSeriesDataPoint(),
- tensorboard_data.TimeSeriesDataPoint(),
- ],
- next_page_token='abc',
- ),
- tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
- time_series_data_points=[],
- next_page_token='def',
- ),
- tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
- time_series_data_points=[
- tensorboard_data.TimeSeriesDataPoint(),
- ],
- next_page_token='ghi',
- ),
- tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
- time_series_data_points=[
- tensorboard_data.TimeSeriesDataPoint(),
- tensorboard_data.TimeSeriesDataPoint(),
- ],
- ),
- RuntimeError,
- )
-
- metadata = ()
- metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata((
- ('tensorboard_time_series', ''),
- )),
- )
- pager = client.export_tensorboard_time_series_data(request={})
-
- assert pager._metadata == metadata
-
- results = [i for i in pager]
- assert len(results) == 6
- assert all(isinstance(i, tensorboard_data.TimeSeriesDataPoint)
- for i in results)
-
-def test_export_tensorboard_time_series_data_pages():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.export_tensorboard_time_series_data),
- '__call__') as call:
- # Set the response to a series of pages.
- call.side_effect = (
- tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
- time_series_data_points=[
- tensorboard_data.TimeSeriesDataPoint(),
- tensorboard_data.TimeSeriesDataPoint(),
- tensorboard_data.TimeSeriesDataPoint(),
- ],
- next_page_token='abc',
- ),
- tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
- time_series_data_points=[],
- next_page_token='def',
- ),
- tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
- time_series_data_points=[
- tensorboard_data.TimeSeriesDataPoint(),
- ],
- next_page_token='ghi',
- ),
- tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
- time_series_data_points=[
- tensorboard_data.TimeSeriesDataPoint(),
- tensorboard_data.TimeSeriesDataPoint(),
- ],
- ),
- RuntimeError,
- )
- pages = list(client.export_tensorboard_time_series_data(request={}).pages)
- for page_, token in zip(pages, ['abc','def','ghi', '']):
- assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_export_tensorboard_time_series_data_async_pager():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.export_tensorboard_time_series_data),
- '__call__', new_callable=mock.AsyncMock) as call:
- # Set the response to a series of pages.
- call.side_effect = (
- tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
- time_series_data_points=[
- tensorboard_data.TimeSeriesDataPoint(),
- tensorboard_data.TimeSeriesDataPoint(),
- tensorboard_data.TimeSeriesDataPoint(),
- ],
- next_page_token='abc',
- ),
- tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
- time_series_data_points=[],
- next_page_token='def',
- ),
- tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
- time_series_data_points=[
- tensorboard_data.TimeSeriesDataPoint(),
- ],
- next_page_token='ghi',
- ),
- tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
- time_series_data_points=[
- tensorboard_data.TimeSeriesDataPoint(),
- tensorboard_data.TimeSeriesDataPoint(),
- ],
- ),
- RuntimeError,
- )
- async_pager = await client.export_tensorboard_time_series_data(request={},)
- assert async_pager.next_page_token == 'abc'
- responses = []
- async for response in async_pager:
- responses.append(response)
-
- assert len(responses) == 6
- assert all(isinstance(i, tensorboard_data.TimeSeriesDataPoint)
- for i in responses)
-
-@pytest.mark.asyncio
-async def test_export_tensorboard_time_series_data_async_pages():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.export_tensorboard_time_series_data),
- '__call__', new_callable=mock.AsyncMock) as call:
- # Set the response to a series of pages.
- call.side_effect = (
- tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
- time_series_data_points=[
- tensorboard_data.TimeSeriesDataPoint(),
- tensorboard_data.TimeSeriesDataPoint(),
- tensorboard_data.TimeSeriesDataPoint(),
- ],
- next_page_token='abc',
- ),
- tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
- time_series_data_points=[],
- next_page_token='def',
- ),
- tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
- time_series_data_points=[
- tensorboard_data.TimeSeriesDataPoint(),
- ],
- next_page_token='ghi',
- ),
- tensorboard_service.ExportTensorboardTimeSeriesDataResponse(
- time_series_data_points=[
- tensorboard_data.TimeSeriesDataPoint(),
- tensorboard_data.TimeSeriesDataPoint(),
- ],
- ),
- RuntimeError,
- )
- pages = []
- async for page_ in (await client.export_tensorboard_time_series_data(request={})).pages:
- pages.append(page_)
- for page_, token in zip(pages, ['abc','def','ghi', '']):
- assert page_.raw_page.next_page_token == token
-
-
-def test_credentials_transport_error():
- # It is an error to provide credentials and a transport instance.
- transport = transports.TensorboardServiceGrpcTransport(
- credentials=ga_credentials.AnonymousCredentials(),
- )
- with pytest.raises(ValueError):
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # It is an error to provide a credentials file and a transport instance.
- transport = transports.TensorboardServiceGrpcTransport(
- credentials=ga_credentials.AnonymousCredentials(),
- )
- with pytest.raises(ValueError):
- client = TensorboardServiceClient(
- client_options={"credentials_file": "credentials.json"},
- transport=transport,
- )
-
- # It is an error to provide scopes and a transport instance.
- transport = transports.TensorboardServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = TensorboardServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.TensorboardServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = TensorboardServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.TensorboardServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.TensorboardServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.TensorboardServiceGrpcTransport, - transports.TensorboardServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.TensorboardServiceGrpcTransport, - ) - -def test_tensorboard_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.TensorboardServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_tensorboard_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1beta1.services.tensorboard_service.transports.TensorboardServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.TensorboardServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
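- # (The base transport is abstract; concrete transports such as gRPC are
- # expected to override every method listed here.)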
- methods = ( - 'create_tensorboard', - 'get_tensorboard', - 'update_tensorboard', - 'list_tensorboards', - 'delete_tensorboard', - 'create_tensorboard_experiment', - 'get_tensorboard_experiment', - 'update_tensorboard_experiment', - 'list_tensorboard_experiments', - 'delete_tensorboard_experiment', - 'create_tensorboard_run', - 'get_tensorboard_run', - 'update_tensorboard_run', - 'list_tensorboard_runs', - 'delete_tensorboard_run', - 'create_tensorboard_time_series', - 'get_tensorboard_time_series', - 'update_tensorboard_time_series', - 'list_tensorboard_time_series', - 'delete_tensorboard_time_series', - 'read_tensorboard_time_series_data', - 'read_tensorboard_blob_data', - 'write_tensorboard_run_data', - 'export_tensorboard_time_series_data', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -@requires_google_auth_gte_1_25_0 -def test_tensorboard_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.tensorboard_service.transports.TensorboardServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.TensorboardServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@requires_google_auth_lt_1_25_0 -def test_tensorboard_service_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.tensorboard_service.transports.TensorboardServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.TensorboardServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - ), - quota_project_id="octopus", - ) - - -def test_tensorboard_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.tensorboard_service.transports.TensorboardServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.TensorboardServiceTransport() - adc.assert_called_once() - - -@requires_google_auth_gte_1_25_0 -def test_tensorboard_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. 
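- # (ADC: Application Default Credentials, resolved through
- # google.auth.default(), which the test mocks out below.)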
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - TensorboardServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@requires_google_auth_lt_1_25_0 -def test_tensorboard_service_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - TensorboardServiceClient() - adc.assert_called_once_with( - scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.TensorboardServiceGrpcTransport, - transports.TensorboardServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_gte_1_25_0 -def test_tensorboard_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.TensorboardServiceGrpcTransport, - transports.TensorboardServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_tensorboard_service_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with(scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.TensorboardServiceGrpcTransport, grpc_helpers), - (transports.TensorboardServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_tensorboard_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.TensorboardServiceGrpcTransport, transports.TensorboardServiceGrpcAsyncIOTransport]) -def test_tensorboard_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_tensorboard_service_host_no_port(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_tensorboard_service_host_with_port(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_tensorboard_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.TensorboardServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_tensorboard_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.TensorboardServiceGrpcAsyncIOTransport(
- host="squid.clam.whelk",
- channel=channel,
- )
- assert transport.grpc_channel == channel
- assert transport._host == "squid.clam.whelk:443"
- assert transport._ssl_channel_credentials is None
-
-
-# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
-# removed from grpc/grpc_asyncio transport constructor.
-@pytest.mark.parametrize("transport_class", [transports.TensorboardServiceGrpcTransport, transports.TensorboardServiceGrpcAsyncIOTransport])
-def test_tensorboard_service_transport_channel_mtls_with_client_cert_source(
- transport_class
-):
- with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
- with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
- mock_ssl_cred = mock.Mock()
- grpc_ssl_channel_cred.return_value = mock_ssl_cred
-
- mock_grpc_channel = mock.Mock()
- grpc_create_channel.return_value = mock_grpc_channel
-
- cred = ga_credentials.AnonymousCredentials()
- with pytest.warns(DeprecationWarning):
- with mock.patch.object(google.auth, 'default') as adc:
- adc.return_value = (cred, None)
- transport = transport_class(
- host="squid.clam.whelk",
- api_mtls_endpoint="mtls.squid.clam.whelk",
- client_cert_source=client_cert_source_callback,
- )
- adc.assert_called_once()
-
- grpc_ssl_channel_cred.assert_called_once_with(
- certificate_chain=b"cert bytes", private_key=b"key bytes"
- )
- grpc_create_channel.assert_called_once_with(
- "mtls.squid.clam.whelk:443",
- credentials=cred,
- credentials_file=None,
- scopes=None,
- ssl_credentials=mock_ssl_cred,
- quota_project_id=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
- assert transport.grpc_channel == mock_grpc_channel
- assert transport._ssl_channel_credentials == mock_ssl_cred
-
-
-# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
-# removed from grpc/grpc_asyncio transport constructor.
-@pytest.mark.parametrize("transport_class", [transports.TensorboardServiceGrpcTransport, transports.TensorboardServiceGrpcAsyncIOTransport])
-def test_tensorboard_service_transport_channel_mtls_with_adc(
- transport_class
-):
- mock_ssl_cred = mock.Mock()
- with mock.patch.multiple(
- "google.auth.transport.grpc.SslCredentials",
- __init__=mock.Mock(return_value=None),
- ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
- ):
- with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
- mock_grpc_channel = mock.Mock()
- grpc_create_channel.return_value = mock_grpc_channel
- mock_cred = mock.Mock()
-
- with pytest.warns(DeprecationWarning):
- transport = transport_class(
- host="squid.clam.whelk",
- credentials=mock_cred,
- api_mtls_endpoint="mtls.squid.clam.whelk",
- client_cert_source=None,
- )
-
- grpc_create_channel.assert_called_once_with(
- "mtls.squid.clam.whelk:443",
- credentials=mock_cred,
- credentials_file=None,
- scopes=None,
- ssl_credentials=mock_ssl_cred,
- quota_project_id=None,
- options=[
- ("grpc.max_send_message_length", -1),
- ("grpc.max_receive_message_length", -1),
- ],
- )
- assert transport.grpc_channel == mock_grpc_channel
-
-
-def test_tensorboard_service_grpc_lro_client():
- client = TensorboardServiceClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
- transport = client.transport
-
- # Ensure that we have an api-core operations client.
- assert isinstance(
- transport.operations_client,
- operations_v1.OperationsClient,
- )
-
- # Ensure that subsequent calls to the property send the exact same object.
- assert transport.operations_client is transport.operations_client
-
-
-def test_tensorboard_service_grpc_lro_async_client():
- client = TensorboardServiceAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc_asyncio',
- )
- transport = client.transport
-
- # Ensure that we have an api-core operations client.
- assert isinstance(
- transport.operations_client,
- operations_v1.OperationsAsyncClient,
- )
-
- # Ensure that subsequent calls to the property send the exact same object.
- assert transport.operations_client is transport.operations_client
-
-
-def test_tensorboard_path():
- project = "squid"
- location = "clam"
- tensorboard = "whelk"
- expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(project=project, location=location, tensorboard=tensorboard, )
- actual = TensorboardServiceClient.tensorboard_path(project, location, tensorboard)
- assert expected == actual
-
-
-def test_parse_tensorboard_path():
- expected = {
- "project": "octopus",
- "location": "oyster",
- "tensorboard": "nudibranch",
- }
- path = TensorboardServiceClient.tensorboard_path(**expected)
-
- # Check that the path construction is reversible.
- actual = TensorboardServiceClient.parse_tensorboard_path(path)
- assert expected == actual
-
-def test_tensorboard_experiment_path():
- project = "cuttlefish"
- location = "mussel"
- tensorboard = "winkle"
- experiment = "nautilus"
- expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, )
- actual = TensorboardServiceClient.tensorboard_experiment_path(project, location, tensorboard, experiment)
- assert expected == actual
-
-
-def test_parse_tensorboard_experiment_path():
- expected = {
- "project": "scallop",
- "location": "abalone",
- "tensorboard": "squid",
- "experiment": "clam",
- }
- path = TensorboardServiceClient.tensorboard_experiment_path(**expected)
-
- # Check that the path construction is reversible.
- actual = TensorboardServiceClient.parse_tensorboard_experiment_path(path)
- assert expected == actual
-
-def test_tensorboard_run_path():
- project = "whelk"
- location = "octopus"
- tensorboard = "oyster"
- experiment = "nudibranch"
- run = "cuttlefish"
- expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, run=run, )
- actual = TensorboardServiceClient.tensorboard_run_path(project, location, tensorboard, experiment, run)
- assert expected == actual
-
-
-def test_parse_tensorboard_run_path():
- expected = {
- "project": "mussel",
- "location": "winkle",
- "tensorboard": "nautilus",
- "experiment": "scallop",
- "run": "abalone",
- }
- path = TensorboardServiceClient.tensorboard_run_path(**expected)
-
- # Check that the path construction is reversible.
- actual = TensorboardServiceClient.parse_tensorboard_run_path(path) - assert expected == actual - -def test_tensorboard_time_series_path(): - project = "squid" - location = "clam" - tensorboard = "whelk" - experiment = "octopus" - run = "oyster" - time_series = "nudibranch" - expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, run=run, time_series=time_series, ) - actual = TensorboardServiceClient.tensorboard_time_series_path(project, location, tensorboard, experiment, run, time_series) - assert expected == actual - - -def test_parse_tensorboard_time_series_path(): - expected = { - "project": "cuttlefish", - "location": "mussel", - "tensorboard": "winkle", - "experiment": "nautilus", - "run": "scallop", - "time_series": "abalone", - } - path = TensorboardServiceClient.tensorboard_time_series_path(**expected) - - # Check that the path construction is reversible. - actual = TensorboardServiceClient.parse_tensorboard_time_series_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "squid" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = TensorboardServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "clam", - } - path = TensorboardServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = TensorboardServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "whelk" - expected = "folders/{folder}".format(folder=folder, ) - actual = TensorboardServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "octopus", - } - path = TensorboardServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = TensorboardServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "oyster" - expected = "organizations/{organization}".format(organization=organization, ) - actual = TensorboardServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "nudibranch", - } - path = TensorboardServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = TensorboardServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "cuttlefish" - expected = "projects/{project}".format(project=project, ) - actual = TensorboardServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "mussel", - } - path = TensorboardServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. 
- actual = TensorboardServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "winkle" - location = "nautilus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = TensorboardServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "scallop", - "location": "abalone", - } - path = TensorboardServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. - actual = TensorboardServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.TensorboardServiceTransport, '_prep_wrapped_messages') as prep: - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.TensorboardServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = TensorboardServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py deleted file mode 100644 index dc3164b9a5..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py +++ /dev/null @@ -1,4599 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.vizier_service import VizierServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.vizier_service import VizierServiceClient -from google.cloud.aiplatform_v1beta1.services.vizier_service import pagers -from google.cloud.aiplatform_v1beta1.services.vizier_service import transports -from google.cloud.aiplatform_v1beta1.services.vizier_service.transports.base import _GOOGLE_AUTH_VERSION -from google.cloud.aiplatform_v1beta1.types import study -from google.cloud.aiplatform_v1beta1.types import study as gca_study -from google.cloud.aiplatform_v1beta1.types import vizier_service -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). -requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert VizierServiceClient._get_default_mtls_endpoint(None) is None - assert VizierServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert VizierServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert VizierServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert VizierServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert VizierServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - VizierServiceClient, - VizierServiceAsyncClient, -]) -def test_vizier_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("client_class", [ - VizierServiceClient, - VizierServiceAsyncClient, -]) -def test_vizier_service_client_service_account_always_use_jwt(client_class): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.VizierServiceGrpcTransport, "grpc"), - (transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_vizier_service_client_service_account_always_use_jwt_true(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - -@pytest.mark.parametrize("client_class", [ - VizierServiceClient, - VizierServiceAsyncClient, -]) -def test_vizier_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_vizier_service_client_get_transport_class(): - transport = VizierServiceClient.get_transport_class() - available_transports = [ - transports.VizierServiceGrpcTransport, - ] - assert transport in available_transports - - 
transport = VizierServiceClient.get_transport_class("grpc") - assert transport == transports.VizierServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc"), - (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(VizierServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceClient)) -@mock.patch.object(VizierServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceAsyncClient)) -def test_vizier_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(VizierServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(VizierServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc", "true"), - (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc", "false"), - (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(VizierServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceClient)) -@mock.patch.object(VizierServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_vizier_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc"), - (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_vizier_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc"), - (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_vizier_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_vizier_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.vizier_service.transports.VizierServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = VizierServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) - - -def test_create_study(transport: str = 'grpc', request_type=vizier_service.CreateStudyRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_study.Study( - name='name_value', - display_name='display_name_value', - state=gca_study.Study.State.ACTIVE, - inactive_reason='inactive_reason_value', - ) - response = client.create_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.CreateStudyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_study.Study) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == gca_study.Study.State.ACTIVE - assert response.inactive_reason == 'inactive_reason_value' - - -def test_create_study_from_dict(): - test_create_study(request_type=dict) - - -def test_create_study_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_study), - '__call__') as call: - client.create_study() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.CreateStudyRequest() - - -@pytest.mark.asyncio -async def test_create_study_async(transport: str = 'grpc_asyncio', request_type=vizier_service.CreateStudyRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-        type(client.transport.create_study),
-        '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_study.Study(
-            name='name_value',
-            display_name='display_name_value',
-            state=gca_study.Study.State.ACTIVE,
-            inactive_reason='inactive_reason_value',
-        ))
-        response = await client.create_study(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == vizier_service.CreateStudyRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, gca_study.Study)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.state == gca_study.Study.State.ACTIVE
-    assert response.inactive_reason == 'inactive_reason_value'
-
-
-@pytest.mark.asyncio
-async def test_create_study_async_from_dict():
-    await test_create_study_async(request_type=dict)
-
-
-def test_create_study_field_headers():
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = vizier_service.CreateStudyRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-        type(client.transport.create_study),
-        '__call__') as call:
-        call.return_value = gca_study.Study()
-        client.create_study(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_create_study_field_headers_async():
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = vizier_service.CreateStudyRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-        type(client.transport.create_study),
-        '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_study.Study())
-        await client.create_study(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_create_study_flattened():
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-        type(client.transport.create_study),
-        '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = gca_study.Study()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
- client.create_study( - parent='parent_value', - study=gca_study.Study(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].study == gca_study.Study(name='name_value') - - -def test_create_study_flattened_error(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_study( - vizier_service.CreateStudyRequest(), - parent='parent_value', - study=gca_study.Study(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_study_flattened_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_study.Study() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_study.Study()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_study( - parent='parent_value', - study=gca_study.Study(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].study == gca_study.Study(name='name_value') - - -@pytest.mark.asyncio -async def test_create_study_flattened_error_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_study( - vizier_service.CreateStudyRequest(), - parent='parent_value', - study=gca_study.Study(name='name_value'), - ) - - -def test_get_study(transport: str = 'grpc', request_type=vizier_service.GetStudyRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = study.Study( - name='name_value', - display_name='display_name_value', - state=study.Study.State.ACTIVE, - inactive_reason='inactive_reason_value', - ) - response = client.get_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.GetStudyRequest() - - # Establish that the response is the type that we expect. 
-    assert isinstance(response, study.Study)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.state == study.Study.State.ACTIVE
-    assert response.inactive_reason == 'inactive_reason_value'
-
-
-def test_get_study_from_dict():
-    test_get_study(request_type=dict)
-
-
-def test_get_study_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-        type(client.transport.get_study),
-        '__call__') as call:
-        client.get_study()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == vizier_service.GetStudyRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_study_async(transport: str = 'grpc_asyncio', request_type=vizier_service.GetStudyRequest):
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-        type(client.transport.get_study),
-        '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study(
-            name='name_value',
-            display_name='display_name_value',
-            state=study.Study.State.ACTIVE,
-            inactive_reason='inactive_reason_value',
-        ))
-        response = await client.get_study(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == vizier_service.GetStudyRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, study.Study)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.state == study.Study.State.ACTIVE
-    assert response.inactive_reason == 'inactive_reason_value'
-
-
-@pytest.mark.asyncio
-async def test_get_study_async_from_dict():
-    await test_get_study_async(request_type=dict)
-
-
-def test_get_study_field_headers():
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = vizier_service.GetStudyRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-        type(client.transport.get_study),
-        '__call__') as call:
-        call.return_value = study.Study()
-        client.get_study(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_get_study_field_headers_async():
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = vizier_service.GetStudyRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-        type(client.transport.get_study),
-        '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study())
-        await client.get_study(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_get_study_flattened():
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-        type(client.transport.get_study),
-        '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = study.Study()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.get_study(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_get_study_flattened_error():
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.get_study(
-            vizier_service.GetStudyRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_get_study_flattened_async():
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-        type(client.transport.get_study),
-        '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = study.Study()
-
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.get_study(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_get_study_flattened_error_async():
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.get_study(
-            vizier_service.GetStudyRequest(),
-            name='name_value',
-        )
-
-
-def test_list_studies(transport: str = 'grpc', request_type=vizier_service.ListStudiesRequest):
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-        type(client.transport.list_studies),
-        '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = vizier_service.ListStudiesResponse(
-            next_page_token='next_page_token_value',
-        )
-        response = client.list_studies(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == vizier_service.ListStudiesRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListStudiesPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_studies_from_dict():
-    test_list_studies(request_type=dict)
-
-
-def test_list_studies_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-        type(client.transport.list_studies),
-        '__call__') as call:
-        client.list_studies()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == vizier_service.ListStudiesRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_studies_async(transport: str = 'grpc_asyncio', request_type=vizier_service.ListStudiesRequest):
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-        type(client.transport.list_studies),
-        '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListStudiesResponse(
-            next_page_token='next_page_token_value',
-        ))
-        response = await client.list_studies(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == vizier_service.ListStudiesRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListStudiesAsyncPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_studies_async_from_dict():
-    await test_list_studies_async(request_type=dict)
-
-
-def test_list_studies_field_headers():
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = vizier_service.ListStudiesRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-        type(client.transport.list_studies),
-        '__call__') as call:
-        call.return_value = vizier_service.ListStudiesResponse()
-        client.list_studies(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_studies_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.ListStudiesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_studies), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListStudiesResponse()) - await client.list_studies(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_studies_flattened(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_studies), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = vizier_service.ListStudiesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_studies( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -def test_list_studies_flattened_error(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_studies( - vizier_service.ListStudiesRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_studies_flattened_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_studies), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = vizier_service.ListStudiesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListStudiesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_studies( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -@pytest.mark.asyncio -async def test_list_studies_flattened_error_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
-    with pytest.raises(ValueError):
-        await client.list_studies(
-            vizier_service.ListStudiesRequest(),
-            parent='parent_value',
-        )
-
-
-def test_list_studies_pager():
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-        type(client.transport.list_studies),
-        '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            vizier_service.ListStudiesResponse(
-                studies=[
-                    study.Study(),
-                    study.Study(),
-                    study.Study(),
-                ],
-                next_page_token='abc',
-            ),
-            vizier_service.ListStudiesResponse(
-                studies=[],
-                next_page_token='def',
-            ),
-            vizier_service.ListStudiesResponse(
-                studies=[
-                    study.Study(),
-                ],
-                next_page_token='ghi',
-            ),
-            vizier_service.ListStudiesResponse(
-                studies=[
-                    study.Study(),
-                    study.Study(),
-                ],
-            ),
-            RuntimeError,
-        )
-
-        metadata = ()
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ('parent', ''),
-            )),
-        )
-        pager = client.list_studies(request={})
-
-        assert pager._metadata == metadata
-
-        results = [i for i in pager]
-        assert len(results) == 6
-        assert all(isinstance(i, study.Study)
-                   for i in results)
-
-def test_list_studies_pages():
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-        type(client.transport.list_studies),
-        '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            vizier_service.ListStudiesResponse(
-                studies=[
-                    study.Study(),
-                    study.Study(),
-                    study.Study(),
-                ],
-                next_page_token='abc',
-            ),
-            vizier_service.ListStudiesResponse(
-                studies=[],
-                next_page_token='def',
-            ),
-            vizier_service.ListStudiesResponse(
-                studies=[
-                    study.Study(),
-                ],
-                next_page_token='ghi',
-            ),
-            vizier_service.ListStudiesResponse(
-                studies=[
-                    study.Study(),
-                    study.Study(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = list(client.list_studies(request={}).pages)
-        for page_, token in zip(pages, ['abc','def','ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_studies_async_pager():
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-        type(client.transport.list_studies),
-        '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            vizier_service.ListStudiesResponse(
-                studies=[
-                    study.Study(),
-                    study.Study(),
-                    study.Study(),
-                ],
-                next_page_token='abc',
-            ),
-            vizier_service.ListStudiesResponse(
-                studies=[],
-                next_page_token='def',
-            ),
-            vizier_service.ListStudiesResponse(
-                studies=[
-                    study.Study(),
-                ],
-                next_page_token='ghi',
-            ),
-            vizier_service.ListStudiesResponse(
-                studies=[
-                    study.Study(),
-                    study.Study(),
-                ],
-            ),
-            RuntimeError,
-        )
-        async_pager = await client.list_studies(request={},)
-        assert async_pager.next_page_token == 'abc'
-        responses = []
-        async for response in async_pager:
-            responses.append(response)
-
-        assert len(responses) == 6
-        assert all(isinstance(i, study.Study)
-                   for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_studies_async_pages():
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object( - type(client.transport.list_studies), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - vizier_service.ListStudiesResponse( - studies=[ - study.Study(), - study.Study(), - study.Study(), - ], - next_page_token='abc', - ), - vizier_service.ListStudiesResponse( - studies=[], - next_page_token='def', - ), - vizier_service.ListStudiesResponse( - studies=[ - study.Study(), - ], - next_page_token='ghi', - ), - vizier_service.ListStudiesResponse( - studies=[ - study.Study(), - study.Study(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_studies(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_study(transport: str = 'grpc', request_type=vizier_service.DeleteStudyRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.DeleteStudyRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_delete_study_from_dict(): - test_delete_study(request_type=dict) - - -def test_delete_study_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_study), - '__call__') as call: - client.delete_study() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.DeleteStudyRequest() - - -@pytest.mark.asyncio -async def test_delete_study_async(transport: str = 'grpc_asyncio', request_type=vizier_service.DeleteStudyRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.DeleteStudyRequest() - - # Establish that the response is the type that we expect. 
- assert response is None - - -@pytest.mark.asyncio -async def test_delete_study_async_from_dict(): - await test_delete_study_async(request_type=dict) - - -def test_delete_study_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.DeleteStudyRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_study), - '__call__') as call: - call.return_value = None - client.delete_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_study_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.DeleteStudyRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_study), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_study_flattened(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_study( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -def test_delete_study_flattened_error(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_study( - vizier_service.DeleteStudyRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_study_flattened_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_study), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_study( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_delete_study_flattened_error_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_study( - vizier_service.DeleteStudyRequest(), - name='name_value', - ) - - -def test_lookup_study(transport: str = 'grpc', request_type=vizier_service.LookupStudyRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.lookup_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = study.Study( - name='name_value', - display_name='display_name_value', - state=study.Study.State.ACTIVE, - inactive_reason='inactive_reason_value', - ) - response = client.lookup_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.LookupStudyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, study.Study) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == study.Study.State.ACTIVE - assert response.inactive_reason == 'inactive_reason_value' - - -def test_lookup_study_from_dict(): - test_lookup_study(request_type=dict) - - -def test_lookup_study_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.lookup_study), - '__call__') as call: - client.lookup_study() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.LookupStudyRequest() - - -@pytest.mark.asyncio -async def test_lookup_study_async(transport: str = 'grpc_asyncio', request_type=vizier_service.LookupStudyRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.lookup_study), - '__call__') as call: - # Designate an appropriate return value for the call. 
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study(
-            name='name_value',
-            display_name='display_name_value',
-            state=study.Study.State.ACTIVE,
-            inactive_reason='inactive_reason_value',
-        ))
-        response = await client.lookup_study(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == vizier_service.LookupStudyRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, study.Study)
-    assert response.name == 'name_value'
-    assert response.display_name == 'display_name_value'
-    assert response.state == study.Study.State.ACTIVE
-    assert response.inactive_reason == 'inactive_reason_value'
-
-
-@pytest.mark.asyncio
-async def test_lookup_study_async_from_dict():
-    await test_lookup_study_async(request_type=dict)
-
-
-def test_lookup_study_field_headers():
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = vizier_service.LookupStudyRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-        type(client.transport.lookup_study),
-        '__call__') as call:
-        call.return_value = study.Study()
-        client.lookup_study(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_lookup_study_field_headers_async():
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = vizier_service.LookupStudyRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-        type(client.transport.lookup_study),
-        '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study())
-        await client.lookup_study(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_lookup_study_flattened():
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-        type(client.transport.lookup_study),
-        '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = study.Study()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.lookup_study(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-def test_lookup_study_flattened_error():
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.lookup_study(
-            vizier_service.LookupStudyRequest(),
-            parent='parent_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_lookup_study_flattened_async():
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.lookup_study),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.lookup_study(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_lookup_study_flattened_error_async():
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.lookup_study(
-            vizier_service.LookupStudyRequest(),
-            parent='parent_value',
-        )
-
-
-def test_suggest_trials(transport: str = 'grpc', request_type=vizier_service.SuggestTrialsRequest):
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.suggest_trials),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/spam')
-        response = client.suggest_trials(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == vizier_service.SuggestTrialsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, future.Future)
-
-
-def test_suggest_trials_from_dict():
-    test_suggest_trials(request_type=dict)
-
-
-def test_suggest_trials_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
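-    # Editor's note: patching '__call__' on type(client.transport.suggest_trials)
-    # swaps out the cached gRPC stub method itself, so the full client-side
-    # request path is exercised without any network traffic.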
- with mock.patch.object( - type(client.transport.suggest_trials), - '__call__') as call: - client.suggest_trials() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.SuggestTrialsRequest() - - -@pytest.mark.asyncio -async def test_suggest_trials_async(transport: str = 'grpc_asyncio', request_type=vizier_service.SuggestTrialsRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.suggest_trials), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.suggest_trials(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.SuggestTrialsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_suggest_trials_async_from_dict(): - await test_suggest_trials_async(request_type=dict) - - -def test_suggest_trials_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.SuggestTrialsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.suggest_trials), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.suggest_trials(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_suggest_trials_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.SuggestTrialsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.suggest_trials), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.suggest_trials(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
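-    # Editor's note: the routing header travels in the 'metadata' kwarg of the
-    # stub call, so kw['metadata'] is expected to contain the pair
-    # ('x-goog-request-params', 'parent=parent/value').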
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_create_trial(transport: str = 'grpc', request_type=vizier_service.CreateTrialRequest):
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_trial),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = study.Trial(
-            name='name_value',
-            id='id_value',
-            state=study.Trial.State.REQUESTED,
-            client_id='client_id_value',
-            infeasible_reason='infeasible_reason_value',
-            custom_job='custom_job_value',
-        )
-        response = client.create_trial(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == vizier_service.CreateTrialRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, study.Trial)
-    assert response.name == 'name_value'
-    assert response.id == 'id_value'
-    assert response.state == study.Trial.State.REQUESTED
-    assert response.client_id == 'client_id_value'
-    assert response.infeasible_reason == 'infeasible_reason_value'
-    assert response.custom_job == 'custom_job_value'
-
-
-def test_create_trial_from_dict():
-    test_create_trial(request_type=dict)
-
-
-def test_create_trial_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_trial),
-            '__call__') as call:
-        client.create_trial()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == vizier_service.CreateTrialRequest()
-
-
-@pytest.mark.asyncio
-async def test_create_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.CreateTrialRequest):
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_trial),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial(
-            name='name_value',
-            id='id_value',
-            state=study.Trial.State.REQUESTED,
-            client_id='client_id_value',
-            infeasible_reason='infeasible_reason_value',
-            custom_job='custom_job_value',
-        ))
-        response = await client.create_trial(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == vizier_service.CreateTrialRequest()
-
-    # Establish that the response is the type that we expect.
- assert isinstance(response, study.Trial) - assert response.name == 'name_value' - assert response.id == 'id_value' - assert response.state == study.Trial.State.REQUESTED - assert response.client_id == 'client_id_value' - assert response.infeasible_reason == 'infeasible_reason_value' - assert response.custom_job == 'custom_job_value' - - -@pytest.mark.asyncio -async def test_create_trial_async_from_dict(): - await test_create_trial_async(request_type=dict) - - -def test_create_trial_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.CreateTrialRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_trial), - '__call__') as call: - call.return_value = study.Trial() - client.create_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_trial_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.CreateTrialRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_trial), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) - await client.create_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_trial_flattened(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = study.Trial() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_trial( - parent='parent_value', - trial=study.Trial(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - assert args[0].trial == study.Trial(name='name_value') - - -def test_create_trial_flattened_error(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
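-    # Editor's note: the generated clients treat the positional request object
-    # and the flattened keyword fields as mutually exclusive, so this call is
-    # expected to raise ValueError before any RPC would be attempted.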
-    with pytest.raises(ValueError):
-        client.create_trial(
-            vizier_service.CreateTrialRequest(),
-            parent='parent_value',
-            trial=study.Trial(name='name_value'),
-        )
-
-
-@pytest.mark.asyncio
-async def test_create_trial_flattened_async():
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_trial),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.create_trial(
-            parent='parent_value',
-            trial=study.Trial(name='name_value'),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-        assert args[0].trial == study.Trial(name='name_value')
-
-
-@pytest.mark.asyncio
-async def test_create_trial_flattened_error_async():
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.create_trial(
-            vizier_service.CreateTrialRequest(),
-            parent='parent_value',
-            trial=study.Trial(name='name_value'),
-        )
-
-
-def test_get_trial(transport: str = 'grpc', request_type=vizier_service.GetTrialRequest):
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_trial),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = study.Trial(
-            name='name_value',
-            id='id_value',
-            state=study.Trial.State.REQUESTED,
-            client_id='client_id_value',
-            infeasible_reason='infeasible_reason_value',
-            custom_job='custom_job_value',
-        )
-        response = client.get_trial(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == vizier_service.GetTrialRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, study.Trial)
-    assert response.name == 'name_value'
-    assert response.id == 'id_value'
-    assert response.state == study.Trial.State.REQUESTED
-    assert response.client_id == 'client_id_value'
-    assert response.infeasible_reason == 'infeasible_reason_value'
-    assert response.custom_job == 'custom_job_value'
-
-
-def test_get_trial_from_dict():
-    test_get_trial(request_type=dict)
-
-
-def test_get_trial_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_trial),
-            '__call__') as call:
-        client.get_trial()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == vizier_service.GetTrialRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.GetTrialRequest):
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_trial),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial(
-            name='name_value',
-            id='id_value',
-            state=study.Trial.State.REQUESTED,
-            client_id='client_id_value',
-            infeasible_reason='infeasible_reason_value',
-            custom_job='custom_job_value',
-        ))
-        response = await client.get_trial(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == vizier_service.GetTrialRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, study.Trial)
-    assert response.name == 'name_value'
-    assert response.id == 'id_value'
-    assert response.state == study.Trial.State.REQUESTED
-    assert response.client_id == 'client_id_value'
-    assert response.infeasible_reason == 'infeasible_reason_value'
-    assert response.custom_job == 'custom_job_value'
-
-
-@pytest.mark.asyncio
-async def test_get_trial_async_from_dict():
-    await test_get_trial_async(request_type=dict)
-
-
-def test_get_trial_field_headers():
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = vizier_service.GetTrialRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_trial),
-            '__call__') as call:
-        call.return_value = study.Trial()
-        client.get_trial(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_get_trial_field_headers_async():
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = vizier_service.GetTrialRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_trial),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial())
-        await client.get_trial(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_get_trial_flattened():
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_trial),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = study.Trial()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.get_trial(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_get_trial_flattened_error():
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.get_trial(
-            vizier_service.GetTrialRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_get_trial_flattened_async():
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_trial),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.get_trial(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_get_trial_flattened_error_async():
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.get_trial(
-            vizier_service.GetTrialRequest(),
-            name='name_value',
-        )
-
-
-def test_list_trials(transport: str = 'grpc', request_type=vizier_service.ListTrialsRequest):
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_trials),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = vizier_service.ListTrialsResponse(
-            next_page_token='next_page_token_value',
-        )
-        response = client.list_trials(request)
-
-        # Establish that the underlying gRPC stub method was called.
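-        # Editor's note: entries in call.mock_calls unpack as
-        # (name, args, kwargs), so args[0] below is the request object that
-        # was handed to the transport.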
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == vizier_service.ListTrialsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListTrialsPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_trials_from_dict():
-    test_list_trials(request_type=dict)
-
-
-def test_list_trials_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_trials),
-            '__call__') as call:
-        client.list_trials()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == vizier_service.ListTrialsRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_trials_async(transport: str = 'grpc_asyncio', request_type=vizier_service.ListTrialsRequest):
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_trials),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListTrialsResponse(
-            next_page_token='next_page_token_value',
-        ))
-        response = await client.list_trials(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == vizier_service.ListTrialsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListTrialsAsyncPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_trials_async_from_dict():
-    await test_list_trials_async(request_type=dict)
-
-
-def test_list_trials_field_headers():
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = vizier_service.ListTrialsRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_trials),
-            '__call__') as call:
-        call.return_value = vizier_service.ListTrialsResponse()
-        client.list_trials(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_trials_field_headers_async():
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = vizier_service.ListTrialsRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_trials),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListTrialsResponse())
-        await client.list_trials(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_list_trials_flattened():
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_trials),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = vizier_service.ListTrialsResponse()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.list_trials(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-def test_list_trials_flattened_error():
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.list_trials(
-            vizier_service.ListTrialsRequest(),
-            parent='parent_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_list_trials_flattened_async():
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_trials),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListTrialsResponse())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.list_trials(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_trials_flattened_error_async():
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.list_trials(
-            vizier_service.ListTrialsRequest(),
-            parent='parent_value',
-        )
-
-
-def test_list_trials_pager():
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_trials),
-            '__call__') as call:
-        # Set the response to a series of pages.
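-        # Editor's note: side_effect yields one response per invocation; the
-        # trailing RuntimeError is a sentinel that fails the test loudly if
-        # the pager ever asks for more pages than the fixture provides.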
-        call.side_effect = (
-            vizier_service.ListTrialsResponse(
-                trials=[
-                    study.Trial(),
-                    study.Trial(),
-                    study.Trial(),
-                ],
-                next_page_token='abc',
-            ),
-            vizier_service.ListTrialsResponse(
-                trials=[],
-                next_page_token='def',
-            ),
-            vizier_service.ListTrialsResponse(
-                trials=[
-                    study.Trial(),
-                ],
-                next_page_token='ghi',
-            ),
-            vizier_service.ListTrialsResponse(
-                trials=[
-                    study.Trial(),
-                    study.Trial(),
-                ],
-            ),
-            RuntimeError,
-        )
-
-        metadata = ()
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ('parent', ''),
-            )),
-        )
-        pager = client.list_trials(request={})
-
-        assert pager._metadata == metadata
-
-        results = [i for i in pager]
-        assert len(results) == 6
-        assert all(isinstance(i, study.Trial)
-                   for i in results)
-
-
-def test_list_trials_pages():
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_trials),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            vizier_service.ListTrialsResponse(
-                trials=[
-                    study.Trial(),
-                    study.Trial(),
-                    study.Trial(),
-                ],
-                next_page_token='abc',
-            ),
-            vizier_service.ListTrialsResponse(
-                trials=[],
-                next_page_token='def',
-            ),
-            vizier_service.ListTrialsResponse(
-                trials=[
-                    study.Trial(),
-                ],
-                next_page_token='ghi',
-            ),
-            vizier_service.ListTrialsResponse(
-                trials=[
-                    study.Trial(),
-                    study.Trial(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = list(client.list_trials(request={}).pages)
-        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-
-@pytest.mark.asyncio
-async def test_list_trials_async_pager():
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_trials),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            vizier_service.ListTrialsResponse(
-                trials=[
-                    study.Trial(),
-                    study.Trial(),
-                    study.Trial(),
-                ],
-                next_page_token='abc',
-            ),
-            vizier_service.ListTrialsResponse(
-                trials=[],
-                next_page_token='def',
-            ),
-            vizier_service.ListTrialsResponse(
-                trials=[
-                    study.Trial(),
-                ],
-                next_page_token='ghi',
-            ),
-            vizier_service.ListTrialsResponse(
-                trials=[
-                    study.Trial(),
-                    study.Trial(),
-                ],
-            ),
-            RuntimeError,
-        )
-        async_pager = await client.list_trials(request={})
-        assert async_pager.next_page_token == 'abc'
-        responses = []
-        async for response in async_pager:
-            responses.append(response)
-
-        assert len(responses) == 6
-        assert all(isinstance(i, study.Trial)
-                   for i in responses)
-
-
-@pytest.mark.asyncio
-async def test_list_trials_async_pages():
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_trials),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
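-        # Editor's note: because '__call__' is patched with
-        # new_callable=mock.AsyncMock, the mock is natively awaitable and the
-        # plain response objects below need no FakeUnaryUnaryCall wrapper
-        # (an observation about unittest.mock, not about the generated client).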
-        call.side_effect = (
-            vizier_service.ListTrialsResponse(
-                trials=[
-                    study.Trial(),
-                    study.Trial(),
-                    study.Trial(),
-                ],
-                next_page_token='abc',
-            ),
-            vizier_service.ListTrialsResponse(
-                trials=[],
-                next_page_token='def',
-            ),
-            vizier_service.ListTrialsResponse(
-                trials=[
-                    study.Trial(),
-                ],
-                next_page_token='ghi',
-            ),
-            vizier_service.ListTrialsResponse(
-                trials=[
-                    study.Trial(),
-                    study.Trial(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = []
-        async for page_ in (await client.list_trials(request={})).pages:
-            pages.append(page_)
-        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-
-def test_add_trial_measurement(transport: str = 'grpc', request_type=vizier_service.AddTrialMeasurementRequest):
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.add_trial_measurement),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = study.Trial(
-            name='name_value',
-            id='id_value',
-            state=study.Trial.State.REQUESTED,
-            client_id='client_id_value',
-            infeasible_reason='infeasible_reason_value',
-            custom_job='custom_job_value',
-        )
-        response = client.add_trial_measurement(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == vizier_service.AddTrialMeasurementRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, study.Trial)
-    assert response.name == 'name_value'
-    assert response.id == 'id_value'
-    assert response.state == study.Trial.State.REQUESTED
-    assert response.client_id == 'client_id_value'
-    assert response.infeasible_reason == 'infeasible_reason_value'
-    assert response.custom_job == 'custom_job_value'
-
-
-def test_add_trial_measurement_from_dict():
-    test_add_trial_measurement(request_type=dict)
-
-
-def test_add_trial_measurement_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.add_trial_measurement),
-            '__call__') as call:
-        client.add_trial_measurement()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == vizier_service.AddTrialMeasurementRequest()
-
-
-@pytest.mark.asyncio
-async def test_add_trial_measurement_async(transport: str = 'grpc_asyncio', request_type=vizier_service.AddTrialMeasurementRequest):
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.add_trial_measurement),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial(
-            name='name_value',
-            id='id_value',
-            state=study.Trial.State.REQUESTED,
-            client_id='client_id_value',
-            infeasible_reason='infeasible_reason_value',
-            custom_job='custom_job_value',
-        ))
-        response = await client.add_trial_measurement(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == vizier_service.AddTrialMeasurementRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, study.Trial)
-    assert response.name == 'name_value'
-    assert response.id == 'id_value'
-    assert response.state == study.Trial.State.REQUESTED
-    assert response.client_id == 'client_id_value'
-    assert response.infeasible_reason == 'infeasible_reason_value'
-    assert response.custom_job == 'custom_job_value'
-
-
-@pytest.mark.asyncio
-async def test_add_trial_measurement_async_from_dict():
-    await test_add_trial_measurement_async(request_type=dict)
-
-
-def test_add_trial_measurement_field_headers():
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = vizier_service.AddTrialMeasurementRequest()
-
-    request.trial_name = 'trial_name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.add_trial_measurement),
-            '__call__') as call:
-        call.return_value = study.Trial()
-        client.add_trial_measurement(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'trial_name=trial_name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_add_trial_measurement_field_headers_async():
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = vizier_service.AddTrialMeasurementRequest()
-
-    request.trial_name = 'trial_name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.add_trial_measurement),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial())
-        await client.add_trial_measurement(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'trial_name=trial_name/value',
-    ) in kw['metadata']
-
-
-def test_complete_trial(transport: str = 'grpc', request_type=vizier_service.CompleteTrialRequest):
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.complete_trial),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = study.Trial(
-            name='name_value',
-            id='id_value',
-            state=study.Trial.State.REQUESTED,
-            client_id='client_id_value',
-            infeasible_reason='infeasible_reason_value',
-            custom_job='custom_job_value',
-        )
-        response = client.complete_trial(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == vizier_service.CompleteTrialRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, study.Trial)
-    assert response.name == 'name_value'
-    assert response.id == 'id_value'
-    assert response.state == study.Trial.State.REQUESTED
-    assert response.client_id == 'client_id_value'
-    assert response.infeasible_reason == 'infeasible_reason_value'
-    assert response.custom_job == 'custom_job_value'
-
-
-def test_complete_trial_from_dict():
-    test_complete_trial(request_type=dict)
-
-
-def test_complete_trial_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.complete_trial),
-            '__call__') as call:
-        client.complete_trial()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == vizier_service.CompleteTrialRequest()
-
-
-@pytest.mark.asyncio
-async def test_complete_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.CompleteTrialRequest):
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.complete_trial),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial(
-            name='name_value',
-            id='id_value',
-            state=study.Trial.State.REQUESTED,
-            client_id='client_id_value',
-            infeasible_reason='infeasible_reason_value',
-            custom_job='custom_job_value',
-        ))
-        response = await client.complete_trial(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == vizier_service.CompleteTrialRequest()
-
-    # Establish that the response is the type that we expect.
- assert isinstance(response, study.Trial) - assert response.name == 'name_value' - assert response.id == 'id_value' - assert response.state == study.Trial.State.REQUESTED - assert response.client_id == 'client_id_value' - assert response.infeasible_reason == 'infeasible_reason_value' - assert response.custom_job == 'custom_job_value' - - -@pytest.mark.asyncio -async def test_complete_trial_async_from_dict(): - await test_complete_trial_async(request_type=dict) - - -def test_complete_trial_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.CompleteTrialRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.complete_trial), - '__call__') as call: - call.return_value = study.Trial() - client.complete_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_complete_trial_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.CompleteTrialRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.complete_trial), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) - await client.complete_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_trial(transport: str = 'grpc', request_type=vizier_service.DeleteTrialRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.DeleteTrialRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_delete_trial_from_dict(): - test_delete_trial(request_type=dict) - - -def test_delete_trial_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_trial), - '__call__') as call: - client.delete_trial() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.DeleteTrialRequest() - - -@pytest.mark.asyncio -async def test_delete_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.DeleteTrialRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.DeleteTrialRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_delete_trial_async_from_dict(): - await test_delete_trial_async(request_type=dict) - - -def test_delete_trial_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.DeleteTrialRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_trial), - '__call__') as call: - call.return_value = None - client.delete_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_trial_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.DeleteTrialRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_trial), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_delete_trial_flattened():
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_trial),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = None
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.delete_trial(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-def test_delete_trial_flattened_error():
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.delete_trial(
-            vizier_service.DeleteTrialRequest(),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_delete_trial_flattened_async():
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_trial),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.delete_trial(
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_trial_flattened_error_async():
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.delete_trial(
-            vizier_service.DeleteTrialRequest(),
-            name='name_value',
-        )
-
-
-def test_check_trial_early_stopping_state(transport: str = 'grpc', request_type=vizier_service.CheckTrialEarlyStoppingStateRequest):
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.check_trial_early_stopping_state),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = operations_pb2.Operation(name='operations/spam')
-        response = client.check_trial_early_stopping_state(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == vizier_service.CheckTrialEarlyStoppingStateRequest()
-
-    # Establish that the response is the type that we expect.
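-    # Editor's note: CheckTrialEarlyStoppingState is a long-running operation,
-    # so the client is expected to wrap the raw operations_pb2.Operation in an
-    # api_core operation future, hence the future.Future check below.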
- assert isinstance(response, future.Future) - - -def test_check_trial_early_stopping_state_from_dict(): - test_check_trial_early_stopping_state(request_type=dict) - - -def test_check_trial_early_stopping_state_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_trial_early_stopping_state), - '__call__') as call: - client.check_trial_early_stopping_state() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.CheckTrialEarlyStoppingStateRequest() - - -@pytest.mark.asyncio -async def test_check_trial_early_stopping_state_async(transport: str = 'grpc_asyncio', request_type=vizier_service.CheckTrialEarlyStoppingStateRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_trial_early_stopping_state), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.check_trial_early_stopping_state(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.CheckTrialEarlyStoppingStateRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_check_trial_early_stopping_state_async_from_dict(): - await test_check_trial_early_stopping_state_async(request_type=dict) - - -def test_check_trial_early_stopping_state_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.CheckTrialEarlyStoppingStateRequest() - - request.trial_name = 'trial_name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_trial_early_stopping_state), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.check_trial_early_stopping_state(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'trial_name=trial_name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_check_trial_early_stopping_state_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = vizier_service.CheckTrialEarlyStoppingStateRequest() - - request.trial_name = 'trial_name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_trial_early_stopping_state), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.check_trial_early_stopping_state(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'trial_name=trial_name/value', - ) in kw['metadata'] - - -def test_stop_trial(transport: str = 'grpc', request_type=vizier_service.StopTrialRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.stop_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = study.Trial( - name='name_value', - id='id_value', - state=study.Trial.State.REQUESTED, - client_id='client_id_value', - infeasible_reason='infeasible_reason_value', - custom_job='custom_job_value', - ) - response = client.stop_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.StopTrialRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, study.Trial) - assert response.name == 'name_value' - assert response.id == 'id_value' - assert response.state == study.Trial.State.REQUESTED - assert response.client_id == 'client_id_value' - assert response.infeasible_reason == 'infeasible_reason_value' - assert response.custom_job == 'custom_job_value' - - -def test_stop_trial_from_dict(): - test_stop_trial(request_type=dict) - - -def test_stop_trial_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.stop_trial), - '__call__') as call: - client.stop_trial() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.StopTrialRequest() - - -@pytest.mark.asyncio -async def test_stop_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.StopTrialRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.stop_trial), - '__call__') as call: - # Designate an appropriate return value for the call. 
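-        # (FakeUnaryUnaryCall makes the mocked stub awaitable, standing in for
-        # a real grpc.aio unary-unary call.)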
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial(
-            name='name_value',
-            id='id_value',
-            state=study.Trial.State.REQUESTED,
-            client_id='client_id_value',
-            infeasible_reason='infeasible_reason_value',
-            custom_job='custom_job_value',
-        ))
-        response = await client.stop_trial(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == vizier_service.StopTrialRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, study.Trial)
-    assert response.name == 'name_value'
-    assert response.id == 'id_value'
-    assert response.state == study.Trial.State.REQUESTED
-    assert response.client_id == 'client_id_value'
-    assert response.infeasible_reason == 'infeasible_reason_value'
-    assert response.custom_job == 'custom_job_value'
-
-
-@pytest.mark.asyncio
-async def test_stop_trial_async_from_dict():
-    await test_stop_trial_async(request_type=dict)
-
-
-def test_stop_trial_field_headers():
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = vizier_service.StopTrialRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.stop_trial),
-            '__call__') as call:
-        call.return_value = study.Trial()
-        client.stop_trial(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_stop_trial_field_headers_async():
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = vizier_service.StopTrialRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.stop_trial),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial())
-        await client.stop_trial(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_list_optimal_trials(transport: str = 'grpc', request_type=vizier_service.ListOptimalTrialsRequest):
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_optimal_trials),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
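-        # (An empty ListOptimalTrialsResponse is enough here; only request
-        # plumbing and response typing are under test.)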
-        call.return_value = vizier_service.ListOptimalTrialsResponse(
-        )
-        response = client.list_optimal_trials(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == vizier_service.ListOptimalTrialsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, vizier_service.ListOptimalTrialsResponse)
-
-
-def test_list_optimal_trials_from_dict():
-    test_list_optimal_trials(request_type=dict)
-
-
-def test_list_optimal_trials_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_optimal_trials),
-            '__call__') as call:
-        client.list_optimal_trials()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == vizier_service.ListOptimalTrialsRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_optimal_trials_async(transport: str = 'grpc_asyncio', request_type=vizier_service.ListOptimalTrialsRequest):
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_optimal_trials),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListOptimalTrialsResponse(
-        ))
-        response = await client.list_optimal_trials(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == vizier_service.ListOptimalTrialsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, vizier_service.ListOptimalTrialsResponse)
-
-
-@pytest.mark.asyncio
-async def test_list_optimal_trials_async_from_dict():
-    await test_list_optimal_trials_async(request_type=dict)
-
-
-def test_list_optimal_trials_field_headers():
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = vizier_service.ListOptimalTrialsRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_optimal_trials),
-            '__call__') as call:
-        call.return_value = vizier_service.ListOptimalTrialsResponse()
-        client.list_optimal_trials(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
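-    # (Each mock call unpacks to (name, args, kwargs); the routing header
-    # travels in the metadata kwarg.)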
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_optimal_trials_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.ListOptimalTrialsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_optimal_trials), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListOptimalTrialsResponse()) - await client.list_optimal_trials(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_optimal_trials_flattened(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_optimal_trials), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = vizier_service.ListOptimalTrialsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_optimal_trials( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -def test_list_optimal_trials_flattened_error(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_optimal_trials( - vizier_service.ListOptimalTrialsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_optimal_trials_flattened_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_optimal_trials), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = vizier_service.ListOptimalTrialsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListOptimalTrialsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_optimal_trials( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
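-    # (Flattened keyword arguments are merged into a ListOptimalTrialsRequest
-    # before the RPC is issued.)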
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].parent == 'parent_value' - - -@pytest.mark.asyncio -async def test_list_optimal_trials_flattened_error_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_optimal_trials( - vizier_service.ListOptimalTrialsRequest(), - parent='parent_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.VizierServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.VizierServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = VizierServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.VizierServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = VizierServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.VizierServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = VizierServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.VizierServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.VizierServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.VizierServiceGrpcTransport, - transports.VizierServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.VizierServiceGrpcTransport, - ) - -def test_vizier_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.VizierServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_vizier_service_base_transport(): - # Instantiate the base transport. 
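-    # (Patching __init__ lets the abstract base transport be instantiated;
-    # every RPC stub on it should then raise NotImplementedError.)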
- with mock.patch('google.cloud.aiplatform_v1beta1.services.vizier_service.transports.VizierServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.VizierServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'create_study', - 'get_study', - 'list_studies', - 'delete_study', - 'lookup_study', - 'suggest_trials', - 'create_trial', - 'get_trial', - 'list_trials', - 'add_trial_measurement', - 'complete_trial', - 'delete_trial', - 'check_trial_early_stopping_state', - 'stop_trial', - 'list_optimal_trials', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -@requires_google_auth_gte_1_25_0 -def test_vizier_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.vizier_service.transports.VizierServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.VizierServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@requires_google_auth_lt_1_25_0 -def test_vizier_service_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.vizier_service.transports.VizierServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.VizierServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - ), - quota_project_id="octopus", - ) - - -def test_vizier_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.vizier_service.transports.VizierServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.VizierServiceTransport() - adc.assert_called_once() - - -@requires_google_auth_gte_1_25_0 -def test_vizier_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. 
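-    # (ADC = Application Default Credentials, resolved via google.auth.default().)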
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - VizierServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@requires_google_auth_lt_1_25_0 -def test_vizier_service_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - VizierServiceClient() - adc.assert_called_once_with( - scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.VizierServiceGrpcTransport, - transports.VizierServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_gte_1_25_0 -def test_vizier_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.VizierServiceGrpcTransport, - transports.VizierServiceGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_vizier_service_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with(scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.VizierServiceGrpcTransport, grpc_helpers), - (transports.VizierServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_vizier_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
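-    # (Parametrized over the sync and async transports, each paired with the
-    # grpc_helpers module it delegates channel creation to.)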
-    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
-        grpc_helpers, "create_channel", autospec=True
-    ) as create_channel:
-        creds = ga_credentials.AnonymousCredentials()
-        adc.return_value = (creds, None)
-        transport_class(
-            quota_project_id="octopus",
-            scopes=["1", "2"]
-        )
-
-        create_channel.assert_called_with(
-            "aiplatform.googleapis.com:443",
-            credentials=creds,
-            credentials_file=None,
-            quota_project_id="octopus",
-            default_scopes=(
-                'https://www.googleapis.com/auth/cloud-platform',
-),
-            scopes=["1", "2"],
-            default_host="aiplatform.googleapis.com",
-            ssl_credentials=None,
-            options=[
-                ("grpc.max_send_message_length", -1),
-                ("grpc.max_receive_message_length", -1),
-            ],
-        )
-
-
-@pytest.mark.parametrize("transport_class", [transports.VizierServiceGrpcTransport, transports.VizierServiceGrpcAsyncIOTransport])
-def test_vizier_service_grpc_transport_client_cert_source_for_mtls(
-    transport_class
-):
-    cred = ga_credentials.AnonymousCredentials()
-
-    # Check ssl_channel_credentials is used if provided.
-    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
-        mock_ssl_channel_creds = mock.Mock()
-        transport_class(
-            host="squid.clam.whelk",
-            credentials=cred,
-            ssl_channel_credentials=mock_ssl_channel_creds
-        )
-        mock_create_channel.assert_called_once_with(
-            "squid.clam.whelk:443",
-            credentials=cred,
-            credentials_file=None,
-            scopes=None,
-            ssl_credentials=mock_ssl_channel_creds,
-            quota_project_id=None,
-            options=[
-                ("grpc.max_send_message_length", -1),
-                ("grpc.max_receive_message_length", -1),
-            ],
-        )
-
-    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
-    # is used.
-    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
-        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
-            transport_class(
-                credentials=cred,
-                client_cert_source_for_mtls=client_cert_source_callback
-            )
-            expected_cert, expected_key = client_cert_source_callback()
-            mock_ssl_cred.assert_called_once_with(
-                certificate_chain=expected_cert,
-                private_key=expected_key
-            )
-
-
-def test_vizier_service_host_no_port():
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'),
-    )
-    assert client.transport._host == 'aiplatform.googleapis.com:443'
-
-
-def test_vizier_service_host_with_port():
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'),
-    )
-    assert client.transport._host == 'aiplatform.googleapis.com:8000'
-
-def test_vizier_service_grpc_transport_channel():
-    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
-
-    # Check that channel is used if provided.
-    transport = transports.VizierServiceGrpcTransport(
-        host="squid.clam.whelk",
-        channel=channel,
-    )
-    assert transport.grpc_channel == channel
-    assert transport._host == "squid.clam.whelk:443"
-    assert transport._ssl_channel_credentials is None
-
-
-def test_vizier_service_grpc_asyncio_transport_channel():
-    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
-
-    # Check that channel is used if provided.
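-    # (When an explicit channel is supplied, the transport adopts it directly
-    # and records no ssl_channel_credentials.)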
-    transport = transports.VizierServiceGrpcAsyncIOTransport(
-        host="squid.clam.whelk",
-        channel=channel,
-    )
-    assert transport.grpc_channel == channel
-    assert transport._host == "squid.clam.whelk:443"
-    assert transport._ssl_channel_credentials is None
-
-
-# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
-# removed from grpc/grpc_asyncio transport constructor.
-@pytest.mark.parametrize("transport_class", [transports.VizierServiceGrpcTransport, transports.VizierServiceGrpcAsyncIOTransport])
-def test_vizier_service_transport_channel_mtls_with_client_cert_source(
-    transport_class
-):
-    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
-        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
-            mock_ssl_cred = mock.Mock()
-            grpc_ssl_channel_cred.return_value = mock_ssl_cred
-
-            mock_grpc_channel = mock.Mock()
-            grpc_create_channel.return_value = mock_grpc_channel
-
-            cred = ga_credentials.AnonymousCredentials()
-            with pytest.warns(DeprecationWarning):
-                with mock.patch.object(google.auth, 'default') as adc:
-                    adc.return_value = (cred, None)
-                    transport = transport_class(
-                        host="squid.clam.whelk",
-                        api_mtls_endpoint="mtls.squid.clam.whelk",
-                        client_cert_source=client_cert_source_callback,
-                    )
-                    adc.assert_called_once()
-
-            grpc_ssl_channel_cred.assert_called_once_with(
-                certificate_chain=b"cert bytes", private_key=b"key bytes"
-            )
-            grpc_create_channel.assert_called_once_with(
-                "mtls.squid.clam.whelk:443",
-                credentials=cred,
-                credentials_file=None,
-                scopes=None,
-                ssl_credentials=mock_ssl_cred,
-                quota_project_id=None,
-                options=[
-                    ("grpc.max_send_message_length", -1),
-                    ("grpc.max_receive_message_length", -1),
-                ],
-            )
-            assert transport.grpc_channel == mock_grpc_channel
-            assert transport._ssl_channel_credentials == mock_ssl_cred
-
-
-# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
-# removed from grpc/grpc_asyncio transport constructor.
-@pytest.mark.parametrize("transport_class", [transports.VizierServiceGrpcTransport, transports.VizierServiceGrpcAsyncIOTransport])
-def test_vizier_service_transport_channel_mtls_with_adc(
-    transport_class
-):
-    mock_ssl_cred = mock.Mock()
-    with mock.patch.multiple(
-        "google.auth.transport.grpc.SslCredentials",
-        __init__=mock.Mock(return_value=None),
-        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
-    ):
-        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
-            mock_grpc_channel = mock.Mock()
-            grpc_create_channel.return_value = mock_grpc_channel
-            mock_cred = mock.Mock()
-
-            with pytest.warns(DeprecationWarning):
-                transport = transport_class(
-                    host="squid.clam.whelk",
-                    credentials=mock_cred,
-                    api_mtls_endpoint="mtls.squid.clam.whelk",
-                    client_cert_source=None,
-                )
-
-            grpc_create_channel.assert_called_once_with(
-                "mtls.squid.clam.whelk:443",
-                credentials=mock_cred,
-                credentials_file=None,
-                scopes=None,
-                ssl_credentials=mock_ssl_cred,
-                quota_project_id=None,
-                options=[
-                    ("grpc.max_send_message_length", -1),
-                    ("grpc.max_receive_message_length", -1),
-                ],
-            )
-            assert transport.grpc_channel == mock_grpc_channel
-
-
-def test_vizier_service_grpc_lro_client():
-    client = VizierServiceClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-    transport = client.transport
-
-    # Ensure that we have an api-core operations client.
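-    # (The operations client is created lazily and cached; the identity check
-    # below relies on that.)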
-    assert isinstance(
-        transport.operations_client,
-        operations_v1.OperationsClient,
-    )
-
-    # Ensure that subsequent calls to the property send the exact same object.
-    assert transport.operations_client is transport.operations_client
-
-
-def test_vizier_service_grpc_lro_async_client():
-    client = VizierServiceAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc_asyncio',
-    )
-    transport = client.transport
-
-    # Ensure that we have an api-core operations client.
-    assert isinstance(
-        transport.operations_client,
-        operations_v1.OperationsAsyncClient,
-    )
-
-    # Ensure that subsequent calls to the property send the exact same object.
-    assert transport.operations_client is transport.operations_client
-
-
-def test_custom_job_path():
-    project = "squid"
-    location = "clam"
-    custom_job = "whelk"
-    expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, )
-    actual = VizierServiceClient.custom_job_path(project, location, custom_job)
-    assert expected == actual
-
-
-def test_parse_custom_job_path():
-    expected = {
-        "project": "octopus",
-        "location": "oyster",
-        "custom_job": "nudibranch",
-    }
-    path = VizierServiceClient.custom_job_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = VizierServiceClient.parse_custom_job_path(path)
-    assert expected == actual
-
-def test_study_path():
-    project = "cuttlefish"
-    location = "mussel"
-    study = "winkle"
-    expected = "projects/{project}/locations/{location}/studies/{study}".format(project=project, location=location, study=study, )
-    actual = VizierServiceClient.study_path(project, location, study)
-    assert expected == actual
-
-
-def test_parse_study_path():
-    expected = {
-        "project": "nautilus",
-        "location": "scallop",
-        "study": "abalone",
-    }
-    path = VizierServiceClient.study_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = VizierServiceClient.parse_study_path(path)
-    assert expected == actual
-
-def test_trial_path():
-    project = "squid"
-    location = "clam"
-    study = "whelk"
-    trial = "octopus"
-    expected = "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, )
-    actual = VizierServiceClient.trial_path(project, location, study, trial)
-    assert expected == actual
-
-
-def test_parse_trial_path():
-    expected = {
-        "project": "oyster",
-        "location": "nudibranch",
-        "study": "cuttlefish",
-        "trial": "mussel",
-    }
-    path = VizierServiceClient.trial_path(**expected)
-
-    # Check that the path construction is reversible.
-    actual = VizierServiceClient.parse_trial_path(path)
-    assert expected == actual
-
-def test_common_billing_account_path():
-    billing_account = "winkle"
-    expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
-    actual = VizierServiceClient.common_billing_account_path(billing_account)
-    assert expected == actual
-
-
-def test_parse_common_billing_account_path():
-    expected = {
-        "billing_account": "nautilus",
-    }
-    path = VizierServiceClient.common_billing_account_path(**expected)
-
-    # Check that the path construction is reversible.
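-    # (Every *_path helper has a matching parse_* inverse; these tests
-    # round-trip each resource name.)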
- actual = VizierServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "scallop" - expected = "folders/{folder}".format(folder=folder, ) - actual = VizierServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "abalone", - } - path = VizierServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = VizierServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "squid" - expected = "organizations/{organization}".format(organization=organization, ) - actual = VizierServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "clam", - } - path = VizierServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = VizierServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "whelk" - expected = "projects/{project}".format(project=project, ) - actual = VizierServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "octopus", - } - path = VizierServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = VizierServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "oyster" - location = "nudibranch" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = VizierServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "cuttlefish", - "location": "mussel", - } - path = VizierServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. - actual = VizierServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.VizierServiceTransport, '_prep_wrapped_messages') as prep: - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.VizierServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = VizierServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/definition_v1beta1/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/definition_v1beta1/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/definition_v1beta1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/instance_v1beta1/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/instance_v1beta1/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/instance_v1beta1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/params_v1beta1/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/params_v1beta1/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/params_v1beta1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/prediction_v1beta1/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/prediction_v1beta1/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/prediction_v1beta1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py index c85d4a96cd..aac9e2bc91 100644 --- a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py @@ -28,8 +28,8 @@ class ImageClassificationPredictionInstance(proto.Message): r"""Prediction input format for Image Classification. Attributes: content (str): - The image bytes or GCS URI to make the - prediction on. + The image bytes or Cloud Storage URI to make + the prediction on. mime_type (str): The MIME type of the content of the image. Only the images in below listed MIME types are diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py index d9895e3372..80a5d797a1 100644 --- a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py @@ -28,8 +28,8 @@ class ImageObjectDetectionPredictionInstance(proto.Message): r"""Prediction input format for Image Object Detection. Attributes: content (str): - The image bytes or GCS URI to make the - prediction on. + The image bytes or Cloud Storage URI to make + the prediction on. mime_type (str): The MIME type of the content of the image. Only the images in below listed MIME types are diff --git a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py index 0b1304d1c3..8f47b27080 100644 --- a/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py +++ b/schema/predict/instance/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py @@ -38,7 +38,7 @@ class TextExtractionPredictionInstance(proto.Message): If a key is provided, the batch prediction result will by mapped to this key. If omitted, then the batch prediction result will contain - the entire input instance. AI Platform will not + the entire input instance. Vertex AI will not check if keys in the request are duplicates, so it is up to the caller to ensure the keys are unique. diff --git a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py index e0cbd81db9..ff7bbd1b86 100644 --- a/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py +++ b/schema/predict/params/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py @@ -39,16 +39,16 @@ class VideoClassificationPredictionParams(proto.Message): 10,000. segment_classification (bool): Set to true to request segment-level - classification. AI Platform returns labels and + classification. 
Vertex AI returns labels and their confidence scores for the entire time segment of the video that user specified in the input instance. Default value is true shot_classification (bool): Set to true to request shot-level - classification. AI Platform determines the + classification. Vertex AI determines the boundaries for each camera shot in the entire time segment of the video that user specified in - the input instance. AI Platform then returns + the input instance. Vertex AI then returns labels and their confidence scores for each detected shot, along with the start and end time of the shot. @@ -59,15 +59,14 @@ class VideoClassificationPredictionParams(proto.Message): Default value is false one_sec_interval_classification (bool): Set to true to request classification for a - video at one-second intervals. AI Platform - returns labels and their confidence scores for - each second of the entire time segment of the - video that user specified in the input WARNING: - Model evaluation is not done for this - classification type, the quality of it depends - on the training data, but there are no metrics - provided to describe that quality. Default value - is false + video at one-second intervals. Vertex AI returns + labels and their confidence scores for each + second of the entire time segment of the video + that user specified in the input WARNING: Model + evaluation is not done for this classification + type, the quality of it depends on the training + data, but there are no metrics provided to + describe that quality. Default value is false """ confidence_threshold = proto.Field( diff --git a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py index 858691c322..ecd16c7250 100644 --- a/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py +++ b/schema/predict/prediction/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py @@ -29,8 +29,7 @@ class ClassificationPredictionResult(proto.Message): Attributes: ids (Sequence[int]): The resource IDs of the AnnotationSpecs that - had been identified, ordered by the confidence - score descendingly. + had been identified. display_names (Sequence[str]): The display names of the AnnotationSpecs that had been identified, order matches the IDs. diff --git a/tests/unit/gapic/aiplatform_v1/test_job_service.py b/tests/unit/gapic/aiplatform_v1/test_job_service.py index 05b3e83e9d..9cf14fcb76 100644 --- a/tests/unit/gapic/aiplatform_v1/test_job_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_job_service.py @@ -6227,11 +6227,33 @@ def test_parse_model_path(): assert expected == actual -def test_trial_path(): +def test_network_path(): project = "squid" - location = "clam" - study = "whelk" - trial = "octopus" + network = "clam" + expected = "projects/{project}/global/networks/{network}".format( + project=project, network=network, + ) + actual = JobServiceClient.network_path(project, network) + assert expected == actual + + +def test_parse_network_path(): + expected = { + "project": "whelk", + "network": "octopus", + } + path = JobServiceClient.network_path(**expected) + + # Check that the path construction is reversible. 
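+    # (network_path uses the global collection pattern
+    # projects/{project}/global/networks/{network}, with no location segment.)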
+ actual = JobServiceClient.parse_network_path(path) + assert expected == actual + + +def test_trial_path(): + project = "oyster" + location = "nudibranch" + study = "cuttlefish" + trial = "mussel" expected = "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format( project=project, location=location, study=study, trial=trial, ) @@ -6241,10 +6263,10 @@ def test_trial_path(): def test_parse_trial_path(): expected = { - "project": "oyster", - "location": "nudibranch", - "study": "cuttlefish", - "trial": "mussel", + "project": "winkle", + "location": "nautilus", + "study": "scallop", + "trial": "abalone", } path = JobServiceClient.trial_path(**expected) @@ -6254,7 +6276,7 @@ def test_parse_trial_path(): def test_common_billing_account_path(): - billing_account = "winkle" + billing_account = "squid" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -6264,7 +6286,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "nautilus", + "billing_account": "clam", } path = JobServiceClient.common_billing_account_path(**expected) @@ -6274,7 +6296,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "scallop" + folder = "whelk" expected = "folders/{folder}".format(folder=folder,) actual = JobServiceClient.common_folder_path(folder) assert expected == actual @@ -6282,7 +6304,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "abalone", + "folder": "octopus", } path = JobServiceClient.common_folder_path(**expected) @@ -6292,7 +6314,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "squid" + organization = "oyster" expected = "organizations/{organization}".format(organization=organization,) actual = JobServiceClient.common_organization_path(organization) assert expected == actual @@ -6300,7 +6322,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "clam", + "organization": "nudibranch", } path = JobServiceClient.common_organization_path(**expected) @@ -6310,7 +6332,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "whelk" + project = "cuttlefish" expected = "projects/{project}".format(project=project,) actual = JobServiceClient.common_project_path(project) assert expected == actual @@ -6318,7 +6340,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "octopus", + "project": "mussel", } path = JobServiceClient.common_project_path(**expected) @@ -6328,8 +6350,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "oyster" - location = "nudibranch" + project = "winkle" + location = "nautilus" expected = "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -6339,8 +6361,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "cuttlefish", - "location": "mussel", + "project": "scallop", + "location": "abalone", } path = JobServiceClient.common_location_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1/test_migration_service.py index 1f782b88aa..c60e86af14 100644 --- a/tests/unit/gapic/aiplatform_v1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_migration_service.py @@ -1700,18 +1700,20 @@ 
def test_parse_dataset_path(): def test_dataset_path(): project = "squid" - dataset = "clam" - expected = "projects/{project}/datasets/{dataset}".format( - project=project, dataset=dataset, + location = "clam" + dataset = "whelk" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "whelk", - "dataset": "octopus", + "project": "octopus", + "location": "oyster", + "dataset": "nudibranch", } path = MigrationServiceClient.dataset_path(**expected) @@ -1721,20 +1723,18 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "oyster" - location = "nudibranch" - dataset = "cuttlefish" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( - project=project, location=location, dataset=dataset, + project = "cuttlefish" + dataset = "mussel" + expected = "projects/{project}/datasets/{dataset}".format( + project=project, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "mussel", - "location": "winkle", + "project": "winkle", "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py b/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py index 1178cefbb6..4b6a976023 100644 --- a/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py @@ -43,16 +43,22 @@ from google.cloud.aiplatform_v1.services.pipeline_service.transports.base import ( _GOOGLE_AUTH_VERSION, ) +from google.cloud.aiplatform_v1.types import artifact +from google.cloud.aiplatform_v1.types import context from google.cloud.aiplatform_v1.types import deployed_model_ref from google.cloud.aiplatform_v1.types import encryption_spec from google.cloud.aiplatform_v1.types import env_var +from google.cloud.aiplatform_v1.types import execution from google.cloud.aiplatform_v1.types import io from google.cloud.aiplatform_v1.types import model from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import pipeline_job +from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job from google.cloud.aiplatform_v1.types import pipeline_service from google.cloud.aiplatform_v1.types import pipeline_state from google.cloud.aiplatform_v1.types import training_pipeline from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline +from google.cloud.aiplatform_v1.types import value from google.longrunning import operations_pb2 from google.oauth2 import service_account from google.protobuf import any_pb2 # type: ignore @@ -1820,6 +1826,1286 @@ async def test_cancel_training_pipeline_flattened_error_async(): ) +def test_create_pipeline_job( + transport: str = "grpc", request_type=pipeline_service.CreatePipelineJobRequest +): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
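+    # (proto-plus fills unset fields with proto3 defaults, so an empty
+    # CreatePipelineJobRequest is still valid on the wire.)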
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_pipeline_job.PipelineJob( + name="name_value", + display_name="display_name_value", + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + service_account="service_account_value", + network="network_value", + ) + response = client.create_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CreatePipelineJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_pipeline_job.PipelineJob) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + assert response.service_account == "service_account_value" + assert response.network == "network_value" + + +def test_create_pipeline_job_from_dict(): + test_create_pipeline_job(request_type=dict) + + +def test_create_pipeline_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), "__call__" + ) as call: + client.create_pipeline_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CreatePipelineJobRequest() + + +@pytest.mark.asyncio +async def test_create_pipeline_job_async( + transport: str = "grpc_asyncio", + request_type=pipeline_service.CreatePipelineJobRequest, +): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_pipeline_job.PipelineJob( + name="name_value", + display_name="display_name_value", + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + service_account="service_account_value", + network="network_value", + ) + ) + response = await client.create_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CreatePipelineJobRequest() + + # Establish that the response is the type that we expect. 
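+    # (gca_pipeline_job aliases the same PipelineJob type; the create tests use
+    # the alias while the get tests use the plain pipeline_job import.)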
+ assert isinstance(response, gca_pipeline_job.PipelineJob) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + assert response.service_account == "service_account_value" + assert response.network == "network_value" + + +@pytest.mark.asyncio +async def test_create_pipeline_job_async_from_dict(): + await test_create_pipeline_job_async(request_type=dict) + + +def test_create_pipeline_job_field_headers(): + client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CreatePipelineJobRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), "__call__" + ) as call: + call.return_value = gca_pipeline_job.PipelineJob() + client.create_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_pipeline_job_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CreatePipelineJobRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_pipeline_job.PipelineJob() + ) + await client.create_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_pipeline_job_flattened(): + client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_pipeline_job.PipelineJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_pipeline_job( + parent="parent_value", + pipeline_job=gca_pipeline_job.PipelineJob(name="name_value"), + pipeline_job_id="pipeline_job_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
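+    # (The flattened parent, pipeline_job, and pipeline_job_id kwargs are
+    # merged into a single CreatePipelineJobRequest.)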
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == "parent_value"
+ assert args[0].pipeline_job == gca_pipeline_job.PipelineJob(name="name_value")
+ assert args[0].pipeline_job_id == "pipeline_job_id_value"
+
+
+def test_create_pipeline_job_flattened_error():
+ client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_pipeline_job(
+ pipeline_service.CreatePipelineJobRequest(),
+ parent="parent_value",
+ pipeline_job=gca_pipeline_job.PipelineJob(name="name_value"),
+ pipeline_job_id="pipeline_job_id_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_create_pipeline_job_flattened_async():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_pipeline_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ gca_pipeline_job.PipelineJob()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.create_pipeline_job(
+ parent="parent_value",
+ pipeline_job=gca_pipeline_job.PipelineJob(name="name_value"),
+ pipeline_job_id="pipeline_job_id_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == "parent_value"
+ assert args[0].pipeline_job == gca_pipeline_job.PipelineJob(name="name_value")
+ assert args[0].pipeline_job_id == "pipeline_job_id_value"
+
+
+@pytest.mark.asyncio
+async def test_create_pipeline_job_flattened_error_async():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.create_pipeline_job(
+ pipeline_service.CreatePipelineJobRequest(),
+ parent="parent_value",
+ pipeline_job=gca_pipeline_job.PipelineJob(name="name_value"),
+ pipeline_job_id="pipeline_job_id_value",
+ )
+
+
+def test_get_pipeline_job(
+ transport: str = "grpc", request_type=pipeline_service.GetPipelineJobRequest
+):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = pipeline_job.PipelineJob(
+ name="name_value",
+ display_name="display_name_value",
+ state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
+ service_account="service_account_value",
+ network="network_value",
+ )
+ response = client.get_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.GetPipelineJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pipeline_job.PipelineJob) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + assert response.service_account == "service_account_value" + assert response.network == "network_value" + + +def test_get_pipeline_job_from_dict(): + test_get_pipeline_job(request_type=dict) + + +def test_get_pipeline_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call: + client.get_pipeline_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.GetPipelineJobRequest() + + +@pytest.mark.asyncio +async def test_get_pipeline_job_async( + transport: str = "grpc_asyncio", request_type=pipeline_service.GetPipelineJobRequest +): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + pipeline_job.PipelineJob( + name="name_value", + display_name="display_name_value", + state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, + service_account="service_account_value", + network="network_value", + ) + ) + response = await client.get_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.GetPipelineJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pipeline_job.PipelineJob) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED + assert response.service_account == "service_account_value" + assert response.network == "network_value" + + +@pytest.mark.asyncio +async def test_get_pipeline_job_async_from_dict(): + await test_get_pipeline_job_async(request_type=dict) + + +def test_get_pipeline_job_field_headers(): + client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.GetPipelineJobRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
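+ # Patching "__call__" on the stub's type intercepts the invocation itself,
+ # which works for both the sync and async transport callables.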
+ with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call:
+ call.return_value = pipeline_job.PipelineJob()
+ client.get_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_pipeline_job_field_headers_async():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = pipeline_service.GetPipelineJobRequest()
+
+ request.name = "name/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ pipeline_job.PipelineJob()
+ )
+ await client.get_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+def test_get_pipeline_job_flattened():
+ client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = pipeline_job.PipelineJob()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_pipeline_job(name="name_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == "name_value"
+
+
+def test_get_pipeline_job_flattened_error():
+ client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_pipeline_job(
+ pipeline_service.GetPipelineJobRequest(), name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_pipeline_job_flattened_async():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_pipeline_job), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ pipeline_job.PipelineJob()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_pipeline_job(name="name_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_pipeline_job_flattened_error_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_pipeline_job( + pipeline_service.GetPipelineJobRequest(), name="name_value", + ) + + +def test_list_pipeline_jobs( + transport: str = "grpc", request_type=pipeline_service.ListPipelineJobsRequest +): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_service.ListPipelineJobsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_pipeline_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.ListPipelineJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPipelineJobsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_pipeline_jobs_from_dict(): + test_list_pipeline_jobs(request_type=dict) + + +def test_list_pipeline_jobs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), "__call__" + ) as call: + client.list_pipeline_jobs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.ListPipelineJobsRequest() + + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_async( + transport: str = "grpc_asyncio", + request_type=pipeline_service.ListPipelineJobsRequest, +): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + pipeline_service.ListPipelineJobsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_pipeline_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.ListPipelineJobsRequest() + + # Establish that the response is the type that we expect. 
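+ # List results are returned wrapped in a pager rather than as the raw response.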
+ assert isinstance(response, pagers.ListPipelineJobsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_async_from_dict(): + await test_list_pipeline_jobs_async(request_type=dict) + + +def test_list_pipeline_jobs_field_headers(): + client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.ListPipelineJobsRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), "__call__" + ) as call: + call.return_value = pipeline_service.ListPipelineJobsResponse() + client.list_pipeline_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_pipeline_jobs_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.ListPipelineJobsRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + pipeline_service.ListPipelineJobsResponse() + ) + await client.list_pipeline_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_pipeline_jobs_flattened(): + client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_pipeline_jobs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = pipeline_service.ListPipelineJobsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_pipeline_jobs(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + + +def test_list_pipeline_jobs_flattened_error(): + client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
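+ # The ValueError is raised client-side, before any RPC is attempted.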
+ with pytest.raises(ValueError):
+ client.list_pipeline_jobs(
+ pipeline_service.ListPipelineJobsRequest(), parent="parent_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_flattened_async():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ pipeline_service.ListPipelineJobsResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_pipeline_jobs(parent="parent_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == "parent_value"
+
+
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_flattened_error_async():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_pipeline_jobs(
+ pipeline_service.ListPipelineJobsRequest(), parent="parent_value",
+ )
+
+
+def test_list_pipeline_jobs_pager():
+ client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs), "__call__"
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ ],
+ next_page_token="abc",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[], next_page_token="def",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[pipeline_job.PipelineJob(),], next_page_token="ghi",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[pipeline_job.PipelineJob(), pipeline_job.PipelineJob(),],
+ ),
+ RuntimeError,
+ )
+
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+ )
+ pager = client.list_pipeline_jobs(request={})
+
+ assert pager._metadata == metadata
+
+ results = [i for i in pager]
+ assert len(results) == 6
+ assert all(isinstance(i, pipeline_job.PipelineJob) for i in results)
+
+
+def test_list_pipeline_jobs_pages():
+ client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs), "__call__"
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ ],
+ next_page_token="abc",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[], next_page_token="def",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[pipeline_job.PipelineJob(),], next_page_token="ghi",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[pipeline_job.PipelineJob(), pipeline_job.PipelineJob(),],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_pipeline_jobs(request={}).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_async_pager():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ pipeline_job.PipelineJob(),
+ ],
+ next_page_token="abc",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[], next_page_token="def",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[pipeline_job.PipelineJob(),], next_page_token="ghi",
+ ),
+ pipeline_service.ListPipelineJobsResponse(
+ pipeline_jobs=[pipeline_job.PipelineJob(), pipeline_job.PipelineJob(),],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_pipeline_jobs(request={},)
+ assert async_pager.next_page_token == "abc"
+ responses = []
+ async for response in async_pager:
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, pipeline_job.PipelineJob) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_pipeline_jobs_async_pages():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_pipeline_jobs),
+ "__call__",
+ new_callable=mock.AsyncMock,
+ ) as call:
+ # Set the response to a series of pages.
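+ # Each side_effect entry is consumed by one page fetch; the trailing
+ # RuntimeError fails the test if a page past the last one is requested.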
+ call.side_effect = ( + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[ + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + pipeline_job.PipelineJob(), + ], + next_page_token="abc", + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[], next_page_token="def", + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[pipeline_job.PipelineJob(),], next_page_token="ghi", + ), + pipeline_service.ListPipelineJobsResponse( + pipeline_jobs=[pipeline_job.PipelineJob(), pipeline_job.PipelineJob(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_pipeline_jobs(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_delete_pipeline_job( + transport: str = "grpc", request_type=pipeline_service.DeletePipelineJobRequest +): + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.delete_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeletePipelineJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_pipeline_job_from_dict(): + test_delete_pipeline_job(request_type=dict) + + +def test_delete_pipeline_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PipelineServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), "__call__" + ) as call: + client.delete_pipeline_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeletePipelineJobRequest() + + +@pytest.mark.asyncio +async def test_delete_pipeline_job_async( + transport: str = "grpc_asyncio", + request_type=pipeline_service.DeletePipelineJobRequest, +): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. 
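+ # (A truthy call count suffices here; the sync variant asserts exactly one call.)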
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.DeletePipelineJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_pipeline_job_async_from_dict(): + await test_delete_pipeline_job_async(request_type=dict) + + +def test_delete_pipeline_job_field_headers(): + client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.DeletePipelineJobRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_pipeline_job_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.DeletePipelineJobRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.delete_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_pipeline_job_flattened(): + client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_pipeline_job(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +def test_delete_pipeline_job_flattened_error(): + client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError):
+ client.delete_pipeline_job(
+ pipeline_service.DeletePipelineJobRequest(), name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_delete_pipeline_job_flattened_async():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_pipeline_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_pipeline_job(name="name_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == "name_value"
+
+
+@pytest.mark.asyncio
+async def test_delete_pipeline_job_flattened_error_async():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.delete_pipeline_job(
+ pipeline_service.DeletePipelineJobRequest(), name="name_value",
+ )
+
+
+def test_cancel_pipeline_job(
+ transport: str = "grpc", request_type=pipeline_service.CancelPipelineJobRequest
+):
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_pipeline_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.cancel_pipeline_job(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == pipeline_service.CancelPipelineJobRequest()
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_cancel_pipeline_job_from_dict():
+ test_cancel_pipeline_job(request_type=dict)
+
+
+def test_cancel_pipeline_job_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = PipelineServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.cancel_pipeline_job), "__call__" + ) as call: + client.cancel_pipeline_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CancelPipelineJobRequest() + + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_async( + transport: str = "grpc_asyncio", + request_type=pipeline_service.CancelPipelineJobRequest, +): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == pipeline_service.CancelPipelineJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_async_from_dict(): + await test_cancel_pipeline_job_async(request_type=dict) + + +def test_cancel_pipeline_job_field_headers(): + client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CancelPipelineJobRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), "__call__" + ) as call: + call.return_value = None + client.cancel_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_cancel_pipeline_job_field_headers_async(): + client = PipelineServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = pipeline_service.CancelPipelineJobRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_pipeline_job), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_pipeline_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_cancel_pipeline_job_flattened(): + client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
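+ # CancelPipelineJob returns google.protobuf.Empty on the wire, so the
+ # client method surfaces None.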
+ with mock.patch.object(
+ type(client.transport.cancel_pipeline_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.cancel_pipeline_job(name="name_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == "name_value"
+
+
+def test_cancel_pipeline_job_flattened_error():
+ client = PipelineServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.cancel_pipeline_job(
+ pipeline_service.CancelPipelineJobRequest(), name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_cancel_pipeline_job_flattened_async():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.cancel_pipeline_job), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.cancel_pipeline_job(name="name_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == "name_value"
+
+
+@pytest.mark.asyncio
+async def test_cancel_pipeline_job_flattened_error_async():
+ client = PipelineServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.cancel_pipeline_job(
+ pipeline_service.CancelPipelineJobRequest(), name="name_value",
+ )
+
+
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.PipelineServiceGrpcTransport( @@ -1922,6 +3208,11 @@ def test_pipeline_service_base_transport(): "list_training_pipelines", "delete_training_pipeline", "cancel_training_pipeline", + "create_pipeline_job", + "get_pipeline_job", + "list_pipeline_jobs", + "delete_pipeline_job", + "cancel_pipeline_job", ) for method in methods: with pytest.raises(NotImplementedError): @@ -2301,10 +3592,96 @@ def test_pipeline_service_grpc_lro_async_client(): assert transport.operations_client is transport.operations_client -def test_endpoint_path(): +def test_artifact_path(): project = "squid" location = "clam" - endpoint = "whelk" + metadata_store = "whelk" + artifact = "octopus" + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format( + project=project, + location=location, + metadata_store=metadata_store, + artifact=artifact, + ) + actual = PipelineServiceClient.artifact_path( + project, location, metadata_store, artifact + ) + assert expected == actual + + +def test_parse_artifact_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "metadata_store": "cuttlefish", + "artifact": "mussel", + } + path = PipelineServiceClient.artifact_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_artifact_path(path) + assert expected == actual + + +def test_context_path(): + project = "winkle" + location = "nautilus" + metadata_store = "scallop" + context = "abalone" + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format( + project=project, + location=location, + metadata_store=metadata_store, + context=context, + ) + actual = PipelineServiceClient.context_path( + project, location, metadata_store, context + ) + assert expected == actual + + +def test_parse_context_path(): + expected = { + "project": "squid", + "location": "clam", + "metadata_store": "whelk", + "context": "octopus", + } + path = PipelineServiceClient.context_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_context_path(path) + assert expected == actual + + +def test_custom_job_path(): + project = "oyster" + location = "nudibranch" + custom_job = "cuttlefish" + expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format( + project=project, location=location, custom_job=custom_job, + ) + actual = PipelineServiceClient.custom_job_path(project, location, custom_job) + assert expected == actual + + +def test_parse_custom_job_path(): + expected = { + "project": "mussel", + "location": "winkle", + "custom_job": "nautilus", + } + path = PipelineServiceClient.custom_job_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PipelineServiceClient.parse_custom_job_path(path) + assert expected == actual + + +def test_endpoint_path(): + project = "scallop" + location = "abalone" + endpoint = "squid" expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format( project=project, location=location, endpoint=endpoint, ) @@ -2314,9 +3691,9 @@ def test_endpoint_path(): def test_parse_endpoint_path(): expected = { - "project": "octopus", - "location": "oyster", - "endpoint": "nudibranch", + "project": "clam", + "location": "whelk", + "endpoint": "octopus", } path = PipelineServiceClient.endpoint_path(**expected) @@ -2325,10 +3702,41 @@ def test_parse_endpoint_path(): assert expected == actual +def test_execution_path(): + project = "oyster" + location = "nudibranch" + metadata_store = "cuttlefish" + execution = "mussel" + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format( + project=project, + location=location, + metadata_store=metadata_store, + execution=execution, + ) + actual = PipelineServiceClient.execution_path( + project, location, metadata_store, execution + ) + assert expected == actual + + +def test_parse_execution_path(): + expected = { + "project": "winkle", + "location": "nautilus", + "metadata_store": "scallop", + "execution": "abalone", + } + path = PipelineServiceClient.execution_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_execution_path(path) + assert expected == actual + + def test_model_path(): - project = "cuttlefish" - location = "mussel" - model = "winkle" + project = "squid" + location = "clam" + model = "whelk" expected = "projects/{project}/locations/{location}/models/{model}".format( project=project, location=location, model=model, ) @@ -2338,9 +3746,9 @@ def test_model_path(): def test_parse_model_path(): expected = { - "project": "nautilus", - "location": "scallop", - "model": "abalone", + "project": "octopus", + "location": "oyster", + "model": "nudibranch", } path = PipelineServiceClient.model_path(**expected) @@ -2349,10 +3757,56 @@ def test_parse_model_path(): assert expected == actual +def test_network_path(): + project = "cuttlefish" + network = "mussel" + expected = "projects/{project}/global/networks/{network}".format( + project=project, network=network, + ) + actual = PipelineServiceClient.network_path(project, network) + assert expected == actual + + +def test_parse_network_path(): + expected = { + "project": "winkle", + "network": "nautilus", + } + path = PipelineServiceClient.network_path(**expected) + + # Check that the path construction is reversible. + actual = PipelineServiceClient.parse_network_path(path) + assert expected == actual + + +def test_pipeline_job_path(): + project = "scallop" + location = "abalone" + pipeline_job = "squid" + expected = "projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}".format( + project=project, location=location, pipeline_job=pipeline_job, + ) + actual = PipelineServiceClient.pipeline_job_path(project, location, pipeline_job) + assert expected == actual + + +def test_parse_pipeline_job_path(): + expected = { + "project": "clam", + "location": "whelk", + "pipeline_job": "octopus", + } + path = PipelineServiceClient.pipeline_job_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PipelineServiceClient.parse_pipeline_job_path(path) + assert expected == actual + + def test_training_pipeline_path(): - project = "squid" - location = "clam" - training_pipeline = "whelk" + project = "oyster" + location = "nudibranch" + training_pipeline = "cuttlefish" expected = "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format( project=project, location=location, training_pipeline=training_pipeline, ) @@ -2364,9 +3818,9 @@ def test_training_pipeline_path(): def test_parse_training_pipeline_path(): expected = { - "project": "octopus", - "location": "oyster", - "training_pipeline": "nudibranch", + "project": "mussel", + "location": "winkle", + "training_pipeline": "nautilus", } path = PipelineServiceClient.training_pipeline_path(**expected) @@ -2376,7 +3830,7 @@ def test_parse_training_pipeline_path(): def test_common_billing_account_path(): - billing_account = "cuttlefish" + billing_account = "scallop" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -2386,7 +3840,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "mussel", + "billing_account": "abalone", } path = PipelineServiceClient.common_billing_account_path(**expected) @@ -2396,7 +3850,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "winkle" + folder = "squid" expected = "folders/{folder}".format(folder=folder,) actual = PipelineServiceClient.common_folder_path(folder) assert expected == actual @@ -2404,7 +3858,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "nautilus", + "folder": "clam", } path = PipelineServiceClient.common_folder_path(**expected) @@ -2414,7 +3868,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "scallop" + organization = "whelk" expected = "organizations/{organization}".format(organization=organization,) actual = PipelineServiceClient.common_organization_path(organization) assert expected == actual @@ -2422,7 +3876,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "abalone", + "organization": "octopus", } path = PipelineServiceClient.common_organization_path(**expected) @@ -2432,7 +3886,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "squid" + project = "oyster" expected = "projects/{project}".format(project=project,) actual = PipelineServiceClient.common_project_path(project) assert expected == actual @@ -2440,7 +3894,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "clam", + "project": "nudibranch", } path = PipelineServiceClient.common_project_path(**expected) @@ -2450,8 +3904,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "whelk" - location = "octopus" + project = "cuttlefish" + location = "mussel" expected = "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -2461,8 +3915,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "oyster", - "location": "nudibranch", + "project": "winkle", + "location": "nautilus", } path = PipelineServiceClient.common_location_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1/test_prediction_service.py b/tests/unit/gapic/aiplatform_v1/test_prediction_service.py index 
188abef4d6..2652b0ddee 100644 --- a/tests/unit/gapic/aiplatform_v1/test_prediction_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_prediction_service.py @@ -651,8 +651,8 @@ def test_predict_flattened_error(): client.predict( prediction_service.PredictRequest(), endpoint="endpoint_value", - instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), + instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], ) @@ -668,8 +668,8 @@ async def test_predict_flattened_error_async(): await client.predict( prediction_service.PredictRequest(), endpoint="endpoint_value", - instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), + instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], ) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py index 32141aea5e..9a343540aa 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py @@ -1678,20 +1678,18 @@ def test_parse_annotated_dataset_path(): def test_dataset_path(): project = "cuttlefish" - location = "mussel" - dataset = "winkle" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( - project=project, location=location, dataset=dataset, + dataset = "mussel" + expected = "projects/{project}/datasets/{dataset}".format( + project=project, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "nautilus", - "location": "scallop", - "dataset": "abalone", + "project": "winkle", + "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected) @@ -1701,9 +1699,9 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "squid" - location = "clam" - dataset = "whelk" + project = "scallop" + location = "abalone" + dataset = "squid" expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, location=location, dataset=dataset, ) @@ -1713,9 +1711,9 @@ def test_dataset_path(): def test_parse_dataset_path(): expected = { - "project": "octopus", - "location": "oyster", - "dataset": "nudibranch", + "project": "clam", + "location": "whelk", + "dataset": "octopus", } path = MigrationServiceClient.dataset_path(**expected) @@ -1725,18 +1723,20 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "cuttlefish" - dataset = "mussel" - expected = "projects/{project}/datasets/{dataset}".format( - project=project, dataset=dataset, + project = "oyster" + location = "nudibranch" + dataset = "cuttlefish" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "winkle", + "project": "mussel", + "location": "winkle", "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected)